From 954996e231074dc7429f7be1256a579bedd8344c Mon Sep 17 00:00:00 2001
From: Davanum Srinivas
Date: Fri, 9 Nov 2018 13:49:10 -0500
Subject: [PATCH] Move from glog to klog

- Move from the old github.com/golang/glog to k8s.io/klog
- klog has an explicit InitFlags(), so we call it wherever flags must be
  registered (see the sketch after the patch)
- Update the other repositories we vendor that made the same move from
  glog to klog:
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Entirely remove all references to glog
- Fix some tests by explicitly calling InitFlags in their init() methods

Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
---
 .../fluentd-elasticsearch/es-image/BUILD | 2 +- .../elasticsearch_logging_discovery.go | 18 +- cluster/images/etcd-version-monitor/BUILD | 2 +- .../etcd-version-monitor.go | 8 +- cluster/images/etcd/migrate/BUILD | 2 +- cluster/images/etcd/migrate/data_dir.go | 6 +- cluster/images/etcd/migrate/migrate.go | 22 +- cluster/images/etcd/migrate/migrate_client.go | 8 +- cluster/images/etcd/migrate/migrate_server.go | 12 +- cluster/images/etcd/migrate/migrator.go | 38 +- cluster/images/etcd/migrate/rollback_v2.go | 14 +- cmd/cloud-controller-manager/app/BUILD | 2 +- .../app/controllermanager.go | 32 +- .../app/options/BUILD | 2 +- .../app/options/options.go | 4 +- cmd/controller-manager/app/BUILD | 2 +- cmd/controller-manager/app/helper.go | 4 +- cmd/genswaggertypedocs/BUILD | 2 +- cmd/genswaggertypedocs/swagger_type_docs.go | 6 +- cmd/kube-apiserver/app/BUILD | 2 +- cmd/kube-apiserver/app/aggregator.go | 4 +- cmd/kube-apiserver/app/server.go | 10 +- cmd/kube-controller-manager/app/BUILD | 2 +- .../app/certificates.go | 4 +- .../app/cloudproviders.go | 4 +- .../app/controllermanager.go | 32 +- cmd/kube-controller-manager/app/core.go | 16 +- cmd/kube-controller-manager/app/options/BUILD | 2 +- .../app/options/options.go | 4 +- cmd/kube-controller-manager/app/plugins.go | 6 +- cmd/kube-controller-manager/app/policy.go | 4 +- cmd/kube-proxy/app/BUILD | 2 +- cmd/kube-proxy/app/conntrack.go | 12 +- cmd/kube-proxy/app/server.go | 32 +- cmd/kube-proxy/app/server_others.go | 22 +- cmd/kube-proxy/app/server_windows.go | 12 +- cmd/kube-scheduler/app/BUILD | 2 +- cmd/kube-scheduler/app/options/BUILD | 2 +- cmd/kube-scheduler/app/options/options.go | 4 +- cmd/kube-scheduler/app/server.go | 8 +- cmd/kubeadm/.import-restrictions | 1 - cmd/kubeadm/BUILD | 5 +- cmd/kubeadm/app/BUILD | 2 +- cmd/kubeadm/app/cmd/BUILD | 2 +- cmd/kubeadm/app/cmd/completion.go | 8 +- cmd/kubeadm/app/cmd/config.go | 20 +- cmd/kubeadm/app/cmd/init.go | 14 +- cmd/kubeadm/app/cmd/join.go | 32 +- cmd/kubeadm/app/cmd/phases/BUILD | 2 +- cmd/kubeadm/app/cmd/phases/addons.go | 4 +- cmd/kubeadm/app/cmd/phases/etcd.go | 4 +- cmd/kubeadm/app/cmd/phases/kubelet.go | 6 +- cmd/kubeadm/app/cmd/phases/uploadconfig.go | 8 +- .../app/cmd/phases/waitcontrolplane.go | 4 +- cmd/kubeadm/app/cmd/reset.go | 34 +- cmd/kubeadm/app/cmd/token.go | 20 +- cmd/kubeadm/app/cmd/upgrade/BUILD | 2 +- cmd/kubeadm/app/cmd/upgrade/apply.go | 20 +- cmd/kubeadm/app/cmd/upgrade/diff.go | 6 +- cmd/kubeadm/app/cmd/upgrade/node.go | 4 +- cmd/kubeadm/app/cmd/upgrade/plan.go | 10 +- cmd/kubeadm/app/cmd/version.go | 6 +- cmd/kubeadm/app/kubeadm.go | 2 +- .../phases/bootstraptoken/clusterinfo/BUILD | 2 +- .../bootstraptoken/clusterinfo/clusterinfo.go | 10 +- cmd/kubeadm/app/phases/certs/BUILD | 2 +- cmd/kubeadm/app/phases/certs/certs.go | 8 +- cmd/kubeadm/app/phases/controlplane/BUILD | 2 +- .../app/phases/controlplane/manifests.go | 8 +-
cmd/kubeadm/app/phases/etcd/BUILD | 2 +- cmd/kubeadm/app/phases/etcd/local.go | 14 +- cmd/kubeadm/app/phases/kubeconfig/BUILD | 2 +- .../app/phases/kubeconfig/kubeconfig.go | 6 +- cmd/kubeadm/app/phases/kubelet/BUILD | 2 +- cmd/kubeadm/app/phases/kubelet/flags.go | 6 +- cmd/kubeadm/app/phases/selfhosting/BUILD | 2 +- .../app/phases/selfhosting/selfhosting.go | 6 +- cmd/kubeadm/app/preflight/BUILD | 2 +- cmd/kubeadm/app/preflight/checks.go | 40 +- cmd/kubeadm/app/util/BUILD | 2 +- cmd/kubeadm/app/util/config/BUILD | 2 +- cmd/kubeadm/app/util/config/common.go | 8 +- .../app/util/config/initconfiguration.go | 4 +- .../app/util/config/joinconfiguration.go | 4 +- cmd/kubeadm/app/util/etcd/BUILD | 2 +- cmd/kubeadm/app/util/etcd/etcd.go | 6 +- cmd/kubeadm/app/util/system/BUILD | 2 +- .../app/util/system/kernel_validator.go | 4 +- .../app/util/system/package_validator.go | 6 +- cmd/kubeadm/app/util/version.go | 12 +- cmd/kubeadm/kubeadm.go | 2 + cmd/kubelet/app/BUILD | 2 +- cmd/kubelet/app/options/BUILD | 2 +- cmd/kubelet/app/options/globalflags.go | 4 +- cmd/kubelet/app/server.go | 94 +- cmd/kubelet/app/server_linux.go | 10 +- cmd/kubemark/BUILD | 2 +- cmd/kubemark/hollow-node.go | 12 +- pkg/apis/core/validation/BUILD | 2 +- pkg/apis/core/validation/validation.go | 4 +- pkg/auth/authorizer/abac/BUILD | 2 +- pkg/auth/authorizer/abac/abac.go | 4 +- pkg/cloudprovider/providers/aws/BUILD | 2 +- pkg/cloudprovider/providers/aws/aws.go | 206 +- pkg/cloudprovider/providers/aws/aws_fakes.go | 4 +- .../providers/aws/aws_instancegroups.go | 4 +- .../providers/aws/aws_loadbalancer.go | 86 +- pkg/cloudprovider/providers/aws/aws_routes.go | 6 +- pkg/cloudprovider/providers/aws/instances.go | 16 +- .../providers/aws/log_handler.go | 8 +- pkg/cloudprovider/providers/aws/regions.go | 6 +- .../providers/aws/retry_handler.go | 6 +- pkg/cloudprovider/providers/aws/tags.go | 10 +- pkg/cloudprovider/providers/aws/volumes.go | 6 +- pkg/cloudprovider/providers/azure/BUILD | 2 +- pkg/cloudprovider/providers/azure/auth/BUILD | 2 +- .../providers/azure/auth/azure_auth.go | 12 +- pkg/cloudprovider/providers/azure/azure.go | 14 +- .../providers/azure/azure_backoff.go | 42 +- .../azure/azure_blobDiskController.go | 48 +- .../providers/azure/azure_client.go | 174 +- .../azure/azure_controller_common.go | 10 +- .../azure/azure_controller_standard.go | 20 +- .../providers/azure/azure_controller_vmss.go | 16 +- .../providers/azure/azure_file.go | 6 +- .../providers/azure/azure_instances.go | 18 +- .../providers/azure/azure_loadbalancer.go | 150 +- .../azure/azure_managedDiskController.go | 22 +- .../providers/azure/azure_routes.go | 44 +- .../providers/azure/azure_standard.go | 46 +- .../providers/azure/azure_storage.go | 6 +- .../providers/azure/azure_storageaccount.go | 6 +- .../providers/azure/azure_vmss.go | 124 +- .../providers/azure/azure_vmss_cache.go | 12 +- .../providers/azure/azure_wrap.go | 14 +- .../providers/azure/azure_zones.go | 8 +- pkg/cloudprovider/providers/cloudstack/BUILD | 2 +- .../providers/cloudstack/cloudstack.go | 14 +- .../cloudstack/cloudstack_instances.go | 4 +- .../cloudstack/cloudstack_loadbalancer.go | 42 +- .../providers/cloudstack/metadata.go | 4 +- pkg/cloudprovider/providers/gce/BUILD | 2 +- pkg/cloudprovider/providers/gce/cloud/BUILD | 2 +- .../providers/gce/cloud/filter/BUILD | 2 +- .../providers/gce/cloud/filter/filter.go | 6 +- pkg/cloudprovider/providers/gce/cloud/gen.go | 3450 ++++++++--------- .../providers/gce/cloud/gen/main.go | 160 +- pkg/cloudprovider/providers/gce/cloud/op.go | 20 
+- .../providers/gce/cloud/service.go | 12 +- pkg/cloudprovider/providers/gce/gce.go | 36 +- .../providers/gce/gce_address_manager.go | 28 +- .../providers/gce/gce_addresses.go | 6 +- .../providers/gce/gce_annotations.go | 4 +- .../providers/gce/gce_clusterid.go | 18 +- .../providers/gce/gce_clusters.go | 4 +- pkg/cloudprovider/providers/gce/gce_disks.go | 20 +- .../providers/gce/gce_healthchecks.go | 6 +- .../providers/gce/gce_instances.go | 30 +- .../providers/gce/gce_loadbalancer.go | 18 +- .../gce/gce_loadbalancer_external.go | 144 +- .../gce/gce_loadbalancer_internal.go | 102 +- pkg/cloudprovider/providers/gce/gce_routes.go | 4 +- pkg/cloudprovider/providers/gce/gce_tpu.go | 14 +- pkg/cloudprovider/providers/openstack/BUILD | 2 +- .../providers/openstack/metadata.go | 8 +- .../providers/openstack/openstack.go | 42 +- .../openstack/openstack_instances.go | 12 +- .../openstack/openstack_loadbalancer.go | 70 +- .../providers/openstack/openstack_routes.go | 28 +- .../providers/openstack/openstack_volumes.go | 36 +- pkg/cloudprovider/providers/photon/BUILD | 2 +- pkg/cloudprovider/providers/photon/photon.go | 92 +- pkg/cloudprovider/providers/vsphere/BUILD | 2 +- .../providers/vsphere/credentialmanager.go | 14 +- .../providers/vsphere/nodemanager.go | 48 +- .../providers/vsphere/vclib/BUILD | 2 +- .../providers/vsphere/vclib/connection.go | 34 +- .../providers/vsphere/vclib/datacenter.go | 48 +- .../providers/vsphere/vclib/datastore.go | 8 +- .../vsphere/vclib/diskmanagers/BUILD | 2 +- .../vsphere/vclib/diskmanagers/vdm.go | 10 +- .../vsphere/vclib/diskmanagers/virtualdisk.go | 4 +- .../vsphere/vclib/diskmanagers/vmdm.go | 44 +- .../providers/vsphere/vclib/folder.go | 4 +- .../providers/vsphere/vclib/pbm.go | 16 +- .../providers/vsphere/vclib/utils.go | 6 +- .../providers/vsphere/vclib/virtualmachine.go | 66 +- .../providers/vsphere/vclib/volumeoptions.go | 6 +- .../providers/vsphere/vsphere.go | 172 +- .../providers/vsphere/vsphere_util.go | 88 +- pkg/controller/.import-restrictions | 1 - pkg/controller/BUILD | 2 +- pkg/controller/bootstrap/BUILD | 2 +- pkg/controller/bootstrap/bootstrapsigner.go | 12 +- pkg/controller/bootstrap/tokencleaner.go | 14 +- pkg/controller/bootstrap/util.go | 14 +- pkg/controller/certificates/BUILD | 2 +- .../certificates/certificate_controller.go | 24 +- pkg/controller/certificates/cleaner/BUILD | 2 +- .../certificates/cleaner/cleaner.go | 18 +- .../certificates/rootcacertpublisher/BUILD | 2 +- .../rootcacertpublisher/publisher.go | 8 +- pkg/controller/client_builder.go | 24 +- pkg/controller/cloud/BUILD | 4 +- pkg/controller/cloud/node_controller.go | 58 +- pkg/controller/cloud/node_controller_test.go | 22 +- pkg/controller/cloud/pvlcontroller.go | 16 +- pkg/controller/clusterroleaggregation/BUILD | 2 +- .../clusterroleaggregation_controller.go | 6 +- pkg/controller/controller_ref_manager.go | 8 +- pkg/controller/controller_utils.go | 40 +- pkg/controller/cronjob/BUILD | 2 +- pkg/controller/cronjob/cronjob_controller.go | 48 +- pkg/controller/cronjob/utils.go | 6 +- pkg/controller/daemon/BUILD | 2 +- pkg/controller/daemon/daemon_controller.go | 74 +- pkg/controller/daemon/update.go | 18 +- pkg/controller/deployment/BUILD | 2 +- .../deployment/deployment_controller.go | 40 +- pkg/controller/deployment/progress.go | 6 +- pkg/controller/deployment/rollback.go | 12 +- pkg/controller/deployment/rolling.go | 12 +- pkg/controller/deployment/sync.go | 8 +- pkg/controller/deployment/util/BUILD | 2 +- .../deployment/util/deployment_util.go | 16 +- 
pkg/controller/disruption/BUILD | 2 +- pkg/controller/disruption/disruption.go | 54 +- pkg/controller/endpoint/BUILD | 2 +- .../endpoint/endpoints_controller.go | 30 +- pkg/controller/garbagecollector/BUILD | 2 +- .../garbagecollector/garbagecollector.go | 66 +- .../garbagecollector/graph_builder.go | 36 +- pkg/controller/garbagecollector/operations.go | 4 +- pkg/controller/job/BUILD | 2 +- pkg/controller/job/job_controller.go | 28 +- pkg/controller/namespace/BUILD | 2 +- pkg/controller/namespace/deletion/BUILD | 2 +- .../deletion/namespaced_resources_deleter.go | 48 +- .../namespace/namespace_controller.go | 14 +- pkg/controller/nodeipam/BUILD | 2 +- pkg/controller/nodeipam/ipam/BUILD | 2 +- pkg/controller/nodeipam/ipam/adapter.go | 8 +- .../nodeipam/ipam/cidr_allocator.go | 4 +- pkg/controller/nodeipam/ipam/cidrset/BUILD | 2 +- .../nodeipam/ipam/cidrset/cidr_set_test.go | 8 +- .../nodeipam/ipam/cloud_cidr_allocator.go | 44 +- pkg/controller/nodeipam/ipam/controller.go | 14 +- .../nodeipam/ipam/range_allocator.go | 44 +- pkg/controller/nodeipam/ipam/sync/BUILD | 4 +- pkg/controller/nodeipam/ipam/sync/sync.go | 62 +- .../nodeipam/ipam/sync/sync_test.go | 6 +- .../nodeipam/node_ipam_controller.go | 20 +- pkg/controller/nodelifecycle/BUILD | 2 +- .../node_lifecycle_controller.go | 120 +- pkg/controller/nodelifecycle/scheduler/BUILD | 2 +- .../scheduler/rate_limited_queue.go | 4 +- .../nodelifecycle/scheduler/taint_manager.go | 30 +- .../nodelifecycle/scheduler/timed_workers.go | 8 +- pkg/controller/podautoscaler/BUILD | 2 +- pkg/controller/podautoscaler/horizontal.go | 20 +- pkg/controller/podautoscaler/metrics/BUILD | 2 +- .../metrics/legacy_metrics_client.go | 8 +- .../metrics/rest_metrics_client.go | 4 +- pkg/controller/podgc/BUILD | 2 +- pkg/controller/podgc/gc_controller.go | 24 +- pkg/controller/replicaset/BUILD | 2 +- pkg/controller/replicaset/replica_set.go | 34 +- .../replicaset/replica_set_utils.go | 4 +- pkg/controller/replication/BUILD | 2 +- .../replication/replication_controller.go | 4 +- pkg/controller/resourcequota/BUILD | 2 +- .../resource_quota_controller.go | 24 +- .../resourcequota/resource_quota_monitor.go | 20 +- pkg/controller/route/BUILD | 2 +- pkg/controller/route/route_controller.go | 32 +- pkg/controller/service/BUILD | 2 +- pkg/controller/service/service_controller.go | 40 +- pkg/controller/serviceaccount/BUILD | 4 +- .../serviceaccounts_controller.go | 8 +- .../serviceaccount/tokens_controller.go | 40 +- .../serviceaccount/tokens_controller_test.go | 4 +- pkg/controller/statefulset/BUILD | 2 +- pkg/controller/statefulset/stateful_set.go | 28 +- .../statefulset/stateful_set_control.go | 22 +- pkg/controller/testutil/BUILD | 2 +- pkg/controller/testutil/test_utils.go | 20 +- pkg/controller/ttl/BUILD | 2 +- pkg/controller/ttl/ttl_controller.go | 14 +- pkg/controller/ttlafterfinished/BUILD | 2 +- .../ttlafterfinished_controller.go | 22 +- pkg/controller/util/node/BUILD | 2 +- pkg/controller/util/node/controller_utils.go | 22 +- pkg/controller/volume/attachdetach/BUILD | 2 +- .../attachdetach/attach_detach_controller.go | 42 +- .../volume/attachdetach/cache/BUILD | 2 +- .../cache/actual_state_of_world.go | 22 +- .../volume/attachdetach/metrics/BUILD | 2 +- .../volume/attachdetach/metrics/metrics.go | 8 +- .../volume/attachdetach/populator/BUILD | 2 +- .../desired_state_of_world_populator.go | 14 +- .../volume/attachdetach/reconciler/BUILD | 2 +- .../attachdetach/reconciler/reconciler.go | 48 +- .../volume/attachdetach/statusupdater/BUILD | 2 +- 
.../statusupdater/node_status_updater.go | 10 +- .../volume/attachdetach/testing/BUILD | 2 +- .../attachdetach/testing/testvolumespec.go | 16 +- pkg/controller/volume/attachdetach/util/BUILD | 2 +- .../volume/attachdetach/util/util.go | 22 +- pkg/controller/volume/expand/BUILD | 2 +- pkg/controller/volume/expand/cache/BUILD | 2 +- .../volume/expand/cache/volume_resize_map.go | 12 +- .../volume/expand/expand_controller.go | 14 +- pkg/controller/volume/expand/pvc_populator.go | 8 +- .../volume/expand/sync_volume_resize.go | 10 +- pkg/controller/volume/persistentvolume/BUILD | 4 +- .../volume/persistentvolume/framework_test.go | 60 +- .../volume/persistentvolume/metrics/BUILD | 2 +- .../persistentvolume/metrics/metrics.go | 10 +- .../volume/persistentvolume/pv_controller.go | 256 +- .../persistentvolume/pv_controller_base.go | 84 +- .../persistentvolume/pv_controller_test.go | 6 +- .../scheduler_assume_cache.go | 28 +- .../persistentvolume/scheduler_binder.go | 42 +- .../persistentvolume/scheduler_binder_test.go | 14 +- .../volume/persistentvolume/volume_host.go | 4 +- pkg/controller/volume/pvcprotection/BUILD | 4 +- .../pvc_protection_controller.go | 30 +- .../pvc_protection_controller_test.go | 6 +- pkg/controller/volume/pvprotection/BUILD | 4 +- .../pvprotection/pv_protection_controller.go | 22 +- .../pv_protection_controller_test.go | 6 +- pkg/credentialprovider/BUILD | 2 +- pkg/credentialprovider/aws/BUILD | 2 +- pkg/credentialprovider/aws/aws_credentials.go | 18 +- pkg/credentialprovider/azure/BUILD | 2 +- .../azure/azure_credentials.go | 28 +- pkg/credentialprovider/config.go | 24 +- pkg/credentialprovider/gcp/BUILD | 2 +- pkg/credentialprovider/gcp/metadata.go | 28 +- pkg/credentialprovider/keyring.go | 4 +- pkg/credentialprovider/plugins.go | 8 +- pkg/credentialprovider/provider.go | 6 +- pkg/credentialprovider/rancher/BUILD | 2 +- .../rancher/rancher_registry_credentials.go | 6 +- pkg/kubeapiserver/admission/BUILD | 2 +- pkg/kubeapiserver/admission/config.go | 4 +- pkg/kubeapiserver/options/BUILD | 2 +- pkg/kubeapiserver/options/authentication.go | 8 +- pkg/kubectl/BUILD | 2 +- pkg/kubectl/cmd/annotate/BUILD | 2 +- pkg/kubectl/cmd/annotate/annotate.go | 6 +- pkg/kubectl/cmd/apply/BUILD | 2 +- pkg/kubectl/cmd/apply/apply.go | 4 +- pkg/kubectl/cmd/attach/BUILD | 2 +- pkg/kubectl/cmd/attach/attach.go | 4 +- pkg/kubectl/cmd/auth/BUILD | 2 +- pkg/kubectl/cmd/auth/reconcile.go | 4 +- pkg/kubectl/cmd/autoscale/BUILD | 2 +- pkg/kubectl/cmd/autoscale/autoscale.go | 4 +- pkg/kubectl/cmd/convert/BUILD | 2 +- pkg/kubectl/cmd/convert/convert.go | 4 +- pkg/kubectl/cmd/create/BUILD | 2 +- pkg/kubectl/cmd/create/create.go | 4 +- pkg/kubectl/cmd/delete/BUILD | 2 +- pkg/kubectl/cmd/delete/delete.go | 6 +- pkg/kubectl/cmd/expose/BUILD | 2 +- pkg/kubectl/cmd/expose/expose.go | 4 +- pkg/kubectl/cmd/get/BUILD | 2 +- pkg/kubectl/cmd/get/get.go | 8 +- pkg/kubectl/cmd/label/BUILD | 2 +- pkg/kubectl/cmd/label/label.go | 6 +- pkg/kubectl/cmd/patch/BUILD | 2 +- pkg/kubectl/cmd/patch/patch.go | 6 +- pkg/kubectl/cmd/proxy/BUILD | 2 +- pkg/kubectl/cmd/proxy/proxy.go | 12 +- pkg/kubectl/cmd/replace/BUILD | 2 +- pkg/kubectl/cmd/replace/replace.go | 6 +- pkg/kubectl/cmd/rollingupdate/BUILD | 2 +- .../cmd/rollingupdate/rollingupdate.go | 6 +- pkg/kubectl/cmd/run/BUILD | 2 +- pkg/kubectl/cmd/run/run.go | 4 +- pkg/kubectl/cmd/scale/BUILD | 2 +- pkg/kubectl/cmd/scale/scale.go | 6 +- pkg/kubectl/cmd/set/BUILD | 2 +- pkg/kubectl/cmd/set/set_image.go | 4 +- pkg/kubectl/cmd/set/set_resources.go | 4 +- 
pkg/kubectl/cmd/set/set_selector.go | 4 +- pkg/kubectl/cmd/set/set_serviceaccount.go | 4 +- pkg/kubectl/cmd/taint/BUILD | 2 +- pkg/kubectl/cmd/taint/taint.go | 4 +- pkg/kubectl/cmd/top/BUILD | 2 +- pkg/kubectl/cmd/top/top_pod.go | 6 +- pkg/kubectl/cmd/util/BUILD | 2 +- pkg/kubectl/cmd/util/editor/BUILD | 2 +- pkg/kubectl/cmd/util/editor/editoptions.go | 12 +- pkg/kubectl/cmd/util/editor/editor.go | 4 +- pkg/kubectl/cmd/util/helpers.go | 30 +- pkg/kubectl/proxy/BUILD | 2 +- pkg/kubectl/proxy/proxy_server.go | 12 +- pkg/kubectl/sorter.go | 16 +- pkg/kubectl/util/i18n/BUILD | 2 +- pkg/kubectl/util/i18n/i18n.go | 10 +- pkg/kubectl/util/logs/BUILD | 2 +- pkg/kubectl/util/logs/logs.go | 21 +- pkg/kubelet/BUILD | 2 +- pkg/kubelet/cadvisor/BUILD | 2 +- pkg/kubelet/cadvisor/cadvisor_linux.go | 6 +- pkg/kubelet/certificate/BUILD | 2 +- pkg/kubelet/certificate/bootstrap/BUILD | 2 +- .../certificate/bootstrap/bootstrap.go | 14 +- pkg/kubelet/certificate/transport.go | 12 +- pkg/kubelet/checkpoint/BUILD | 2 +- pkg/kubelet/checkpoint/checkpoint.go | 6 +- pkg/kubelet/cloudresource/BUILD | 2 +- .../cloudresource/cloud_request_manager.go | 10 +- pkg/kubelet/cm/BUILD | 2 +- pkg/kubelet/cm/cgroup_manager_linux.go | 14 +- pkg/kubelet/cm/container_manager_linux.go | 50 +- pkg/kubelet/cm/container_manager_stub.go | 4 +- pkg/kubelet/cm/container_manager_windows.go | 4 +- pkg/kubelet/cm/cpumanager/BUILD | 2 +- pkg/kubelet/cm/cpumanager/cpu_assignment.go | 8 +- pkg/kubelet/cm/cpumanager/cpu_manager.go | 34 +- pkg/kubelet/cm/cpumanager/fake_cpu_manager.go | 10 +- pkg/kubelet/cm/cpumanager/policy_none.go | 4 +- pkg/kubelet/cm/cpumanager/policy_static.go | 18 +- pkg/kubelet/cm/cpumanager/state/BUILD | 2 +- .../cm/cpumanager/state/state_checkpoint.go | 6 +- pkg/kubelet/cm/cpumanager/state/state_file.go | 14 +- pkg/kubelet/cm/cpumanager/state/state_mem.go | 14 +- pkg/kubelet/cm/cpumanager/topology/BUILD | 2 +- .../cm/cpumanager/topology/topology.go | 4 +- pkg/kubelet/cm/cpuset/BUILD | 2 +- pkg/kubelet/cm/cpuset/cpuset.go | 4 +- pkg/kubelet/cm/devicemanager/BUILD | 2 +- pkg/kubelet/cm/devicemanager/endpoint.go | 10 +- pkg/kubelet/cm/devicemanager/manager.go | 58 +- pkg/kubelet/cm/devicemanager/pod_devices.go | 34 +- .../cm/node_container_manager_linux.go | 12 +- pkg/kubelet/cm/pod_container_manager_linux.go | 16 +- pkg/kubelet/cm/qos_container_manager_linux.go | 18 +- pkg/kubelet/config/BUILD | 2 +- pkg/kubelet/config/common.go | 10 +- pkg/kubelet/config/config.go | 22 +- pkg/kubelet/config/file.go | 20 +- pkg/kubelet/config/file_linux.go | 8 +- pkg/kubelet/config/file_unsupported.go | 4 +- pkg/kubelet/config/http.go | 12 +- pkg/kubelet/container/BUILD | 2 +- pkg/kubelet/container/container_gc.go | 4 +- pkg/kubelet/container/helpers.go | 8 +- pkg/kubelet/container/runtime.go | 4 +- pkg/kubelet/dockershim/BUILD | 2 +- pkg/kubelet/dockershim/cm/BUILD | 2 +- .../dockershim/cm/container_manager_linux.go | 12 +- pkg/kubelet/dockershim/docker_container.go | 14 +- pkg/kubelet/dockershim/docker_image.go | 4 +- .../dockershim/docker_image_windows.go | 6 +- pkg/kubelet/dockershim/docker_sandbox.go | 26 +- pkg/kubelet/dockershim/docker_service.go | 30 +- pkg/kubelet/dockershim/docker_streaming.go | 4 +- pkg/kubelet/dockershim/exec.go | 4 +- pkg/kubelet/dockershim/helpers.go | 16 +- pkg/kubelet/dockershim/helpers_unsupported.go | 8 +- pkg/kubelet/dockershim/helpers_windows.go | 4 +- pkg/kubelet/dockershim/libdocker/BUILD | 2 +- pkg/kubelet/dockershim/libdocker/client.go | 8 +- pkg/kubelet/dockershim/libdocker/helpers.go | 18 +- 
.../libdocker/kube_docker_client.go | 12 +- pkg/kubelet/dockershim/network/BUILD | 2 +- pkg/kubelet/dockershim/network/cni/BUILD | 2 +- pkg/kubelet/dockershim/network/cni/cni.go | 38 +- .../dockershim/network/cni/cni_windows.go | 8 +- pkg/kubelet/dockershim/network/hairpin/BUILD | 2 +- .../dockershim/network/hairpin/hairpin.go | 4 +- pkg/kubelet/dockershim/network/hostport/BUILD | 2 +- .../dockershim/network/hostport/hostport.go | 6 +- .../network/hostport/hostport_manager.go | 14 +- .../network/hostport/hostport_syncer.go | 12 +- pkg/kubelet/dockershim/network/kubenet/BUILD | 2 +- .../network/kubenet/kubenet_linux.go | 62 +- pkg/kubelet/dockershim/network/plugins.go | 16 +- pkg/kubelet/dockershim/remote/BUILD | 2 +- .../dockershim/remote/docker_server.go | 8 +- pkg/kubelet/eviction/BUILD | 2 +- pkg/kubelet/eviction/eviction_manager.go | 54 +- pkg/kubelet/eviction/helpers.go | 22 +- .../eviction/memory_threshold_notifier.go | 6 +- .../eviction/threshold_notifier_linux.go | 8 +- .../threshold_notifier_unsupported.go | 4 +- pkg/kubelet/images/BUILD | 2 +- pkg/kubelet/images/image_gc_manager.go | 34 +- pkg/kubelet/images/image_manager.go | 20 +- pkg/kubelet/kubelet.go | 128 +- pkg/kubelet/kubelet_getters.go | 6 +- pkg/kubelet/kubelet_network.go | 4 +- pkg/kubelet/kubelet_network_linux.go | 30 +- pkg/kubelet/kubelet_node_status.go | 52 +- pkg/kubelet/kubelet_pods.go | 66 +- pkg/kubelet/kubelet_resources.go | 4 +- pkg/kubelet/kubelet_volumes.go | 18 +- pkg/kubelet/kubeletconfig/BUILD | 2 +- pkg/kubelet/kubeletconfig/configsync.go | 4 +- pkg/kubelet/kubeletconfig/util/log/BUILD | 2 +- pkg/kubelet/kubeletconfig/util/log/log.go | 6 +- pkg/kubelet/kuberuntime/BUILD | 2 +- pkg/kubelet/kuberuntime/helpers.go | 4 +- .../kuberuntime/kuberuntime_container.go | 46 +- pkg/kubelet/kuberuntime/kuberuntime_gc.go | 14 +- pkg/kubelet/kuberuntime/kuberuntime_image.go | 14 +- .../kuberuntime/kuberuntime_manager.go | 100 +- .../kuberuntime/kuberuntime_sandbox.go | 16 +- pkg/kubelet/kuberuntime/labels.go | 24 +- pkg/kubelet/kuberuntime/logs/BUILD | 2 +- pkg/kubelet/kuberuntime/logs/logs.go | 18 +- pkg/kubelet/lifecycle/BUILD | 2 +- pkg/kubelet/lifecycle/handlers.go | 10 +- pkg/kubelet/lifecycle/predicate.go | 20 +- pkg/kubelet/logs/BUILD | 2 +- pkg/kubelet/logs/container_log_manager.go | 16 +- pkg/kubelet/metrics/BUILD | 2 +- pkg/kubelet/metrics/collectors/BUILD | 2 +- .../metrics/collectors/volume_stats.go | 4 +- pkg/kubelet/metrics/metrics.go | 4 +- pkg/kubelet/mountpod/BUILD | 2 +- pkg/kubelet/mountpod/mount_pod_test.go | 4 +- pkg/kubelet/network/dns/BUILD | 2 +- pkg/kubelet/network/dns/dns.go | 22 +- pkg/kubelet/nodelease/BUILD | 2 +- pkg/kubelet/nodelease/controller.go | 12 +- pkg/kubelet/nodestatus/BUILD | 2 +- pkg/kubelet/nodestatus/setters.go | 24 +- pkg/kubelet/oom_watcher.go | 6 +- pkg/kubelet/pleg/BUILD | 2 +- pkg/kubelet/pleg/generic.go | 18 +- pkg/kubelet/pod/BUILD | 2 +- pkg/kubelet/pod/mirror_client.go | 8 +- pkg/kubelet/pod/pod_manager.go | 6 +- pkg/kubelet/pod_container_deletor.go | 6 +- pkg/kubelet/pod_workers.go | 4 +- pkg/kubelet/preemption/BUILD | 2 +- pkg/kubelet/preemption/preemption.go | 6 +- pkg/kubelet/prober/BUILD | 4 +- pkg/kubelet/prober/prober.go | 22 +- pkg/kubelet/prober/prober_manager.go | 6 +- pkg/kubelet/prober/prober_manager_test.go | 8 +- pkg/kubelet/prober/worker.go | 10 +- pkg/kubelet/remote/BUILD | 2 +- pkg/kubelet/remote/remote_image.go | 20 +- pkg/kubelet/remote/remote_runtime.go | 60 +- pkg/kubelet/runonce.go | 30 +- pkg/kubelet/server/BUILD | 2 +- 
pkg/kubelet/server/auth.go | 4 +- pkg/kubelet/server/portforward/BUILD | 2 +- pkg/kubelet/server/portforward/httpstream.go | 26 +- pkg/kubelet/server/portforward/websocket.go | 6 +- pkg/kubelet/server/remotecommand/BUILD | 2 +- .../server/remotecommand/httpstream.go | 6 +- pkg/kubelet/server/server.go | 26 +- pkg/kubelet/server/stats/BUILD | 2 +- .../server/stats/fs_resource_analyzer.go | 6 +- pkg/kubelet/server/stats/handler.go | 10 +- .../server/stats/summary_sys_containers.go | 6 +- .../server/stats/volume_stat_calculator.go | 4 +- pkg/kubelet/stats/BUILD | 2 +- pkg/kubelet/stats/cadvisor_stats_provider.go | 6 +- pkg/kubelet/stats/cri_stats_provider.go | 16 +- pkg/kubelet/stats/helper.go | 4 +- pkg/kubelet/status/BUILD | 2 +- pkg/kubelet/status/status_manager.go | 60 +- pkg/kubelet/token/BUILD | 2 +- pkg/kubelet/token/token_manager.go | 6 +- pkg/kubelet/util/BUILD | 6 +- pkg/kubelet/util/pluginwatcher/BUILD | 3 +- .../util/pluginwatcher/example_handler.go | 4 +- .../util/pluginwatcher/example_plugin.go | 16 +- .../util/pluginwatcher/plugin_watcher.go | 24 +- .../util/pluginwatcher/plugin_watcher_test.go | 2 + pkg/kubelet/util/util_unix.go | 4 +- pkg/kubelet/volume_host.go | 14 +- pkg/kubelet/volumemanager/BUILD | 2 +- pkg/kubelet/volumemanager/cache/BUILD | 2 +- .../cache/actual_state_of_world.go | 16 +- pkg/kubelet/volumemanager/metrics/BUILD | 2 +- pkg/kubelet/volumemanager/metrics/metrics.go | 4 +- pkg/kubelet/volumemanager/populator/BUILD | 2 +- .../desired_state_of_world_populator.go | 32 +- pkg/kubelet/volumemanager/reconciler/BUILD | 2 +- .../volumemanager/reconciler/reconciler.go | 92 +- pkg/kubelet/volumemanager/volume_manager.go | 12 +- pkg/kubelet/winstats/BUILD | 2 +- pkg/kubelet/winstats/perfcounter_nodestats.go | 8 +- pkg/kubemark/BUILD | 2 +- pkg/kubemark/controller.go | 12 +- pkg/kubemark/hollow_kubelet.go | 6 +- pkg/kubemark/hollow_proxy.go | 4 +- pkg/master/BUILD | 2 +- pkg/master/controller.go | 18 +- pkg/master/controller/crdregistration/BUILD | 2 +- .../crdregistration_controller.go | 10 +- pkg/master/master.go | 28 +- pkg/master/reconcilers/BUILD | 2 +- pkg/master/reconcilers/lease.go | 8 +- pkg/master/reconcilers/mastercount.go | 6 +- pkg/master/services.go | 6 +- pkg/master/tunneler/BUILD | 2 +- pkg/master/tunneler/ssh.go | 30 +- pkg/printers/internalversion/BUILD | 2 +- pkg/printers/internalversion/describe.go | 10 +- pkg/probe/exec/BUILD | 2 +- pkg/probe/exec/exec.go | 4 +- pkg/probe/http/BUILD | 2 +- pkg/probe/http/http.go | 6 +- pkg/probe/tcp/BUILD | 2 +- pkg/probe/tcp/tcp.go | 4 +- pkg/proxy/BUILD | 2 +- pkg/proxy/config/BUILD | 2 +- pkg/proxy/config/config.go | 26 +- pkg/proxy/endpoints.go | 12 +- pkg/proxy/healthcheck/BUILD | 2 +- pkg/proxy/healthcheck/healthcheck.go | 32 +- pkg/proxy/iptables/BUILD | 4 +- pkg/proxy/iptables/proxier.go | 76 +- pkg/proxy/iptables/proxier_test.go | 6 +- pkg/proxy/ipvs/BUILD | 2 +- pkg/proxy/ipvs/graceful_termination.go | 22 +- pkg/proxy/ipvs/ipset.go | 18 +- pkg/proxy/ipvs/proxier.go | 162 +- pkg/proxy/service.go | 12 +- pkg/proxy/userspace/BUILD | 2 +- pkg/proxy/userspace/proxier.go | 110 +- pkg/proxy/userspace/proxysocket.go | 40 +- pkg/proxy/userspace/roundrobin.go | 26 +- pkg/proxy/util/BUILD | 2 +- pkg/proxy/util/endpoints.go | 10 +- pkg/proxy/util/port.go | 4 +- pkg/proxy/util/utils.go | 8 +- pkg/proxy/winkernel/BUILD | 2 +- pkg/proxy/winkernel/proxier.go | 118 +- pkg/proxy/winuserspace/BUILD | 2 +- pkg/proxy/winuserspace/proxier.go | 32 +- pkg/proxy/winuserspace/proxysocket.go | 78 +- 
pkg/proxy/winuserspace/roundrobin.go | 28 +- pkg/registry/core/rest/BUILD | 2 +- pkg/registry/core/rest/storage_core.go | 6 +- pkg/registry/core/service/portallocator/BUILD | 2 +- .../core/service/portallocator/allocator.go | 4 +- pkg/registry/core/service/storage/BUILD | 2 +- pkg/registry/core/service/storage/rest.go | 14 +- pkg/registry/rbac/rest/BUILD | 2 +- pkg/registry/rbac/rest/storage_rbac.go | 32 +- pkg/registry/rbac/validation/BUILD | 2 +- pkg/registry/rbac/validation/rule.go | 4 +- pkg/registry/scheduling/rest/BUILD | 2 +- .../scheduling/rest/storage_scheduling.go | 8 +- pkg/scheduler/BUILD | 2 +- pkg/scheduler/algorithm/predicates/BUILD | 2 +- .../predicates/csi_volume_predicate.go | 10 +- .../algorithm/predicates/metadata.go | 10 +- .../algorithm/predicates/predicates.go | 76 +- pkg/scheduler/algorithm/priorities/BUILD | 2 +- .../algorithm/priorities/interpod_affinity.go | 8 +- .../priorities/resource_allocation.go | 8 +- .../algorithm/priorities/resource_limits.go | 8 +- .../priorities/selector_spreading.go | 8 +- .../algorithmprovider/defaults/BUILD | 2 +- .../algorithmprovider/defaults/defaults.go | 6 +- pkg/scheduler/cache/BUILD | 2 +- pkg/scheduler/cache/node_info.go | 6 +- pkg/scheduler/core/BUILD | 2 +- pkg/scheduler/core/equivalence/BUILD | 2 +- pkg/scheduler/core/equivalence/eqivalence.go | 12 +- pkg/scheduler/core/generic_scheduler.go | 32 +- pkg/scheduler/factory/BUILD | 2 +- pkg/scheduler/factory/factory.go | 104 +- pkg/scheduler/factory/plugins.go | 18 +- pkg/scheduler/internal/cache/BUILD | 2 +- pkg/scheduler/internal/cache/cache.go | 20 +- pkg/scheduler/internal/cache/debugger/BUILD | 2 +- .../internal/cache/debugger/comparer.go | 10 +- .../internal/cache/debugger/dumper.go | 8 +- pkg/scheduler/internal/cache/node_tree.go | 12 +- pkg/scheduler/internal/queue/BUILD | 2 +- .../internal/queue/scheduling_queue.go | 16 +- pkg/scheduler/scheduler.go | 44 +- pkg/scheduler/util/BUILD | 2 +- pkg/scheduler/util/backoff_utils.go | 4 +- pkg/serviceaccount/BUILD | 2 +- pkg/serviceaccount/claims.go | 24 +- pkg/serviceaccount/legacy.go | 16 +- pkg/ssh/BUILD | 4 +- pkg/ssh/ssh.go | 28 +- pkg/ssh/ssh_test.go | 10 +- pkg/util/async/BUILD | 2 +- pkg/util/async/bounded_frequency_runner.go | 12 +- pkg/util/bandwidth/BUILD | 2 +- pkg/util/bandwidth/linux.go | 8 +- pkg/util/coverage/coverage.go | 4 +- pkg/util/flag/BUILD | 2 +- pkg/util/flag/flags.go | 4 +- pkg/util/goroutinemap/BUILD | 2 +- pkg/util/goroutinemap/goroutinemap.go | 6 +- pkg/util/ipconfig/BUILD | 2 +- pkg/util/ipconfig/ipconfig.go | 6 +- pkg/util/ipset/BUILD | 2 +- pkg/util/ipset/ipset.go | 32 +- pkg/util/iptables/BUILD | 2 +- pkg/util/iptables/iptables.go | 34 +- pkg/util/ipvs/BUILD | 2 +- pkg/util/ipvs/ipvs_linux.go | 4 +- pkg/util/ipvs/kernelcheck_linux.go | 4 +- pkg/util/keymutex/BUILD | 2 +- pkg/util/keymutex/hashed.go | 10 +- pkg/util/mount/BUILD | 4 +- pkg/util/mount/exec_mount.go | 10 +- pkg/util/mount/fake.go | 10 +- pkg/util/mount/mount_linux.go | 114 +- pkg/util/mount/mount_linux_test.go | 20 +- pkg/util/mount/mount_windows.go | 36 +- pkg/util/mount/nsenter_mount.go | 32 +- pkg/util/netsh/BUILD | 2 +- pkg/util/netsh/netsh.go | 16 +- pkg/util/node/BUILD | 2 +- pkg/util/node/node.go | 6 +- pkg/util/nsenter/BUILD | 2 +- pkg/util/nsenter/exec.go | 6 +- pkg/util/nsenter/nsenter.go | 6 +- pkg/util/oom/BUILD | 2 +- pkg/util/oom/oom_linux.go | 18 +- pkg/util/procfs/BUILD | 2 +- pkg/util/procfs/procfs_linux.go | 4 +- pkg/util/resizefs/BUILD | 2 +- pkg/util/resizefs/resizefs_linux.go | 8 +- pkg/volume/BUILD | 2 +- 
pkg/volume/awsebs/BUILD | 4 +- pkg/volume/awsebs/attacher.go | 20 +- pkg/volume/awsebs/attacher_test.go | 6 +- pkg/volume/awsebs/aws_ebs.go | 34 +- pkg/volume/awsebs/aws_ebs_block.go | 4 +- pkg/volume/awsebs/aws_util.go | 24 +- pkg/volume/azure_dd/BUILD | 2 +- pkg/volume/azure_dd/attacher.go | 46 +- pkg/volume/azure_dd/azure_common_linux.go | 34 +- pkg/volume/azure_dd/azure_common_windows.go | 26 +- pkg/volume/azure_dd/azure_dd.go | 16 +- pkg/volume/azure_dd/azure_dd_block.go | 6 +- pkg/volume/azure_dd/azure_mounter.go | 28 +- pkg/volume/azure_file/BUILD | 2 +- pkg/volume/azure_file/azure_file.go | 20 +- pkg/volume/azure_file/azure_provision.go | 8 +- pkg/volume/cephfs/BUILD | 2 +- pkg/volume/cephfs/cephfs.go | 22 +- pkg/volume/cinder/BUILD | 4 +- pkg/volume/cinder/attacher.go | 36 +- pkg/volume/cinder/attacher_test.go | 20 +- pkg/volume/cinder/cinder.go | 54 +- pkg/volume/cinder/cinder_block.go | 4 +- pkg/volume/cinder/cinder_util.go | 30 +- pkg/volume/configmap/BUILD | 2 +- pkg/volume/configmap/configmap.go | 18 +- pkg/volume/csi/BUILD | 4 +- pkg/volume/csi/csi_attacher.go | 126 +- pkg/volume/csi/csi_attacher_test.go | 6 +- pkg/volume/csi/csi_block.go | 80 +- pkg/volume/csi/csi_client.go | 16 +- pkg/volume/csi/csi_mounter.go | 84 +- pkg/volume/csi/csi_mounter_test.go | 4 +- pkg/volume/csi/csi_plugin.go | 72 +- pkg/volume/csi/csi_util.go | 18 +- pkg/volume/csi/nodeinfomanager/BUILD | 2 +- .../csi/nodeinfomanager/nodeinfomanager.go | 4 +- pkg/volume/downwardapi/BUILD | 2 +- pkg/volume/downwardapi/downwardapi.go | 24 +- pkg/volume/emptydir/BUILD | 2 +- pkg/volume/emptydir/empty_dir.go | 10 +- pkg/volume/emptydir/empty_dir_linux.go | 6 +- pkg/volume/fc/BUILD | 2 +- pkg/volume/fc/attacher.go | 12 +- pkg/volume/fc/disk_manager.go | 16 +- pkg/volume/fc/fc.go | 22 +- pkg/volume/fc/fc_util.go | 34 +- pkg/volume/flexvolume/BUILD | 2 +- pkg/volume/flexvolume/attacher-defaults.go | 8 +- pkg/volume/flexvolume/attacher.go | 4 +- pkg/volume/flexvolume/detacher-defaults.go | 8 +- pkg/volume/flexvolume/detacher.go | 6 +- pkg/volume/flexvolume/driver-call.go | 12 +- pkg/volume/flexvolume/expander-defaults.go | 6 +- pkg/volume/flexvolume/mounter-defaults.go | 6 +- pkg/volume/flexvolume/plugin-defaults.go | 4 +- pkg/volume/flexvolume/plugin.go | 4 +- pkg/volume/flexvolume/probe.go | 10 +- pkg/volume/flexvolume/unmounter-defaults.go | 4 +- pkg/volume/flexvolume/unmounter.go | 4 +- pkg/volume/flexvolume/util.go | 8 +- pkg/volume/flocker/BUILD | 2 +- pkg/volume/flocker/flocker.go | 22 +- pkg/volume/flocker/flocker_util.go | 6 +- pkg/volume/gcepd/BUILD | 4 +- pkg/volume/gcepd/attacher.go | 30 +- pkg/volume/gcepd/attacher_test.go | 8 +- pkg/volume/gcepd/gce_pd.go | 26 +- pkg/volume/gcepd/gce_pd_block.go | 4 +- pkg/volume/gcepd/gce_util.go | 30 +- pkg/volume/glusterfs/BUILD | 2 +- pkg/volume/glusterfs/glusterfs.go | 116 +- pkg/volume/glusterfs/glusterfs_util.go | 4 +- pkg/volume/iscsi/BUILD | 2 +- pkg/volume/iscsi/attacher.go | 12 +- pkg/volume/iscsi/disk_manager.go | 16 +- pkg/volume/iscsi/iscsi.go | 16 +- pkg/volume/iscsi/iscsi_util.go | 80 +- pkg/volume/local/BUILD | 2 +- pkg/volume/local/local.go | 32 +- pkg/volume/nfs/BUILD | 2 +- pkg/volume/nfs/nfs.go | 12 +- pkg/volume/photon_pd/BUILD | 4 +- pkg/volume/photon_pd/attacher.go | 44 +- pkg/volume/photon_pd/attacher_test.go | 8 +- pkg/volume/photon_pd/photon_pd.go | 24 +- pkg/volume/photon_pd/photon_util.go | 26 +- pkg/volume/plugins.go | 20 +- pkg/volume/portworx/BUILD | 2 +- pkg/volume/portworx/portworx.go | 16 +- pkg/volume/portworx/portworx_util.go | 52 +- 
pkg/volume/projected/BUILD | 2 +- pkg/volume/projected/projected.go | 26 +- pkg/volume/quobyte/BUILD | 2 +- pkg/volume/quobyte/quobyte.go | 10 +- pkg/volume/quobyte/quobyte_util.go | 6 +- pkg/volume/rbd/BUILD | 2 +- pkg/volume/rbd/attacher.go | 24 +- pkg/volume/rbd/disk_manager.go | 22 +- pkg/volume/rbd/rbd.go | 30 +- pkg/volume/rbd/rbd_util.go | 96 +- pkg/volume/scaleio/BUILD | 4 +- pkg/volume/scaleio/sio_client.go | 78 +- pkg/volume/scaleio/sio_mgr.go | 52 +- pkg/volume/scaleio/sio_plugin.go | 10 +- pkg/volume/scaleio/sio_util.go | 34 +- pkg/volume/scaleio/sio_volume.go | 108 +- pkg/volume/scaleio/sio_volume_test.go | 4 +- pkg/volume/secret/BUILD | 2 +- pkg/volume/secret/secret.go | 20 +- pkg/volume/storageos/BUILD | 2 +- pkg/volume/storageos/storageos.go | 50 +- pkg/volume/storageos/storageos_util.go | 26 +- pkg/volume/util/BUILD | 2 +- pkg/volume/util/atomic_writer.go | 48 +- pkg/volume/util/device_util_linux.go | 6 +- pkg/volume/util/nestedpendingoperations/BUILD | 2 +- .../nestedpendingoperations.go | 8 +- pkg/volume/util/operationexecutor/BUILD | 2 +- .../operationexecutor/operation_executor.go | 16 +- .../operationexecutor/operation_generator.go | 96 +- pkg/volume/util/recyclerclient/BUILD | 2 +- .../util/recyclerclient/recycler_client.go | 14 +- pkg/volume/util/util.go | 28 +- pkg/volume/util/volumepathhandler/BUILD | 2 +- .../volumepathhandler/volume_path_handler.go | 28 +- .../volume_path_handler_linux.go | 10 +- pkg/volume/volume_linux.go | 8 +- pkg/volume/vsphere_volume/BUILD | 4 +- pkg/volume/vsphere_volume/attacher.go | 30 +- pkg/volume/vsphere_volume/attacher_test.go | 8 +- pkg/volume/vsphere_volume/vsphere_volume.go | 24 +- .../vsphere_volume/vsphere_volume_block.go | 8 +- .../vsphere_volume/vsphere_volume_util.go | 16 +- pkg/windows/service/BUILD | 2 +- pkg/windows/service/service.go | 6 +- plugin/pkg/admission/admit/BUILD | 2 +- plugin/pkg/admission/admit/admission.go | 4 +- plugin/pkg/admission/deny/BUILD | 2 +- plugin/pkg/admission/deny/admission.go | 4 +- plugin/pkg/admission/imagepolicy/BUILD | 2 +- plugin/pkg/admission/imagepolicy/admission.go | 10 +- plugin/pkg/admission/imagepolicy/config.go | 6 +- plugin/pkg/admission/podnodeselector/BUILD | 2 +- .../admission/podnodeselector/admission.go | 4 +- plugin/pkg/admission/podpreset/BUILD | 2 +- plugin/pkg/admission/podpreset/admission.go | 10 +- .../admission/podtolerationrestriction/BUILD | 2 +- .../podtolerationrestriction/admission.go | 4 +- plugin/pkg/admission/priority/BUILD | 2 +- .../pkg/admission/priority/admission_test.go | 12 +- plugin/pkg/admission/resourcequota/BUILD | 2 +- .../pkg/admission/resourcequota/controller.go | 12 +- .../security/podsecuritypolicy/BUILD | 2 +- .../security/podsecuritypolicy/admission.go | 18 +- .../storage/persistentvolume/label/BUILD | 2 +- .../persistentvolume/label/admission.go | 6 +- .../storage/storageclass/setdefault/BUILD | 4 +- .../storageclass/setdefault/admission.go | 10 +- .../storageclass/setdefault/admission_test.go | 6 +- .../storageobjectinuseprotection/BUILD | 2 +- .../storageobjectinuseprotection/admission.go | 6 +- .../auth/authenticator/token/bootstrap/BUILD | 2 +- .../token/bootstrap/bootstrap.go | 10 +- plugin/pkg/auth/authorizer/node/BUILD | 2 +- .../auth/authorizer/node/graph_populator.go | 22 +- .../auth/authorizer/node/node_authorizer.go | 44 +- plugin/pkg/auth/authorizer/rbac/BUILD | 2 +- .../authorizer/rbac/bootstrappolicy/BUILD | 2 +- .../rbac/bootstrappolicy/controller_policy.go | 6 +- .../rbac/bootstrappolicy/namespace_policy.go | 10 +- 
plugin/pkg/auth/authorizer/rbac/rbac.go | 6 +- staging/src/k8s.io/api/Godeps/Godeps.json | 4 - .../src/k8s.io/apiextensions-apiserver/BUILD | 2 +- .../Godeps/Godeps.json | 4 - .../k8s.io/apiextensions-apiserver/main.go | 4 +- .../pkg/apiserver/BUILD | 2 +- .../customresource_discovery_controller.go | 16 +- .../pkg/apiserver/customresource_handler.go | 10 +- .../pkg/controller/establish/BUILD | 2 +- .../establish/establishing_controller.go | 6 +- .../pkg/controller/finalizer/BUILD | 2 +- .../pkg/controller/finalizer/crd_finalizer.go | 8 +- .../pkg/controller/status/BUILD | 2 +- .../controller/status/naming_controller.go | 16 +- .../k8s.io/apimachinery/Godeps/Godeps.json | 4 - .../k8s.io/apimachinery/pkg/api/meta/BUILD | 2 +- .../k8s.io/apimachinery/pkg/api/meta/meta.go | 10 +- .../src/k8s.io/apimachinery/pkg/labels/BUILD | 2 +- .../apimachinery/pkg/labels/selector.go | 8 +- .../src/k8s.io/apimachinery/pkg/runtime/BUILD | 2 +- .../apimachinery/pkg/runtime/converter.go | 10 +- .../pkg/util/httpstream/spdy/BUILD | 2 +- .../pkg/util/httpstream/spdy/connection.go | 4 +- .../k8s.io/apimachinery/pkg/util/intstr/BUILD | 2 +- .../apimachinery/pkg/util/intstr/intstr.go | 4 +- .../k8s.io/apimachinery/pkg/util/net/BUILD | 2 +- .../k8s.io/apimachinery/pkg/util/net/http.go | 8 +- .../apimachinery/pkg/util/net/interface.go | 38 +- .../k8s.io/apimachinery/pkg/util/proxy/BUILD | 2 +- .../apimachinery/pkg/util/proxy/dial.go | 8 +- .../apimachinery/pkg/util/proxy/transport.go | 6 +- .../pkg/util/proxy/upgradeaware.go | 22 +- .../apimachinery/pkg/util/runtime/BUILD | 2 +- .../apimachinery/pkg/util/runtime/runtime.go | 6 +- .../k8s.io/apimachinery/pkg/util/yaml/BUILD | 2 +- .../apimachinery/pkg/util/yaml/decoder.go | 8 +- .../src/k8s.io/apimachinery/pkg/watch/BUILD | 2 +- .../apimachinery/pkg/watch/streamwatcher.go | 8 +- .../k8s.io/apimachinery/pkg/watch/watch.go | 6 +- .../src/k8s.io/apiserver/Godeps/Godeps.json | 4 - .../src/k8s.io/apiserver/pkg/admission/BUILD | 2 +- .../k8s.io/apiserver/pkg/admission/config.go | 4 +- .../pkg/admission/configuration/BUILD | 2 +- .../configuration/initializer_manager.go | 4 +- .../pkg/admission/plugin/initialization/BUILD | 2 +- .../plugin/initialization/initialization.go | 16 +- .../plugin/namespace/lifecycle/BUILD | 2 +- .../plugin/namespace/lifecycle/admission.go | 6 +- .../admission/plugin/webhook/mutating/BUILD | 2 +- .../plugin/webhook/mutating/dispatcher.go | 8 +- .../admission/plugin/webhook/validating/BUILD | 2 +- .../plugin/webhook/validating/dispatcher.go | 10 +- .../k8s.io/apiserver/pkg/admission/plugins.go | 12 +- staging/src/k8s.io/apiserver/pkg/audit/BUILD | 2 +- .../src/k8s.io/apiserver/pkg/audit/metrics.go | 4 +- .../k8s.io/apiserver/pkg/audit/policy/BUILD | 2 +- .../apiserver/pkg/audit/policy/reader.go | 4 +- .../src/k8s.io/apiserver/pkg/audit/request.go | 8 +- .../pkg/authentication/token/tokenfile/BUILD | 2 +- .../token/tokenfile/tokenfile.go | 6 +- .../apiserver/pkg/endpoints/filters/BUILD | 2 +- .../pkg/endpoints/filters/authentication.go | 6 +- .../pkg/endpoints/filters/authorization.go | 6 +- .../pkg/endpoints/filters/impersonation.go | 8 +- .../apiserver/pkg/endpoints/handlers/BUILD | 2 +- .../apiserver/pkg/endpoints/handlers/get.go | 4 +- .../apiserver/pkg/endpoints/handlers/rest.go | 6 +- .../apiserver/pkg/endpoints/request/BUILD | 2 +- .../pkg/endpoints/request/requestinfo.go | 4 +- .../apiserver/pkg/registry/generic/BUILD | 2 +- .../pkg/registry/generic/registry/BUILD | 2 +- .../generic/registry/storage_factory.go | 6 +- 
.../pkg/registry/generic/registry/store.go | 16 +- .../pkg/registry/generic/storage_decorator.go | 4 +- staging/src/k8s.io/apiserver/pkg/server/BUILD | 2 +- .../src/k8s.io/apiserver/pkg/server/config.go | 6 +- .../pkg/server/deprecated_insecure_serving.go | 6 +- .../k8s.io/apiserver/pkg/server/filters/BUILD | 2 +- .../apiserver/pkg/server/filters/cors.go | 4 +- .../pkg/server/filters/maxinflight.go | 4 +- .../apiserver/pkg/server/filters/wrap.go | 4 +- .../apiserver/pkg/server/genericapiserver.go | 6 +- .../k8s.io/apiserver/pkg/server/handler.go | 10 +- .../k8s.io/apiserver/pkg/server/healthz/BUILD | 2 +- .../apiserver/pkg/server/healthz/healthz.go | 10 +- .../src/k8s.io/apiserver/pkg/server/hooks.go | 8 +- .../k8s.io/apiserver/pkg/server/httplog/BUILD | 2 +- .../apiserver/pkg/server/httplog/httplog.go | 14 +- .../src/k8s.io/apiserver/pkg/server/mux/BUILD | 2 +- .../apiserver/pkg/server/mux/pathrecorder.go | 8 +- .../k8s.io/apiserver/pkg/server/options/BUILD | 2 +- .../apiserver/pkg/server/options/audit.go | 4 +- .../pkg/server/options/authentication.go | 14 +- .../pkg/server/options/authorization.go | 6 +- .../apiserver/pkg/server/options/serving.go | 6 +- .../k8s.io/apiserver/pkg/server/routes/BUILD | 2 +- .../apiserver/pkg/server/routes/flags.go | 4 +- .../apiserver/pkg/server/routes/openapi.go | 6 +- .../apiserver/pkg/server/secure_serving.go | 6 +- .../k8s.io/apiserver/pkg/server/storage/BUILD | 2 +- .../pkg/server/storage/storage_factory.go | 8 +- .../k8s.io/apiserver/pkg/storage/cacher/BUILD | 2 +- .../apiserver/pkg/storage/cacher/cacher.go | 14 +- .../apiserver/pkg/storage/etcd/testing/BUILD | 2 +- .../pkg/storage/etcd/testing/utils.go | 4 +- .../k8s.io/apiserver/pkg/storage/etcd3/BUILD | 2 +- .../apiserver/pkg/storage/etcd3/compact.go | 8 +- .../apiserver/pkg/storage/etcd3/store.go | 6 +- .../apiserver/pkg/storage/etcd3/watcher.go | 12 +- .../pkg/storage/value/encrypt/envelope/BUILD | 2 +- .../value/encrypt/envelope/grpc_service.go | 8 +- .../k8s.io/apiserver/pkg/util/feature/BUILD | 2 +- .../pkg/util/feature/feature_gate.go | 8 +- .../src/k8s.io/apiserver/pkg/util/flag/BUILD | 2 +- .../k8s.io/apiserver/pkg/util/flag/flags.go | 6 +- .../src/k8s.io/apiserver/pkg/util/logs/BUILD | 2 +- .../k8s.io/apiserver/pkg/util/logs/logs.go | 27 +- .../src/k8s.io/apiserver/pkg/util/trace/BUILD | 2 +- .../k8s.io/apiserver/pkg/util/trace/trace.go | 8 +- .../k8s.io/apiserver/pkg/util/wsstream/BUILD | 2 +- .../apiserver/pkg/util/wsstream/conn.go | 8 +- .../authenticator/password/passwordfile/BUILD | 2 +- .../password/passwordfile/passwordfile.go | 4 +- .../plugin/pkg/authenticator/token/oidc/BUILD | 4 +- .../pkg/authenticator/token/oidc/oidc.go | 10 +- .../pkg/authenticator/token/oidc/oidc_test.go | 12 +- .../pkg/authenticator/token/webhook/BUILD | 2 +- .../authenticator/token/webhook/webhook.go | 4 +- .../plugin/pkg/authorizer/webhook/BUILD | 2 +- .../plugin/pkg/authorizer/webhook/webhook.go | 4 +- .../src/k8s.io/cli-runtime/Godeps/Godeps.json | 4 - .../src/k8s.io/client-go/Godeps/Godeps.json | 4 - staging/src/k8s.io/client-go/discovery/BUILD | 2 +- .../client-go/discovery/cached_discovery.go | 18 +- .../client-go/discovery/round_tripper.go | 4 +- .../k8s.io/client-go/examples/workqueue/BUILD | 2 +- .../client-go/examples/workqueue/main.go | 16 +- .../client-go/listers/policy/v1beta1/BUILD | 2 +- .../v1beta1/poddisruptionbudget_expansion.go | 4 +- .../plugin/pkg/client/auth/azure/BUILD | 2 +- .../plugin/pkg/client/auth/azure/azure.go | 6 +- .../plugin/pkg/client/auth/exec/BUILD | 2 +- 
.../plugin/pkg/client/auth/exec/exec.go | 4 +- .../plugin/pkg/client/auth/gcp/BUILD | 2 +- .../plugin/pkg/client/auth/gcp/gcp.go | 10 +- .../plugin/pkg/client/auth/oidc/BUILD | 2 +- .../plugin/pkg/client/auth/oidc/oidc.go | 6 +- .../plugin/pkg/client/auth/openstack/BUILD | 2 +- .../pkg/client/auth/openstack/openstack.go | 10 +- staging/src/k8s.io/client-go/rest/BUILD | 4 +- staging/src/k8s.io/client-go/rest/config.go | 4 +- staging/src/k8s.io/client-go/rest/plugin.go | 4 +- staging/src/k8s.io/client-go/rest/request.go | 30 +- .../src/k8s.io/client-go/rest/request_test.go | 8 +- .../src/k8s.io/client-go/rest/token_source.go | 4 +- .../src/k8s.io/client-go/rest/urlbackoff.go | 8 +- staging/src/k8s.io/client-go/restmapper/BUILD | 2 +- .../k8s.io/client-go/restmapper/discovery.go | 4 +- .../k8s.io/client-go/restmapper/shortcut.go | 6 +- .../src/k8s.io/client-go/tools/cache/BUILD | 2 +- .../client-go/tools/cache/delta_fifo.go | 10 +- .../client-go/tools/cache/expiration_cache.go | 4 +- .../k8s.io/client-go/tools/cache/listers.go | 4 +- .../client-go/tools/cache/mutation_cache.go | 4 +- .../tools/cache/mutation_detector.go | 4 +- .../k8s.io/client-go/tools/cache/reflector.go | 14 +- .../client-go/tools/cache/shared_informer.go | 16 +- .../k8s.io/client-go/tools/clientcmd/BUILD | 2 +- .../tools/clientcmd/client_config.go | 6 +- .../client-go/tools/clientcmd/config.go | 4 +- .../client-go/tools/clientcmd/loader.go | 4 +- .../tools/clientcmd/merged_client_builder.go | 6 +- .../client-go/tools/leaderelection/BUILD | 2 +- .../tools/leaderelection/leaderelection.go | 20 +- .../src/k8s.io/client-go/tools/record/BUILD | 2 +- .../k8s.io/client-go/tools/record/event.go | 16 +- .../client-go/tools/remotecommand/BUILD | 2 +- .../tools/remotecommand/remotecommand.go | 4 +- .../client-go/tools/remotecommand/v1.go | 8 +- .../src/k8s.io/client-go/tools/watch/BUILD | 2 +- .../src/k8s.io/client-go/tools/watch/until.go | 4 +- staging/src/k8s.io/client-go/transport/BUILD | 2 +- .../client-go/transport/round_trippers.go | 38 +- .../k8s.io/client-go/util/certificate/BUILD | 2 +- .../util/certificate/certificate_manager.go | 24 +- .../util/certificate/certificate_store.go | 8 +- .../client-go/util/certificate/csr/BUILD | 2 +- .../client-go/util/certificate/csr/csr.go | 6 +- staging/src/k8s.io/cloud-provider/BUILD | 2 +- .../k8s.io/cloud-provider/Godeps/Godeps.json | 4 - staging/src/k8s.io/cloud-provider/plugins.go | 14 +- .../cluster-bootstrap/Godeps/Godeps.json | 4 - .../k8s.io/code-generator/Godeps/Godeps.json | 4 - .../code-generator/cmd/client-gen/BUILD | 2 +- .../cmd/client-gen/generators/BUILD | 2 +- .../client-gen/generators/client_generator.go | 6 +- .../code-generator/cmd/client-gen/main.go | 7 +- .../code-generator/cmd/conversion-gen/BUILD | 2 +- .../cmd/conversion-gen/generators/BUILD | 2 +- .../conversion-gen/generators/conversion.go | 66 +- .../code-generator/cmd/conversion-gen/main.go | 9 +- .../code-generator/cmd/deepcopy-gen/BUILD | 2 +- .../code-generator/cmd/deepcopy-gen/main.go | 9 +- .../code-generator/cmd/defaulter-gen/BUILD | 2 +- .../code-generator/cmd/defaulter-gen/main.go | 9 +- .../cmd/go-to-protobuf/protobuf/BUILD | 2 +- .../cmd/go-to-protobuf/protobuf/generator.go | 4 +- .../cmd/go-to-protobuf/protobuf/tags.go | 4 +- .../code-generator/cmd/import-boss/BUILD | 2 +- .../code-generator/cmd/import-boss/main.go | 7 +- .../code-generator/cmd/informer-gen/BUILD | 2 +- .../cmd/informer-gen/generators/BUILD | 2 +- .../cmd/informer-gen/generators/factory.go | 4 +- .../generators/factoryinterface.go | 
4 +- .../cmd/informer-gen/generators/informer.go | 4 +- .../cmd/informer-gen/generators/packages.go | 10 +- .../cmd/informer-gen/generators/tags.go | 4 +- .../code-generator/cmd/informer-gen/main.go | 9 +- .../code-generator/cmd/lister-gen/BUILD | 2 +- .../cmd/lister-gen/generators/BUILD | 2 +- .../cmd/lister-gen/generators/lister.go | 10 +- .../cmd/lister-gen/generators/tags.go | 4 +- .../code-generator/cmd/lister-gen/main.go | 9 +- .../code-generator/cmd/register-gen/BUILD | 2 +- .../cmd/register-gen/generators/BUILD | 2 +- .../cmd/register-gen/generators/packages.go | 18 +- .../code-generator/cmd/register-gen/main.go | 9 +- .../k8s.io/code-generator/cmd/set-gen/BUILD | 2 +- .../k8s.io/code-generator/cmd/set-gen/main.go | 7 +- staging/src/k8s.io/csi-api/Godeps/Godeps.json | 4 - staging/src/k8s.io/kube-aggregator/BUILD | 2 +- .../k8s.io/kube-aggregator/Godeps/Godeps.json | 4 - staging/src/k8s.io/kube-aggregator/main.go | 4 +- .../kube-aggregator/pkg/apiserver/BUILD | 2 +- .../pkg/apiserver/apiservice_controller.go | 18 +- .../pkg/apiserver/handler_proxy.go | 6 +- .../kube-aggregator/pkg/controllers/BUILD | 2 +- .../pkg/controllers/autoregister/BUILD | 2 +- .../autoregister/autoregister_controller.go | 10 +- .../kube-aggregator/pkg/controllers/cache.go | 6 +- .../pkg/controllers/openapi/BUILD | 2 +- .../pkg/controllers/openapi/controller.go | 14 +- .../pkg/controllers/status/BUILD | 2 +- .../status/available_controller.go | 26 +- .../Godeps/Godeps.json | 4 - .../src/k8s.io/kube-proxy/Godeps/Godeps.json | 4 - .../k8s.io/kube-scheduler/Godeps/Godeps.json | 4 - staging/src/k8s.io/kubelet/Godeps/Godeps.json | 4 - staging/src/k8s.io/metrics/Godeps/Godeps.json | 4 - staging/src/k8s.io/sample-apiserver/BUILD | 2 +- .../sample-apiserver/Godeps/Godeps.json | 4 - staging/src/k8s.io/sample-apiserver/main.go | 4 +- .../sample-cli-plugin/Godeps/Godeps.json | 4 - staging/src/k8s.io/sample-controller/BUILD | 2 +- .../sample-controller/Godeps/Godeps.json | 4 - .../k8s.io/sample-controller/controller.go | 28 +- staging/src/k8s.io/sample-controller/main.go | 10 +- test/e2e/BUILD | 2 +- test/e2e/autoscaling/BUILD | 2 +- .../cluster_autoscaler_scalability.go | 18 +- .../autoscaling/cluster_size_autoscaling.go | 90 +- test/e2e/common/BUILD | 2 +- test/e2e/common/kubelet_etc_hosts.go | 4 +- test/e2e/e2e.go | 10 +- test/e2e/framework/BUILD | 2 +- test/e2e/framework/authorizer_util.go | 8 +- test/e2e/framework/gpu_util.go | 8 +- test/e2e/framework/ingress/BUILD | 2 +- test/e2e/framework/ingress/ingress_utils.go | 6 +- test/e2e/framework/metrics/BUILD | 2 +- test/e2e/framework/metrics/generic_metrics.go | 4 +- test/e2e/framework/metrics/metrics_grabber.go | 8 +- test/e2e/framework/test_context.go | 8 +- test/e2e/framework/util.go | 8 +- test/e2e/generated/BUILD | 2 +- test/e2e/generated/gobindata_util.go | 6 +- test/e2e/network/scale/localrun/BUILD | 2 +- .../network/scale/localrun/ingress_scale.go | 26 +- test/e2e/storage/vsphere/BUILD | 2 +- test/e2e/storage/vsphere/connection.go | 14 +- test/e2e/storage/vsphere/vsphere_utils.go | 4 +- test/e2e_kubeadm/runner/local/BUILD | 2 +- test/e2e_kubeadm/runner/local/run_local.go | 10 +- test/e2e_node/BUILD | 4 +- test/e2e_node/apparmor_test.go | 10 +- test/e2e_node/builder/BUILD | 2 +- test/e2e_node/builder/build.go | 6 +- test/e2e_node/e2e_node_suite_test.go | 26 +- test/e2e_node/image_list.go | 8 +- test/e2e_node/pods_container_manager_test.go | 4 +- test/e2e_node/remote/BUILD | 2 +- test/e2e_node/remote/cadvisor_e2e.go | 4 +- test/e2e_node/remote/node_conformance.go | 
18 +- test/e2e_node/remote/node_e2e.go | 6 +- test/e2e_node/remote/remote.go | 22 +- test/e2e_node/remote/ssh.go | 4 +- test/e2e_node/remote/utils.go | 10 +- test/e2e_node/runner/local/BUILD | 2 +- test/e2e_node/runner/local/run_local.go | 15 +- test/e2e_node/runner/remote/BUILD | 2 +- test/e2e_node/runner/remote/run_remote.go | 43 +- test/e2e_node/services/BUILD | 2 +- test/e2e_node/services/internal_services.go | 30 +- test/e2e_node/services/kubelet.go | 6 +- test/e2e_node/services/server.go | 32 +- test/e2e_node/services/services.go | 20 +- test/e2e_node/services/util.go | 4 +- test/e2e_node/util.go | 8 +- test/images/apparmor-loader/BUILD | 2 +- test/images/apparmor-loader/loader.go | 40 +- test/images/logs-generator/BUILD | 2 +- test/images/logs-generator/logs_generator.go | 8 +- test/images/webhook/BUILD | 2 +- test/images/webhook/addlabel.go | 6 +- test/images/webhook/alwaysdeny.go | 4 +- test/images/webhook/config.go | 4 +- test/images/webhook/configmap.go | 14 +- test/images/webhook/crd.go | 8 +- test/images/webhook/customresource.go | 10 +- test/images/webhook/main.go | 14 +- test/images/webhook/pods.go | 24 +- test/integration/apiserver/BUILD | 2 +- test/integration/apiserver/apiserver_test.go | 4 +- test/integration/auth/BUILD | 2 +- test/integration/auth/rbac_test.go | 4 +- test/integration/framework/BUILD | 2 +- test/integration/framework/etcd.go | 16 +- test/integration/framework/master_utils.go | 12 +- test/integration/framework/perf_utils.go | 10 +- test/integration/ipamperf/BUILD | 4 +- test/integration/ipamperf/ipam_test.go | 14 +- test/integration/ipamperf/main_test.go | 4 +- test/integration/ipamperf/results.go | 8 +- test/integration/ipamperf/util.go | 14 +- test/integration/master/BUILD | 20 +- test/integration/master/kms_plugin_mock.go | 8 +- test/integration/metrics/BUILD | 2 +- test/integration/metrics/metrics_test.go | 4 +- test/integration/scheduler/BUILD | 2 +- test/integration/scheduler/preemption_test.go | 4 +- .../scheduler/volume_binding_test.go | 14 +- test/integration/scheduler_perf/BUILD | 2 +- .../scheduler_perf/scheduler_bench_test.go | 8 +- .../scheduler_perf/scheduler_test.go | 6 +- test/integration/util/BUILD | 2 +- test/integration/util/util.go | 12 +- test/integration/volume/BUILD | 2 +- .../volume/persistent_volumes_test.go | 114 +- test/soak/cauldron/BUILD | 2 +- test/soak/cauldron/cauldron.go | 74 +- test/soak/serve_hostnames/BUILD | 2 +- test/soak/serve_hostnames/serve_hostnames.go | 80 +- test/utils/BUILD | 2 +- test/utils/density_utils.go | 4 +- test/utils/harness/BUILD | 2 +- test/utils/harness/harness.go | 4 +- test/utils/runners.go | 12 +- test/utils/tmpdir.go | 4 +-
 1263 files changed, 10023 insertions(+), 10076 deletions(-)

diff --git a/cluster/addons/fluentd-elasticsearch/es-image/BUILD b/cluster/addons/fluentd-elasticsearch/es-image/BUILD
index 95b1699e345ff..2d97357375a2a 100644
--- a/cluster/addons/fluentd-elasticsearch/es-image/BUILD
+++ b/cluster/addons/fluentd-elasticsearch/es-image/BUILD
@@ -22,7 +22,7 @@ go_library(
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

diff --git a/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go b/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go
index 75f728662d7a7..a9158128bb625 100644
--- a/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go
+++ b/cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go
@@ -24,11 +24,11 @@ import (
 	"strings"
 	"time"

-	"github.com/golang/glog"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	clientapi "k8s.io/client-go/tools/clientcmd/api"
+	"k8s.io/klog"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 )
@@ -61,22 +61,22 @@ func flattenSubsets(subsets []api.EndpointSubset) []string {

 func main() {
 	flag.Parse()

-	glog.Info("Kubernetes Elasticsearch logging discovery")
+	klog.Info("Kubernetes Elasticsearch logging discovery")

 	cc, err := buildConfigFromEnvs(os.Getenv("APISERVER_HOST"), os.Getenv("KUBE_CONFIG_FILE"))
 	if err != nil {
-		glog.Fatalf("Failed to make client: %v", err)
+		klog.Fatalf("Failed to make client: %v", err)
 	}
 	client, err := clientset.NewForConfig(cc)
 	if err != nil {
-		glog.Fatalf("Failed to make client: %v", err)
+		klog.Fatalf("Failed to make client: %v", err)
 	}
 	namespace := metav1.NamespaceSystem
 	envNamespace := os.Getenv("NAMESPACE")
 	if envNamespace != "" {
 		if _, err := client.Core().Namespaces().Get(envNamespace, metav1.GetOptions{}); err != nil {
-			glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
+			klog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
 		}
 		namespace = envNamespace
 	}
@@ -98,7 +98,7 @@ func main() {
 	// If we did not find an elasticsearch logging service then log a warning
 	// and return without adding any unicast hosts.
 	if elasticsearch == nil {
-		glog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
+		klog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
 		return
 	}

@@ -112,17 +112,17 @@ func main() {
 			continue
 		}
 		addrs = flattenSubsets(endpoints.Subsets)
-		glog.Infof("Found %s", addrs)
+		klog.Infof("Found %s", addrs)
 		if len(addrs) > 0 && len(addrs) >= count {
 			break
 		}
 	}
 	// If there was an error finding endpoints then log a warning and quit.
 	if err != nil {
-		glog.Warningf("Error finding endpoints: %v", err)
+		klog.Warningf("Error finding endpoints: %v", err)
 		return
 	}

-	glog.Infof("Endpoints = %s", addrs)
+	klog.Infof("Endpoints = %s", addrs)
 	fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(addrs, ", "))
 }

diff --git a/cluster/images/etcd-version-monitor/BUILD b/cluster/images/etcd-version-monitor/BUILD
index c138071f02b96..377c66f1faaad 100644
--- a/cluster/images/etcd-version-monitor/BUILD
+++ b/cluster/images/etcd-version-monitor/BUILD
@@ -17,12 +17,12 @@ go_library(
     importpath = "k8s.io/kubernetes/cluster/images/etcd-version-monitor",
     deps = [
         "//vendor/github.com/gogo/protobuf/proto:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library",
         "//vendor/github.com/prometheus/client_model/go:go_default_library",
         "//vendor/github.com/prometheus/common/expfmt:go_default_library",
         "//vendor/github.com/spf13/pflag:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

diff --git a/cluster/images/etcd-version-monitor/etcd-version-monitor.go b/cluster/images/etcd-version-monitor/etcd-version-monitor.go
index 30a585038ec1b..c2b3e679e7c93 100644
--- a/cluster/images/etcd-version-monitor/etcd-version-monitor.go
+++ b/cluster/images/etcd-version-monitor/etcd-version-monitor.go
@@ -25,12 +25,12 @@ import (
 	"time"

 	"github.com/gogo/protobuf/proto"
-	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	dto "github.com/prometheus/client_model/go"
 	"github.com/prometheus/common/expfmt"
 	"github.com/spf13/pflag"
+	"k8s.io/klog"
 )

 // Initialize the prometheus instrumentation and client related flags.
@@ -245,7 +245,7 @@ func getVersionPeriodically(stopCh <-chan struct{}) {
 	lastSeenBinaryVersion := ""
 	for {
 		if err := getVersion(&lastSeenBinaryVersion); err != nil {
-			glog.Errorf("Failed to fetch etcd version: %v", err)
+			klog.Errorf("Failed to fetch etcd version: %v", err)
 		}
 		select {
 		case <-stopCh:
@@ -399,7 +399,7 @@ func main() {
 	go getVersionPeriodically(stopCh)

 	// Serve our metrics on listenAddress/metricsPath.
-	glog.Infof("Listening on: %v", listenAddress)
+	klog.Infof("Listening on: %v", listenAddress)
 	http.Handle(metricsPath, promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
-	glog.Errorf("Stopped listening/serving metrics: %v", http.ListenAndServe(listenAddress, nil))
+	klog.Errorf("Stopped listening/serving metrics: %v", http.ListenAndServe(listenAddress, nil))
 }

diff --git a/cluster/images/etcd/migrate/BUILD b/cluster/images/etcd/migrate/BUILD
index 7212e4e9624e8..b79c4ab191b15 100644
--- a/cluster/images/etcd/migrate/BUILD
+++ b/cluster/images/etcd/migrate/BUILD
@@ -42,8 +42,8 @@ go_library(
         "//vendor/github.com/coreos/etcd/wal:go_default_library",
         "//vendor/github.com/coreos/etcd/wal/walpb:go_default_library",
         "//vendor/github.com/coreos/go-semver/semver:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/spf13/cobra:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

diff --git a/cluster/images/etcd/migrate/data_dir.go b/cluster/images/etcd/migrate/data_dir.go
index 75a605bf8e674..3052afa735d84 100644
--- a/cluster/images/etcd/migrate/data_dir.go
+++ b/cluster/images/etcd/migrate/data_dir.go
@@ -25,7 +25,7 @@ import (
 	"path/filepath"
 	"strings"

-	"github.com/golang/glog"
+	"k8s.io/klog"
 )

 // DataDirectory provides utilities for initializing and backing up an
@@ -45,7 +45,7 @@ func OpenOrCreateDataDirectory(path string) (*DataDirectory, error) {
 		return nil, err
 	}
 	if !exists {
-		glog.Infof("data directory '%s' does not exist, creating it", path)
+		klog.Infof("data directory '%s' does not exist, creating it", path)
 		err := os.MkdirAll(path, 0777)
 		if err != nil {
 			return nil, fmt.Errorf("failed to create data directory %s: %v", path, err)
@@ -67,7 +67,7 @@ func (d *DataDirectory) Initialize(target *EtcdVersionPair) error {
 		return err
 	}
 	if isEmpty {
-		glog.Infof("data directory '%s' is empty, writing target version '%s' to version.txt", d.path, target)
+		klog.Infof("data directory '%s' is empty, writing target version '%s' to version.txt", d.path, target)
 		err = d.versionFile.Write(target)
 		if err != nil {
 			return fmt.Errorf("failed to write version.txt to '%s': %v", d.path, err)
diff --git a/cluster/images/etcd/migrate/migrate.go b/cluster/images/etcd/migrate/migrate.go
index 7b877db0b9112..cb44382a6ddcd 100644
--- a/cluster/images/etcd/migrate/migrate.go
+++ b/cluster/images/etcd/migrate/migrate.go
@@ -21,8 +21,8 @@ import (
 	"os"
 	"path/filepath"

-	"github.com/golang/glog"
 	"github.com/spf13/cobra"
+	"k8s.io/klog"
 )

 const (
@@ -85,7 +85,7 @@ func runMigrate() {
 	if opts.name == "" {
 		hostname, err := os.Hostname()
 		if err != nil {
-			glog.Errorf("Error while getting hostname to supply default --name: %v", err)
+			klog.Errorf("Error while getting hostname to supply default --name: %v", err)
 			os.Exit(1)
 		}
 		opts.name = fmt.Sprintf("etcd-%s", hostname)
@@ -98,29 +98,29 @@ func runMigrate() {
 		opts.initialCluster = fmt.Sprintf("%s=http://localhost:2380", opts.name)
 	}
 	if opts.targetStorage == "" {
-		glog.Errorf("--target-storage is required")
+		klog.Errorf("--target-storage is required")
 		os.Exit(1)
 	}
 	if opts.targetVersion == "" {
-		glog.Errorf("--target-version is required")
+		klog.Errorf("--target-version is required")
 		os.Exit(1)
 	}
 	if opts.dataDir == "" {
-		glog.Errorf("--data-dir is required")
+		klog.Errorf("--data-dir is required")
 		os.Exit(1)
 	}
 	if opts.bundledVersionString == "" {
-		glog.Errorf("--bundled-versions is required")
+		klog.Errorf("--bundled-versions is required")
 		os.Exit(1)
 	}
 	bundledVersions, err :=
ParseSupportedVersions(opts.bundledVersionString) if err != nil { - glog.Errorf("Failed to parse --supported-versions: %v", err) + klog.Errorf("Failed to parse --bundled-versions: %v", err) } err = validateBundledVersions(bundledVersions, opts.binDir) if err != nil { - glog.Errorf("Failed to validate that 'etcd-' and 'etcdctl-' binaries exist in --bin-dir '%s' for all --bundled-verions '%s': %v", + klog.Errorf("Failed to validate that 'etcd-' and 'etcdctl-' binaries exist in --bin-dir '%s' for all --bundled-versions '%s': %v", opts.binDir, opts.bundledVersionString, err) os.Exit(1) } @@ -139,7 +139,7 @@ func migrate(name string, port uint64, peerListenUrls string, peerAdvertiseUrls dataDir, err := OpenOrCreateDataDirectory(dataDirPath) if err != nil { - glog.Errorf("Error opening or creating data directory %s: %v", dataDirPath, err) + klog.Errorf("Error opening or creating data directory %s: %v", dataDirPath, err) os.Exit(1) } @@ -158,7 +158,7 @@ func migrate(name string, port uint64, peerListenUrls string, peerAdvertiseUrls } client, err := NewEtcdMigrateClient(cfg) if err != nil { - glog.Errorf("Migration failed: %v", err) + klog.Errorf("Migration failed: %v", err) os.Exit(1) } defer client.Close() @@ -167,7 +167,7 @@ func migrate(name string, port uint64, peerListenUrls string, peerAdvertiseUrls err = migrator.MigrateIfNeeded(target) if err != nil { - glog.Errorf("Migration failed: %v", err) + klog.Errorf("Migration failed: %v", err) os.Exit(1) } } diff --git a/cluster/images/etcd/migrate/migrate_client.go b/cluster/images/etcd/migrate/migrate_client.go index 5bb183cdaf91d..b9c9cfb62bb36 100644 --- a/cluster/images/etcd/migrate/migrate_client.go +++ b/cluster/images/etcd/migrate/migrate_client.go @@ -29,7 +29,7 @@ import ( clientv2 "github.com/coreos/etcd/client" "github.com/coreos/etcd/clientv3" - "github.com/golang/glog" + "k8s.io/klog" ) // CombinedEtcdClient provides an implementation of EtcdMigrateClient using a combination of the etcd v2 client, v3 client @@ -202,13 +202,13 @@ func (e *CombinedEtcdClient) AttachLease(leaseDuration time.Duration) error { if err != nil { return fmt.Errorf("Error while creating lease: %v", err) } - glog.Infof("Lease with TTL: %v created", lease.TTL) + klog.Infof("Lease with TTL: %v created", lease.TTL) - glog.Infof("Attaching lease to %d entries", len(objectsResp.Kvs)) + klog.Infof("Attaching lease to %d entries", len(objectsResp.Kvs)) for _, kv := range objectsResp.Kvs { putResp, err := v3client.KV.Put(ctx, string(kv.Key), string(kv.Value), clientv3.WithLease(lease.ID), clientv3.WithPrevKV()) if err != nil { - glog.Errorf("Error while attaching lease to: %s", string(kv.Key)) + klog.Errorf("Error while attaching lease to: %s", string(kv.Key)) } if bytes.Compare(putResp.PrevKv.Value, kv.Value) != 0 { return fmt.Errorf("concurrent access to key detected when setting lease on %s, expected previous value of %s but got %s", diff --git a/cluster/images/etcd/migrate/migrate_server.go b/cluster/images/etcd/migrate/migrate_server.go index a1dd1a732f561..ea630ff8b4a0d 100644 --- a/cluster/images/etcd/migrate/migrate_server.go +++ b/cluster/images/etcd/migrate/migrate_server.go @@ -23,7 +23,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" ) // EtcdMigrateServer manages starting and stopping a versioned etcd server binary.
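The hunks above and below repeat the mechanical pattern applied across the whole patch: the BUILD dependency and the Go import move from github.com/golang/glog to k8s.io/klog, and each logging call is renamed one-to-one, since klog keeps glog's call surface. A minimal sketch of that pattern in isolation (the package name, function name, and messages here are illustrative, not taken from this patch):

package example

import (
	"k8s.io/klog" // previously: "github.com/golang/glog"
)

func reportStartup(port int, err error) {
	// Each glog call maps to the identically named klog call:
	//   glog.Infof(...)      -> klog.Infof(...)
	//   glog.V(2).Infof(...) -> klog.V(2).Infof(...)
	//   glog.Fatalf(...)     -> klog.Fatalf(...)
	if err != nil {
		klog.Fatalf("failed to start on port %d: %v", port, err)
	}
	klog.Infof("listening on port %d", port)
}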
@@ -75,10 +75,10 @@ func (r *EtcdMigrateServer) Start(version *EtcdVersion) error { case <-interval.C: err := r.client.SetEtcdVersionKeyValue(version) if err != nil { - glog.Infof("Still waiting for etcd to start, current error: %v", err) + klog.Infof("Still waiting for etcd to start, current error: %v", err) // keep waiting } else { - glog.Infof("Etcd on port %d is up.", r.cfg.port) + klog.Infof("Etcd on port %d is up.", r.cfg.port) r.cmd = etcdCmd return nil } @@ -114,7 +114,7 @@ func (r *EtcdMigrateServer) Stop() error { case <-stopped: return case <-timedout: - glog.Infof("etcd server has not terminated gracefully after %s, killing it.", gracefulWait) + klog.Infof("etcd server has not terminated gracefully after %s, killing it.", gracefulWait) r.cmd.Process.Kill() return } @@ -122,11 +122,11 @@ func (r *EtcdMigrateServer) Stop() error { err = r.cmd.Wait() stopped <- true if exiterr, ok := err.(*exec.ExitError); ok { - glog.Infof("etcd server stopped (signal: %s)", exiterr.Error()) + klog.Infof("etcd server stopped (signal: %s)", exiterr.Error()) // stopped } else if err != nil { return fmt.Errorf("error waiting for etcd to stop: %v", err) } - glog.Infof("Stopped etcd server %s", r.cfg.name) + klog.Infof("Stopped etcd server %s", r.cfg.name) return nil } diff --git a/cluster/images/etcd/migrate/migrator.go b/cluster/images/etcd/migrate/migrator.go index e1e64f18238fc..6235096612d8c 100644 --- a/cluster/images/etcd/migrate/migrator.go +++ b/cluster/images/etcd/migrate/migrator.go @@ -23,7 +23,7 @@ import ( "time" "github.com/blang/semver" - "github.com/golang/glog" + "k8s.io/klog" ) // EtcdMigrateCfg provides all configuration required to perform etcd data upgrade/downgrade migrations. @@ -63,7 +63,7 @@ type Migrator struct { // MigrateIfNeeded upgrades or downgrades the etcd data directory to the given target version. 
func (m *Migrator) MigrateIfNeeded(target *EtcdVersionPair) error { - glog.Infof("Starting migration to %s", target) + klog.Infof("Starting migration to %s", target) err := m.dataDirectory.Initialize(target) if err != nil { return fmt.Errorf("failed to initialize data directory %s: %v", m.dataDirectory.path, err) @@ -84,28 +84,28 @@ func (m *Migrator) MigrateIfNeeded(target *EtcdVersionPair) error { } for { - glog.Infof("Converging current version '%s' to target version '%s'", current, target) + klog.Infof("Converging current version '%s' to target version '%s'", current, target) currentNextMinorVersion := &EtcdVersion{Version: semver.Version{Major: current.version.Major, Minor: current.version.Minor + 1}} switch { case current.version.MajorMinorEquals(target.version) || currentNextMinorVersion.MajorMinorEquals(target.version): - glog.Infof("current version '%s' equals or is one minor version previous of target version '%s' - migration complete", current, target) + klog.Infof("current version '%s' equals or is one minor version previous of target version '%s' - migration complete", current, target) err = m.dataDirectory.versionFile.Write(target) if err != nil { return fmt.Errorf("failed to write version.txt to '%s': %v", m.dataDirectory.path, err) } return nil case current.storageVersion == storageEtcd2 && target.storageVersion == storageEtcd3: - glog.Infof("upgrading from etcd2 storage to etcd3 storage") + klog.Infof("upgrading from etcd2 storage to etcd3 storage") current, err = m.etcd2ToEtcd3Upgrade(current, target) case current.version.Major == 3 && target.version.Major == 2: - glog.Infof("downgrading from etcd 3.x to 2.x") + klog.Infof("downgrading from etcd 3.x to 2.x") current, err = m.rollbackToEtcd2(current, target) case current.version.Major == target.version.Major && current.version.Minor < target.version.Minor: stepVersion := m.cfg.supportedVersions.NextVersionPair(current) - glog.Infof("upgrading etcd from %s to %s", current, stepVersion) + klog.Infof("upgrading etcd from %s to %s", current, stepVersion) current, err = m.minorVersionUpgrade(current, stepVersion) case current.version.Major == 3 && target.version.Major == 3 && current.version.Minor > target.version.Minor: - glog.Infof("rolling etcd back from %s to %s", current, target) + klog.Infof("rolling etcd back from %s to %s", current, target) current, err = m.rollbackEtcd3MinorVersion(current, target) } if err != nil { @@ -116,13 +116,13 @@ func (m *Migrator) MigrateIfNeeded(target *EtcdVersionPair) error { func (m *Migrator) backupEtcd2(current *EtcdVersion) error { backupDir := fmt.Sprintf("%s/%s", m.dataDirectory, "migration-backup") - glog.Infof("Backup etcd before starting migration") + klog.Infof("Backup etcd before starting migration") err := os.Mkdir(backupDir, 0666) if err != nil { return fmt.Errorf("failed to create backup directory before starting migration: %v", err) } m.client.Backup(current, backupDir) - glog.Infof("Backup done in %s", backupDir) + klog.Infof("Backup done in %s", backupDir) return nil } @@ -131,7 +131,7 @@ func (m *Migrator) rollbackEtcd3MinorVersion(current *EtcdVersionPair, target *E return nil, fmt.Errorf("rollback from %s to %s not supported, only rollbacks to the previous minor version are supported", current.version, target.version) } - glog.Infof("Performing etcd %s -> %s rollback", current.version, target.version) + klog.Infof("Performing etcd %s -> %s rollback", current.version, target.version) err := m.dataDirectory.Backup() if err != nil { return nil, err @@ -145,14 +145,14 @@ 
func (m *Migrator) rollbackEtcd3MinorVersion(current *EtcdVersionPair, target *E // Start current version of etcd. runner := m.newServer() - glog.Infof("Starting etcd version %s to capture rollback snapshot.", current.version) + klog.Infof("Starting etcd version %s to capture rollback snapshot.", current.version) err = runner.Start(current.version) if err != nil { - glog.Fatalf("Unable to automatically downgrade etcd: starting etcd version %s to capture rollback snapshot failed: %v", current.version, err) + klog.Fatalf("Unable to automatically downgrade etcd: starting etcd version %s to capture rollback snapshot failed: %v", current.version, err) return nil, err } - glog.Infof("Snapshotting etcd %s to %s", current.version, snapshotFilename) + klog.Infof("Snapshotting etcd %s to %s", current.version, snapshotFilename) err = m.client.Snapshot(current.version, snapshotFilename) if err != nil { return nil, err @@ -163,7 +163,7 @@ func (m *Migrator) rollbackEtcd3MinorVersion(current *EtcdVersionPair, target *E return nil, err } - glog.Infof("Backing up data before rolling back") + klog.Infof("Backing up data before rolling back") backupDir := fmt.Sprintf("%s.bak", m.dataDirectory) err = os.RemoveAll(backupDir) if err != nil { @@ -178,7 +178,7 @@ func (m *Migrator) rollbackEtcd3MinorVersion(current *EtcdVersionPair, target *E return nil, err } - glog.Infof("Restoring etcd %s from %s", target.version, snapshotFilename) + klog.Infof("Restoring etcd %s from %s", target.version, snapshotFilename) err = m.client.Restore(target.version, snapshotFilename) if err != nil { return nil, err @@ -195,7 +195,7 @@ func (m *Migrator) rollbackToEtcd2(current *EtcdVersionPair, target *EtcdVersion if !(current.version.Major == 3 && current.version.Minor == 0 && target.version.Major == 2 && target.version.Minor == 2) { return nil, fmt.Errorf("etcd3 -> etcd2 downgrade is supported only between 3.0.x and 2.2.x, got current %s target %s", current, target) } - glog.Infof("Backup and remove all existing v2 data") + klog.Infof("Backup and remove all existing v2 data") err := m.dataDirectory.Backup() if err != nil { return nil, err @@ -214,12 +214,12 @@ func (m *Migrator) etcd2ToEtcd3Upgrade(current *EtcdVersionPair, target *EtcdVer } runner := m.newServer() - glog.Infof("Performing etcd2 -> etcd3 migration") + klog.Infof("Performing etcd2 -> etcd3 migration") err := m.client.Migrate(target.version) if err != nil { return nil, err } - glog.Infof("Attaching leases to TTL entries") + klog.Infof("Attaching leases to TTL entries") // Now attach lease to all keys. // To do it, we temporarily start etcd on a random port (so that diff --git a/cluster/images/etcd/migrate/rollback_v2.go b/cluster/images/etcd/migrate/rollback_v2.go index 0d3b3ce2b01e2..1b4655770fc1f 100644 --- a/cluster/images/etcd/migrate/rollback_v2.go +++ b/cluster/images/etcd/migrate/rollback_v2.go @@ -42,7 +42,7 @@ import ( "github.com/coreos/etcd/wal" "github.com/coreos/etcd/wal/walpb" "github.com/coreos/go-semver/semver" - "github.com/golang/glog" + "k8s.io/klog" ) const rollbackVersion = "2.2.0" @@ -50,7 +50,7 @@ const rollbackVersion = "2.2.0" // RollbackV3ToV2 rolls back an etcd 3.0.x data directory to the 2.x.x version specified by rollbackVersion. func RollbackV3ToV2(migrateDatadir string, ttl time.Duration) error { dbpath := path.Join(migrateDatadir, "member", "snap", "db") - glog.Infof("Rolling db file %s back to etcd 2.x", dbpath) + klog.Infof("Rolling db file %s back to etcd 2.x", dbpath) // etcd3 store backend. 
We will use it to parse v3 data files and extract information. be := backend.NewDefaultBackend(dbpath) @@ -139,7 +139,7 @@ func RollbackV3ToV2(migrateDatadir string, ttl time.Duration) error { v = rollbackVersion } if _, err := st.Set(n.Key, n.Dir, v, store.TTLOptionSet{}); err != nil { - glog.Error(err) + klog.Error(err) } // update nodes @@ -147,7 +147,7 @@ func RollbackV3ToV2(migrateDatadir string, ttl time.Duration) error { if len(fields) == 4 && fields[2] == "members" { nodeID, err := strconv.ParseUint(fields[3], 16, 64) if err != nil { - glog.Fatalf("failed to parse member ID (%s): %v", fields[3], err) + klog.Fatalf("failed to parse member ID (%s): %v", fields[3], err) } nodes = append(nodes, nodeID) } @@ -172,7 +172,7 @@ func RollbackV3ToV2(migrateDatadir string, ttl time.Duration) error { if err := snapshotter.SaveSnap(raftSnap); err != nil { return err } - glog.Infof("Finished successfully") + klog.Infof("Finished successfully") return nil } @@ -214,7 +214,7 @@ func traverseAndDeleteEmptyDir(st store.Store, dir string) error { } for _, node := range e.Node.Nodes { if !node.Dir { - glog.V(2).Infof("key: %s", node.Key[len(etcdserver.StoreKeysPrefix):]) + klog.V(2).Infof("key: %s", node.Key[len(etcdserver.StoreKeysPrefix):]) } else { err := traverseAndDeleteEmptyDir(st, node.Key) if err != nil { @@ -344,6 +344,6 @@ func applyRequest(r *pb.Request, applyV2 etcdserver.ApplierV2) { case "POST", "QGET", "SYNC": return default: - glog.Fatal("unknown command") + klog.Fatal("unknown command") } } diff --git a/cmd/cloud-controller-manager/app/BUILD b/cmd/cloud-controller-manager/app/BUILD index e0fe5bd07037d..4e15aeaf93dc1 100644 --- a/cmd/cloud-controller-manager/app/BUILD +++ b/cmd/cloud-controller-manager/app/BUILD @@ -24,8 +24,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/leaderelection:go_default_library", "//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index cccc5df80935d..4dc73c66162b6 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -24,8 +24,8 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" @@ -58,7 +58,7 @@ const ( func NewCloudControllerManagerCommand() *cobra.Command { s, err := options.NewCloudControllerManagerOptions() if err != nil { - glog.Fatalf("unable to initialize command options: %v", err) + klog.Fatalf("unable to initialize command options: %v", err) } cmd := &cobra.Command{ @@ -106,21 +106,21 @@ the cloud specific control loops shipped with Kubernetes.`, // Run runs the ExternalCMServer. This should never exit. 
func Run(c *cloudcontrollerconfig.CompletedConfig, stopCh <-chan struct{}) error { // To help debugging, immediately log version - glog.Infof("Version: %+v", version.Get()) + klog.Infof("Version: %+v", version.Get()) cloud, err := cloudprovider.InitCloudProvider(c.ComponentConfig.KubeCloudShared.CloudProvider.Name, c.ComponentConfig.KubeCloudShared.CloudProvider.CloudConfigFile) if err != nil { - glog.Fatalf("Cloud provider could not be initialized: %v", err) + klog.Fatalf("Cloud provider could not be initialized: %v", err) } if cloud == nil { - glog.Fatalf("cloud provider is nil") + klog.Fatalf("cloud provider is nil") } if cloud.HasClusterID() == false { if c.ComponentConfig.KubeCloudShared.AllowUntaggedCloud == true { - glog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues") + klog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues") } else { - glog.Fatalf("no ClusterID found. A ClusterID is required for the cloud provider to function properly. This check can be bypassed by setting the allow-untagged-cloud option") + klog.Fatalf("no ClusterID found. A ClusterID is required for the cloud provider to function properly. This check can be bypassed by setting the allow-untagged-cloud option") } } @@ -128,7 +128,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig, stopCh <-chan struct{}) error if cz, err := configz.New(ConfigzName); err == nil { cz.Set(c.ComponentConfig) } else { - glog.Errorf("unable to register configz: %c", err) + klog.Errorf("unable to register configz: %v", err) } // Start the controller manager HTTP server @@ -150,7 +150,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig, stopCh <-chan struct{}) error run := func(ctx context.Context) { if err := startControllers(c, ctx.Done(), cloud); err != nil { - glog.Fatalf("error running controllers: %v", err) + klog.Fatalf("error running controllers: %v", err) } } @@ -177,7 +177,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig, stopCh <-chan struct{}) error EventRecorder: c.EventRecorder, }) if err != nil { - glog.Fatalf("error creating lock: %v", err) + klog.Fatalf("error creating lock: %v", err) } // Try and become the leader and start cloud controller manager loops @@ -189,7 +189,7 @@ func Run(c *cloudcontrollerconfig.CompletedConfig, stopCh <-chan struct{}) error Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: run, OnStoppedLeading: func() { - glog.Fatalf("leaderelection lost") + klog.Fatalf("leaderelection lost") }, }, }) @@ -230,7 +230,7 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, stop <-chan stru c.ComponentConfig.KubeCloudShared.ClusterName, ) if err != nil { - glog.Errorf("Failed to start service controller: %v", err) + klog.Errorf("Failed to start service controller: %v", err) } else { go serviceController.Run(stop, int(c.ComponentConfig.ServiceController.ConcurrentServiceSyncs)) time.Sleep(wait.Jitter(c.ComponentConfig.Generic.ControllerStartInterval.Duration, ControllerStartJitter)) @@ -239,13 +239,13 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, stop <-chan stru // If CIDRs should be allocated for pods and set on the CloudProvider, then start the route controller if c.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs && c.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes { if routes, ok := cloud.Routes(); !ok { - glog.Warning("configure-cloud-routes is set, but
cloud provider does not support routes. Will not configure cloud provider routes.") + klog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.") } else { var clusterCIDR *net.IPNet if len(strings.TrimSpace(c.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 { _, clusterCIDR, err = net.ParseCIDR(c.ComponentConfig.KubeCloudShared.ClusterCIDR) if err != nil { - glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.ComponentConfig.KubeCloudShared.ClusterCIDR, err) + klog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.ComponentConfig.KubeCloudShared.ClusterCIDR, err) } } @@ -254,14 +254,14 @@ func startControllers(c *cloudcontrollerconfig.CompletedConfig, stop <-chan stru time.Sleep(wait.Jitter(c.ComponentConfig.Generic.ControllerStartInterval.Duration, ControllerStartJitter)) } } else { - glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, c.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes) + klog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, c.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes) } // If apiserver is not running we should wait for some time and fail only then. This is particularly // important when we start apiserver and controller manager at the same time. err = genericcontrollermanager.WaitForAPIServer(c.VersionedClient, 10*time.Second) if err != nil { - glog.Fatalf("Failed to wait for apiserver being healthy: %v", err) + klog.Fatalf("Failed to wait for apiserver being healthy: %v", err) } c.SharedInformers.Start(stop) diff --git a/cmd/cloud-controller-manager/app/options/BUILD b/cmd/cloud-controller-manager/app/options/BUILD index 07f82ee0096a0..c2707eb973c45 100644 --- a/cmd/cloud-controller-manager/app/options/BUILD +++ b/cmd/cloud-controller-manager/app/options/BUILD @@ -32,7 +32,7 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/cloud-controller-manager/app/options/options.go b/cmd/cloud-controller-manager/app/options/options.go index e15a659f012fe..27c60157a2966 100644 --- a/cmd/cloud-controller-manager/app/options/options.go +++ b/cmd/cloud-controller-manager/app/options/options.go @@ -22,7 +22,7 @@ import ( "net" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -265,7 +265,7 @@ func (o *CloudControllerManagerOptions) Config() (*cloudcontrollerconfig.Config, func createRecorder(kubeClient clientset.Interface, userAgent string) record.EventRecorder { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) // TODO: remove dependence on the legacyscheme return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: userAgent}) diff --git a/cmd/controller-manager/app/BUILD b/cmd/controller-manager/app/BUILD index 1ba2af4425515..4f75ca22f4419 100644 --- a/cmd/controller-manager/app/BUILD +++ 
b/cmd/controller-manager/app/BUILD @@ -21,8 +21,8 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/server/mux:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/routes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/controller-manager/app/helper.go b/cmd/controller-manager/app/helper.go index 38ec45417d496..8bf1edee52a83 100644 --- a/cmd/controller-manager/app/helper.go +++ b/cmd/controller-manager/app/helper.go @@ -21,9 +21,9 @@ import ( "net/http" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" ) // WaitForAPIServer waits for the API Server's /healthz endpoint to report "ok" with timeout. @@ -40,7 +40,7 @@ func WaitForAPIServer(client clientset.Interface, timeout time.Duration) error { if healthStatus != http.StatusOK { content, _ := result.Raw() lastErr = fmt.Errorf("APIServer isn't healthy: %v", string(content)) - glog.Warningf("APIServer isn't healthy yet: %v. Waiting a little while.", string(content)) + klog.Warningf("APIServer isn't healthy yet: %v. Waiting a little while.", string(content)) return false, nil } diff --git a/cmd/genswaggertypedocs/BUILD b/cmd/genswaggertypedocs/BUILD index be4c59a897640..5fabe41559d88 100644 --- a/cmd/genswaggertypedocs/BUILD +++ b/cmd/genswaggertypedocs/BUILD @@ -17,8 +17,8 @@ go_library( importpath = "k8s.io/kubernetes/cmd/genswaggertypedocs", deps = [ "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/genswaggertypedocs/swagger_type_docs.go b/cmd/genswaggertypedocs/swagger_type_docs.go index 8916d42ae81b4..03621f44ed26d 100644 --- a/cmd/genswaggertypedocs/swagger_type_docs.go +++ b/cmd/genswaggertypedocs/swagger_type_docs.go @@ -23,8 +23,8 @@ import ( kruntime "k8s.io/apimachinery/pkg/runtime" - "github.com/golang/glog" flag "github.com/spf13/pflag" + "k8s.io/klog" ) var ( @@ -37,7 +37,7 @@ func main() { flag.Parse() if *typeSrc == "" { - glog.Fatalf("Please define -s flag as it is the source file") + klog.Fatalf("Please define -s flag as it is the source file") } var funcOut io.Writer @@ -46,7 +46,7 @@ func main() { } else { file, err := os.Create(*functionDest) if err != nil { - glog.Fatalf("Couldn't open %v: %v", *functionDest, err) + klog.Fatalf("Couldn't open %v: %v", *functionDest, err) } defer file.Close() funcOut = file diff --git a/cmd/kube-apiserver/app/BUILD b/cmd/kube-apiserver/app/BUILD index eb338464cbbca..a107cf0dc7040 100644 --- a/cmd/kube-apiserver/app/BUILD +++ b/cmd/kube-apiserver/app/BUILD @@ -73,8 +73,8 @@ go_library( "//staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) diff --git a/cmd/kube-apiserver/app/aggregator.go b/cmd/kube-apiserver/app/aggregator.go index 
4ff4726a4a3bb..4c7d217ee4caf 100644 --- a/cmd/kube-apiserver/app/aggregator.go +++ b/cmd/kube-apiserver/app/aggregator.go @@ -26,7 +26,7 @@ import ( "strings" "sync" - "github.com/golang/glog" + "k8s.io/klog" apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -167,7 +167,7 @@ func makeAPIService(gv schema.GroupVersion) *apiregistration.APIService { if !ok { // if we aren't found, then we shouldn't register ourselves because it could result in a CRD group version // being permanently stuck in the APIServices list. - glog.Infof("Skipping APIService creation for %v", gv) + klog.Infof("Skipping APIService creation for %v", gv) return nil } return &apiregistration.APIService{ diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 0da41a9b1b78c..a5aa5bf9cbb15 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -32,8 +32,8 @@ import ( "time" "github.com/go-openapi/spec" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" extensionsapiserver "k8s.io/apiextensions-apiserver/pkg/apiserver" "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -142,7 +142,7 @@ cluster's shared state through which all other components interact.`, // Run runs the specified APIServer. This should never exit. func Run(completeOptions completedServerRunOptions, stopCh <-chan struct{}) error { // To help debugging, immediately log version - glog.Infof("Version: %+v", version.Get()) + klog.Infof("Version: %+v", version.Get()) server, err := CreateServerChain(completeOptions, stopCh) if err != nil { @@ -585,7 +585,7 @@ func Complete(s *options.ServerRunOptions) (completedServerRunOptions, error) { return options, fmt.Errorf("error finding host name: %v", err) } } - glog.Infof("external host was not specified, using %v", s.GenericServerRunOptions.ExternalHost) + klog.Infof("external host was not specified, using %v", s.GenericServerRunOptions.ExternalHost) } s.Authentication.ApplyAuthorization(s.Authorization) @@ -601,13 +601,13 @@ func Complete(s *options.ServerRunOptions) (completedServerRunOptions, error) { if kubeauthenticator.IsValidServiceAccountKeyFile(s.SecureServing.ServerCert.CertKey.KeyFile) { s.Authentication.ServiceAccounts.KeyFiles = []string{s.SecureServing.ServerCert.CertKey.KeyFile} } else { - glog.Warning("No TLS key provided, service account token authentication disabled") + klog.Warning("No TLS key provided, service account token authentication disabled") } } } if s.Etcd.EnableWatchCache { - glog.V(2).Infof("Initializing cache sizes based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB) + klog.V(2).Infof("Initializing cache sizes based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB) sizes := cachesize.NewHeuristicWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB) if userSpecified, err := serveroptions.ParseWatchCacheSizes(s.Etcd.WatchCacheSizes); err == nil { for resource, size := range userSpecified { diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index f5807e2dcb75a..488f63859a20f 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -132,8 +132,8 @@ go_library( "//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1:go_default_library", "//staging/src/k8s.io/metrics/pkg/client/custom_metrics:go_default_library", "//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library", - 
"//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cmd/kube-controller-manager/app/certificates.go b/cmd/kube-controller-manager/app/certificates.go index bb9ca7afe2ce9..f254362d54cb3 100644 --- a/cmd/kube-controller-manager/app/certificates.go +++ b/cmd/kube-controller-manager/app/certificates.go @@ -24,7 +24,7 @@ import ( "fmt" "os" - "github.com/golang/glog" + "k8s.io/klog" "net/http" @@ -69,7 +69,7 @@ func startCSRSigningController(ctx ControllerContext) (http.Handler, bool, error switch { case (keyFileExists && keyUsesDefault) || (certFileExists && certUsesDefault): - glog.Warningf("You might be using flag defaulting for --cluster-signing-cert-file and" + + klog.Warningf("You might be using flag defaulting for --cluster-signing-cert-file and" + " --cluster-signing-key-file. These defaults are deprecated and will be removed" + " in a subsequent release. Please pass these options explicitly.") case (!keyFileExists && keyUsesDefault) && (!certFileExists && certUsesDefault): diff --git a/cmd/kube-controller-manager/app/cloudproviders.go b/cmd/kube-controller-manager/app/cloudproviders.go index 687d71cb8b46c..762f5828b4dae 100644 --- a/cmd/kube-controller-manager/app/cloudproviders.go +++ b/cmd/kube-controller-manager/app/cloudproviders.go @@ -19,7 +19,7 @@ package app import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/client-go/informers" cloudprovider "k8s.io/cloud-provider" @@ -50,7 +50,7 @@ func createCloudProvider(cloudProvider string, externalCloudVolumePlugin string, if cloud != nil && cloud.HasClusterID() == false { if allowUntaggedCloud == true { - glog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues") + klog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues") } else { return nil, loopMode, fmt.Errorf("no ClusterID Found. A ClusterID is required for the cloud provider to function properly. This check can be bypassed by setting the allow-untagged-cloud option") } diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 980c25cd0ad03..0710e4c3b1f3b 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -29,8 +29,8 @@ import ( "os" "time" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -79,7 +79,7 @@ const ( func NewControllerManagerCommand() *cobra.Command { s, err := options.NewKubeControllerManagerOptions() if err != nil { - glog.Fatalf("unable to initialize command options: %v", err) + klog.Fatalf("unable to initialize command options: %v", err) } cmd := &cobra.Command{ @@ -142,12 +142,12 @@ func ResyncPeriod(c *config.CompletedConfig) func() time.Duration { // Run runs the KubeControllerManagerOptions. This should never exit. 
func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error { // To help debugging, immediately log version - glog.Infof("Version: %+v", version.Get()) + klog.Infof("Version: %+v", version.Get()) if cfgz, err := configz.New(ConfigzName); err == nil { cfgz.Set(c.ComponentConfig) } else { - glog.Errorf("unable to register configz: %c", err) + klog.Errorf("unable to register configz: %v", err) } // Start the controller manager HTTP server @@ -178,7 +178,7 @@ func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error { if len(c.ComponentConfig.SAController.ServiceAccountKeyFile) == 0 { // It'c possible another controller process is creating the tokens for us. // If one isn't, we'll timeout and exit when our client builder is unable to create the tokens. - glog.Warningf("--use-service-account-credentials was specified without providing a --service-account-private-key-file") + klog.Warningf("--use-service-account-credentials was specified without providing a --service-account-private-key-file") clientBuilder = controller.SAControllerClientBuilder{ ClientConfig: restclient.AnonymousClientConfig(c.Kubeconfig), @@ -191,12 +191,12 @@ func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error { } controllerContext, err := CreateControllerContext(c, rootClientBuilder, clientBuilder, ctx.Done()) if err != nil { - glog.Fatalf("error building controller context: %v", err) + klog.Fatalf("error building controller context: %v", err) } saTokenControllerInitFunc := serviceAccountTokenControllerStarter{rootClientBuilder: rootClientBuilder}.startServiceAccountTokenController if err := StartControllers(controllerContext, saTokenControllerInitFunc, NewControllerInitializers(controllerContext.LoopMode), unsecuredMux); err != nil { - glog.Fatalf("error starting controllers: %v", err) + klog.Fatalf("error starting controllers: %v", err) } controllerContext.InformerFactory.Start(controllerContext.Stop) @@ -226,7 +226,7 @@ func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error { EventRecorder: c.EventRecorder, }) if err != nil { - glog.Fatalf("error creating lock: %v", err) + klog.Fatalf("error creating lock: %v", err) } leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{ @@ -237,7 +237,7 @@ func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error { Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: run, OnStoppedLeading: func() { - glog.Fatalf("leaderelection lost") + klog.Fatalf("leaderelection lost") }, }, }) @@ -476,20 +476,20 @@ func StartControllers(ctx ControllerContext, startSATokenController InitFunc, co for controllerName, initFn := range controllers { if !ctx.IsControllerEnabled(controllerName) { - glog.Warningf("%q is disabled", controllerName) + klog.Warningf("%q is disabled", controllerName) continue } time.Sleep(wait.Jitter(ctx.ComponentConfig.Generic.ControllerStartInterval.Duration, ControllerStartJitter)) - glog.V(1).Infof("Starting %q", controllerName) + klog.V(1).Infof("Starting %q", controllerName) debugHandler, started, err := initFn(ctx) if err != nil { - glog.Errorf("Error starting %q", controllerName) + klog.Errorf("Error starting %q", controllerName) return err } if !started { - glog.Warningf("Skipping %q", controllerName) + klog.Warningf("Skipping %q", controllerName) continue } if debugHandler != nil && unsecuredMux != nil { @@ -497,7 +497,7 @@ func StartControllers(ctx ControllerContext, startSATokenController InitFunc, co unsecuredMux.UnlistedHandle(basePath, http.StripPrefix(basePath, debugHandler))
unsecuredMux.UnlistedHandlePrefix(basePath+"/", http.StripPrefix(basePath, debugHandler)) } - glog.Infof("Started %q", controllerName) + klog.Infof("Started %q", controllerName) } return nil @@ -512,12 +512,12 @@ type serviceAccountTokenControllerStarter struct { func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController(ctx ControllerContext) (http.Handler, bool, error) { if !ctx.IsControllerEnabled(saTokenControllerName) { - glog.Warningf("%q is disabled", saTokenControllerName) + klog.Warningf("%q is disabled", saTokenControllerName) return nil, false, nil } if len(ctx.ComponentConfig.SAController.ServiceAccountKeyFile) == 0 { - glog.Warningf("%q is disabled because there is no private key", saTokenControllerName) + klog.Warningf("%q is disabled because there is no private key", saTokenControllerName) return nil, false, nil } privateKey, err := certutil.PrivateKeyFromFile(ctx.ComponentConfig.SAController.ServiceAccountKeyFile) diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index ace08a9818e69..f485ff7419194 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -26,7 +26,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "net/http" @@ -73,7 +73,7 @@ func startServiceController(ctx ControllerContext) (http.Handler, bool, error) { ) if err != nil { // This error shouldn't fail. It lives like this as a legacy. - glog.Errorf("Failed to start service controller: %v", err) + klog.Errorf("Failed to start service controller: %v", err) return nil, false, nil } go serviceController.Run(ctx.Stop, int(ctx.ComponentConfig.ServiceController.ConcurrentServiceSyncs)) @@ -92,14 +92,14 @@ func startNodeIpamController(ctx ControllerContext) (http.Handler, bool, error) if len(strings.TrimSpace(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR)) != 0 { _, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR) if err != nil { - glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err) + klog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err) } } if len(strings.TrimSpace(ctx.ComponentConfig.NodeIPAMController.ServiceCIDR)) != 0 { _, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.NodeIPAMController.ServiceCIDR) if err != nil { - glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIPAMController.ServiceCIDR, err) + klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.NodeIPAMController.ServiceCIDR, err) } } @@ -148,21 +148,21 @@ func startNodeLifecycleController(ctx ControllerContext) (http.Handler, bool, er func startRouteController(ctx ControllerContext) (http.Handler, bool, error) { if !ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs || !ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes { - glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes) + klog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes) return nil, false, nil } if ctx.Cloud == nil { - glog.Warning("configure-cloud-routes is set, but no cloud provider 
specified. Will not configure cloud provider routes.") + klog.Warning("configure-cloud-routes is set, but no cloud provider specified. Will not configure cloud provider routes.") return nil, false, nil } routes, ok := ctx.Cloud.Routes() if !ok { - glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.") + klog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.") return nil, false, nil } _, clusterCIDR, err := net.ParseCIDR(ctx.ComponentConfig.KubeCloudShared.ClusterCIDR) if err != nil { - glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err) + klog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.KubeCloudShared.ClusterCIDR, err) } routeController := routecontroller.New(routes, ctx.ClientBuilder.ClientOrDie("route-controller"), ctx.InformerFactory.Core().V1().Nodes(), ctx.ComponentConfig.KubeCloudShared.ClusterName, clusterCIDR) go routeController.Run(ctx.Stop, ctx.ComponentConfig.KubeCloudShared.RouteReconciliationPeriod.Duration) diff --git a/cmd/kube-controller-manager/app/options/BUILD b/cmd/kube-controller-manager/app/options/BUILD index fcae9358cf74f..9d14caa78f7e3 100644 --- a/cmd/kube-controller-manager/app/options/BUILD +++ b/cmd/kube-controller-manager/app/options/BUILD @@ -53,8 +53,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/kube-controller-manager/config/v1alpha1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kube-controller-manager/app/options/options.go b/cmd/kube-controller-manager/app/options/options.go index 377a6ee3de971..a93d3fd8fae30 100644 --- a/cmd/kube-controller-manager/app/options/options.go +++ b/cmd/kube-controller-manager/app/options/options.go @@ -45,7 +45,7 @@ import ( // add the kubernetes feature gates _ "k8s.io/kubernetes/pkg/features" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -438,7 +438,7 @@ func (s KubeControllerManagerOptions) Config(allControllers []string, disabledBy func createRecorder(kubeClient clientset.Interface, userAgent string) record.EventRecorder { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) // TODO: remove dependency on the legacyscheme return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: userAgent}) diff --git a/cmd/kube-controller-manager/app/plugins.go b/cmd/kube-controller-manager/app/plugins.go index de70fc9844769..e56752b22ad0a 100644 --- a/cmd/kube-controller-manager/app/plugins.go +++ b/cmd/kube-controller-manager/app/plugins.go @@ -23,7 +23,7 @@ import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" // Cloud providers cloudprovider "k8s.io/cloud-provider" @@ -133,7 +133,7 @@ func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config kubectrl ProvisioningEnabled: config.EnableHostPathProvisioning, } if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, &hostPathConfig); err != nil { - glog.Fatalf("Could not create hostpath 
recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err) + klog.Fatalf("Could not create hostpath recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, err) } allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...) @@ -143,7 +143,7 @@ func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config kubectrl RecyclerPodTemplate: volume.NewPersistentVolumeRecyclerPodTemplate(), } if err := AttemptToLoadRecycler(config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, &nfsConfig); err != nil { - glog.Fatalf("Could not create NFS recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err) + klog.Fatalf("Could not create NFS recycler pod from file %s: %+v", config.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, err) } allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(nfsConfig)...) allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...) diff --git a/cmd/kube-controller-manager/app/policy.go b/cmd/kube-controller-manager/app/policy.go index 3dc430300cb0c..d841d8dd14444 100644 --- a/cmd/kube-controller-manager/app/policy.go +++ b/cmd/kube-controller-manager/app/policy.go @@ -26,7 +26,7 @@ import ( "net/http" - "github.com/golang/glog" + "k8s.io/klog" ) func startDisruptionController(ctx ControllerContext) (http.Handler, bool, error) { @@ -35,7 +35,7 @@ func startDisruptionController(ctx ControllerContext) (http.Handler, bool, error var resource = "poddisruptionbudgets" if !ctx.AvailableResources[schema.GroupVersionResource{Group: group, Version: version, Resource: resource}] { - glog.Infof( + klog.Infof( "Refusing to start disruption because resource %q in group %q is not available.", resource, group+"/"+version) return nil, false, nil diff --git a/cmd/kube-proxy/app/BUILD b/cmd/kube-proxy/app/BUILD index 70a79a359780e..b99cf2ca1559e 100644 --- a/cmd/kube-proxy/app/BUILD +++ b/cmd/kube-proxy/app/BUILD @@ -61,10 +61,10 @@ go_library( "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/kube-proxy/config/v1alpha1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ] + select({ diff --git a/cmd/kube-proxy/app/conntrack.go b/cmd/kube-proxy/app/conntrack.go index 5e858663fb180..f3b7776e36154 100644 --- a/cmd/kube-proxy/app/conntrack.go +++ b/cmd/kube-proxy/app/conntrack.go @@ -22,7 +22,7 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/sysctl" @@ -49,7 +49,7 @@ func (rct realConntracker) SetMax(max int) error { if err := rct.setIntSysCtl("nf_conntrack_max", max); err != nil { return err } - glog.Infof("Setting nf_conntrack_max to %d", max) + klog.Infof("Setting nf_conntrack_max to %d", max) // Linux does not support writing to /sys/module/nf_conntrack/parameters/hashsize // when the writer process is not in the initial network namespace @@ -80,7 +80,7 @@ func (rct realConntracker) SetMax(max int) error { return readOnlySysFSError } 
// TODO: generify this and sysctl to a new sysfs.WriteInt() - glog.Infof("Setting conntrack hashsize to %d", max/4) + klog.Infof("Setting conntrack hashsize to %d", max/4) return writeIntStringFile("/sys/module/nf_conntrack/parameters/hashsize", max/4) } @@ -97,7 +97,7 @@ func (realConntracker) setIntSysCtl(name string, value int) error { sys := sysctl.New() if val, _ := sys.GetSysctl(entry); val != value { - glog.Infof("Set sysctl '%v' to %v", entry, value) + klog.Infof("Set sysctl '%v' to %v", entry, value) if err := sys.SetSysctl(entry, value); err != nil { return err } @@ -112,7 +112,7 @@ func isSysFSWritable() (bool, error) { m := mount.New("" /* default mount path */) mountPoints, err := m.List() if err != nil { - glog.Errorf("failed to list mount points: %v", err) + klog.Errorf("failed to list mount points: %v", err) return false, err } @@ -124,7 +124,7 @@ func isSysFSWritable() (bool, error) { if len(mountPoint.Opts) > 0 && mountPoint.Opts[0] == permWritable { return true, nil } - glog.Errorf("sysfs is not writable: %+v (mount options are %v)", + klog.Errorf("sysfs is not writable: %+v (mount options are %v)", mountPoint, mountPoint.Opts) return false, readOnlySysFSError } diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 793004f6fa114..04189b98d30f3 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -72,10 +72,10 @@ import ( "k8s.io/utils/exec" utilpointer "k8s.io/utils/pointer" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/klog" ) const ( @@ -191,7 +191,7 @@ func NewOptions() *Options { // Complete completes all the required options. func (o *Options) Complete() error { if len(o.ConfigFile) == 0 && len(o.WriteConfigTo) == 0 { - glog.Warning("WARNING: all flags other than --config, --write-config-to, and --cleanup are deprecated. Please begin using a config file ASAP.") + klog.Warning("WARNING: all flags other than --config, --write-config-to, and --cleanup are deprecated. Please begin using a config file ASAP.") o.applyDeprecatedHealthzPortToConfig() } @@ -280,7 +280,7 @@ func (o *Options) writeConfigFile() error { return err } - glog.Infof("Wrote configuration to: %s\n", o.WriteConfigTo) + klog.Infof("Wrote configuration to: %s\n", o.WriteConfigTo) return nil } @@ -365,23 +365,23 @@ with the apiserver API to configure the proxy.`, utilflag.PrintFlags(cmd.Flags()) if err := initForOS(opts.WindowsService); err != nil { - glog.Fatalf("failed OS init: %v", err) + klog.Fatalf("failed OS init: %v", err) } if err := opts.Complete(); err != nil { - glog.Fatalf("failed complete: %v", err) + klog.Fatalf("failed complete: %v", err) } if err := opts.Validate(args); err != nil { - glog.Fatalf("failed validate: %v", err) + klog.Fatalf("failed validate: %v", err) } - glog.Fatal(opts.Run()) + klog.Fatal(opts.Run()) }, } var err error opts.config, err = opts.ApplyDefaults(opts.config) if err != nil { - glog.Fatalf("unable to create flag defaults: %v", err) + klog.Fatalf("unable to create flag defaults: %v", err) } opts.AddFlags(cmd.Flags()) @@ -426,7 +426,7 @@ func createClients(config apimachineryconfig.ClientConnectionConfiguration, mast var err error if len(config.Kubeconfig) == 0 && len(masterOverride) == 0 { - glog.Info("Neither kubeconfig file nor master URL was specified. Falling back to in-cluster config.") + klog.Info("Neither kubeconfig file nor master URL was specified. 
Falling back to in-cluster config.") kubeConfig, err = rest.InClusterConfig() } else { // This creates a client, first loading any specified kubeconfig @@ -461,7 +461,7 @@ func createClients(config apimachineryconfig.ClientConnectionConfiguration, mast // Run runs the specified ProxyServer. This should never exit (unless CleanupAndExit is set). func (s *ProxyServer) Run() error { // To help debugging, immediately log version - glog.Infof("Version: %+v", version.Get()) + klog.Infof("Version: %+v", version.Get()) // remove iptables rules and exit if s.CleanupAndExit { encounteredError := userspace.CleanupLeftovers(s.IptInterface) @@ -478,16 +478,16 @@ func (s *ProxyServer) Run() error { if s.OOMScoreAdj != nil { oomAdjuster = oom.NewOOMAdjuster() if err := oomAdjuster.ApplyOOMScoreAdj(0, int(*s.OOMScoreAdj)); err != nil { - glog.V(2).Info(err) + klog.V(2).Info(err) } } if len(s.ResourceContainer) != 0 { // Run in its own container. if err := resourcecontainer.RunInResourceContainer(s.ResourceContainer); err != nil { - glog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err) + klog.Warningf("Failed to start in resource-only container %q: %v", s.ResourceContainer, err) } else { - glog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer) + klog.V(2).Infof("Running in resource-only container %q", s.ResourceContainer) } } @@ -595,7 +595,7 @@ func getConntrackMax(config kubeproxyconfig.KubeProxyConntrackConfiguration) (in if config.MaxPerCore != nil && *config.MaxPerCore > 0 { return -1, fmt.Errorf("invalid config: Conntrack Max and Conntrack MaxPerCore are mutually exclusive") } - glog.V(3).Infof("getConntrackMax: using absolute conntrack-max (deprecated)") + klog.V(3).Infof("getConntrackMax: using absolute conntrack-max (deprecated)") return int(*config.Max), nil } if config.MaxPerCore != nil && *config.MaxPerCore > 0 { @@ -605,10 +605,10 @@ func getConntrackMax(config kubeproxyconfig.KubeProxyConntrackConfiguration) (in } scaled := int(*config.MaxPerCore) * goruntime.NumCPU() if scaled > floor { - glog.V(3).Infof("getConntrackMax: using scaled conntrack-max-per-core") + klog.V(3).Infof("getConntrackMax: using scaled conntrack-max-per-core") return scaled, nil } - glog.V(3).Infof("getConntrackMax: using conntrack-min") + klog.V(3).Infof("getConntrackMax: using conntrack-min") return floor, nil } return 0, nil diff --git a/cmd/kube-proxy/app/server_others.go b/cmd/kube-proxy/app/server_others.go index 83e359cf45730..a92c1492f5d6f 100644 --- a/cmd/kube-proxy/app/server_others.go +++ b/cmd/kube-proxy/app/server_others.go @@ -48,7 +48,7 @@ import ( utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) // NewProxyServer returns a new ProxyServer. @@ -75,7 +75,7 @@ func newProxyServer( protocol := utiliptables.ProtocolIpv4 if net.ParseIP(config.BindAddress).To4() == nil { - glog.V(0).Infof("IPv6 bind address (%s), assume IPv6 operation", config.BindAddress) + klog.V(0).Infof("IPv6 bind address (%s), assume IPv6 operation", config.BindAddress) protocol = utiliptables.ProtocolIpv6 } @@ -145,7 +145,7 @@ func newProxyServer( nodeIP = utilnode.GetNodeIP(client, hostname) } if proxyMode == proxyModeIPTables { - glog.V(0).Info("Using iptables Proxier.") + klog.V(0).Info("Using iptables Proxier.") if config.IPTables.MasqueradeBit == nil { // MasqueradeBit must be specified or defaulted. 
return nil, fmt.Errorf("unable to read IPTables MasqueradeBit from config") @@ -175,7 +175,7 @@ func newProxyServer( serviceEventHandler = proxierIPTables endpointsEventHandler = proxierIPTables // No turning back. Remove artifacts that might still exist from the userspace Proxier. - glog.V(0).Info("Tearing down inactive rules.") + klog.V(0).Info("Tearing down inactive rules.") // TODO this has side effects that should only happen when Run() is invoked. userspace.CleanupLeftovers(iptInterface) // IPVS Proxier will generate some iptables rules, need to clean them before switching to other proxy mode. @@ -186,7 +186,7 @@ func newProxyServer( ipvs.CleanupLeftovers(ipvsInterface, iptInterface, ipsetInterface, cleanupIPVS) } } else if proxyMode == proxyModeIPVS { - glog.V(0).Info("Using ipvs Proxier.") + klog.V(0).Info("Using ipvs Proxier.") proxierIPVS, err := ipvs.NewProxier( iptInterface, ipvsInterface, @@ -213,12 +213,12 @@ func newProxyServer( proxier = proxierIPVS serviceEventHandler = proxierIPVS endpointsEventHandler = proxierIPVS - glog.V(0).Info("Tearing down inactive rules.") + klog.V(0).Info("Tearing down inactive rules.") // TODO this has side effects that should only happen when Run() is invoked. userspace.CleanupLeftovers(iptInterface) iptables.CleanupLeftovers(iptInterface) } else { - glog.V(0).Info("Using userspace Proxier.") + klog.V(0).Info("Using userspace Proxier.") // This is a proxy.LoadBalancer which NewProxier needs but has methods we don't need for // our config.EndpointsConfigHandler. loadBalancer := userspace.NewLoadBalancerRR() @@ -244,7 +244,7 @@ func newProxyServer( proxier = proxierUserspace // Remove artifacts from the iptables and ipvs Proxier, if not on Windows. - glog.V(0).Info("Tearing down inactive rules.") + klog.V(0).Info("Tearing down inactive rules.") // TODO this has side effects that should only happen when Run() is invoked. iptables.CleanupLeftovers(iptInterface) // IPVS Proxier will generate some iptables rules, need to clean them before switching to other proxy mode. @@ -292,7 +292,7 @@ func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, khandle i case proxyModeIPVS: return tryIPVSProxy(iptver, khandle, ipsetver, kcompat) } - glog.Warningf("Flag proxy-mode=%q unknown, assuming iptables proxy", proxyMode) + klog.Warningf("Flag proxy-mode=%q unknown, assuming iptables proxy", proxyMode) return tryIPTablesProxy(iptver, kcompat) } @@ -309,7 +309,7 @@ func tryIPVSProxy(iptver iptables.IPTablesVersioner, khandle ipvs.KernelHandler, } // Try to fallback to iptables before falling back to userspace - glog.V(1).Infof("Can't use ipvs proxier, trying iptables proxier") + klog.V(1).Infof("Can't use ipvs proxier, trying iptables proxier") return tryIPTablesProxy(iptver, kcompat) } @@ -324,6 +324,6 @@ func tryIPTablesProxy(iptver iptables.IPTablesVersioner, kcompat iptables.Kernel return proxyModeIPTables } // Fallback. - glog.V(1).Infof("Can't use iptables proxy, using userspace proxier") + klog.V(1).Infof("Can't use iptables proxy, using userspace proxier") return proxyModeUserspace } diff --git a/cmd/kube-proxy/app/server_windows.go b/cmd/kube-proxy/app/server_windows.go index 725b0df718592..5ef2ce1618243 100644 --- a/cmd/kube-proxy/app/server_windows.go +++ b/cmd/kube-proxy/app/server_windows.go @@ -42,7 +42,7 @@ import ( utilnode "k8s.io/kubernetes/pkg/util/node" "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) // NewProxyServer returns a new ProxyServer. 
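The selection logic spread across getProxyMode, tryIPVSProxy, and tryIPTablesProxy above amounts to a fallback chain; a hypothetical condensed version follows (the two boolean capability checks stand in for the real iptables/ipset version and kernel-module probes):

package example

import "k8s.io/klog"

// chooseProxyMode condenses the chain: ipvs falls back to iptables,
// iptables falls back to userspace, and an unknown flag value is
// treated like a request for iptables.
func chooseProxyMode(requested string, canUseIPVS, canUseIPTables bool) string {
	switch requested {
	case "userspace":
		return "userspace"
	case "ipvs":
		if canUseIPVS {
			return "ipvs"
		}
		klog.V(1).Infof("Can't use ipvs proxier, trying iptables proxier")
		fallthrough
	case "iptables":
		if canUseIPTables {
			return "iptables"
		}
		klog.V(1).Infof("Can't use iptables proxy, using userspace proxier")
		return "userspace"
	default:
		klog.Warningf("Flag proxy-mode=%q unknown, assuming iptables proxy", requested)
		if canUseIPTables {
			return "iptables"
		}
		return "userspace"
	}
}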
@@ -99,7 +99,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi proxyMode := getProxyMode(string(config.Mode), winkernel.WindowsKernelCompatTester{}) if proxyMode == proxyModeKernelspace { - glog.V(0).Info("Using Kernelspace Proxier.") + klog.V(0).Info("Using Kernelspace Proxier.") proxierKernelspace, err := winkernel.NewProxier( config.IPTables.SyncPeriod.Duration, config.IPTables.MinSyncPeriod.Duration, @@ -118,7 +118,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi endpointsEventHandler = proxierKernelspace serviceEventHandler = proxierKernelspace } else { - glog.V(0).Info("Using userspace Proxier.") + klog.V(0).Info("Using userspace Proxier.") execer := exec.New() var netshInterface utilnetsh.Interface netshInterface = utilnetsh.New(execer) @@ -143,7 +143,7 @@ func newProxyServer(config *proxyconfigapi.KubeProxyConfiguration, cleanupAndExi } proxier = proxierUserspace serviceEventHandler = proxierUserspace - glog.V(0).Info("Tearing down pure-winkernel proxy rules.") + klog.V(0).Info("Tearing down pure-winkernel proxy rules.") winkernel.CleanupLeftovers() } @@ -182,13 +182,13 @@ func tryWinKernelSpaceProxy(kcompat winkernel.KernelCompatTester) string { // guaranteed false on error, error only necessary for debugging useWinKerelProxy, err := winkernel.CanUseWinKernelProxier(kcompat) if err != nil { - glog.Errorf("Can't determine whether to use windows kernel proxy, using userspace proxier: %v", err) + klog.Errorf("Can't determine whether to use windows kernel proxy, using userspace proxier: %v", err) return proxyModeUserspace } if useWinKerelProxy { return proxyModeKernelspace } // Fallback. - glog.V(1).Infof("Can't use winkernel proxy, using userspace proxier") + klog.V(1).Infof("Can't use winkernel proxy, using userspace proxier") return proxyModeUserspace } diff --git a/cmd/kube-scheduler/app/BUILD b/cmd/kube-scheduler/app/BUILD index db61d56183d9a..33a2b5a6c8d87 100644 --- a/cmd/kube-scheduler/app/BUILD +++ b/cmd/kube-scheduler/app/BUILD @@ -42,9 +42,9 @@ go_library( "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/leaderelection:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kube-scheduler/app/options/BUILD b/cmd/kube-scheduler/app/options/BUILD index 5806f6cc9b505..dc66d81886942 100644 --- a/cmd/kube-scheduler/app/options/BUILD +++ b/cmd/kube-scheduler/app/options/BUILD @@ -39,8 +39,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/kube-scheduler/config/v1alpha1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kube-scheduler/app/options/options.go b/cmd/kube-scheduler/app/options/options.go index fdee98c5982d1..9fe7127334301 100644 --- a/cmd/kube-scheduler/app/options/options.go +++ b/cmd/kube-scheduler/app/options/options.go @@ -23,8 +23,8 @@ import ( "strconv" "time" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" corev1 "k8s.io/api/core/v1" 
apimachineryconfig "k8s.io/apimachinery/pkg/apis/config" @@ -291,7 +291,7 @@ func makeLeaderElectionConfig(config kubeschedulerconfig.KubeSchedulerLeaderElec // TODO remove masterOverride when CLI flags are removed. func createClients(config apimachineryconfig.ClientConnectionConfiguration, masterOverride string, timeout time.Duration) (clientset.Interface, clientset.Interface, v1core.EventsGetter, error) { if len(config.Kubeconfig) == 0 && len(masterOverride) == 0 { - glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.") + klog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.") } // This creates a client, first loading any specified kubeconfig diff --git a/cmd/kube-scheduler/app/server.go b/cmd/kube-scheduler/app/server.go index 2fb574d8b203b..7973b9cd50d0e 100644 --- a/cmd/kube-scheduler/app/server.go +++ b/cmd/kube-scheduler/app/server.go @@ -59,16 +59,16 @@ import ( "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/version/verflag" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" + "k8s.io/klog" ) // NewSchedulerCommand creates a *cobra.Command object with default parameters func NewSchedulerCommand() *cobra.Command { opts, err := options.NewOptions() if err != nil { - glog.Fatalf("unable to initialize command options: %v", err) + klog.Fatalf("unable to initialize command options: %v", err) } cmd := &cobra.Command{ @@ -113,7 +113,7 @@ func runCommand(cmd *cobra.Command, args []string, opts *options.Options) error fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } - glog.Infof("Wrote configuration to: %s\n", opts.WriteConfigTo) + klog.Infof("Wrote configuration to: %s\n", opts.WriteConfigTo) } c, err := opts.Config() @@ -128,7 +128,7 @@ func runCommand(cmd *cobra.Command, args []string, opts *options.Options) error cc := c.Complete() // To help debugging, immediately log version - glog.Infof("Version: %+v", version.Get()) + klog.Infof("Version: %+v", version.Get()) // Apply algorithms based on feature gates. // TODO: make configurable? 
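One practical difference behind these import swaps: glog registers its flags on the global flag.CommandLine in an init(), while klog exposes an explicit klog.InitFlags. A minimal sketch of wiring that into a pflag/cobra-style binary (the flag-set name and the AddGoFlagSet bridge are one common pattern, not necessarily what every command here does):

package main

import (
	goflag "flag"

	"github.com/spf13/pflag"
	"k8s.io/klog"
)

func main() {
	// Register -v, -logtostderr, -vmodule, etc. on a dedicated FlagSet;
	// passing nil would register them on flag.CommandLine instead.
	klogFlags := goflag.NewFlagSet("klog", goflag.ExitOnError)
	klog.InitFlags(klogFlags)

	// Bridge the Go flags into the pflag set that cobra commands parse.
	pflag.CommandLine.AddGoFlagSet(klogFlags)
	pflag.Parse()

	defer klog.Flush()
	klog.V(1).Infof("verbose output enabled") // only emitted at -v=1 or higher
}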
diff --git a/cmd/kubeadm/.import-restrictions b/cmd/kubeadm/.import-restrictions index 0149df472621c..988c92e551eb4 100644 --- a/cmd/kubeadm/.import-restrictions +++ b/cmd/kubeadm/.import-restrictions @@ -132,7 +132,6 @@ "github.com/ghodss/yaml", "github.com/gogo/protobuf/proto", "github.com/gogo/protobuf/sortkeys", - "github.com/golang/glog", "github.com/golang/groupcache/lru", "github.com/golang/protobuf/proto", "github.com/golang/protobuf/protoc-gen-go/descriptor", diff --git a/cmd/kubeadm/BUILD b/cmd/kubeadm/BUILD index 8e35ea32b0d95..8e45f031df0ad 100644 --- a/cmd/kubeadm/BUILD +++ b/cmd/kubeadm/BUILD @@ -18,7 +18,10 @@ go_library( name = "go_default_library", srcs = ["kubeadm.go"], importpath = "k8s.io/kubernetes/cmd/kubeadm", - deps = ["//cmd/kubeadm/app:go_default_library"], + deps = [ + "//cmd/kubeadm/app:go_default_library", + "//vendor/k8s.io/klog:go_default_library", + ], ) filegroup( diff --git a/cmd/kubeadm/app/BUILD b/cmd/kubeadm/app/BUILD index b3411caef4042..de0b69c61c0d9 100644 --- a/cmd/kubeadm/app/BUILD +++ b/cmd/kubeadm/app/BUILD @@ -12,8 +12,8 @@ go_library( deps = [ "//cmd/kubeadm/app/cmd:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kubeadm/app/cmd/BUILD b/cmd/kubeadm/app/cmd/BUILD index e306ae8a5557c..7d92c1e0d4f85 100644 --- a/cmd/kubeadm/app/cmd/BUILD +++ b/cmd/kubeadm/app/cmd/BUILD @@ -67,11 +67,11 @@ go_library( "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/github.com/renstrom/dedent:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], diff --git a/cmd/kubeadm/app/cmd/completion.go b/cmd/kubeadm/app/cmd/completion.go index 12da3e2fe0958..2fae2b34326e2 100644 --- a/cmd/kubeadm/app/cmd/completion.go +++ b/cmd/kubeadm/app/cmd/completion.go @@ -20,10 +20,10 @@ import ( "bytes" "io" - "github.com/golang/glog" "github.com/pkg/errors" "github.com/renstrom/dedent" "github.com/spf13/cobra" + "k8s.io/klog" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" ) @@ -138,7 +138,7 @@ func RunCompletion(out io.Writer, boilerPlate string, cmd *cobra.Command, args [ } func runCompletionBash(out io.Writer, kubeadm *cobra.Command) error { - glog.V(1).Infoln("[completion] writing completion code for Bash") + klog.V(1).Infoln("[completion] writing completion code for Bash") return kubeadm.GenBashCompletion(out) } @@ -284,12 +284,12 @@ __kubeadm_convert_bash_to_zsh() { -e "s/\\\$(type${RWORD}/\$(__kubeadm_type/g" \ <<'BASH_COMPLETION_EOF' ` - glog.V(1).Infoln("[completion] writing completion code for Zsh") + klog.V(1).Infoln("[completion] writing completion code for Zsh") out.Write([]byte(zshInitialization)) buf := new(bytes.Buffer) kubeadm.GenBashCompletion(buf) - glog.V(1).Infoln("[completion] writing completion code for Bash") + klog.V(1).Infoln("[completion] writing completion code for Bash") out.Write(buf.Bytes()) zshTail := ` diff --git a/cmd/kubeadm/app/cmd/config.go 
b/cmd/kubeadm/app/cmd/config.go index c7a1f1aa0db13..5f4ff01b87dba 100644 --- a/cmd/kubeadm/app/cmd/config.go +++ b/cmd/kubeadm/app/cmd/config.go @@ -23,11 +23,11 @@ import ( "io/ioutil" "strings" - "github.com/golang/glog" "github.com/pkg/errors" "github.com/renstrom/dedent" "github.com/spf13/cobra" flag "github.com/spf13/pflag" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -373,7 +373,7 @@ func NewCmdConfigView(out io.Writer, kubeConfigFile *string) *cobra.Command { The configuration is located in the %q namespace in the %q ConfigMap. `), metav1.NamespaceSystem, constants.KubeadmConfigConfigMap), Run: func(cmd *cobra.Command, args []string) { - glog.V(1).Infoln("[config] retrieving ClientSet from file") + klog.V(1).Infoln("[config] retrieving ClientSet from file") client, err := kubeconfigutil.ClientSetFromFile(*kubeConfigFile) kubeadmutil.CheckErr(err) @@ -402,15 +402,15 @@ func NewCmdConfigUploadFromFile(out io.Writer, kubeConfigFile *string) *cobra.Co kubeadmutil.CheckErr(errors.New("The --config flag is mandatory")) } - glog.V(1).Infoln("[config] retrieving ClientSet from file") + klog.V(1).Infoln("[config] retrieving ClientSet from file") client, err := kubeconfigutil.ClientSetFromFile(*kubeConfigFile) kubeadmutil.CheckErr(err) // The default configuration is empty; everything should come from the file on disk - glog.V(1).Infoln("[config] creating empty default configuration") + klog.V(1).Infoln("[config] creating empty default configuration") defaultcfg := &kubeadmapiv1beta1.InitConfiguration{} // Upload the configuration using the file; don't care about the defaultcfg really - glog.V(1).Infof("[config] uploading configuration") + klog.V(1).Infof("[config] uploading configuration") err = uploadConfiguration(client, cfgPath, defaultcfg) kubeadmutil.CheckErr(err) }, @@ -438,17 +438,17 @@ func NewCmdConfigUploadFromFlags(out io.Writer, kubeConfigFile *string) *cobra.C `), metav1.NamespaceSystem, constants.KubeadmConfigConfigMap), Run: func(cmd *cobra.Command, args []string) { var err error - glog.V(1).Infoln("[config] creating new FeatureGates") + klog.V(1).Infoln("[config] creating new FeatureGates") if cfg.FeatureGates, err = features.NewFeatureGate(&features.InitFeatureGates, featureGatesString); err != nil { kubeadmutil.CheckErr(err) } - glog.V(1).Infoln("[config] retrieving ClientSet from file") + klog.V(1).Infoln("[config] retrieving ClientSet from file") client, err := kubeconfigutil.ClientSetFromFile(*kubeConfigFile) kubeadmutil.CheckErr(err) // Default both statically and dynamically, convert to internal API type, and validate everything // The cfgPath argument is unset here as we shouldn't load a config file from disk, just go with cfg - glog.V(1).Infof("[config] uploading configuration") + klog.V(1).Infof("[config] uploading configuration") err = uploadConfiguration(client, "", cfg) kubeadmutil.CheckErr(err) }, @@ -460,7 +460,7 @@ func NewCmdConfigUploadFromFlags(out io.Writer, kubeConfigFile *string) *cobra.C // RunConfigView gets the configuration persisted in the cluster func RunConfigView(out io.Writer, client clientset.Interface) error { - glog.V(1).Infoln("[config] getting the cluster configuration") + klog.V(1).Infoln("[config] getting the cluster configuration") cfgConfigMap, err := client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(constants.KubeadmConfigConfigMap, metav1.GetOptions{}) if err != nil { return err @@ -478,7 +478,7 @@ func uploadConfiguration(client clientset.Interface, cfgPath string, 
defaultcfg // Default both statically and dynamically, convert to internal API type, and validate everything // First argument is unset here as we shouldn't load a config file from disk - glog.V(1).Infoln("[config] converting to internal API type") + klog.V(1).Infoln("[config] converting to internal API type") internalcfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(cfgPath, defaultcfg) if err != nil { return err diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index c299717c93b53..43355b1b1bd46 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -25,13 +25,13 @@ import ( "strings" "text/template" - "github.com/golang/glog" "github.com/pkg/errors" "github.com/renstrom/dedent" "github.com/spf13/cobra" flag "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" @@ -460,7 +460,7 @@ func (d initData) Tokens() []string { func runInit(i *initData, out io.Writer) error { // Get directories to write files to; can be faked if we're dry-running - glog.V(1).Infof("[init] Getting certificates directory from configuration") + klog.V(1).Infof("[init] Getting certificates directory from configuration") certsDirToWriteTo, kubeConfigDir, _, _, err := getDirectoriesToUse(i.dryRun, i.dryRunDir, i.cfg.CertificatesDir) if err != nil { return errors.Wrap(err, "error getting directories to use") @@ -481,18 +481,18 @@ func runInit(i *initData, out io.Writer) error { // Upload currently used configuration to the cluster // Note: This is done right in the beginning of cluster initialization; as we might want to make other phases // depend on centralized information from this source in the future - glog.V(1).Infof("[init] uploading currently used configuration to the cluster") + klog.V(1).Infof("[init] uploading currently used configuration to the cluster") if err := uploadconfigphase.UploadConfiguration(i.cfg, client); err != nil { return errors.Wrap(err, "error uploading configuration") } - glog.V(1).Infof("[init] creating kubelet configuration configmap") + klog.V(1).Infof("[init] creating kubelet configuration configmap") if err := kubeletphase.CreateConfigMap(i.cfg, client); err != nil { return errors.Wrap(err, "error creating kubelet configuration ConfigMap") } // PHASE 4: Mark the master with the right label/taint - glog.V(1).Infof("[init] marking the master with right label") + klog.V(1).Infof("[init] marking the master with right label") if err := markmasterphase.MarkMaster(client, i.cfg.NodeRegistration.Name, i.cfg.NodeRegistration.Taints); err != nil { return errors.Wrap(err, "error marking master") } @@ -510,12 +510,12 @@ func runInit(i *initData, out io.Writer) error { } } - glog.V(1).Infof("[init] ensuring DNS addon") + klog.V(1).Infof("[init] ensuring DNS addon") if err := dnsaddonphase.EnsureDNSAddon(i.cfg, client); err != nil { return errors.Wrap(err, "error ensuring dns addon") } - glog.V(1).Infof("[init] ensuring proxy addon") + klog.V(1).Infof("[init] ensuring proxy addon") if err := proxyaddonphase.EnsureProxyAddon(i.cfg, client); err != nil { return errors.Wrap(err, "error ensuring proxy addon") } diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index da7277520df84..56b7b2821fd2b 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ 
b/cmd/kubeadm/app/cmd/join.go @@ -24,7 +24,6 @@ import ( "path/filepath" "text/template" - "github.com/golang/glog" "github.com/pkg/errors" "github.com/renstrom/dedent" "github.com/spf13/cobra" @@ -33,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" certutil "k8s.io/client-go/util/cert" + "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" @@ -180,7 +180,7 @@ func NewCmdJoin(out io.Writer) *cobra.Command { } if len(args) > 0 { if len(cfgPath) == 0 && len(args) > 1 { - glog.Warningf("[join] WARNING: More than one API server endpoint supplied on command line %v. Using the first one.", args) + klog.Warningf("[join] WARNING: More than one API server endpoint supplied on command line %v. Using the first one.", args) } cfg.Discovery.BootstrapToken.APIServerEndpoint = args[0] } @@ -289,11 +289,11 @@ type Join struct { func NewJoin(cfgPath string, defaultcfg *kubeadmapiv1beta1.JoinConfiguration, ignorePreflightErrors sets.String) (*Join, error) { if defaultcfg.NodeRegistration.Name == "" { - glog.V(1).Infoln("[join] found NodeName empty; using OS hostname as NodeName") + klog.V(1).Infoln("[join] found NodeName empty; using OS hostname as NodeName") } if defaultcfg.APIEndpoint.AdvertiseAddress == "" { - glog.V(1).Infoln("[join] found advertiseAddress empty; using default interface's IP address as advertiseAddress") + klog.V(1).Infoln("[join] found advertiseAddress empty; using default interface's IP address as advertiseAddress") } internalCfg, err := configutil.JoinConfigFileAndDefaultsToInternalConfig(cfgPath, defaultcfg) @@ -307,20 +307,20 @@ func NewJoin(cfgPath string, defaultcfg *kubeadmapiv1beta1.JoinConfiguration, ig fmt.Println("[preflight] Running pre-flight checks") // Start with general checks - glog.V(1).Infoln("[preflight] Running general checks") + klog.V(1).Infoln("[preflight] Running general checks") if err := preflight.RunJoinNodeChecks(utilsexec.New(), internalCfg, ignorePreflightErrors); err != nil { return nil, err } // Fetch the init configuration based on the join configuration - glog.V(1).Infoln("[preflight] Fetching init configuration") + klog.V(1).Infoln("[preflight] Fetching init configuration") initCfg, tlsBootstrapCfg, err := fetchInitConfigurationFromJoinConfiguration(internalCfg) if err != nil { return nil, err } // Continue with more specific checks based on the init configuration - glog.V(1).Infoln("[preflight] Running configuration dependant checks") + klog.V(1).Infoln("[preflight] Running configuration dependant checks") if err := preflight.RunOptionalJoinNodeChecks(utilsexec.New(), initCfg, ignorePreflightErrors); err != nil { return nil, err } @@ -457,7 +457,7 @@ func (j *Join) BootstrapKubelet(tlsBootstrapCfg *clientcmdapi.Config) error { bootstrapKubeConfigFile := kubeadmconstants.GetBootstrapKubeletKubeConfigPath() // Write the bootstrap kubelet config file or the TLS-Boostrapped kubelet config file down to disk - glog.V(1).Infoln("[join] writing bootstrap kubelet config file at", bootstrapKubeConfigFile) + klog.V(1).Infoln("[join] writing bootstrap kubelet config file at", bootstrapKubeConfigFile) if err := kubeconfigutil.WriteToDisk(bootstrapKubeConfigFile, tlsBootstrapCfg); err != nil { return errors.Wrap(err, "couldn't save bootstrap-kubelet.conf to disk") } @@ -482,7 +482,7 @@ func (j *Join) BootstrapKubelet(tlsBootstrapCfg 
*clientcmdapi.Config) error { // Configure the kubelet. In this short timeframe, kubeadm is trying to stop/restart the kubelet // Try to stop the kubelet service so no race conditions occur when configuring it - glog.V(1).Infof("Stopping the kubelet") + klog.V(1).Infof("Stopping the kubelet") kubeletphase.TryStopKubelet() // Write the configuration for the kubelet (using the bootstrap token credentials) to disk so the kubelet can start @@ -499,7 +499,7 @@ func (j *Join) BootstrapKubelet(tlsBootstrapCfg *clientcmdapi.Config) error { } // Try to start the kubelet service in case it's inactive - glog.V(1).Infof("Starting the kubelet") + klog.V(1).Infof("Starting the kubelet") kubeletphase.TryStartKubelet() // Now the kubelet will perform the TLS Bootstrap, transforming /etc/kubernetes/bootstrap-kubelet.conf to /etc/kubernetes/kubelet.conf @@ -517,7 +517,7 @@ func (j *Join) BootstrapKubelet(tlsBootstrapCfg *clientcmdapi.Config) error { return err } - glog.V(1).Infof("[join] preserving the crisocket information for the node") + klog.V(1).Infof("[join] preserving the crisocket information for the node") if err := patchnodephase.AnnotateCRISocket(client, j.cfg.NodeRegistration.Name, j.cfg.NodeRegistration.CRISocket); err != nil { return errors.Wrap(err, "error uploading crisocket") } @@ -552,18 +552,18 @@ func (j *Join) PostInstallControlPlane(initConfiguration *kubeadmapi.InitConfigu // "If you add a new member to a 1-node cluster, the cluster cannot make progress before the new member starts // because it needs two members as majority to agree on the consensus. You will only see this behavior between the time // etcdctl member add informs the cluster about the new member and the new member successfully establishing a connection to the existing one." - glog.V(1).Info("[join] adding etcd") + klog.V(1).Info("[join] adding etcd") if err := etcdphase.CreateStackedEtcdStaticPodManifestFile(client, kubeadmconstants.GetStaticPodDirectory(), initConfiguration); err != nil { return errors.Wrap(err, "error creating local etcd static pod manifest file") } } - glog.V(1).Info("[join] uploading currently used configuration to the cluster") + klog.V(1).Info("[join] uploading currently used configuration to the cluster") if err := uploadconfigphase.UploadConfiguration(initConfiguration, client); err != nil { return errors.Wrap(err, "error uploading configuration") } - glog.V(1).Info("[join] marking the master with right label") + klog.V(1).Info("[join] marking the master with right label") if err = markmasterphase.MarkMaster(client, initConfiguration.NodeRegistration.Name, initConfiguration.NodeRegistration.Taints); err != nil { return errors.Wrap(err, "error applying master label and taints") } @@ -587,14 +587,14 @@ func waitForTLSBootstrappedClient() error { func fetchInitConfigurationFromJoinConfiguration(cfg *kubeadmapi.JoinConfiguration) (*kubeadmapi.InitConfiguration, *clientcmdapi.Config, error) { // Perform the Discovery, which turns a Bootstrap Token and optionally (and preferably) a CA cert hash into a KubeConfig // file that may be used for the TLS Bootstrapping process the kubelet performs using the Certificates API. 
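The [join] messages in these hunks follow one convention: user-facing progress goes to stdout via fmt, while per-step detail sits behind klog.V(1) and failures are wrapped with the step name. A hypothetical outline of that shape (the step type and runner are illustrative, not kubeadm's actual control flow):

package example

import (
	"github.com/pkg/errors"
	"k8s.io/klog"
)

type joinStep struct {
	name string
	run  func() error
}

// runJoinSteps logs each phase at V(1) and wraps its error with the
// step name, matching the logging style of the hunks above.
func runJoinSteps(steps []joinStep) error {
	for _, s := range steps {
		klog.V(1).Infof("[join] %s", s.name)
		if err := s.run(); err != nil {
			return errors.Wrap(err, s.name)
		}
	}
	return nil
}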
- glog.V(1).Infoln("[join] Discovering cluster-info") + klog.V(1).Infoln("[join] Discovering cluster-info") tlsBootstrapCfg, err := discovery.For(cfg) if err != nil { return nil, nil, err } // Retrieves the kubeadm configuration - glog.V(1).Infoln("[join] Retrieving KubeConfig objects") + klog.V(1).Infoln("[join] Retrieving KubeConfig objects") initConfiguration, err := fetchInitConfiguration(tlsBootstrapCfg) if err != nil { return nil, nil, err diff --git a/cmd/kubeadm/app/cmd/phases/BUILD b/cmd/kubeadm/app/cmd/phases/BUILD index 0012660fdf8cc..456383e7cb381 100644 --- a/cmd/kubeadm/app/cmd/phases/BUILD +++ b/cmd/kubeadm/app/cmd/phases/BUILD @@ -52,10 +52,10 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/github.com/renstrom/dedent:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cmd/kubeadm/app/cmd/phases/addons.go b/cmd/kubeadm/app/cmd/phases/addons.go index ebc82f6a5a197..730e7aa237472 100644 --- a/cmd/kubeadm/app/cmd/phases/addons.go +++ b/cmd/kubeadm/app/cmd/phases/addons.go @@ -19,8 +19,8 @@ package phases import ( "strings" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" clientset "k8s.io/client-go/kubernetes" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -83,7 +83,7 @@ func EnsureAllAddons(cfg *kubeadmapi.InitConfiguration, client clientset.Interfa proxyaddon.EnsureProxyAddon, } - glog.V(1).Infoln("[addon] installing all addons") + klog.V(1).Infoln("[addon] installing all addons") for _, action := range addonActions { err := action(cfg, client) if err != nil { diff --git a/cmd/kubeadm/app/cmd/phases/etcd.go b/cmd/kubeadm/app/cmd/phases/etcd.go index 5da45c2dbb645..384ae23beb918 100644 --- a/cmd/kubeadm/app/cmd/phases/etcd.go +++ b/cmd/kubeadm/app/cmd/phases/etcd.go @@ -19,8 +19,8 @@ package phases import ( "fmt" - "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" @@ -93,7 +93,7 @@ func runEtcdPhaseLocal() func(c workflow.RunData) error { return errors.Wrap(err, "error creating local etcd static pod manifest file") } } else { - glog.V(1).Infof("[etcd] External etcd mode. Skipping the creation of a manifest for local etcd") + klog.V(1).Infof("[etcd] External etcd mode. Skipping the creation of a manifest for local etcd") } return nil } diff --git a/cmd/kubeadm/app/cmd/phases/kubelet.go b/cmd/kubeadm/app/cmd/phases/kubelet.go index 72ef901e61d6b..8973f6d35e375 100644 --- a/cmd/kubeadm/app/cmd/phases/kubelet.go +++ b/cmd/kubeadm/app/cmd/phases/kubelet.go @@ -17,8 +17,8 @@ limitations under the License. package phases import ( - "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" @@ -68,7 +68,7 @@ func runKubeletStart(c workflow.RunData) error { // First off, configure the kubelet. 
In this short timeframe, kubeadm is trying to stop/restart the kubelet // Try to stop the kubelet service so no race conditions occur when configuring it if !data.DryRun() { - glog.V(1).Infof("Stopping the kubelet") + klog.V(1).Infof("Stopping the kubelet") kubeletphase.TryStopKubelet() } @@ -86,7 +86,7 @@ func runKubeletStart(c workflow.RunData) error { // Try to start the kubelet service in case it's inactive if !data.DryRun() { - glog.V(1).Infof("Starting the kubelet") + klog.V(1).Infof("Starting the kubelet") kubeletphase.TryStartKubelet() } diff --git a/cmd/kubeadm/app/cmd/phases/uploadconfig.go b/cmd/kubeadm/app/cmd/phases/uploadconfig.go index f7318f03122f0..0aa4f17a26ea5 100644 --- a/cmd/kubeadm/app/cmd/phases/uploadconfig.go +++ b/cmd/kubeadm/app/cmd/phases/uploadconfig.go @@ -19,10 +19,10 @@ package phases import ( "fmt" - "github.com/golang/glog" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" @@ -105,7 +105,7 @@ func runUploadKubeadmConfig(c workflow.RunData) error { return err } - glog.V(1).Infof("[upload-config] Uploading the kubeadm ClusterConfiguration to a ConfigMap") + klog.V(1).Infof("[upload-config] Uploading the kubeadm ClusterConfiguration to a ConfigMap") if err := uploadconfig.UploadConfiguration(cfg, client); err != nil { return errors.Wrap(err, "error uploading the kubeadm ClusterConfiguration") } @@ -119,12 +119,12 @@ func runUploadKubeletConfig(c workflow.RunData) error { return err } - glog.V(1).Infof("[upload-config] Uploading the kubelet component config to a ConfigMap") + klog.V(1).Infof("[upload-config] Uploading the kubelet component config to a ConfigMap") if err = kubeletphase.CreateConfigMap(cfg, client); err != nil { return errors.Wrap(err, "error creating kubelet configuration ConfigMap") } - glog.V(1).Infof("[upload-config] Preserving the CRISocket information for the control-plane node") + klog.V(1).Infof("[upload-config] Preserving the CRISocket information for the control-plane node") if err := patchnodephase.AnnotateCRISocket(client, cfg.NodeRegistration.Name, cfg.NodeRegistration.CRISocket); err != nil { return errors.Wrap(err, "Error writing Crisocket information for the control-plane node") } diff --git a/cmd/kubeadm/app/cmd/phases/waitcontrolplane.go b/cmd/kubeadm/app/cmd/phases/waitcontrolplane.go index 00f52e6c7ab3a..b2983e17d8467 100644 --- a/cmd/kubeadm/app/cmd/phases/waitcontrolplane.go +++ b/cmd/kubeadm/app/cmd/phases/waitcontrolplane.go @@ -23,10 +23,10 @@ import ( "text/template" "time" - "github.com/golang/glog" "github.com/pkg/errors" "github.com/renstrom/dedent" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -86,7 +86,7 @@ func runWaitControlPlanePhase(c workflow.RunData) error { } // waiter holds the apiclient.Waiter implementation of choice, responsible for querying the API server in various ways and waiting for conditions to be fulfilled - glog.V(1).Infof("[wait-control-plane] Waiting for the API server to be healthy") + klog.V(1).Infof("[wait-control-plane] Waiting for the API server to be healthy") client, err := data.Client() if err != nil { diff --git a/cmd/kubeadm/app/cmd/reset.go 
b/cmd/kubeadm/app/cmd/reset.go index ce2e7883cc7b1..94def138059a4 100644 --- a/cmd/kubeadm/app/cmd/reset.go +++ b/cmd/kubeadm/app/cmd/reset.go @@ -26,8 +26,8 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" @@ -129,7 +129,7 @@ func (r *Reset) Run(out io.Writer, client clientset.Interface) error { // Only clear etcd data when using local etcd. etcdManifestPath := filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ManifestsSubDirName, "etcd.yaml") - glog.V(1).Infof("[reset] checking for etcd config") + klog.V(1).Infof("[reset] checking for etcd config") etcdDataDir, err := getEtcdDataDir(etcdManifestPath, client) if err == nil { dirsToClean = append(dirsToClean, etcdDataDir) @@ -139,16 +139,16 @@ func (r *Reset) Run(out io.Writer, client clientset.Interface) error { } // Try to stop the kubelet service - glog.V(1).Infof("[reset] getting init system") + klog.V(1).Infof("[reset] getting init system") initSystem, err := initsystem.GetInitSystem() if err != nil { - glog.Warningln("[reset] the kubelet service could not be stopped by kubeadm. Unable to detect a supported init system!") - glog.Warningln("[reset] please ensure kubelet is stopped manually") + klog.Warningln("[reset] the kubelet service could not be stopped by kubeadm. Unable to detect a supported init system!") + klog.Warningln("[reset] please ensure kubelet is stopped manually") } else { fmt.Println("[reset] stopping the kubelet service") if err := initSystem.ServiceStop("kubelet"); err != nil { - glog.Warningf("[reset] the kubelet service could not be stopped by kubeadm: [%v]\n", err) - glog.Warningln("[reset] please ensure kubelet is stopped manually") + klog.Warningf("[reset] the kubelet service could not be stopped by kubeadm: [%v]\n", err) + klog.Warningln("[reset] please ensure kubelet is stopped manually") } } @@ -156,29 +156,29 @@ func (r *Reset) Run(out io.Writer, client clientset.Interface) error { fmt.Printf("[reset] unmounting mounted directories in %q\n", kubeadmconstants.KubeletRunDirectory) umountDirsCmd := fmt.Sprintf("awk '$2 ~ path {print $2}' path=%s /proc/mounts | xargs -r umount", kubeadmconstants.KubeletRunDirectory) - glog.V(1).Infof("[reset] executing command %q", umountDirsCmd) + klog.V(1).Infof("[reset] executing command %q", umountDirsCmd) umountOutputBytes, err := exec.Command("sh", "-c", umountDirsCmd).Output() if err != nil { - glog.Errorf("[reset] failed to unmount mounted directories in %s: %s\n", kubeadmconstants.KubeletRunDirectory, string(umountOutputBytes)) + klog.Errorf("[reset] failed to unmount mounted directories in %s: %s\n", kubeadmconstants.KubeletRunDirectory, string(umountOutputBytes)) } - glog.V(1).Info("[reset] removing Kubernetes-managed containers") + klog.V(1).Info("[reset] removing Kubernetes-managed containers") if err := removeContainers(utilsexec.New(), r.criSocketPath); err != nil { - glog.Errorf("[reset] failed to remove containers: %+v", err) + klog.Errorf("[reset] failed to remove containers: %+v", err) } dirsToClean = append(dirsToClean, []string{kubeadmconstants.KubeletRunDirectory, "/etc/cni/net.d", "/var/lib/dockershim", "/var/run/kubernetes"}...) 
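cleanDir's body is not part of this diff; for context, a sweep of the kind reset performs, emptying a directory while keeping the directory itself and logging (rather than aborting on) individual failures, looks roughly like this (helper name assumed):

package example

import (
	"io/ioutil"
	"os"
	"path/filepath"

	"k8s.io/klog"
)

// cleanDirContents removes everything inside dir but leaves dir in
// place; a bad entry is logged and skipped so the sweep continues.
func cleanDirContents(dir string) {
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		klog.Errorf("[reset] failed to read directory %q: %v", dir, err)
		return
	}
	for _, e := range entries {
		path := filepath.Join(dir, e.Name())
		if err := os.RemoveAll(path); err != nil {
			klog.Errorf("[reset] failed to remove %q: %v", path, err)
		}
	}
}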
// Then clean contents from the stateful kubelet, etcd and cni directories fmt.Printf("[reset] deleting contents of stateful directories: %v\n", dirsToClean) for _, dir := range dirsToClean { - glog.V(1).Infof("[reset] deleting content of %s", dir) + klog.V(1).Infof("[reset] deleting content of %s", dir) cleanDir(dir) } // Remove contents from the config and pki directories - glog.V(1).Infoln("[reset] removing contents from the config and pki directories") + klog.V(1).Infoln("[reset] removing contents from the config and pki directories") if r.certsDir != kubeadmapiv1beta1.DefaultCertificatesDir { - glog.Warningf("[reset] WARNING: cleaning a non-default certificates directory: %q\n", r.certsDir) + klog.Warningf("[reset] WARNING: cleaning a non-default certificates directory: %q\n", r.certsDir) } resetConfigDir(kubeadmconstants.KubernetesDir, r.certsDir) @@ -193,7 +193,7 @@ func getEtcdDataDir(manifestPath string, client clientset.Interface) (string, er if err == nil { return cfg.Etcd.Local.DataDir, nil } - glog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap, using etcd pod spec as fallback: %v", err) + klog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap, using etcd pod spec as fallback: %v", err) etcdPod, err := utilstaticpod.ReadStaticPodFromDisk(manifestPath) if err != nil { @@ -261,7 +261,7 @@ func resetConfigDir(configPathDir, pkiPathDir string) { fmt.Printf("[reset] deleting contents of config directories: %v\n", dirsToClean) for _, dir := range dirsToClean { if err := cleanDir(dir); err != nil { - glog.Errorf("[reset] failed to remove directory: %q [%v]\n", dir, err) + klog.Errorf("[reset] failed to remove directory: %q [%v]\n", dir, err) } } @@ -275,7 +275,7 @@ func resetConfigDir(configPathDir, pkiPathDir string) { fmt.Printf("[reset] deleting files: %v\n", filesToClean) for _, path := range filesToClean { if err := os.RemoveAll(path); err != nil { - glog.Errorf("[reset] failed to remove file: %q [%v]\n", path, err) + klog.Errorf("[reset] failed to remove file: %q [%v]\n", path, err) } } } diff --git a/cmd/kubeadm/app/cmd/token.go b/cmd/kubeadm/app/cmd/token.go index b5fc0334e4892..9e28e9000d642 100644 --- a/cmd/kubeadm/app/cmd/token.go +++ b/cmd/kubeadm/app/cmd/token.go @@ -24,10 +24,10 @@ import ( "text/tabwriter" "time" - "github.com/golang/glog" "github.com/pkg/errors" "github.com/renstrom/dedent" "github.com/spf13/cobra" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -113,14 +113,14 @@ func NewCmdToken(out io.Writer, errW io.Writer) *cobra.Command { if len(args) > 0 { bto.TokenStr = args[0] } - glog.V(1).Infoln("[token] validating mixed arguments") + klog.V(1).Infoln("[token] validating mixed arguments") err := validation.ValidateMixedArguments(tokenCmd.Flags()) kubeadmutil.CheckErr(err) err = bto.ApplyTo(cfg) kubeadmutil.CheckErr(err) - glog.V(1).Infoln("[token] getting Clientsets from kubeconfig file") + klog.V(1).Infoln("[token] getting Clientsets from kubeconfig file") kubeConfigFile = cmdutil.FindExistingKubeConfig(kubeConfigFile) client, err := getClientset(kubeConfigFile, dryRun) kubeadmutil.CheckErr(err) @@ -215,13 +215,13 @@ func RunCreateToken(out io.Writer, client clientset.Interface, cfgPath string, c phaseutil.SetKubernetesVersion(cfg) // This call returns the ready-to-use configuration based on the configuration file that might or might not exist and the default cfg populated by flags - glog.V(1).Infoln("[token] loading configurations") + klog.V(1).Infoln("[token] loading 
configurations") internalcfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(cfgPath, cfg) if err != nil { return err } - glog.V(1).Infoln("[token] creating token") + klog.V(1).Infoln("[token] creating token") if err := tokenphase.CreateNewTokens(client, internalcfg.BootstrapTokens); err != nil { return err } @@ -243,7 +243,7 @@ func RunCreateToken(out io.Writer, client clientset.Interface, cfgPath string, c // RunGenerateToken just generates a random token for the user func RunGenerateToken(out io.Writer) error { - glog.V(1).Infoln("[token] generating random token") + klog.V(1).Infoln("[token] generating random token") token, err := bootstraputil.GenerateBootstrapToken() if err != nil { return err @@ -256,7 +256,7 @@ func RunGenerateToken(out io.Writer) error { // RunListTokens lists details on all existing bootstrap tokens on the server. func RunListTokens(out io.Writer, errW io.Writer, client clientset.Interface) error { // First, build our selector for bootstrap tokens only - glog.V(1).Infoln("[token] preparing selector for bootstrap token") + klog.V(1).Infoln("[token] preparing selector for bootstrap token") tokenSelector := fields.SelectorFromSet( map[string]string{ // TODO: We hard-code "type" here until `field_constants.go` that is @@ -269,7 +269,7 @@ func RunListTokens(out io.Writer, errW io.Writer, client clientset.Interface) er FieldSelector: tokenSelector.String(), } - glog.V(1).Infoln("[token] retrieving list of bootstrap tokens") + klog.V(1).Infoln("[token] retrieving list of bootstrap tokens") secrets, err := client.CoreV1().Secrets(metav1.NamespaceSystem).List(listOptions) if err != nil { return errors.Wrap(err, "failed to list bootstrap tokens") @@ -298,7 +298,7 @@ func RunListTokens(out io.Writer, errW io.Writer, client clientset.Interface) er func RunDeleteToken(out io.Writer, client clientset.Interface, tokenIDOrToken string) error { // Assume the given first argument is a token id and try to parse it tokenID := tokenIDOrToken - glog.V(1).Infoln("[token] parsing token ID") + klog.V(1).Infoln("[token] parsing token ID") if !bootstraputil.IsValidBootstrapTokenID(tokenIDOrToken) { // Okay, the full token with both id and secret was probably passed. 
Parse it and extract the ID only bts, err := kubeadmapiv1beta1.NewBootstrapTokenString(tokenIDOrToken) @@ -310,7 +310,7 @@ func RunDeleteToken(out io.Writer, client clientset.Interface, tokenIDOrToken st } tokenSecretName := bootstraputil.BootstrapTokenSecretName(tokenID) - glog.V(1).Infoln("[token] deleting token") + klog.V(1).Infoln("[token] deleting token") if err := client.CoreV1().Secrets(metav1.NamespaceSystem).Delete(tokenSecretName, nil); err != nil { return errors.Wrap(err, "failed to delete bootstrap token") } diff --git a/cmd/kubeadm/app/cmd/upgrade/BUILD b/cmd/kubeadm/app/cmd/upgrade/BUILD index 87a415563750a..fa0e144a6f86a 100644 --- a/cmd/kubeadm/app/cmd/upgrade/BUILD +++ b/cmd/kubeadm/app/cmd/upgrade/BUILD @@ -39,11 +39,11 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/discovery/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/github.com/pmezard/go-difflib/difflib:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kubeadm/app/cmd/upgrade/apply.go b/cmd/kubeadm/app/cmd/upgrade/apply.go index 53d76d6c65668..7d3ad3859c1bf 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply.go @@ -21,11 +21,11 @@ import ( "os" "time" - "github.com/golang/glog" "github.com/pkg/errors" "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" @@ -83,13 +83,13 @@ func NewCmdApply(apf *applyPlanFlags) *cobra.Command { kubeadmutil.CheckErr(err) // Ensure the user is root - glog.V(1).Infof("running preflight checks") + klog.V(1).Infof("running preflight checks") err = runPreflightChecks(flags.ignorePreflightErrorsSet) kubeadmutil.CheckErr(err) // If the version is specified in config file, pick up that value. 
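Stepping back to the token hunks above: bootstrap tokens are ordinary Secrets in kube-system, which is why RunListTokens can find them with a field selector. A minimal client-go sketch of the same query (client construction omitted; the contextless List signature matches the client-go vintage vendored here, newer releases take a context.Context):

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	clientset "k8s.io/client-go/kubernetes"
)

// listBootstrapTokenSecrets mirrors the selector built in
// RunListTokens: bootstrap tokens are Secrets of type
// bootstrap.kubernetes.io/token in the kube-system namespace.
func listBootstrapTokenSecrets(client clientset.Interface) (*v1.SecretList, error) {
	selector := fields.SelectorFromSet(map[string]string{
		"type": "bootstrap.kubernetes.io/token",
	})
	return client.CoreV1().Secrets(metav1.NamespaceSystem).List(metav1.ListOptions{
		FieldSelector: selector.String(),
	})
}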
if flags.cfgPath != "" { - glog.V(1).Infof("fetching configuration from file %s", flags.cfgPath) + klog.V(1).Infof("fetching configuration from file %s", flags.cfgPath) // Note that cfg isn't preserved here, it's just an one-off to populate flags.newK8sVersionStr based on --config cfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(flags.cfgPath, &kubeadmapiv1beta1.InitConfiguration{}) kubeadmutil.CheckErr(err) @@ -147,8 +147,8 @@ func NewCmdApply(apf *applyPlanFlags) *cobra.Command { func RunApply(flags *applyFlags) error { // Start with the basics, verify that the cluster is healthy and get the configuration from the cluster (using the ConfigMap) - glog.V(1).Infof("[upgrade/apply] verifying health of cluster") - glog.V(1).Infof("[upgrade/apply] retrieving configuration from cluster") + klog.V(1).Infof("[upgrade/apply] verifying health of cluster") + klog.V(1).Infof("[upgrade/apply] retrieving configuration from cluster") upgradeVars, err := enforceRequirements(flags.applyPlanFlags, flags.dryRun, flags.newK8sVersionStr) if err != nil { return err @@ -160,7 +160,7 @@ func RunApply(flags *applyFlags) error { } // Validate requested and validate actual version - glog.V(1).Infof("[upgrade/apply] validating requested and actual version") + klog.V(1).Infof("[upgrade/apply] validating requested and actual version") if err := configutil.NormalizeKubernetesVersion(&upgradeVars.cfg.ClusterConfiguration); err != nil { return err } @@ -178,7 +178,7 @@ func RunApply(flags *applyFlags) error { } // Enforce the version skew policies - glog.V(1).Infof("[upgrade/version] enforcing version skew policies") + klog.V(1).Infof("[upgrade/version] enforcing version skew policies") if err := EnforceVersionPolicies(flags, upgradeVars.versionGetter); err != nil { return errors.Wrap(err, "[upgrade/version] FATAL") } @@ -192,7 +192,7 @@ func RunApply(flags *applyFlags) error { // Use a prepuller implementation based on creating DaemonSets // and block until all DaemonSets are ready; then we know for sure that all control plane images are cached locally - glog.V(1).Infof("[upgrade/apply] creating prepuller") + klog.V(1).Infof("[upgrade/apply] creating prepuller") prepuller := upgrade.NewDaemonSetPrepuller(upgradeVars.client, upgradeVars.waiter, &upgradeVars.cfg.ClusterConfiguration) componentsToPrepull := constants.MasterComponents if upgradeVars.cfg.Etcd.External != nil { @@ -203,13 +203,13 @@ func RunApply(flags *applyFlags) error { } // Now; perform the upgrade procedure - glog.V(1).Infof("[upgrade/apply] performing upgrade") + klog.V(1).Infof("[upgrade/apply] performing upgrade") if err := PerformControlPlaneUpgrade(flags, upgradeVars.client, upgradeVars.waiter, upgradeVars.cfg); err != nil { return errors.Wrap(err, "[upgrade/apply] FATAL") } // Upgrade RBAC rules and addons. 
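The skew enforcement delegated to EnforceVersionPolicies above is more involved than this, but the core comparison uses the same apimachinery version package imported in these files; a deliberately simplified stand-in (the one-minor-hop rule here is an approximation, not the real policy):

package example

import (
	"k8s.io/apimachinery/pkg/util/version"
)

// skewOK approximates the policy: no downgrades, same major version,
// and at most one minor-version hop per upgrade.
func skewOK(current, target string) (bool, error) {
	cur, err := version.ParseSemantic(current)
	if err != nil {
		return false, err
	}
	tgt, err := version.ParseSemantic(target)
	if err != nil {
		return false, err
	}
	if tgt.LessThan(cur) || tgt.Major() != cur.Major() {
		return false, nil
	}
	return tgt.Minor()-cur.Minor() <= 1, nil
}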
- glog.V(1).Infof("[upgrade/postupgrade] upgrading RBAC rules and addons") + klog.V(1).Infof("[upgrade/postupgrade] upgrading RBAC rules and addons") if err := upgrade.PerformPostUpgradeTasks(upgradeVars.client, upgradeVars.cfg, flags.newK8sVersion, flags.dryRun); err != nil { return errors.Wrap(err, "[upgrade/postupgrade] FATAL post-upgrade error") } diff --git a/cmd/kubeadm/app/cmd/upgrade/diff.go b/cmd/kubeadm/app/cmd/upgrade/diff.go index 5edd203d0347b..90c8855d44aa6 100644 --- a/cmd/kubeadm/app/cmd/upgrade/diff.go +++ b/cmd/kubeadm/app/cmd/upgrade/diff.go @@ -20,12 +20,12 @@ import ( "io" "io/ioutil" - "github.com/golang/glog" "github.com/pkg/errors" "github.com/pmezard/go-difflib/difflib" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/version" + "k8s.io/klog" kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" @@ -78,7 +78,7 @@ func NewCmdDiff(out io.Writer) *cobra.Command { func runDiff(flags *diffFlags, args []string) error { // If the version is specified in config file, pick up that value. - glog.V(1).Infof("fetching configuration from file %s", flags.cfgPath) + klog.V(1).Infof("fetching configuration from file %s", flags.cfgPath) cfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(flags.cfgPath, &kubeadmapiv1beta1.InitConfiguration{}) if err != nil { return err @@ -116,7 +116,7 @@ func runDiff(flags *diffFlags, args []string) error { case constants.KubeScheduler: path = flags.schedulerManifestPath default: - glog.Errorf("[diff] unknown spec %v", spec) + klog.Errorf("[diff] unknown spec %v", spec) continue } diff --git a/cmd/kubeadm/app/cmd/upgrade/node.go b/cmd/kubeadm/app/cmd/upgrade/node.go index dbe3c9d9c5a52..9e3c99a097c46 100644 --- a/cmd/kubeadm/app/cmd/upgrade/node.go +++ b/cmd/kubeadm/app/cmd/upgrade/node.go @@ -22,10 +22,10 @@ import ( "os" "path/filepath" - "github.com/golang/glog" "github.com/pkg/errors" "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/util/version" + "k8s.io/klog" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -126,7 +126,7 @@ func NewCmdUpgradeControlPlane() *cobra.Command { Run: func(cmd *cobra.Command, args []string) { if flags.nodeName == "" { - glog.V(1).Infoln("[upgrade] found NodeName empty; considered OS hostname as NodeName") + klog.V(1).Infoln("[upgrade] found NodeName empty; considered OS hostname as NodeName") } nodeName, err := node.GetHostname(flags.nodeName) if err != nil { diff --git a/cmd/kubeadm/app/cmd/upgrade/plan.go b/cmd/kubeadm/app/cmd/upgrade/plan.go index b71719c1adad2..8ce7506f7b77d 100644 --- a/cmd/kubeadm/app/cmd/upgrade/plan.go +++ b/cmd/kubeadm/app/cmd/upgrade/plan.go @@ -24,10 +24,10 @@ import ( "strings" "text/tabwriter" - "github.com/golang/glog" "github.com/pkg/errors" "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/util/version" + "k8s.io/klog" kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -62,7 +62,7 @@ func NewCmdPlan(apf *applyPlanFlags) *cobra.Command { // If the version is specified in config file, pick up that value. 
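apply and plan repeat the same idiom: when --config is set, load the file and let it override the version from the command line. Factored out, it would look like the sketch below (the helper name is invented, and the KubernetesVersion field is assumed to be promoted from the embedded ClusterConfiguration):

package example

import (
	"k8s.io/klog"

	kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
	configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
)

// versionFromConfigFile returns the fallback version unless a config
// file is given, in which case the file's version wins.
func versionFromConfigFile(cfgPath, fallback string) (string, error) {
	if cfgPath == "" {
		return fallback, nil
	}
	klog.V(1).Infof("fetching configuration from file %s", cfgPath)
	cfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(cfgPath, &kubeadmapiv1beta1.InitConfiguration{})
	if err != nil {
		return "", err
	}
	return cfg.KubernetesVersion, nil
}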
if flags.cfgPath != "" { - glog.V(1).Infof("fetching configuration from file %s", flags.cfgPath) + klog.V(1).Infof("fetching configuration from file %s", flags.cfgPath) cfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(flags.cfgPath, &kubeadmapiv1beta1.InitConfiguration{}) kubeadmutil.CheckErr(err) @@ -88,8 +88,8 @@ func NewCmdPlan(apf *applyPlanFlags) *cobra.Command { // RunPlan takes care of outputting available versions to upgrade to for the user func RunPlan(flags *planFlags) error { // Start with the basics, verify that the cluster is healthy, build a client and a versionGetter. Never dry-run when planning. - glog.V(1).Infof("[upgrade/plan] verifying health of cluster") - glog.V(1).Infof("[upgrade/plan] retrieving configuration from cluster") + klog.V(1).Infof("[upgrade/plan] verifying health of cluster") + klog.V(1).Infof("[upgrade/plan] retrieving configuration from cluster") upgradeVars, err := enforceRequirements(flags.applyPlanFlags, false, flags.newK8sVersionStr) if err != nil { return err @@ -120,7 +120,7 @@ func RunPlan(flags *planFlags) error { } // Compute which upgrade possibilities there are - glog.V(1).Infof("[upgrade/plan] computing upgrade possibilities") + klog.V(1).Infof("[upgrade/plan] computing upgrade possibilities") availUpgrades, err := upgrade.GetAvailableUpgrades(upgradeVars.versionGetter, flags.allowExperimentalUpgrades, flags.allowRCUpgrades, etcdClient, upgradeVars.cfg.FeatureGates, upgradeVars.client) if err != nil { return errors.Wrap(err, "[upgrade/versions] FATAL") diff --git a/cmd/kubeadm/app/cmd/version.go b/cmd/kubeadm/app/cmd/version.go index 1adb534af151e..2d237d50b2076 100644 --- a/cmd/kubeadm/app/cmd/version.go +++ b/cmd/kubeadm/app/cmd/version.go @@ -21,9 +21,9 @@ import ( "fmt" "io" - "github.com/golang/glog" "github.com/pkg/errors" "github.com/spf13/cobra" + "k8s.io/klog" "sigs.k8s.io/yaml" apimachineryversion "k8s.io/apimachinery/pkg/version" @@ -53,7 +53,7 @@ func NewCmdVersion(out io.Writer) *cobra.Command { // RunVersion provides the version information of kubeadm in format depending on arguments // specified in cobra.Command. 
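RunVersion below dispatches on that flag's value; the exact cases are not all visible in this hunk, so the following is an assumption-laden sketch of what such a switch typically does with yaml/json output (using the sigs.k8s.io/yaml import added above; anything unrecognized is an error):

package example

import (
	"encoding/json"
	"fmt"
	"io"

	"github.com/pkg/errors"
	"sigs.k8s.io/yaml"
)

// render marshals v according to an --output-style format string.
func render(out io.Writer, v interface{}, format string) error {
	switch format {
	case "":
		_, err := fmt.Fprintf(out, "%#v\n", v)
		return err
	case "yaml":
		b, err := yaml.Marshal(v)
		if err != nil {
			return err
		}
		_, err = out.Write(b)
		return err
	case "json":
		b, err := json.MarshalIndent(v, "", "  ")
		if err != nil {
			return err
		}
		_, err = fmt.Fprintln(out, string(b))
		return err
	default:
		return errors.Errorf("invalid output format: %s", format)
	}
}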
func RunVersion(out io.Writer, cmd *cobra.Command) error { - glog.V(1).Infoln("[version] retrieving version info") + klog.V(1).Infoln("[version] retrieving version info") clientVersion := version.Get() v := Version{ ClientVersion: &clientVersion, @@ -62,7 +62,7 @@ func RunVersion(out io.Writer, cmd *cobra.Command) error { const flag = "output" of, err := cmd.Flags().GetString(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } switch of { diff --git a/cmd/kubeadm/app/kubeadm.go b/cmd/kubeadm/app/kubeadm.go index 01cd3a968a7ea..030abca5d9479 100644 --- a/cmd/kubeadm/app/kubeadm.go +++ b/cmd/kubeadm/app/kubeadm.go @@ -20,8 +20,8 @@ import ( "flag" "os" - _ "github.com/golang/glog" "github.com/spf13/pflag" + _ "k8s.io/klog" utilflag "k8s.io/apiserver/pkg/util/flag" "k8s.io/kubernetes/cmd/kubeadm/app/cmd" diff --git a/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/BUILD b/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/BUILD index 8bd587953ccfd..41d801a698095 100644 --- a/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/BUILD +++ b/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/BUILD @@ -34,8 +34,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo.go b/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo.go index 5e46d346e2a21..1cc41c41a3234 100644 --- a/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo.go +++ b/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo/clusterinfo.go @@ -19,8 +19,8 @@ package clusterinfo import ( "fmt" - "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/klog" "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" @@ -44,7 +44,7 @@ func CreateBootstrapConfigMapIfNotExists(client clientset.Interface, file string fmt.Printf("[bootstraptoken] creating the %q ConfigMap in the %q namespace\n", bootstrapapi.ConfigMapClusterInfo, metav1.NamespacePublic) - glog.V(1).Infoln("[bootstraptoken] loading admin kubeconfig") + klog.V(1).Infoln("[bootstraptoken] loading admin kubeconfig") adminConfig, err := clientcmd.LoadFromFile(file) if err != nil { return errors.Wrap(err, "failed to load admin kubeconfig") @@ -52,7 +52,7 @@ func CreateBootstrapConfigMapIfNotExists(client clientset.Interface, file string adminCluster := adminConfig.Contexts[adminConfig.CurrentContext].Cluster // Copy the cluster from admin.conf to the bootstrap kubeconfig, contains the CA cert and the server URL - glog.V(1).Infoln("[bootstraptoken] copying the cluster from admin.conf to the bootstrap kubeconfig") + klog.V(1).Infoln("[bootstraptoken] copying the cluster from admin.conf to the bootstrap kubeconfig") bootstrapConfig := &clientcmdapi.Config{ Clusters: map[string]*clientcmdapi.Cluster{ "": adminConfig.Clusters[adminCluster], @@ -64,7 +64,7 @@ func CreateBootstrapConfigMapIfNotExists(client clientset.Interface, file string } // Create or update the ConfigMap in the kube-public namespace - glog.V(1).Infoln("[bootstraptoken] creating/updating ConfigMap in kube-public namespace") + klog.V(1).Infoln("[bootstraptoken] creating/updating 
ConfigMap in kube-public namespace") return apiclient.CreateOrUpdateConfigMap(client, &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: bootstrapapi.ConfigMapClusterInfo, @@ -78,7 +78,7 @@ func CreateBootstrapConfigMapIfNotExists(client clientset.Interface, file string // CreateClusterInfoRBACRules creates the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace to unauthenticated users func CreateClusterInfoRBACRules(client clientset.Interface) error { - glog.V(1).Infoln("creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace") + klog.V(1).Infoln("creating the RBAC rules for exposing the cluster-info ConfigMap in the kube-public namespace") err := apiclient.CreateOrUpdateRole(client, &rbac.Role{ ObjectMeta: metav1.ObjectMeta{ Name: BootstrapSignerClusterRoleName, diff --git a/cmd/kubeadm/app/phases/certs/BUILD b/cmd/kubeadm/app/phases/certs/BUILD index ee39c18a4e6b0..b6947cdc12cc2 100644 --- a/cmd/kubeadm/app/phases/certs/BUILD +++ b/cmd/kubeadm/app/phases/certs/BUILD @@ -37,8 +37,8 @@ go_library( "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/util/pkiutil:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kubeadm/app/phases/certs/certs.go b/cmd/kubeadm/app/phases/certs/certs.go index b25fea917eada..8872ac75df7b8 100644 --- a/cmd/kubeadm/app/phases/certs/certs.go +++ b/cmd/kubeadm/app/phases/certs/certs.go @@ -23,9 +23,9 @@ import ( "os" "path/filepath" - "github.com/golang/glog" "github.com/pkg/errors" certutil "k8s.io/client-go/util/cert" + "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil" @@ -34,7 +34,7 @@ import ( // CreatePKIAssets will create and write to disk all PKI assets necessary to establish the control plane. // If the PKI assets already exists in the target folder, they are used only if evaluated equal; otherwise an error is returned. func CreatePKIAssets(cfg *kubeadmapi.InitConfiguration) error { - glog.V(1).Infoln("creating PKI assets") + klog.V(1).Infoln("creating PKI assets") // This structure cannot handle multilevel CA hierarchies. // This isn't a problem right now, but may become one in the future. @@ -69,7 +69,7 @@ func CreatePKIAssets(cfg *kubeadmapi.InitConfiguration) error { // CreateServiceAccountKeyAndPublicKeyFiles create a new public/private key files for signing service account users. // If the sa public/private key files already exists in the target folder, they are used only if evaluated equals; otherwise an error is returned. 
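apiclient.CreateOrUpdateConfigMap, used for cluster-info above, is the usual create-then-update idiom that makes the call idempotent; a generic client-go sketch (again with the contextless signatures of this vendoring era):

package example

import (
	"github.com/pkg/errors"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	clientset "k8s.io/client-go/kubernetes"
)

// createOrUpdateConfigMap tries Create first and falls back to Update
// when the object already exists, so repeated runs converge.
func createOrUpdateConfigMap(client clientset.Interface, cm *v1.ConfigMap) error {
	if _, err := client.CoreV1().ConfigMaps(cm.Namespace).Create(cm); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return errors.Wrap(err, "unable to create ConfigMap")
		}
		if _, err := client.CoreV1().ConfigMaps(cm.Namespace).Update(cm); err != nil {
			return errors.Wrap(err, "unable to update ConfigMap")
		}
	}
	return nil
}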
func CreateServiceAccountKeyAndPublicKeyFiles(cfg *kubeadmapi.InitConfiguration) error { - glog.V(1).Infoln("creating a new public/private key files for signing service account users") + klog.V(1).Infoln("creating new public/private key files for signing service account users") saSigningKey, err := NewServiceAccountSigningKey() if err != nil { return err } @@ -110,7 +110,7 @@ func CreateCACertAndKeyFiles(certSpec *KubeadmCert, cfg *kubeadmapi.InitConfigur if certSpec.CAName != "" { return errors.Errorf("this function should only be used for CAs, but cert %s has CA %s", certSpec.Name, certSpec.CAName) } - glog.V(1).Infof("creating a new certificate authority for %s", certSpec.Name) + klog.V(1).Infof("creating a new certificate authority for %s", certSpec.Name) certConfig, err := certSpec.GetConfig(cfg) if err != nil { diff --git a/cmd/kubeadm/app/phases/controlplane/BUILD b/cmd/kubeadm/app/phases/controlplane/BUILD index faa2809ce902d..12afc6d5ce9c3 100644 --- a/cmd/kubeadm/app/phases/controlplane/BUILD +++ b/cmd/kubeadm/app/phases/controlplane/BUILD @@ -47,8 +47,8 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kubeadm/app/phases/controlplane/manifests.go b/cmd/kubeadm/app/phases/controlplane/manifests.go index c52c5bd268f13..616d3f0e52367 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests.go @@ -24,8 +24,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/version" @@ -42,7 +42,7 @@ import ( // CreateInitStaticPodManifestFiles will write all static pod manifest files needed to bring up the control plane.
func CreateInitStaticPodManifestFiles(manifestDir string, cfg *kubeadmapi.InitConfiguration) error { - glog.V(1).Infoln("[control-plane] creating static Pod files") + klog.V(1).Infoln("[control-plane] creating static Pod files") return CreateStaticPodFiles(manifestDir, cfg, kubeadmconstants.KubeAPIServer, kubeadmconstants.KubeControllerManager, kubeadmconstants.KubeScheduler) } @@ -97,7 +97,7 @@ func CreateStaticPodFiles(manifestDir string, cfg *kubeadmapi.InitConfiguration, } // gets the StaticPodSpecs, actualized for the current InitConfiguration - glog.V(1).Infoln("[control-plane] getting StaticPodSpecs") + klog.V(1).Infoln("[control-plane] getting StaticPodSpecs") specs := GetStaticPodSpecs(cfg, k8sVersion) // creates required static pod specs @@ -113,7 +113,7 @@ func CreateStaticPodFiles(manifestDir string, cfg *kubeadmapi.InitConfiguration, return errors.Wrapf(err, "failed to create static pod manifest file for %q", componentName) } - glog.V(1).Infof("[control-plane] wrote static Pod manifest for component %q to %q\n", componentName, kubeadmconstants.GetStaticPodFilepath(componentName, manifestDir)) + klog.V(1).Infof("[control-plane] wrote static Pod manifest for component %q to %q\n", componentName, kubeadmconstants.GetStaticPodFilepath(componentName, manifestDir)) } return nil diff --git a/cmd/kubeadm/app/phases/etcd/BUILD b/cmd/kubeadm/app/phases/etcd/BUILD index d3268aa6e1afd..c8182e8cb74d3 100644 --- a/cmd/kubeadm/app/phases/etcd/BUILD +++ b/cmd/kubeadm/app/phases/etcd/BUILD @@ -31,8 +31,8 @@ go_library( "//cmd/kubeadm/app/util/staticpod:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kubeadm/app/phases/etcd/local.go b/cmd/kubeadm/app/phases/etcd/local.go index 2aeef63207d7d..afe36b53702b0 100644 --- a/cmd/kubeadm/app/phases/etcd/local.go +++ b/cmd/kubeadm/app/phases/etcd/local.go @@ -21,8 +21,8 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/klog" "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" @@ -54,7 +54,7 @@ func CreateLocalEtcdStaticPodManifestFile(manifestDir string, cfg *kubeadmapi.In return err } - glog.V(1).Infof("[etcd] wrote Static Pod manifest for a local etcd instance to %q\n", kubeadmconstants.GetStaticPodFilepath(kubeadmconstants.Etcd, manifestDir)) + klog.V(1).Infof("[etcd] wrote Static Pod manifest for a local etcd instance to %q\n", kubeadmconstants.GetStaticPodFilepath(kubeadmconstants.Etcd, manifestDir)) return nil } @@ -63,7 +63,7 @@ func CheckLocalEtcdClusterStatus(client clientset.Interface, cfg *kubeadmapi.Ini fmt.Println("[etcd] Checking Etcd cluster health") // creates an etcd client that connects to all the local/stacked etcd members - glog.V(1).Info("creating etcd client that connects to etcd pods") + klog.V(1).Info("creating etcd client that connects to etcd pods") etcdClient, err := etcdutil.NewFromCluster(client, cfg.CertificatesDir) if err != nil { return err @@ -83,7 +83,7 @@ func CheckLocalEtcdClusterStatus(client clientset.Interface, cfg *kubeadmapi.Ini // Other members of the etcd cluster will be notified of the joining node in beforehand as well. 
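The hunks above and below rely on klog being a drop-in for glog: the same V(level) verbosity gate and the same printf-style Info/Warning/Error/Fatal calls, plus the explicit InitFlags call these files now wire up. A minimal sketch of that surface in a standalone binary; it is not taken from this patch, and the message text is illustrative:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog registers its flags (-v, -logtostderr, ...) via an explicit
	// InitFlags call; passing nil registers them on flag.CommandLine.
	klog.InitFlags(nil)
	flag.Parse()

	klog.V(1).Infoln("[sketch] printed only when running with -v=1 or higher")
	klog.Infof("[sketch] always printed: %s", "hello")

	// Flush any buffered log output before exiting.
	klog.Flush()
}

Running the binary with -v=1 enables the V(1) line; without the flag only the Infof line appears.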
func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifestDir string, cfg *kubeadmapi.InitConfiguration) error { // creates an etcd client that connects to all the local/stacked etcd members - glog.V(1).Info("creating etcd client that connects to etcd pods") + klog.V(1).Info("creating etcd client that connects to etcd pods") etcdClient, err := etcdutil.NewFromCluster(client, cfg.CertificatesDir) if err != nil { return err @@ -92,15 +92,15 @@ func CreateStackedEtcdStaticPodManifestFile(client clientset.Interface, manifest // notifies the other members of the etcd cluster about the joining member etcdPeerAddress := fmt.Sprintf("https://%s:%d", cfg.LocalAPIEndpoint.AdvertiseAddress, kubeadmconstants.EtcdListenPeerPort) - glog.V(1).Infof("Adding etcd member: %s", etcdPeerAddress) + klog.V(1).Infof("Adding etcd member: %s", etcdPeerAddress) initialCluster, err := etcdClient.AddMember(cfg.NodeRegistration.Name, etcdPeerAddress) if err != nil { return err } fmt.Println("[etcd] Announced new etcd member joining to the existing etcd cluster") - glog.V(1).Infof("Updated etcd member list: %v", initialCluster) + klog.V(1).Infof("Updated etcd member list: %v", initialCluster) - glog.V(1).Info("Creating local etcd static pod manifest file") + klog.V(1).Info("Creating local etcd static pod manifest file") // gets etcd StaticPodSpec, actualized for the current InitConfiguration and the new list of etcd members spec := GetEtcdPodSpec(cfg, initialCluster) // writes etcd StaticPod to disk diff --git a/cmd/kubeadm/app/phases/kubeconfig/BUILD b/cmd/kubeadm/app/phases/kubeconfig/BUILD index 74e70c2bab719..c23e735b972f4 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/BUILD +++ b/cmd/kubeadm/app/phases/kubeconfig/BUILD @@ -22,8 +22,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go index b18baf4ca902a..805d3a4215526 100644 --- a/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go +++ b/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go @@ -25,11 +25,11 @@ import ( "os" "path/filepath" - "github.com/golang/glog" "github.com/pkg/errors" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" certutil "k8s.io/client-go/util/cert" + "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" @@ -61,7 +61,7 @@ type kubeConfigSpec struct { // to establish the control plane, including also the admin kubeconfig file. // If kubeconfig files already exists, they are used only if evaluated equal; otherwise an error is returned. func CreateInitKubeConfigFiles(outDir string, cfg *kubeadmapi.InitConfiguration) error { - glog.V(1).Infoln("creating all kubeconfig files") + klog.V(1).Infoln("creating all kubeconfig files") return createKubeConfigFiles( outDir, cfg, @@ -89,7 +89,7 @@ func CreateJoinControlPlaneKubeConfigFiles(outDir string, cfg *kubeadmapi.InitCo // CreateKubeConfigFile creates a kubeconfig file. // If the kubeconfig file already exists, it is used only if evaluated equal; otherwise an error is returned. 
func CreateKubeConfigFile(kubeConfigFileName string, outDir string, cfg *kubeadmapi.InitConfiguration) error { - glog.V(1).Infof("creating kubeconfig file for %s", kubeConfigFileName) + klog.V(1).Infof("creating kubeconfig file for %s", kubeConfigFileName) return createKubeConfigFiles(outDir, cfg, kubeConfigFileName) } diff --git a/cmd/kubeadm/app/phases/kubelet/BUILD b/cmd/kubeadm/app/phases/kubelet/BUILD index 4e440c47721b5..ea79e49361a8e 100644 --- a/cmd/kubeadm/app/phases/kubelet/BUILD +++ b/cmd/kubeadm/app/phases/kubelet/BUILD @@ -29,8 +29,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cmd/kubeadm/app/phases/kubelet/flags.go b/cmd/kubeadm/app/phases/kubelet/flags.go index 6037e20292af0..073752e9e4312 100644 --- a/cmd/kubeadm/app/phases/kubelet/flags.go +++ b/cmd/kubeadm/app/phases/kubelet/flags.go @@ -23,8 +23,8 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" @@ -78,7 +78,7 @@ func buildKubeletArgMap(opts kubeletFlagsOpts) map[string]string { kubeletFlags["network-plugin"] = "cni" driver, err := kubeadmutil.GetCgroupDriverDocker(opts.execer) if err != nil { - glog.Warningf("cannot automatically assign a '--cgroup-driver' value when starting the Kubelet: %v\n", err) + klog.Warningf("cannot automatically assign a '--cgroup-driver' value when starting the Kubelet: %v\n", err) } else { kubeletFlags["cgroup-driver"] = driver } @@ -103,7 +103,7 @@ func buildKubeletArgMap(opts kubeletFlagsOpts) map[string]string { // Make sure the node name we're passed will work with Kubelet if opts.nodeRegOpts.Name != "" && opts.nodeRegOpts.Name != opts.defaultHostname { - glog.V(1).Infof("setting kubelet hostname-override to %q", opts.nodeRegOpts.Name) + klog.V(1).Infof("setting kubelet hostname-override to %q", opts.nodeRegOpts.Name) kubeletFlags["hostname-override"] = opts.nodeRegOpts.Name } diff --git a/cmd/kubeadm/app/phases/selfhosting/BUILD b/cmd/kubeadm/app/phases/selfhosting/BUILD index 66ebc01a03c40..6a94bb81da6dd 100644 --- a/cmd/kubeadm/app/phases/selfhosting/BUILD +++ b/cmd/kubeadm/app/phases/selfhosting/BUILD @@ -42,8 +42,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kubeadm/app/phases/selfhosting/selfhosting.go b/cmd/kubeadm/app/phases/selfhosting/selfhosting.go index 3d149a19d2845..eb4d1dcdb29b1 100644 --- a/cmd/kubeadm/app/phases/selfhosting/selfhosting.go +++ b/cmd/kubeadm/app/phases/selfhosting/selfhosting.go @@ -22,7 +22,7 @@ import ( "os" "time" - "github.com/golang/glog" + "k8s.io/klog" "github.com/pkg/errors" apps "k8s.io/api/apps/v1" @@ -57,12 +57,12 @@ const ( // Otherwise, there is a race condition when we proceed without kubelet having restarted the API 
server correctly and the next .Create call flakes // 9. Do that for the kube-apiserver, kube-controller-manager and kube-scheduler in a loop func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubeadmapi.InitConfiguration, client clientset.Interface, waiter apiclient.Waiter, dryRun bool, certsInSecrets bool) error { - glog.V(1).Infoln("creating self hosted control plane") + klog.V(1).Infoln("creating self hosted control plane") // Adjust the timeout slightly to something self-hosting specific waiter.SetTimeout(selfHostingWaitTimeout) // Here the map of different mutators to use for the control plane's PodSpec is stored - glog.V(1).Infoln("getting mutators") + klog.V(1).Infoln("getting mutators") mutators := GetMutatorsFromFeatureGates(certsInSecrets) if certsInSecrets { diff --git a/cmd/kubeadm/app/preflight/BUILD b/cmd/kubeadm/app/preflight/BUILD index b8ef0920f81f7..0c248791a5709 100644 --- a/cmd/kubeadm/app/preflight/BUILD +++ b/cmd/kubeadm/app/preflight/BUILD @@ -30,8 +30,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//vendor/github.com/PuerkitoBio/purell:go_default_library", "//vendor/github.com/blang/semver:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 0980110190d9d..b27a93646a203 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -36,8 +36,8 @@ import ( "github.com/PuerkitoBio/purell" "github.com/blang/semver" - "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/klog" netutil "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/sets" @@ -102,7 +102,7 @@ func (ContainerRuntimeCheck) Name() string { // Check validates the container runtime func (crc ContainerRuntimeCheck) Check() (warnings, errorList []error) { - glog.V(1).Infoln("validating the container runtime") + klog.V(1).Infoln("validating the container runtime") if err := crc.runtime.IsRunning(); err != nil { errorList = append(errorList, err) } @@ -128,7 +128,7 @@ func (sc ServiceCheck) Name() string { // Check validates if the service is enabled and active. func (sc ServiceCheck) Check() (warnings, errorList []error) { - glog.V(1).Infoln("validating if the service is enabled and active") + klog.V(1).Infoln("validating if the service is enabled and active") initSystem, err := initsystem.GetInitSystem() if err != nil { return []error{err}, nil @@ -169,7 +169,7 @@ func (FirewalldCheck) Name() string { // Check validates if the firewall is enabled and active. func (fc FirewalldCheck) Check() (warnings, errorList []error) { - glog.V(1).Infoln("validating if the firewall is enabled and active") + klog.V(1).Infoln("validating if the firewall is enabled and active") initSystem, err := initsystem.GetInitSystem() if err != nil { return []error{err}, nil @@ -206,7 +206,7 @@ func (poc PortOpenCheck) Name() string { // Check validates if the particular port is available. 
func (poc PortOpenCheck) Check() (warnings, errorList []error) { - glog.V(1).Infof("validating availability of port %d", poc.port) + klog.V(1).Infof("validating availability of port %d", poc.port) errorList = []error{} ln, err := net.Listen("tcp", fmt.Sprintf(":%d", poc.port)) if err != nil { @@ -243,7 +243,7 @@ func (dac DirAvailableCheck) Name() string { // Check validates if a directory does not exist or empty. func (dac DirAvailableCheck) Check() (warnings, errorList []error) { - glog.V(1).Infof("validating the existence and emptiness of directory %s", dac.Path) + klog.V(1).Infof("validating the existence and emptiness of directory %s", dac.Path) errorList = []error{} // If it doesn't exist we are good: if _, err := os.Stat(dac.Path); os.IsNotExist(err) { @@ -281,7 +281,7 @@ func (fac FileAvailableCheck) Name() string { // Check validates if the given file does not already exist. func (fac FileAvailableCheck) Check() (warnings, errorList []error) { - glog.V(1).Infof("validating the existence of file %s", fac.Path) + klog.V(1).Infof("validating the existence of file %s", fac.Path) errorList = []error{} if _, err := os.Stat(fac.Path); err == nil { errorList = append(errorList, errors.Errorf("%s already exists", fac.Path)) @@ -305,7 +305,7 @@ func (fac FileExistingCheck) Name() string { // Check validates if the given file already exists. func (fac FileExistingCheck) Check() (warnings, errorList []error) { - glog.V(1).Infof("validating the existence of file %s", fac.Path) + klog.V(1).Infof("validating the existence of file %s", fac.Path) errorList = []error{} if _, err := os.Stat(fac.Path); err != nil { errorList = append(errorList, errors.Errorf("%s doesn't exist", fac.Path)) @@ -330,7 +330,7 @@ func (fcc FileContentCheck) Name() string { // Check validates if the given file contains the given content. func (fcc FileContentCheck) Check() (warnings, errorList []error) { - glog.V(1).Infof("validating the contents of file %s", fcc.Path) + klog.V(1).Infof("validating the contents of file %s", fcc.Path) f, err := os.Open(fcc.Path) if err != nil { return nil, []error{errors.Errorf("%s does not exist", fcc.Path)} @@ -371,7 +371,7 @@ func (ipc InPathCheck) Name() string { // Check validates if the given executable is present in the path. func (ipc InPathCheck) Check() (warnings, errs []error) { - glog.V(1).Infof("validating the presence of executable %s", ipc.executable) + klog.V(1).Infof("validating the presence of executable %s", ipc.executable) _, err := ipc.exec.LookPath(ipc.executable) if err != nil { if ipc.mandatory { @@ -401,7 +401,7 @@ func (HostnameCheck) Name() string { // Check validates if hostname match dns sub domain regex. func (hc HostnameCheck) Check() (warnings, errorList []error) { - glog.V(1).Infof("checking whether the given node name is reachable using net.LookupHost") + klog.V(1).Infof("checking whether the given node name is reachable using net.LookupHost") errorList = []error{} warnings = []error{} addr, err := net.LookupHost(hc.nodeName) @@ -428,7 +428,7 @@ func (hst HTTPProxyCheck) Name() string { // Check validates http connectivity type, direct or via proxy. 
func (hst HTTPProxyCheck) Check() (warnings, errorList []error) { - glog.V(1).Infof("validating if the connectivity type is via proxy or direct") + klog.V(1).Infof("validating if the connectivity type is via proxy or direct") u := (&url.URL{Scheme: hst.Proto, Host: hst.Host}).String() req, err := http.NewRequest("GET", u, nil) @@ -464,7 +464,7 @@ func (HTTPProxyCIDRCheck) Name() string { // Check validates http connectivity to first IP address in the CIDR. // If it is not directly connected and goes via proxy it will produce warning. func (subnet HTTPProxyCIDRCheck) Check() (warnings, errorList []error) { - glog.V(1).Infoln("validating http connectivity to first IP address in the CIDR") + klog.V(1).Infoln("validating http connectivity to first IP address in the CIDR") if len(subnet.CIDR) == 0 { return nil, nil } @@ -513,7 +513,7 @@ func (SystemVerificationCheck) Name() string { // Check runs all individual checks func (sysver SystemVerificationCheck) Check() (warnings, errorList []error) { - glog.V(1).Infoln("running all checks") + klog.V(1).Infoln("running all checks") // Create a buffered writer and choose a quite large value (1M) and suppose the output from the system verification test won't exceed the limit // Run the system verification check, but write to out buffered writer instead of stdout bufw := bufio.NewWriterSize(os.Stdout, 1*1024*1024) @@ -570,7 +570,7 @@ func (KubernetesVersionCheck) Name() string { // Check validates Kubernetes and kubeadm versions func (kubever KubernetesVersionCheck) Check() (warnings, errorList []error) { - glog.V(1).Infoln("validating Kubernetes and kubeadm version") + klog.V(1).Infoln("validating Kubernetes and kubeadm version") // Skip this check for "super-custom builds", where apimachinery/the overall codebase version is not set. if strings.HasPrefix(kubever.KubeadmVersion, "v0.0.0") { return nil, nil @@ -611,7 +611,7 @@ func (KubeletVersionCheck) Name() string { // Check validates kubelet version. It should be not less than minimal supported version func (kubever KubeletVersionCheck) Check() (warnings, errorList []error) { - glog.V(1).Infoln("validating kubelet version") + klog.V(1).Infoln("validating kubelet version") kubeletVersion, err := GetKubeletVersion(kubever.exec) if err != nil { return nil, []error{errors.Wrap(err, "couldn't get kubelet version")} @@ -642,7 +642,7 @@ func (SwapCheck) Name() string { // Check validates whether swap is enabled or not func (swc SwapCheck) Check() (warnings, errorList []error) { - glog.V(1).Infoln("validating whether swap is enabled or not") + klog.V(1).Infoln("validating whether swap is enabled or not") f, err := os.Open("/proc/swaps") if err != nil { // /proc/swaps not available, thus no reasons to warn @@ -683,7 +683,7 @@ func (ExternalEtcdVersionCheck) Name() string { // Check validates external etcd version // TODO: Use the official etcd Golang client for this instead? 
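Every preflight check converted above follows the same shape: a Name() used in reporting and a Check() returning (warnings, errorList []error), with a klog.V(1) trace on entry. A minimal sketch of a custom check in that style; FileExistsCheck and its message are illustrative and not part of kubeadm:

package sketch

import (
	"fmt"
	"os"

	"k8s.io/klog"
)

// FileExistsCheck is a hypothetical check that reports an error when the
// given file is missing.
type FileExistsCheck struct {
	Path string
}

// Name identifies the check in preflight output.
func (FileExistsCheck) Name() string { return "FileExists" }

// Check returns non-fatal warnings and fatal errors, matching the
// (warnings, errorList []error) convention of the checks in this file.
func (c FileExistsCheck) Check() (warnings, errorList []error) {
	klog.V(1).Infof("validating the existence of file %s", c.Path)
	if _, err := os.Stat(c.Path); err != nil {
		errorList = append(errorList, fmt.Errorf("%s doesn't exist", c.Path))
	}
	return warnings, errorList
}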
func (evc ExternalEtcdVersionCheck) Check() (warnings, errorList []error) { - glog.V(1).Infoln("validating the external etcd version") + klog.V(1).Infoln("validating the external etcd version") // Return quickly if the user isn't using external etcd if evc.Etcd.External.Endpoints == nil { @@ -831,13 +831,13 @@ func (ipc ImagePullCheck) Check() (warnings, errorList []error) { for _, image := range ipc.imageList { ret, err := ipc.runtime.ImageExists(image) if ret && err == nil { - glog.V(1).Infof("image exists: %s", image) + klog.V(1).Infof("image exists: %s", image) continue } if err != nil { errorList = append(errorList, errors.Wrapf(err, "failed to check if image %s exists", image)) } - glog.V(1).Infof("pulling %s", image) + klog.V(1).Infof("pulling %s", image) if err := ipc.runtime.PullImage(image); err != nil { errorList = append(errorList, errors.Wrapf(err, "failed to pull image %s", image)) } diff --git a/cmd/kubeadm/app/util/BUILD b/cmd/kubeadm/app/util/BUILD index 0e4d79acd7bd1..becafa093b232 100644 --- a/cmd/kubeadm/app/util/BUILD +++ b/cmd/kubeadm/app/util/BUILD @@ -34,8 +34,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], diff --git a/cmd/kubeadm/app/util/config/BUILD b/cmd/kubeadm/app/util/config/BUILD index 531318c4835b7..6fcd1e1791673 100644 --- a/cmd/kubeadm/app/util/config/BUILD +++ b/cmd/kubeadm/app/util/config/BUILD @@ -35,8 +35,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kubeadm/app/util/config/common.go b/cmd/kubeadm/app/util/config/common.go index 3dc712f7b06c0..adbe56424e36f 100644 --- a/cmd/kubeadm/app/util/config/common.go +++ b/cmd/kubeadm/app/util/config/common.go @@ -21,8 +21,8 @@ import ( "net" "strings" - "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" netutil "k8s.io/apimachinery/pkg/util/net" @@ -103,7 +103,7 @@ func DetectUnsupportedVersion(b []byte) error { } } if mutuallyExclusiveCount > 1 { - glog.Warningf("WARNING: Detected resource kinds that may not apply: %v", mutuallyExclusive) + klog.Warningf("WARNING: Detected resource kinds that may not apply: %v", mutuallyExclusive) } return nil @@ -141,7 +141,7 @@ func LowercaseSANs(sans []string) { for i, san := range sans { lowercase := strings.ToLower(san) if lowercase != san { - glog.V(1).Infof("lowercasing SAN %q to %q", san, lowercase) + klog.V(1).Infof("lowercasing SAN %q to %q", san, lowercase) sans[i] = lowercase } } @@ -166,7 +166,7 @@ func ChooseAPIServerBindAddress(bindAddress net.IP) (net.IP, error) { ip, err := netutil.ChooseBindAddress(bindAddress) if err != nil { if netutil.IsNoRoutesError(err) { - glog.Warningf("WARNING: could not obtain a bind address for the API Server: %v; using: %s", err, constants.DefaultAPIServerBindAddress) + klog.Warningf("WARNING: could not obtain 
a bind address for the API Server: %v; using: %s", err, constants.DefaultAPIServerBindAddress) defaultIP := net.ParseIP(constants.DefaultAPIServerBindAddress) if defaultIP == nil { return nil, errors.Errorf("cannot parse default IP address: %s", constants.DefaultAPIServerBindAddress) diff --git a/cmd/kubeadm/app/util/config/initconfiguration.go b/cmd/kubeadm/app/util/config/initconfiguration.go index 9738c034d3aef..3847afd365798 100644 --- a/cmd/kubeadm/app/util/config/initconfiguration.go +++ b/cmd/kubeadm/app/util/config/initconfiguration.go @@ -25,8 +25,8 @@ import ( "sort" "strconv" - "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -165,7 +165,7 @@ func ConfigFileAndDefaultsToInternalConfig(cfgPath string, defaultversionedcfg * if cfgPath != "" { // Loads configuration from config file, if provided // Nb. --config overrides command line flags - glog.V(1).Infoln("loading configuration from the given file") + klog.V(1).Infoln("loading configuration from the given file") b, err := ioutil.ReadFile(cfgPath) if err != nil { diff --git a/cmd/kubeadm/app/util/config/joinconfiguration.go b/cmd/kubeadm/app/util/config/joinconfiguration.go index 278fa81c87e12..f33d259932a13 100644 --- a/cmd/kubeadm/app/util/config/joinconfiguration.go +++ b/cmd/kubeadm/app/util/config/joinconfiguration.go @@ -19,8 +19,8 @@ package config import ( "io/ioutil" - "github.com/golang/glog" "github.com/pkg/errors" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -56,7 +56,7 @@ func JoinConfigFileAndDefaultsToInternalConfig(cfgPath string, defaultversionedc if cfgPath != "" { // Loads configuration from config file, if provided // Nb. --config overrides command line flags, TODO: fix this - glog.V(1).Infoln("loading configuration from the given file") + klog.V(1).Infoln("loading configuration from the given file") b, err := ioutil.ReadFile(cfgPath) if err != nil { diff --git a/cmd/kubeadm/app/util/etcd/BUILD b/cmd/kubeadm/app/util/etcd/BUILD index b3792bb4aefdd..bb666f7cabb2a 100644 --- a/cmd/kubeadm/app/util/etcd/BUILD +++ b/cmd/kubeadm/app/util/etcd/BUILD @@ -13,8 +13,8 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kubeadm/app/util/etcd/etcd.go b/cmd/kubeadm/app/util/etcd/etcd.go index a11bba3cfe0fc..8d41809c58da6 100644 --- a/cmd/kubeadm/app/util/etcd/etcd.go +++ b/cmd/kubeadm/app/util/etcd/etcd.go @@ -26,9 +26,9 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/pkg/transport" - "github.com/golang/glog" "github.com/pkg/errors" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/util/config" @@ -145,7 +145,7 @@ func NewFromCluster(client clientset.Interface, certificatesDir string) (*Client for _, e := range clusterStatus.APIEndpoints { endpoints = append(endpoints, fmt.Sprintf("https://%s:%d", e.AdvertiseAddress, constants.EtcdListenClientPort)) } - glog.V(1).Infof("etcd endpoints read from pods: %s", strings.Join(endpoints, ",")) + klog.V(1).Infof("etcd endpoints read from pods: %s", 
strings.Join(endpoints, ",")) // Creates an etcd client etcdClient, err := New( @@ -185,7 +185,7 @@ func (c Client) Sync() error { if err != nil { return err } - glog.V(1).Infof("etcd endpoints read from etcd: %s", strings.Join(cli.Endpoints(), ",")) + klog.V(1).Infof("etcd endpoints read from etcd: %s", strings.Join(cli.Endpoints(), ",")) c.Endpoints = cli.Endpoints() return nil diff --git a/cmd/kubeadm/app/util/system/BUILD b/cmd/kubeadm/app/util/system/BUILD index 4435ddfbe864d..eb40387854320 100644 --- a/cmd/kubeadm/app/util/system/BUILD +++ b/cmd/kubeadm/app/util/system/BUILD @@ -27,8 +27,8 @@ go_library( "//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/docker/docker/api/types:go_default_library", "//vendor/github.com/docker/docker/client:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/cmd/kubeadm/app/util/system/kernel_validator.go b/cmd/kubeadm/app/util/system/kernel_validator.go index 5add3190f18c5..36ecd20dbdbcc 100644 --- a/cmd/kubeadm/app/util/system/kernel_validator.go +++ b/cmd/kubeadm/app/util/system/kernel_validator.go @@ -29,8 +29,8 @@ import ( "regexp" "strings" - "github.com/golang/glog" pkgerrors "github.com/pkg/errors" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/errors" ) @@ -254,7 +254,7 @@ func (k *KernelValidator) parseKernelConfig(r io.Reader) (map[string]kConfigOpti } fields := strings.Split(line, "=") if len(fields) != 2 { - glog.Errorf("Unexpected fields number in config %q", line) + klog.Errorf("Unexpected fields number in config %q", line) continue } config[fields[0]] = kConfigOption(fields[1]) diff --git a/cmd/kubeadm/app/util/system/package_validator.go b/cmd/kubeadm/app/util/system/package_validator.go index 3c9fa9c319299..820c68e6c2b40 100644 --- a/cmd/kubeadm/app/util/system/package_validator.go +++ b/cmd/kubeadm/app/util/system/package_validator.go @@ -25,8 +25,8 @@ import ( "k8s.io/apimachinery/pkg/util/errors" "github.com/blang/semver" - "github.com/golang/glog" pkgerrors "github.com/pkg/errors" + "k8s.io/klog" ) // semVerDotsCount is the number of dots in a valid semantic version. @@ -127,7 +127,7 @@ func (self *packageValidator) validate(packageSpecs []PackageSpec, manager packa // Get the version of the package on the running machine. version, err := manager.getPackageVersion(packageName) if err != nil { - glog.V(1).Infof("Failed to get the version for the package %q: %s\n", packageName, err) + klog.V(1).Infof("Failed to get the version for the package %q: %s\n", packageName, err) errs = append(errs, err) self.reporter.Report(nameWithVerRange, "not installed", bad) continue @@ -145,7 +145,7 @@ func (self *packageValidator) validate(packageSpecs []PackageSpec, manager packa // the version is in the range. 
sv, err := semver.Make(toSemVer(version)) if err != nil { - glog.Errorf("Failed to convert %q to semantic version: %s\n", version, err) + klog.Errorf("Failed to convert %q to semantic version: %s\n", version, err) errs = append(errs, err) self.reporter.Report(nameWithVerRange, "internal error", bad) continue diff --git a/cmd/kubeadm/app/util/version.go b/cmd/kubeadm/app/util/version.go index 3830ed1a08a6d..4da6ce48aef00 100644 --- a/cmd/kubeadm/app/util/version.go +++ b/cmd/kubeadm/app/util/version.go @@ -25,10 +25,10 @@ import ( "strings" "time" - "github.com/golang/glog" pkgerrors "github.com/pkg/errors" netutil "k8s.io/apimachinery/pkg/util/net" versionutil "k8s.io/apimachinery/pkg/util/version" + "k8s.io/klog" pkgversion "k8s.io/kubernetes/pkg/version" ) @@ -91,8 +91,8 @@ func KubernetesReleaseVersion(version string) (string, error) { return "", err } // Handle air-gapped environments by falling back to the client version. - glog.Infof("could not fetch a Kubernetes version from the internet: %v", err) - glog.Infof("falling back to the local client version: %s", clientVersion) + klog.Infof("could not fetch a Kubernetes version from the internet: %v", err) + klog.Infof("falling back to the local client version: %s", clientVersion) return KubernetesReleaseVersion(clientVersion) } // both the client and the remote version are obtained; validate them and pick a stable version @@ -160,7 +160,7 @@ func splitVersion(version string) (string, string, error) { // Internal helper: return content of URL func fetchFromURL(url string, timeout time.Duration) (string, error) { - glog.V(2).Infof("fetching Kubernetes version from URL: %s", url) + klog.V(2).Infof("fetching Kubernetes version from URL: %s", url) client := &http.Client{Timeout: timeout, Transport: netutil.SetOldTransportDefaults(&http.Transport{})} resp, err := client.Get(url) if err != nil { @@ -217,7 +217,7 @@ func kubeadmVersion(info string) (string, error) { // the same Patch level release. 
func validateStableVersion(remoteVersion, clientVersion string) (string, error) { if clientVersion == "" { - glog.Infof("could not obtain client version; using remote version: %s", remoteVersion) + klog.Infof("could not obtain client version; using remote version: %s", remoteVersion) return remoteVersion, nil } @@ -234,7 +234,7 @@ func validateStableVersion(remoteVersion, clientVersion string) (string, error) if verClient.Major() < verRemote.Major() || (verClient.Major() == verRemote.Major()) && verClient.Minor() < verRemote.Minor() { estimatedRelease := fmt.Sprintf("stable-%d.%d", verClient.Major(), verClient.Minor()) - glog.Infof("remote version is much newer: %s; falling back to: %s", remoteVersion, estimatedRelease) + klog.Infof("remote version is much newer: %s; falling back to: %s", remoteVersion, estimatedRelease) return estimatedRelease, nil } return remoteVersion, nil diff --git a/cmd/kubeadm/kubeadm.go b/cmd/kubeadm/kubeadm.go index 233b13f5cbc27..1b3d58fa171c8 100644 --- a/cmd/kubeadm/kubeadm.go +++ b/cmd/kubeadm/kubeadm.go @@ -20,10 +20,12 @@ import ( "fmt" "os" + "k8s.io/klog" "k8s.io/kubernetes/cmd/kubeadm/app" ) func main() { + klog.InitFlags(nil) if err := app.Run(); err != nil { fmt.Fprintf(os.Stderr, "error: %v\n", err) os.Exit(1) diff --git a/cmd/kubelet/app/BUILD b/cmd/kubelet/app/BUILD index f79a45ca3087c..4dc0bbcc4ee89 100644 --- a/cmd/kubelet/app/BUILD +++ b/cmd/kubelet/app/BUILD @@ -123,9 +123,9 @@ go_library( "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library", "//vendor/github.com/coreos/go-systemd/daemon:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ diff --git a/cmd/kubelet/app/options/BUILD b/cmd/kubelet/app/options/BUILD index 42ba81e52838f..711883b11e1f0 100644 --- a/cmd/kubelet/app/options/BUILD +++ b/cmd/kubelet/app/options/BUILD @@ -37,8 +37,8 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library", "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/google/cadvisor/container/common:go_default_library", diff --git a/cmd/kubelet/app/options/globalflags.go b/cmd/kubelet/app/options/globalflags.go index dafd5447989f8..a2ea7a7a7ca81 100644 --- a/cmd/kubelet/app/options/globalflags.go +++ b/cmd/kubelet/app/options/globalflags.go @@ -29,7 +29,7 @@ import ( "k8s.io/kubernetes/pkg/version/verflag" // ensure libs have a chance to globally register their flags - _ "github.com/golang/glog" + _ "k8s.io/klog" _ "k8s.io/kubernetes/pkg/credentialprovider/azure" _ "k8s.io/kubernetes/pkg/credentialprovider/gcp" ) @@ -91,7 +91,7 @@ func addCredentialProviderFlags(fs *pflag.FlagSet) { fs.AddFlagSet(local) } -// addGlogFlags adds flags from github.com/golang/glog +// addGlogFlags adds flags from k8s.io/klog func addGlogFlags(fs *pflag.FlagSet) { // lookup flags in global flag set and re-register the values with our flagset global := flag.CommandLine diff --git 
a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 606d60c295da6..9a324ee1d4b35 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -34,9 +34,9 @@ import ( "time" "github.com/coreos/go-systemd/daemon" - "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -111,7 +111,7 @@ func NewKubeletCommand(stopCh <-chan struct{}) *cobra.Command { kubeletConfig, err := options.NewKubeletConfiguration() // programmer error if err != nil { - glog.Fatal(err) + klog.Fatal(err) } cmd := &cobra.Command{ @@ -144,20 +144,20 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // initial flag parse, since we disable cobra's flag parsing if err := cleanFlagSet.Parse(args); err != nil { cmd.Usage() - glog.Fatal(err) + klog.Fatal(err) } // check if there are non-flag arguments in the command line cmds := cleanFlagSet.Args() if len(cmds) > 0 { cmd.Usage() - glog.Fatalf("unknown command: %s", cmds[0]) + klog.Fatalf("unknown command: %s", cmds[0]) } // short-circuit on help help, err := cleanFlagSet.GetBool("help") if err != nil { - glog.Fatal(`"help" flag is non-bool, programmer error, please correct`) + klog.Fatal(`"help" flag is non-bool, programmer error, please correct`) } if help { cmd.Help() @@ -170,40 +170,40 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // set feature gates from initial flags-based config if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { - glog.Fatal(err) + klog.Fatal(err) } // validate the initial KubeletFlags if err := options.ValidateKubeletFlags(kubeletFlags); err != nil { - glog.Fatal(err) + klog.Fatal(err) } if kubeletFlags.ContainerRuntime == "remote" && cleanFlagSet.Changed("pod-infra-container-image") { - glog.Warning("Warning: For remote container runtime, --pod-infra-container-image is ignored in kubelet, which should be set in that remote runtime instead") + klog.Warning("Warning: For remote container runtime, --pod-infra-container-image is ignored in kubelet, which should be set in that remote runtime instead") } // load kubelet config file, if provided if configFile := kubeletFlags.KubeletConfigFile; len(configFile) > 0 { kubeletConfig, err = loadConfigFile(configFile) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } // We must enforce flag precedence by re-parsing the command line into the new object. // This is necessary to preserve backwards-compatibility across binary upgrades. // See issue #56171 for more details. if err := kubeletConfigFlagPrecedence(kubeletConfig, args); err != nil { - glog.Fatal(err) + klog.Fatal(err) } // update feature gates based on new config if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { - glog.Fatal(err) + klog.Fatal(err) } } // We always validate the local configuration (command line + config file). // This is the default "last-known-good" config for dynamic config, and must always remain valid. 
if err := kubeletconfigvalidation.ValidateKubeletConfiguration(kubeletConfig); err != nil { - glog.Fatal(err) + klog.Fatal(err) } // use dynamic kubelet config, if enabled @@ -219,7 +219,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API return kubeletConfigFlagPrecedence(kc, args) }) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } // If we should just use our existing, local config, the controller will return a nil config if dynamicKubeletConfig != nil { @@ -227,7 +227,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // Note: flag precedence was already enforced in the controller, prior to validation, // by our above transform function. Now we simply update feature gates from the new config. if err := utilfeature.DefaultFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil { - glog.Fatal(err) + klog.Fatal(err) } } } @@ -241,7 +241,7 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // use kubeletServer to construct the default KubeletDeps kubeletDeps, err := UnsecuredDependencies(kubeletServer) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } // add the kubelet config controller to kubeletDeps @@ -250,15 +250,15 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API // start the experimental docker shim, if enabled if kubeletServer.KubeletFlags.ExperimentalDockershim { if err := RunDockershim(&kubeletServer.KubeletFlags, kubeletConfig, stopCh); err != nil { - glog.Fatal(err) + klog.Fatal(err) } return } // run the kubelet - glog.V(5).Infof("KubeletConfiguration: %#v", kubeletServer.KubeletConfiguration) + klog.V(5).Infof("KubeletConfiguration: %#v", kubeletServer.KubeletConfiguration) if err := Run(kubeletServer, kubeletDeps, stopCh); err != nil { - glog.Fatal(err) + klog.Fatal(err) } }, } @@ -361,7 +361,7 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err mounter := mount.New(s.ExperimentalMounterPath) var pluginRunner = exec.New() if s.Containerized { - glog.V(2).Info("Running kubelet in containerized mode") + klog.V(2).Info("Running kubelet in containerized mode") ne, err := nsenter.NewNsenter(nsenter.DefaultHostRootFsPath, exec.New()) if err != nil { return nil, err @@ -404,7 +404,7 @@ func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, err // not be generated. 
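In the makeEventRecorder hunk just below, klog.V(3).Infof is handed directly to client-go's EventBroadcaster.StartLogging. This works because a klog Verbose method value already has the printf-style signature the sink expects. A small sketch of the same mechanism, with startLogging standing in for the client-go call (it is illustrative, not the client-go API):

package main

import (
	"flag"

	"k8s.io/klog"
)

// startLogging mimics the printf-style sink that client-go's
// EventBroadcaster.StartLogging accepts.
func startLogging(logf func(format string, args ...interface{})) {
	logf("event: %s on %s", "Scheduled", "pod/demo")
}

func main() {
	klog.InitFlags(nil)
	flag.Parse()

	// klog.V(3).Infof is a method value with the exact sink signature, which
	// is why the swap from glog.V(3).Infof needs no adapter.
	startLogging(klog.V(3).Infof)
}

Run with -v=3 to see the event line; at the default verbosity the sink silently discards it.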
func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan struct{}) error { // To help debugging, immediately log version - glog.Infof("Version: %+v", version.Get()) + klog.Infof("Version: %+v", version.Get()) if err := initForOS(s.KubeletFlags.WindowsService); err != nil { return fmt.Errorf("failed OS init: %v", err) } @@ -439,11 +439,11 @@ func setConfigz(cz *configz.Config, kc *kubeletconfiginternal.KubeletConfigurati func initConfigz(kc *kubeletconfiginternal.KubeletConfiguration) error { cz, err := configz.New("kubeletconfig") if err != nil { - glog.Errorf("unable to register configz: %s", err) + klog.Errorf("unable to register configz: %s", err) return err } if err := setConfigz(cz, kc); err != nil { - glog.Errorf("unable to register config: %s", err) + klog.Errorf("unable to register config: %s", err) return err } return nil @@ -456,12 +456,12 @@ func makeEventRecorder(kubeDeps *kubelet.Dependencies, nodeName types.NodeName) } eventBroadcaster := record.NewBroadcaster() kubeDeps.Recorder = eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: componentKubelet, Host: string(nodeName)}) - eventBroadcaster.StartLogging(glog.V(3).Infof) + eventBroadcaster.StartLogging(klog.V(3).Infof) if kubeDeps.EventClient != nil { - glog.V(4).Infof("Sending events to api server.") + klog.V(4).Infof("Sending events to api server.") eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeDeps.EventClient.Events("")}) } else { - glog.Warning("No api server defined - no events will be sent to API server.") + klog.Warning("No api server defined - no events will be sent to API server.") } } @@ -482,12 +482,12 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan } done := make(chan struct{}) if s.LockFilePath != "" { - glog.Infof("acquiring file lock on %q", s.LockFilePath) + klog.Infof("acquiring file lock on %q", s.LockFilePath) if err := flock.Acquire(s.LockFilePath); err != nil { return fmt.Errorf("unable to acquire file lock on %q: %v", s.LockFilePath, err) } if s.ExitOnLockContention { - glog.Infof("watching for inotify events for: %v", s.LockFilePath) + klog.Infof("watching for inotify events for: %v", s.LockFilePath) if err := watchForLockfileContention(s.LockFilePath, done); err != nil { return err } @@ -497,7 +497,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan // Register current configuration with /configz endpoint err = initConfigz(&s.KubeletConfiguration) if err != nil { - glog.Errorf("unable to register KubeletConfiguration with configz, error: %v", err) + klog.Errorf("unable to register KubeletConfiguration with configz, error: %v", err) } // About to get clients and such, detect standaloneMode @@ -520,9 +520,9 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan return err } if cloud == nil { - glog.V(2).Infof("No cloud provider specified: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile) + klog.V(2).Infof("No cloud provider specified: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile) } else { - glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile) + klog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile) } kubeDeps.Cloud = cloud } @@ -549,7 +549,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan 
kubeDeps.DynamicKubeClient = nil kubeDeps.EventClient = nil kubeDeps.HeartbeatClient = nil - glog.Warningf("standalone mode, no API client") + klog.Warningf("standalone mode, no API client") } else if kubeDeps.KubeClient == nil || kubeDeps.EventClient == nil || kubeDeps.HeartbeatClient == nil || kubeDeps.DynamicKubeClient == nil { // initialize clients if not standalone mode and any of the clients are not provided var kubeClient clientset.Interface @@ -579,15 +579,15 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan kubeClient, err = clientset.NewForConfig(clientConfig) if err != nil { - glog.Warningf("New kubeClient from clientConfig error: %v", err) + klog.Warningf("New kubeClient from clientConfig error: %v", err) } else if kubeClient.CertificatesV1beta1() != nil && clientCertificateManager != nil { - glog.V(2).Info("Starting client certificate rotation.") + klog.V(2).Info("Starting client certificate rotation.") clientCertificateManager.SetCertificateSigningRequestClient(kubeClient.CertificatesV1beta1().CertificateSigningRequests()) clientCertificateManager.Start() } dynamicKubeClient, err = dynamic.NewForConfig(clientConfig) if err != nil { - glog.Warningf("Failed to initialize dynamic KubeClient: %v", err) + klog.Warningf("Failed to initialize dynamic KubeClient: %v", err) } // make a separate client for events @@ -596,7 +596,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan eventClientConfig.Burst = int(s.EventBurst) eventClient, err = v1core.NewForConfig(&eventClientConfig) if err != nil { - glog.Warningf("Failed to create API Server client for Events: %v", err) + klog.Warningf("Failed to create API Server client for Events: %v", err) } // make a separate client for heartbeat with throttling disabled and a timeout attached @@ -612,14 +612,14 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan heartbeatClientConfig.QPS = float32(-1) heartbeatClient, err = clientset.NewForConfig(&heartbeatClientConfig) if err != nil { - glog.Warningf("Failed to create API Server client for heartbeat: %v", err) + klog.Warningf("Failed to create API Server client for heartbeat: %v", err) } // csiClient works with CRDs that support json only clientConfig.ContentType = "application/json" csiClient, err := csiclientset.NewForConfig(clientConfig) if err != nil { - glog.Warningf("Failed to create CSI API client: %v", err) + klog.Warningf("Failed to create CSI API client: %v", err) } kubeDeps.KubeClient = kubeClient @@ -663,7 +663,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan if kubeDeps.ContainerManager == nil { if s.CgroupsPerQOS && s.CgroupRoot == "" { - glog.Infof("--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /") + klog.Infof("--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /") s.CgroupRoot = "/" } kubeReserved, err := parseResourceList(s.KubeReserved) @@ -727,7 +727,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan } if err := checkPermissions(); err != nil { - glog.Error(err) + klog.Error(err) } utilruntime.ReallyCrash = s.ReallyCrashForTesting @@ -737,7 +737,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan // TODO(vmarmol): Do this through container config. 
oomAdjuster := kubeDeps.OOMAdjuster if err := oomAdjuster.ApplyOOMScoreAdj(0, int(s.OOMScoreAdj)); err != nil { - glog.Warning(err) + klog.Warning(err) } if err := RunKubelet(s, kubeDeps, s.RunOnce); err != nil { @@ -749,7 +749,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan go wait.Until(func() { err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(int(s.HealthzPort))), nil) if err != nil { - glog.Errorf("Starting health server failed: %v", err) + klog.Errorf("Starting health server failed: %v", err) } }, 5*time.Second, wait.NeverStop) } @@ -788,7 +788,7 @@ func getNodeName(cloud cloudprovider.Interface, hostname string) (types.NodeName return "", fmt.Errorf("error fetching current node name from cloud provider: %v", err) } - glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName) + klog.V(2).Infof("cloud provider determined current node name to be %s", nodeName) return nodeName, nil } @@ -822,7 +822,7 @@ func InitializeTLS(kf *options.KubeletFlags, kc *kubeletconfiginternal.KubeletCo return nil, err } - glog.V(4).Infof("Using self-signed cert (%s, %s)", kc.TLSCertFile, kc.TLSPrivateKeyFile) + klog.V(4).Infof("Using self-signed cert (%s, %s)", kc.TLSCertFile, kc.TLSPrivateKeyFile) } } @@ -938,7 +938,7 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie capabilities.Setup(kubeServer.AllowPrivileged, privilegedSources, 0) credentialprovider.SetPreferredDockercfgPath(kubeServer.RootDirectory) - glog.V(2).Infof("Using root directory: %v", kubeServer.RootDirectory) + klog.V(2).Infof("Using root directory: %v", kubeServer.RootDirectory) if kubeDeps.OSInterface == nil { kubeDeps.OSInterface = kubecontainer.RealOS{} @@ -993,10 +993,10 @@ func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencie if _, err := k.RunOnce(podCfg.Updates()); err != nil { return fmt.Errorf("runonce failed: %v", err) } - glog.Infof("Started kubelet as runonce") + klog.Infof("Started kubelet as runonce") } else { startKubelet(k, podCfg, &kubeServer.KubeletConfiguration, kubeDeps, kubeServer.EnableServer) - glog.Infof("Started kubelet") + klog.Infof("Started kubelet") } return nil } @@ -1180,7 +1180,7 @@ func RunDockershim(f *options.KubeletFlags, c *kubeletconfiginternal.KubeletConf if err != nil { return err } - glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") + klog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") server := dockerremote.NewDockerServer(f.RemoteRuntimeEndpoint, ds) if err := server.Start(); err != nil { return err diff --git a/cmd/kubelet/app/server_linux.go b/cmd/kubelet/app/server_linux.go index ceab334c0ba77..f7c39d4cb7dc7 100644 --- a/cmd/kubelet/app/server_linux.go +++ b/cmd/kubelet/app/server_linux.go @@ -17,26 +17,26 @@ limitations under the License. 
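The kubemark hunks below keep the glog-style fatal path: klog.Fatalf logs the message at fatal severity, flushes, and terminates the process, so no explicit os.Exit is needed after it. A compact sketch, with the file path purely illustrative:

package main

import (
	"flag"
	"io/ioutil"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()

	b, err := ioutil.ReadFile("/etc/hostname") // illustrative path
	if err != nil {
		// Fatalf logs, flushes, and exits; execution never continues past it.
		klog.Fatalf("Failed to read file: %v. Exiting.", err)
	}
	klog.Infof("read %d bytes", len(b))
}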
package app import ( - "github.com/golang/glog" "golang.org/x/exp/inotify" + "k8s.io/klog" ) func watchForLockfileContention(path string, done chan struct{}) error { watcher, err := inotify.NewWatcher() if err != nil { - glog.Errorf("unable to create watcher for lockfile: %v", err) + klog.Errorf("unable to create watcher for lockfile: %v", err) return err } if err = watcher.AddWatch(path, inotify.IN_OPEN|inotify.IN_DELETE_SELF); err != nil { - glog.Errorf("unable to watch lockfile: %v", err) + klog.Errorf("unable to watch lockfile: %v", err) return err } go func() { select { case ev := <-watcher.Event: - glog.Infof("inotify event: %v", ev) + klog.Infof("inotify event: %v", ev) case err = <-watcher.Error: - glog.Errorf("inotify watcher error: %v", err) + klog.Errorf("inotify watcher error: %v", err) } close(done) }() diff --git a/cmd/kubemark/BUILD b/cmd/kubemark/BUILD index f1a9e2fc3467d..489d3d696154e 100644 --- a/cmd/kubemark/BUILD +++ b/cmd/kubemark/BUILD @@ -35,9 +35,9 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec/testing:go_default_library", ], ) diff --git a/cmd/kubemark/hollow-node.go b/cmd/kubemark/hollow-node.go index c63d6ebd0f1f5..2653e863a9ddb 100644 --- a/cmd/kubemark/hollow-node.go +++ b/cmd/kubemark/hollow-node.go @@ -23,9 +23,9 @@ import ( "os" "time" - "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -138,18 +138,18 @@ func newHollowNodeCommand() *cobra.Command { func run(config *HollowNodeConfig) { if !knownMorphs.Has(config.Morph) { - glog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List()) + klog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List()) } // create a client to communicate with API server. clientConfig, err := config.createClientConfigFromFile() if err != nil { - glog.Fatalf("Failed to create a ClientConfig: %v. Exiting.", err) + klog.Fatalf("Failed to create a ClientConfig: %v. Exiting.", err) } client, err := clientset.NewForConfig(clientConfig) if err != nil { - glog.Fatalf("Failed to create a ClientSet: %v. Exiting.", err) + klog.Fatalf("Failed to create a ClientSet: %v. 
Exiting.", err) } if config.Morph == "kubelet" { @@ -181,7 +181,7 @@ func run(config *HollowNodeConfig) { if config.Morph == "proxy" { client, err := clientset.NewForConfig(clientConfig) if err != nil { - glog.Fatalf("Failed to create API Server client: %v", err) + klog.Fatalf("Failed to create API Server client: %v", err) } iptInterface := fakeiptables.NewFake() sysctl := fakesysctl.NewFake() @@ -203,7 +203,7 @@ func run(config *HollowNodeConfig) { config.ProxierMinSyncPeriod, ) if err != nil { - glog.Fatalf("Failed to create hollowProxy instance: %v", err) + klog.Fatalf("Failed to create hollowProxy instance: %v", err) } hollowProxy.Run() } diff --git a/pkg/apis/core/validation/BUILD b/pkg/apis/core/validation/BUILD index 9ea47dc5d6fbc..ed546217cf664 100644 --- a/pkg/apis/core/validation/BUILD +++ b/pkg/apis/core/validation/BUILD @@ -39,7 +39,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index 9211bb348e195..a6089c9b306b7 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -27,7 +27,7 @@ import ( "regexp" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" @@ -4301,7 +4301,7 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList { // We made allowed changes to oldNode, and now we compare oldNode to node. Any remaining differences indicate changes to protected fields. // TODO: Add a 'real' error type for this error and provide print actual diffs. if !apiequality.Semantic.DeepEqual(oldNode, node) { - glog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node) + klog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node) allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels, taints, or capacity (or configSource, if the DynamicKubeletConfig feature gate is enabled)")) } diff --git a/pkg/auth/authorizer/abac/BUILD b/pkg/auth/authorizer/abac/BUILD index e059fd567147f..4d55745831ed7 100644 --- a/pkg/auth/authorizer/abac/BUILD +++ b/pkg/auth/authorizer/abac/BUILD @@ -17,7 +17,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/auth/authorizer/abac/abac.go b/pkg/auth/authorizer/abac/abac.go index 86e7f8ed3e0bb..8f49c98246b4b 100644 --- a/pkg/auth/authorizer/abac/abac.go +++ b/pkg/auth/authorizer/abac/abac.go @@ -25,7 +25,7 @@ import ( "os" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authentication/user" @@ -105,7 +105,7 @@ func NewFromFile(path string) (policyList, error) { } if unversionedLines > 0 { - glog.Warningf("Policy file %s contained unversioned rules. See docs/admin/authorization.md#abac-mode for ABAC file format details.", path) + klog.Warningf("Policy file %s contained unversioned rules. 
See docs/admin/authorization.md#abac-mode for ABAC file format details.", path) } if err := scanner.Err(); err != nil { diff --git a/pkg/cloudprovider/providers/aws/BUILD b/pkg/cloudprovider/providers/aws/BUILD index 22c40fd0ce4ee..ca34b569b24a0 100644 --- a/pkg/cloudprovider/providers/aws/BUILD +++ b/pkg/cloudprovider/providers/aws/BUILD @@ -58,9 +58,9 @@ go_library( "//vendor/github.com/aws/aws-sdk-go/service/elbv2:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/kms:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/sts:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index b80dff30e51e5..d4f38dc75f4e7 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -42,8 +42,8 @@ import ( "github.com/aws/aws-sdk-go/service/elbv2" "github.com/aws/aws-sdk-go/service/kms" "github.com/aws/aws-sdk-go/service/sts" - "github.com/golang/glog" gcfg "gopkg.in/gcfg.v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -967,7 +967,7 @@ func init() { Client: ec2metadata.New(sess), } } else { - glog.Infof("Using AWS assumed role %v", cfg.Global.RoleARN) + klog.Infof("Using AWS assumed role %v", cfg.Global.RoleARN) provider = &stscreds.AssumeRoleProvider{ Client: sts.New(sess), RoleARN: cfg.Global.RoleARN, @@ -1004,7 +1004,7 @@ func readAWSCloudConfig(config io.Reader) (*CloudConfig, error) { func updateConfigZone(cfg *CloudConfig, metadata EC2Metadata) error { if cfg.Global.Zone == "" { if metadata != nil { - glog.Info("Zone not specified in configuration file; querying AWS metadata service") + klog.Info("Zone not specified in configuration file; querying AWS metadata service") var err error cfg.Global.Zone, err = getAvailabilityZone(metadata) if err != nil { @@ -1038,7 +1038,7 @@ func azToRegion(az string) (string, error) { func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { // We have some state in the Cloud object - in particular the attaching map // Log so that if we are building multiple Cloud objects, it is obvious! - glog.Infof("Building AWS cloudprovider") + klog.Infof("Building AWS cloudprovider") metadata, err := awsServices.Metadata() if err != nil { @@ -1070,7 +1070,7 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { return nil, fmt.Errorf("not a valid AWS zone (unknown region): %s", zone) } } else { - glog.Warningf("Strict AWS zone checking is disabled. Proceeding with zone: %s", zone) + klog.Warningf("Strict AWS zone checking is disabled. 
Proceeding with zone: %s", zone) } ec2, err := awsServices.Compute(regionName) @@ -1117,7 +1117,7 @@ func newAWSCloud(cfg CloudConfig, awsServices Services) (*Cloud, error) { if cfg.Global.VPC != "" && (cfg.Global.SubnetID != "" || cfg.Global.RoleARN != "") && tagged { // When the master is running on a different AWS account, cloud provider or on-premise // build up a dummy instance and use the VPC from the nodes account - glog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premises") + klog.Info("Master is configured to run on a different AWS account, different cloud provider or on-premises") awsCloud.selfAWSInstance = &awsInstance{ nodeName: "master-dummy", vpcID: cfg.Global.VPC, @@ -1161,7 +1161,7 @@ func (c *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, c.clientBuilder = clientBuilder c.kubeClient = clientBuilder.ClientOrDie("aws-cloud-provider") c.eventBroadcaster = record.NewBroadcaster() - c.eventBroadcaster.StartLogging(glog.Infof) + c.eventBroadcaster.StartLogging(klog.Infof) c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.kubeClient.CoreV1().Events("")}) c.eventRecorder = c.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "aws-cloud-provider"}) } @@ -1232,7 +1232,7 @@ func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.No if err != nil { //TODO: It would be nice to be able to determine the reason for the failure, // but the AWS client masks all failures with the same error description. - glog.V(4).Info("Could not determine public IP from AWS metadata.") + klog.V(4).Info("Could not determine public IP from AWS metadata.") } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalIP, Address: externalIP}) } @@ -1241,7 +1241,7 @@ func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.No if err != nil || len(internalDNS) == 0 { //TODO: It would be nice to be able to determine the reason for the failure, // but the AWS client masks all failures with the same error description. - glog.V(4).Info("Could not determine private DNS from AWS metadata.") + klog.V(4).Info("Could not determine private DNS from AWS metadata.") } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: internalDNS}) addresses = append(addresses, v1.NodeAddress{Type: v1.NodeHostName, Address: internalDNS}) @@ -1251,7 +1251,7 @@ func (c *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.No if err != nil || len(externalDNS) == 0 { //TODO: It would be nice to be able to determine the reason for the failure, // but the AWS client masks all failures with the same error description. 
- glog.V(4).Info("Could not determine public DNS from AWS metadata.") + klog.V(4).Info("Could not determine public DNS from AWS metadata.") } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeExternalDNS, Address: externalDNS}) } @@ -1360,7 +1360,7 @@ func (c *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID strin state := instances[0].State.Name if *state == ec2.InstanceStateNameTerminated { - glog.Warningf("the instance %s is terminated", instanceID) + klog.Warningf("the instance %s is terminated", instanceID) return false, nil } @@ -1383,7 +1383,7 @@ func (c *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID str return false, err } if len(instances) == 0 { - glog.Warningf("the instance %s does not exist anymore", providerID) + klog.Warningf("the instance %s does not exist anymore", providerID) // returns false, because otherwise node is not deleted from cluster // false means that it will continue to check InstanceExistsByProviderID return false, nil @@ -1485,7 +1485,7 @@ func (c *Cloud) GetCandidateZonesForDynamicVolume() (sets.String, error) { } if master { - glog.V(4).Infof("Ignoring master instance %q in zone discovery", aws.StringValue(instance.InstanceId)) + klog.V(4).Infof("Ignoring master instance %q in zone discovery", aws.StringValue(instance.InstanceId)) continue } @@ -1495,7 +1495,7 @@ func (c *Cloud) GetCandidateZonesForDynamicVolume() (sets.String, error) { } } - glog.V(2).Infof("Found instances in zones %s", zones) + klog.V(2).Infof("Found instances in zones %s", zones) return zones, nil } @@ -1614,7 +1614,7 @@ func (c *Cloud) getMountDevice( name = name[8:] } if len(name) < 1 || len(name) > 2 { - glog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName)) + klog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName)) } deviceMappings[mountDevice(name)] = EBSVolumeID(aws.StringValue(blockDevice.Ebs.VolumeId)) } @@ -1633,7 +1633,7 @@ func (c *Cloud) getMountDevice( for mountDevice, mappingVolumeID := range deviceMappings { if volumeID == mappingVolumeID { if assign { - glog.Warningf("Got assignment call for already-assigned volume: %s@%s", mountDevice, mappingVolumeID) + klog.Warningf("Got assignment call for already-assigned volume: %s@%s", mountDevice, mappingVolumeID) } return mountDevice, true, nil } @@ -1658,7 +1658,7 @@ func (c *Cloud) getMountDevice( chosen, err := deviceAllocator.GetNext(deviceMappings) if err != nil { - glog.Warningf("Could not assign a mount device. mappings=%v, error: %v", deviceMappings, err) + klog.Warningf("Could not assign a mount device. mappings=%v, error: %v", deviceMappings, err) return "", false, fmt.Errorf("too many EBS volumes attached to node %s", i.nodeName) } @@ -1668,7 +1668,7 @@ func (c *Cloud) getMountDevice( c.attaching[i.nodeName] = attaching } attaching[chosen] = volumeID - glog.V(2).Infof("Assigned mount device %s -> volume %s", chosen, volumeID) + klog.V(2).Infof("Assigned mount device %s -> volume %s", chosen, volumeID) return chosen, false, nil } @@ -1688,10 +1688,10 @@ func (c *Cloud) endAttaching(i *awsInstance, volumeID EBSVolumeID, mountDevice m // attached to the instance (as reported by the EC2 API). 
So if endAttaching comes after // a 10 second poll delay, we might well have had a concurrent request to allocate a mountpoint, // which because we allocate sequentially is _very_ likely to get the immediately freed volume - glog.Infof("endAttaching on device %q assigned to different volume: %q vs %q", mountDevice, volumeID, existingVolumeID) + klog.Infof("endAttaching on device %q assigned to different volume: %q vs %q", mountDevice, volumeID, existingVolumeID) return false } - glog.V(2).Infof("Releasing in-process attachment entry: %s -> volume %s", mountDevice, volumeID) + klog.V(2).Infof("Releasing in-process attachment entry: %s -> volume %s", mountDevice, volumeID) delete(c.attaching[i.nodeName], mountDevice) return true } @@ -1815,7 +1815,7 @@ func (d *awsDisk) modifyVolume(requestGiB int64) (int64, error) { func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) { node, fetchErr := c.kubeClient.CoreV1().Nodes().Get(string(nodeName), metav1.GetOptions{}) if fetchErr != nil { - glog.Errorf("Error fetching node %s with %v", nodeName, fetchErr) + klog.Errorf("Error fetching node %s with %v", nodeName, fetchErr) return } @@ -1826,7 +1826,7 @@ func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) } err := controller.AddOrUpdateTaintOnNode(c.kubeClient, string(nodeName), taint) if err != nil { - glog.Errorf("Error applying taint to node %s with error %v", nodeName, err) + klog.Errorf("Error applying taint to node %s with error %v", nodeName, err) return } c.eventRecorder.Eventf(node, v1.EventTypeWarning, volumeAttachmentStuck, reason) @@ -1854,7 +1854,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, if isAWSErrorVolumeNotFound(err) { if status == "detached" { // The disk doesn't exist, assume it's detached, log warning and stop waiting - glog.Warningf("Waiting for volume %q to be detached but the volume does not exist", d.awsID) + klog.Warningf("Waiting for volume %q to be detached but the volume does not exist", d.awsID) stateStr := "detached" attachment = &ec2.VolumeAttachment{ State: &stateStr, @@ -1863,7 +1863,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, } if status == "attached" { // The disk doesn't exist, complain, give up waiting and report error - glog.Warningf("Waiting for volume %q to be attached but the volume does not exist", d.awsID) + klog.Warningf("Waiting for volume %q to be attached but the volume does not exist", d.awsID) return false, err } } @@ -1873,7 +1873,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, return false, err } - glog.Warningf("Ignoring error from describe volume for volume %q; will retry: %q", d.awsID, err) + klog.Warningf("Ignoring error from describe volume for volume %q; will retry: %q", d.awsID, err) return false, nil } @@ -1881,20 +1881,20 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, if len(info.Attachments) > 1 { // Shouldn't happen; log so we know if it is - glog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info) + klog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info) } attachmentStatus := "" for _, a := range info.Attachments { if attachmentStatus != "" { // Shouldn't happen; log so we know if it is - glog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info) + klog.Warningf("Found multiple attachments for volume %q: %v", d.awsID, info) } if a.State != nil { attachment = a 
attachmentStatus = *a.State } else { // Shouldn't happen; log so we know if it is - glog.Warningf("Ignoring nil attachment state for volume %q: %v", d.awsID, a) + klog.Warningf("Ignoring nil attachment state for volume %q: %v", d.awsID, a) } } if attachmentStatus == "" { @@ -1905,7 +1905,7 @@ func (d *awsDisk) waitForAttachmentStatus(status string) (*ec2.VolumeAttachment, return true, nil } // continue waiting - glog.V(2).Infof("Waiting for volume %q state: actual=%s, desired=%s", d.awsID, attachmentStatus, status) + klog.V(2).Infof("Waiting for volume %q state: actual=%s, desired=%s", d.awsID, attachmentStatus, status) return false, nil }) @@ -1963,11 +1963,11 @@ func wrapAttachError(err error, disk *awsDisk, instance string) error { if awsError.Code() == "VolumeInUse" { info, err := disk.describeVolume() if err != nil { - glog.Errorf("Error describing volume %q: %q", disk.awsID, err) + klog.Errorf("Error describing volume %q: %q", disk.awsID, err) } else { for _, a := range info.Attachments { if disk.awsID != EBSVolumeID(aws.StringValue(a.VolumeId)) { - glog.Warningf("Expected to get attachment info of volume %q but instead got info of %q", disk.awsID, aws.StringValue(a.VolumeId)) + klog.Warningf("Expected to get attachment info of volume %q but instead got info of %q", disk.awsID, aws.StringValue(a.VolumeId)) } else if aws.StringValue(a.State) == "attached" { return fmt.Errorf("Error attaching EBS volume %q to instance %q: %q. The volume is currently attached to instance %q", disk.awsID, instance, awsError, aws.StringValue(a.InstanceId)) } @@ -2001,7 +2001,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) defer func() { if attachEnded { if !c.endAttaching(awsInstance, disk.awsID, mountDevice) { - glog.Errorf("endAttaching called for disk %q when attach not in progress", disk.awsID) + klog.Errorf("endAttaching called for disk %q when attach not in progress", disk.awsID) } } }() @@ -2020,7 +2020,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) if !alreadyAttached { available, err := c.checkIfAvailable(disk, "attaching", awsInstance.awsID) if err != nil { - glog.Error(err) + klog.Error(err) } if !available { @@ -2042,7 +2042,7 @@ func (c *Cloud) AttachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) if da, ok := c.deviceAllocators[awsInstance.nodeName]; ok { da.Deprioritize(mountDevice) } - glog.V(2).Infof("AttachVolume volume=%q instance=%q request returned %v", disk.awsID, awsInstance.awsID, attachResponse) + klog.V(2).Infof("AttachVolume volume=%q instance=%q request returned %v", disk.awsID, awsInstance.awsID, attachResponse) } attachment, err := disk.waitForAttachmentStatus("attached") @@ -2080,7 +2080,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) if err != nil { if isAWSErrorVolumeNotFound(err) { // Someone deleted the volume being detached; complain, but do nothing else and return success - glog.Warningf("DetachDisk %s called for node %s but volume does not exist; assuming the volume is detached", diskName, nodeName) + klog.Warningf("DetachDisk %s called for node %s but volume does not exist; assuming the volume is detached", diskName, nodeName) return "", nil } @@ -2088,7 +2088,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) } if !attached && diskInfo.ec2Instance != nil { - glog.Warningf("DetachDisk %s called for node %s but volume is attached to node %s", diskName, nodeName, diskInfo.nodeName) + klog.Warningf("DetachDisk 
%s called for node %s but volume is attached to node %s", diskName, nodeName, diskInfo.nodeName) return "", nil } @@ -2104,7 +2104,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) } if !alreadyAttached { - glog.Warningf("DetachDisk called on non-attached disk: %s", diskName) + klog.Warningf("DetachDisk called on non-attached disk: %s", diskName) // TODO: Continue? Tolerate non-attached error from the AWS DetachVolume call? } @@ -2131,7 +2131,7 @@ func (c *Cloud) DetachDisk(diskName KubernetesVolumeID, nodeName types.NodeName) } if attachment != nil { // We expect it to be nil, it is (maybe) interesting if it is not - glog.V(2).Infof("waitForAttachmentStatus returned non-nil attachment with state=detached: %v", attachment) + klog.V(2).Infof("waitForAttachmentStatus returned non-nil attachment with state=detached: %v", attachment) } if mountDevice != "" { @@ -2268,10 +2268,10 @@ func (c *Cloud) DeleteDisk(volumeName KubernetesVolumeID) (bool, error) { available, err := c.checkIfAvailable(awsDisk, "deleting", "") if err != nil { if isAWSErrorVolumeNotFound(err) { - glog.V(2).Infof("Volume %s not found when deleting it, assuming it's deleted", awsDisk.awsID) + klog.V(2).Infof("Volume %s not found when deleting it, assuming it's deleted", awsDisk.awsID) return false, nil } - glog.Error(err) + klog.Error(err) } if !available { @@ -2285,7 +2285,7 @@ func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string) info, err := disk.describeVolume() if err != nil { - glog.Errorf("Error describing volume %q: %q", disk.awsID, err) + klog.Errorf("Error describing volume %q: %q", disk.awsID, err) // if for some reason we can not describe volume we will return error return false, err } @@ -2305,7 +2305,7 @@ func (c *Cloud) checkIfAvailable(disk *awsDisk, opName string, instance string) attachedInstance, ierr := c.getInstanceByID(instanceID) attachErr := fmt.Sprintf("%s since volume is currently attached to %q", opError, instanceID) if ierr != nil { - glog.Error(attachErr) + klog.Error(attachErr) return false, errors.New(attachErr) } devicePath := aws.StringValue(attachment.Device) @@ -2386,7 +2386,7 @@ func (c *Cloud) DiskIsAttached(diskName KubernetesVolumeID, nodeName types.NodeN if err != nil { if isAWSErrorVolumeNotFound(err) { // The disk doesn't exist, can't be attached - glog.Warningf("DiskIsAttached called for volume %s on node %s but the volume does not exist", diskName, nodeName) + klog.Warningf("DiskIsAttached called for volume %s on node %s but the volume does not exist", diskName, nodeName) return false, nil } @@ -2423,7 +2423,7 @@ func (c *Cloud) DisksAreAttached(nodeDisks map[types.NodeName][]KubernetesVolume } if len(awsInstances) == 0 { - glog.V(2).Infof("DisksAreAttached found no instances matching node names; will assume disks not attached") + klog.V(2).Infof("DisksAreAttached found no instances matching node names; will assume disks not attached") return attached, nil } @@ -2518,7 +2518,7 @@ func (c *Cloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, var ret *elb.LoadBalancerDescription for _, loadBalancer := range response.LoadBalancerDescriptions { if ret != nil { - glog.Errorf("Found multiple load balancers with name: %s", name) + klog.Errorf("Found multiple load balancers with name: %s", name) } ret = loadBalancer } @@ -2603,7 +2603,7 @@ func (c *Cloud) findSecurityGroup(securityGroupID string) (*ec2.SecurityGroup, e groups, err := c.ec2.DescribeSecurityGroups(describeSecurityGroupsRequest) if err != nil { - 
glog.Warningf("Error retrieving security group: %q", err) + klog.Warningf("Error retrieving security group: %q", err) return nil, err } @@ -2650,7 +2650,7 @@ func ipPermissionExists(newPermission, existing *ec2.IpPermission, compareGroupU } // Check only if newPermission is a subset of existing. Usually it has zero or one elements. // Not doing actual CIDR math yet; not clear it's needed, either. - glog.V(4).Infof("Comparing %v to %v", newPermission, existing) + klog.V(4).Infof("Comparing %v to %v", newPermission, existing) if len(newPermission.IpRanges) > len(existing.IpRanges) { return false } @@ -2685,7 +2685,7 @@ func ipPermissionExists(newPermission, existing *ec2.IpPermission, compareGroupU } func isEqualUserGroupPair(l, r *ec2.UserIdGroupPair, compareGroupUserIDs bool) bool { - glog.V(2).Infof("Comparing %v to %v", *l.GroupId, *r.GroupId) + klog.V(2).Infof("Comparing %v to %v", *l.GroupId, *r.GroupId) if isEqualStringPointer(l.GroupId, r.GroupId) { if compareGroupUserIDs { if isEqualStringPointer(l.UserId, r.UserId) { @@ -2710,7 +2710,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe group, err := c.findSecurityGroup(securityGroupID) if err != nil { - glog.Warningf("Error retrieving security group %q", err) + klog.Warningf("Error retrieving security group %q", err) return false, err } @@ -2718,7 +2718,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe return false, fmt.Errorf("security group not found: %s", securityGroupID) } - glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) + klog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) actual := NewIPPermissionSet(group.IpPermissions...) @@ -2749,7 +2749,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe // don't want to accidentally open more than intended while we're // applying changes. 
if add.Len() != 0 { - glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, add.List()) + klog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, add.List()) request := &ec2.AuthorizeSecurityGroupIngressInput{} request.GroupId = &securityGroupID @@ -2760,7 +2760,7 @@ func (c *Cloud) setSecurityGroupIngress(securityGroupID string, permissions IPPe } } if remove.Len() != 0 { - glog.V(2).Infof("Remove security group ingress: %s %v", securityGroupID, remove.List()) + klog.V(2).Infof("Remove security group ingress: %s %v", securityGroupID, remove.List()) request := &ec2.RevokeSecurityGroupIngressInput{} request.GroupId = &securityGroupID @@ -2785,7 +2785,7 @@ func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions [ group, err := c.findSecurityGroup(securityGroupID) if err != nil { - glog.Warningf("Error retrieving security group: %q", err) + klog.Warningf("Error retrieving security group: %q", err) return false, err } @@ -2793,7 +2793,7 @@ func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions [ return false, fmt.Errorf("security group not found: %s", securityGroupID) } - glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) + klog.V(2).Infof("Existing security group ingress: %s %v", securityGroupID, group.IpPermissions) changes := []*ec2.IpPermission{} for _, addPermission := range addPermissions { @@ -2821,14 +2821,14 @@ func (c *Cloud) addSecurityGroupIngress(securityGroupID string, addPermissions [ return false, nil } - glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, changes) + klog.V(2).Infof("Adding security group ingress: %s %v", securityGroupID, changes) request := &ec2.AuthorizeSecurityGroupIngressInput{} request.GroupId = &securityGroupID request.IpPermissions = changes _, err = c.ec2.AuthorizeSecurityGroupIngress(request) if err != nil { - glog.Warningf("Error authorizing security group ingress %q", err) + klog.Warningf("Error authorizing security group ingress %q", err) return false, fmt.Errorf("error authorizing security group ingress: %q", err) } @@ -2846,12 +2846,12 @@ func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermiss group, err := c.findSecurityGroup(securityGroupID) if err != nil { - glog.Warningf("Error retrieving security group: %q", err) + klog.Warningf("Error retrieving security group: %q", err) return false, err } if group == nil { - glog.Warning("Security group not found: ", securityGroupID) + klog.Warning("Security group not found: ", securityGroupID) return false, nil } @@ -2881,14 +2881,14 @@ func (c *Cloud) removeSecurityGroupIngress(securityGroupID string, removePermiss return false, nil } - glog.V(2).Infof("Removing security group ingress: %s %v", securityGroupID, changes) + klog.V(2).Infof("Removing security group ingress: %s %v", securityGroupID, changes) request := &ec2.RevokeSecurityGroupIngressInput{} request.GroupId = &securityGroupID request.IpPermissions = changes _, err = c.ec2.RevokeSecurityGroupIngress(request) if err != nil { - glog.Warningf("Error revoking security group ingress: %q", err) + klog.Warningf("Error revoking security group ingress: %q", err) return false, err } @@ -2924,7 +2924,7 @@ func (c *Cloud) ensureSecurityGroup(name string, description string, additionalT if len(securityGroups) >= 1 { if len(securityGroups) > 1 { - glog.Warningf("Found multiple security groups with name: %q", name) + klog.Warningf("Found multiple security groups with name: %q", name) } 
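// The hunks in this file swap only the logging import; klog (v1) preserves
// glog's call surface, so every call site keeps its shape. A minimal sketch
// of that shared surface, assuming the k8s.io/klog v1 import path (the
// messages and variable v are illustrative, not from this patch):
//
//	import "k8s.io/klog"
//
//	klog.Infof("info: %v", v)        // unconditional info
//	klog.Warningf("warn: %v", v)     // warning severity
//	klog.Errorf("error: %v", v)      // error severity
//	klog.V(2).Infof("gated: %v", v)  // emitted only at -v=2 and above
//	if klog.V(4) {                   // Verbose is a bool type, so expensive
//		klog.Infof("diagnostics")    // logging can be guarded explicitly
//	}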
err := c.tagging.readRepairClusterTags( c.ec2, aws.StringValue(securityGroups[0].GroupId), @@ -2947,12 +2947,12 @@ func (c *Cloud) ensureSecurityGroup(name string, description string, additionalT switch err := err.(type) { case awserr.Error: if err.Code() == "InvalidGroup.Duplicate" && attempt < MaxReadThenCreateRetries { - glog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry") + klog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry") ignore = true } } if !ignore { - glog.Errorf("Error creating security group: %q", err) + klog.Errorf("Error creating security group: %q", err) return "", err } time.Sleep(1 * time.Second) @@ -3011,7 +3011,7 @@ func (c *Cloud) findSubnets() ([]*ec2.Subnet, error) { } // Fall back to the current instance subnets, if nothing is tagged - glog.Warningf("No tagged subnets found; will fall-back to the current subnet only. This is likely to be an error in a future version of k8s.") + klog.Warningf("No tagged subnets found; will fall-back to the current subnet only. This is likely to be an error in a future version of k8s.") request = &ec2.DescribeSubnetsInput{} filters = []*ec2.Filter{newEc2Filter("subnet-id", c.selfAWSInstance.subnetID)} @@ -3048,7 +3048,7 @@ func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) { az := aws.StringValue(subnet.AvailabilityZone) id := aws.StringValue(subnet.SubnetId) if az == "" || id == "" { - glog.Warningf("Ignoring subnet with empty az/id: %v", subnet) + klog.Warningf("Ignoring subnet with empty az/id: %v", subnet) continue } @@ -3057,7 +3057,7 @@ func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) { return nil, err } if !internalELB && !isPublic { - glog.V(2).Infof("Ignoring private subnet for public ELB %q", id) + klog.V(2).Infof("Ignoring private subnet for public ELB %q", id) continue } @@ -3088,12 +3088,12 @@ func (c *Cloud) findELBSubnets(internalELB bool) ([]string, error) { // If we have two subnets for the same AZ we arbitrarily choose the one that is first lexicographically. // TODO: Should this be an error. 
if strings.Compare(*existing.SubnetId, *subnet.SubnetId) > 0 { - glog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *subnet.SubnetId, *existing.SubnetId, *subnet.SubnetId) + klog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *subnet.SubnetId, *existing.SubnetId, *subnet.SubnetId) subnetsByAZ[az] = subnet continue } - glog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *existing.SubnetId, *existing.SubnetId, *subnet.SubnetId) + klog.Warningf("Found multiple subnets in AZ %q; choosing %q between subnets %q and %q", az, *existing.SubnetId, *existing.SubnetId, *subnet.SubnetId) continue } @@ -3122,7 +3122,7 @@ func isSubnetPublic(rt []*ec2.RouteTable, subnetID string) (bool, error) { for _, table := range rt { for _, assoc := range table.Associations { if aws.BoolValue(assoc.Main) == true { - glog.V(4).Infof("Assuming implicit use of main routing table %s for %s", + klog.V(4).Infof("Assuming implicit use of main routing table %s for %s", aws.StringValue(table.RouteTableId), subnetID) subnetTable = table break @@ -3195,7 +3195,7 @@ func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, load sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName) securityGroupID, err = c.ensureSecurityGroup(sgName, sgDescription, getLoadBalancerAdditionalTags(annotations)) if err != nil { - glog.Errorf("Error creating load balancer security group: %q", err) + klog.Errorf("Error creating load balancer security group: %q", err) return nil, err } } @@ -3263,7 +3263,7 @@ func buildListener(port v1.ServicePort, annotations map[string]string, sslPorts // EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { annotations := apiService.Annotations - glog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", + klog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, c.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, annotations) if apiService.Spec.SessionAffinity != v1.ServiceAffinityNone { @@ -3285,7 +3285,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS return nil, fmt.Errorf("Only TCP LoadBalancer is supported for AWS ELB") } if port.NodePort == 0 { - glog.Errorf("Ignoring port without NodePort defined: %v", port) + klog.Errorf("Ignoring port without NodePort defined: %v", port) continue } @@ -3342,7 +3342,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS // Find the subnets that the ELB will live in subnetIDs, err := c.findELBSubnets(internalELB) if err != nil { - glog.Errorf("Error listing subnets in VPC: %q", err) + klog.Errorf("Error listing subnets in VPC: %q", err) return nil, err } // Bail out early if there are no subnets @@ -3381,7 +3381,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS err = c.updateInstanceSecurityGroupsForNLB(v2Mappings, instances, loadBalancerName, sourceRangeCidrs) if err != nil { - glog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) + klog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) return nil, err } @@ -3503,7 +3503,7 @@ func (c *Cloud) EnsureLoadBalancer(ctx 
context.Context, clusterName string, apiS // Find the subnets that the ELB will live in subnetIDs, err := c.findELBSubnets(internalELB) if err != nil { - glog.Errorf("Error listing subnets in VPC: %q", err) + klog.Errorf("Error listing subnets in VPC: %q", err) return nil, err } @@ -3590,13 +3590,13 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS } if path, healthCheckNodePort := service.GetServiceHealthCheckPathPort(apiService); path != "" { - glog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path) + klog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path) err = c.ensureLoadBalancerHealthCheck(loadBalancer, "HTTP", healthCheckNodePort, path, annotations) if err != nil { return nil, fmt.Errorf("Failed to ensure health check for localized service %v on node port %v: %q", loadBalancerName, healthCheckNodePort, err) } } else { - glog.V(4).Infof("service %v does not need custom health checks", apiService.Name) + klog.V(4).Infof("service %v does not need custom health checks", apiService.Name) // We only configure a TCP health-check on the first port var tcpHealthCheckPort int32 for _, listener := range listeners { @@ -3615,17 +3615,17 @@ func (c *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, apiS err = c.updateInstanceSecurityGroupsForLoadBalancer(loadBalancer, instances) if err != nil { - glog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) + klog.Warningf("Error opening ingress rules for the load balancer to the instances: %q", err) return nil, err } err = c.ensureLoadBalancerInstances(aws.StringValue(loadBalancer.LoadBalancerName), loadBalancer.Instances, instances) if err != nil { - glog.Warningf("Error registering instances with the load balancer: %q", err) + klog.Warningf("Error registering instances with the load balancer: %q", err) return nil, err } - glog.V(1).Infof("Loadbalancer %s (%v) has DNS name %s", loadBalancerName, serviceName, aws.StringValue(loadBalancer.DNSName)) + klog.V(1).Infof("Loadbalancer %s (%v) has DNS name %s", loadBalancerName, serviceName, aws.StringValue(loadBalancer.DNSName)) // TODO: Wait for creation? 
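One behavioral note that applies to every binary touched by this patch: klog registers its command-line flags (-v, -logtostderr, and friends) only when InitFlags is called, rather than as an import side effect the way glog does, so each main package opts in explicitly. A minimal, self-contained sketch of that wiring; the package layout and messages here are illustrative, not taken from the patch:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // nil registers klog's flags on flag.CommandLine
	flag.Parse()
	defer klog.Flush() // flush any buffered log output on exit

	klog.V(2).Infof("visible only when invoked with -v=2 or higher")
	klog.Warningf("warnings are always emitted")
}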
@@ -3682,7 +3682,7 @@ func toStatus(lb *elb.LoadBalancerDescription) *v1.LoadBalancerStatus { func v2toStatus(lb *elbv2.LoadBalancer) *v1.LoadBalancerStatus { status := &v1.LoadBalancerStatus{} if lb == nil { - glog.Error("[BUG] v2toStatus got nil input, this is a Kubernetes bug, please report") + klog.Error("[BUG] v2toStatus got nil input, this is a Kubernetes bug, please report") return status } @@ -3709,7 +3709,7 @@ func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups m for _, group := range instance.SecurityGroups { groupID := aws.StringValue(group.GroupId) if groupID == "" { - glog.Warningf("Ignoring security group without id for instance %q: %v", instanceID, group) + klog.Warningf("Ignoring security group without id for instance %q: %v", instanceID, group) continue } _, isTagged := taggedSecurityGroups[groupID] @@ -3741,7 +3741,7 @@ func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups m return untagged[0], nil } - glog.Warningf("No security group found for instance %q", instanceID) + klog.Warningf("No security group found for instance %q", instanceID) return nil, nil } @@ -3762,7 +3762,7 @@ func (c *Cloud) getTaggedSecurityGroups() (map[string]*ec2.SecurityGroup, error) id := aws.StringValue(group.GroupId) if id == "" { - glog.Warningf("Ignoring group without id: %v", group) + klog.Warningf("Ignoring group without id: %v", group) continue } m[id] = group @@ -3785,7 +3785,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer } if loadBalancerSecurityGroupID != "" { // We create LBs with one SG - glog.Warningf("Multiple security groups for load balancer: %q", aws.StringValue(lb.LoadBalancerName)) + klog.Warningf("Multiple security groups for load balancer: %q", aws.StringValue(lb.LoadBalancerName)) } loadBalancerSecurityGroupID = *securityGroup } @@ -3834,12 +3834,12 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer } if securityGroup == nil { - glog.Warning("Ignoring instance without security group: ", aws.StringValue(instance.InstanceId)) + klog.Warning("Ignoring instance without security group: ", aws.StringValue(instance.InstanceId)) continue } id := aws.StringValue(securityGroup.GroupId) if id == "" { - glog.Warningf("found security group without id: %v", securityGroup) + klog.Warningf("found security group without id: %v", securityGroup) continue } @@ -3850,7 +3850,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer for _, actualGroup := range actualGroups { actualGroupID := aws.StringValue(actualGroup.GroupId) if actualGroupID == "" { - glog.Warning("Ignoring group without ID: ", actualGroup) + klog.Warning("Ignoring group without ID: ", actualGroup) continue } @@ -3866,9 +3866,9 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer for instanceSecurityGroupID, add := range instanceSecurityGroupIds { if add { - glog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) } else { - glog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupID, instanceSecurityGroupID) } 
sourceGroupID := &ec2.UserIdGroupPair{} sourceGroupID.GroupId = &loadBalancerSecurityGroupID @@ -3887,7 +3887,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer return err } if !changed { - glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } else { changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, permissions) @@ -3895,7 +3895,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancer return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } } @@ -3913,7 +3913,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin return err } if lb == nil { - glog.Info("Load balancer already deleted: ", loadBalancerName) + klog.Info("Load balancer already deleted: ", loadBalancerName) return nil } @@ -4037,7 +4037,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", *matchingGroups[i].GroupId) + klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", *matchingGroups[i].GroupId) } } @@ -4055,7 +4055,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin } if lb == nil { - glog.Info("Load balancer already deleted: ", loadBalancerName) + klog.Info("Load balancer already deleted: ", loadBalancerName) return nil } @@ -4063,7 +4063,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin // De-authorize the load balancer security group from the instances security group err = c.updateInstanceSecurityGroupsForLoadBalancer(lb, nil) if err != nil { - glog.Errorf("Error deregistering load balancer from instance security groups: %q", err) + klog.Errorf("Error deregistering load balancer from instance security groups: %q", err) return err } } @@ -4076,7 +4076,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin _, err = c.elb.DeleteLoadBalancer(request) if err != nil { // TODO: Check if error was because load balancer was concurrently deleted - glog.Errorf("Error deleting load balancer: %q", err) + klog.Errorf("Error deleting load balancer: %q", err) return err } } @@ -4094,7 +4094,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin continue } if aws.StringValue(securityGroupID) == "" { - glog.Warning("Ignoring empty security group in ", service.Name) + klog.Warning("Ignoring empty security group in ", service.Name) continue } securityGroupIDs[*securityGroupID] = struct{}{} @@ -4113,7 +4113,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin ignore := false if awsError, ok := err.(awserr.Error); ok { if awsError.Code() == "DependencyViolation" { - glog.V(2).Infof("Ignoring DependencyViolation while deleting load-balancer security group (%s), assuming because LB is in process of deleting", securityGroupID) + klog.V(2).Infof("Ignoring DependencyViolation while deleting load-balancer security group (%s), assuming because LB is in process of deleting", securityGroupID) ignore = true } } @@ -4124,7 +4124,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx 
context.Context, clusterName strin } if len(securityGroupIDs) == 0 { - glog.V(2).Info("Deleted all security groups for load balancer: ", service.Name) + klog.V(2).Info("Deleted all security groups for load balancer: ", service.Name) break } @@ -4137,7 +4137,7 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin return fmt.Errorf("timed out deleting ELB: %s. Could not delete security groups %v", service.Name, strings.Join(ids, ",")) } - glog.V(2).Info("Waiting for load-balancer to delete so we can delete security groups: ", service.Name) + klog.V(2).Info("Waiting for load-balancer to delete so we can delete security groups: ", service.Name) time.Sleep(10 * time.Second) } @@ -4268,14 +4268,14 @@ func (c *Cloud) getInstancesByNodeNames(nodeNames []string, states ...string) ([ instances, err := c.describeInstances(filters) if err != nil { - glog.V(2).Infof("Failed to describe instances %v", nodeNames) + klog.V(2).Infof("Failed to describe instances %v", nodeNames) return nil, err } ec2Instances = append(ec2Instances, instances...) } if len(ec2Instances) == 0 { - glog.V(3).Infof("Failed to find any instances %v", nodeNames) + klog.V(3).Infof("Failed to find any instances %v", nodeNames) return nil, nil } return ec2Instances, nil diff --git a/pkg/cloudprovider/providers/aws/aws_fakes.go b/pkg/cloudprovider/providers/aws/aws_fakes.go index f398ab87cd8f7..28946fa35c03b 100644 --- a/pkg/cloudprovider/providers/aws/aws_fakes.go +++ b/pkg/cloudprovider/providers/aws/aws_fakes.go @@ -25,7 +25,7 @@ import ( "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elbv2" "github.com/aws/aws-sdk-go/service/kms" - "github.com/golang/glog" + "k8s.io/klog" ) // FakeAWSServices is an fake AWS session used for testing @@ -141,7 +141,7 @@ func (ec2i *FakeEC2Impl) DescribeInstances(request *ec2.DescribeInstancesInput) for _, instance := range ec2i.aws.instances { if request.InstanceIds != nil { if instance.InstanceId == nil { - glog.Warning("Instance with no instance id: ", instance) + klog.Warning("Instance with no instance id: ", instance) continue } diff --git a/pkg/cloudprovider/providers/aws/aws_instancegroups.go b/pkg/cloudprovider/providers/aws/aws_instancegroups.go index df6f3084ddf54..6c4c59b039e57 100644 --- a/pkg/cloudprovider/providers/aws/aws_instancegroups.go +++ b/pkg/cloudprovider/providers/aws/aws_instancegroups.go @@ -21,7 +21,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/golang/glog" + "k8s.io/klog" ) // AWSCloud implements InstanceGroups @@ -64,7 +64,7 @@ func DescribeInstanceGroup(asg ASG, instanceGroupName string) (InstanceGroupInfo return nil, nil } if len(response.AutoScalingGroups) > 1 { - glog.Warning("AWS returned multiple autoscaling groups with name ", instanceGroupName) + klog.Warning("AWS returned multiple autoscaling groups with name ", instanceGroupName) } group := response.AutoScalingGroups[0] return &awsInstanceGroup{group: group}, nil diff --git a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go index 27d1443304cdd..0c2e755306752 100644 --- a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go +++ b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go @@ -28,7 +28,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elb" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -134,7 +134,7 @@ 
func (c *Cloud) ensureLoadBalancerv2(namespacedName types.NamespacedName, loadBa }) } - glog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) + klog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) createResponse, err := c.elbv2.CreateLoadBalancer(createRequest) if err != nil { return nil, fmt.Errorf("Error creating load balancer: %q", err) @@ -340,7 +340,7 @@ func createTargetName(namespacedName types.NamespacedName, frontendPort, nodePor func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping nlbPortMapping, namespacedName types.NamespacedName, instanceIDs []string, vpcID string) (listener *elbv2.Listener, targetGroupArn *string, err error) { targetName := createTargetName(namespacedName, mapping.FrontendPort, mapping.TrafficPort) - glog.Infof("Creating load balancer target group for %v with name: %s", namespacedName, targetName) + klog.Infof("Creating load balancer target group for %v with name: %s", namespacedName, targetName) target, err := c.ensureTargetGroup( nil, mapping, @@ -361,7 +361,7 @@ func (c *Cloud) createListenerV2(loadBalancerArn *string, mapping nlbPortMapping Type: aws.String(elbv2.ActionTypeEnumForward), }}, } - glog.Infof("Creating load balancer listener for %v", namespacedName) + klog.Infof("Creating load balancer listener for %v", namespacedName) createListenerOutput, err := c.elbv2.CreateListener(createListernerInput) if err != nil { return nil, aws.String(""), fmt.Errorf("Error creating load balancer listener: %q", err) @@ -617,7 +617,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se for _, actualGroup := range actualGroups { actualGroupID := aws.StringValue(actualGroup.GroupId) if actualGroupID == "" { - glog.Warning("Ignoring group without ID: ", actualGroup) + klog.Warning("Ignoring group without ID: ", actualGroup) continue } @@ -652,17 +652,17 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se for port, add := range portMap { if add { if clientTraffic { - glog.V(2).Infof("Adding rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) - glog.V(2).Infof("Adding rule for client traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for client traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) } else { - glog.V(2).Infof("Adding rule for health check traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Adding rule for health check traffic from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) } } else { if clientTraffic { - glog.V(2).Infof("Removing rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) - glog.V(2).Infof("Removing rule for client traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for client MTU discovery from the network load balancer (%s) to instances (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for client traffic from the network load balancer (%s) to instance (%s)", 
clientCidrs, instanceSecurityGroupID) } - glog.V(2).Infof("Removing rule for health check traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID) + klog.V(2).Infof("Removing rule for health check traffic from the network load balancer (%s) to instance (%s)", clientCidrs, instanceSecurityGroupID) } if clientTraffic { @@ -717,7 +717,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se return err } if !changed { - glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } @@ -727,7 +727,7 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } @@ -750,12 +750,12 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se group, err := c.findSecurityGroup(instanceSecurityGroupID) if err != nil { - glog.Warningf("Error retrieving security group: %q", err) + klog.Warningf("Error retrieving security group: %q", err) return err } if group == nil { - glog.Warning("Security group not found: ", instanceSecurityGroupID) + klog.Warning("Security group not found: ", instanceSecurityGroupID) return nil } @@ -776,21 +776,21 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLBTraffic(actualGroups []*ec2.Se // the icmp permission is missing changed, err := c.addSecurityGroupIngress(instanceSecurityGroupID, []*ec2.IpPermission{mtuPermission}) if err != nil { - glog.Warningf("Error adding MTU permission to security group: %q", err) + klog.Warningf("Error adding MTU permission to security group: %q", err) return err } if !changed { - glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) } } else if icmpExists && permCount == 0 { // there is no additional permissions, remove icmp changed, err := c.removeSecurityGroupIngress(instanceSecurityGroupID, []*ec2.IpPermission{mtuPermission}) if err != nil { - glog.Warningf("Error removing MTU permission to security group: %q", err) + klog.Warningf("Error removing MTU permission to security group: %q", err) return err } if !changed { - glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupID) + klog.Warning("Revoking ingress was not needed; concurrent change? 
groupId=", instanceSecurityGroupID) } } } @@ -869,13 +869,13 @@ func (c *Cloud) updateInstanceSecurityGroupsForNLB(mappings []nlbPortMapping, in } if securityGroup == nil { - glog.Warningf("Ignoring instance without security group: %s", aws.StringValue(instance.InstanceId)) + klog.Warningf("Ignoring instance without security group: %s", aws.StringValue(instance.InstanceId)) continue } id := aws.StringValue(securityGroup.GroupId) if id == "" { - glog.Warningf("found security group without id: %v", securityGroup) + klog.Warningf("found security group without id: %v", securityGroup) continue } @@ -942,7 +942,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala }) } - glog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) + klog.Infof("Creating load balancer for %v with name: %s", namespacedName, loadBalancerName) _, err := c.elb.CreateLoadBalancer(createRequest) if err != nil { return nil, err @@ -955,7 +955,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala } for _, listener := range listeners { - glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to true", *listener.InstancePort) + klog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to true", *listener.InstancePort) err := c.setBackendPolicies(loadBalancerName, *listener.InstancePort, []*string{aws.String(ProxyProtocolPolicyName)}) if err != nil { return nil, err @@ -979,7 +979,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala request := &elb.DetachLoadBalancerFromSubnetsInput{} request.LoadBalancerName = aws.String(loadBalancerName) request.Subnets = stringSetToPointers(removals) - glog.V(2).Info("Detaching load balancer from removed subnets") + klog.V(2).Info("Detaching load balancer from removed subnets") _, err := c.elb.DetachLoadBalancerFromSubnets(request) if err != nil { return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %q", err) @@ -991,7 +991,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala request := &elb.AttachLoadBalancerToSubnetsInput{} request.LoadBalancerName = aws.String(loadBalancerName) request.Subnets = stringSetToPointers(additions) - glog.V(2).Info("Attaching load balancer to added subnets") + klog.V(2).Info("Attaching load balancer to added subnets") _, err := c.elb.AttachLoadBalancerToSubnets(request) if err != nil { return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %q", err) @@ -1014,7 +1014,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala } else { request.SecurityGroups = aws.StringSlice(securityGroupIDs) } - glog.V(2).Info("Applying updated security groups to load balancer") + klog.V(2).Info("Applying updated security groups to load balancer") _, err := c.elb.ApplySecurityGroupsToLoadBalancer(request) if err != nil { return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %q", err) @@ -1032,7 +1032,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala for _, listenerDescription := range listenerDescriptions { actual := listenerDescription.Listener if actual == nil { - glog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName) + klog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName) continue } @@ -1074,7 +1074,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala request := 
&elb.DeleteLoadBalancerListenersInput{} request.LoadBalancerName = aws.String(loadBalancerName) request.LoadBalancerPorts = removals - glog.V(2).Info("Deleting removed load balancer listeners") + klog.V(2).Info("Deleting removed load balancer listeners") _, err := c.elb.DeleteLoadBalancerListeners(request) if err != nil { return nil, fmt.Errorf("error deleting AWS loadbalancer listeners: %q", err) @@ -1086,7 +1086,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala request := &elb.CreateLoadBalancerListenersInput{} request.LoadBalancerName = aws.String(loadBalancerName) request.Listeners = additions - glog.V(2).Info("Creating added load balancer listeners") + klog.V(2).Info("Creating added load balancer listeners") _, err := c.elb.CreateLoadBalancerListeners(request) if err != nil { return nil, fmt.Errorf("error creating AWS loadbalancer listeners: %q", err) @@ -1138,7 +1138,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala } if setPolicy { - glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to %t", instancePort, proxyProtocol) + klog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to %t", instancePort, proxyProtocol) err := c.setBackendPolicies(loadBalancerName, instancePort, proxyPolicies) if err != nil { return nil, err @@ -1152,7 +1152,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala // corresponding listener anymore for instancePort, found := range foundBackends { if !found { - glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort) + klog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to false", instancePort) err := c.setBackendPolicies(loadBalancerName, instancePort, []*string{}) if err != nil { return nil, err @@ -1164,7 +1164,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala { // Add additional tags - glog.V(2).Infof("Creating additional load balancer tags for %s", loadBalancerName) + klog.V(2).Infof("Creating additional load balancer tags for %s", loadBalancerName) tags := getLoadBalancerAdditionalTags(annotations) if len(tags) > 0 { err := c.addLoadBalancerTags(loadBalancerName, tags) @@ -1183,7 +1183,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala describeAttributesRequest.LoadBalancerName = aws.String(loadBalancerName) describeAttributesOutput, err := c.elb.DescribeLoadBalancerAttributes(describeAttributesRequest) if err != nil { - glog.Warning("Unable to retrieve load balancer attributes during attribute sync") + klog.Warning("Unable to retrieve load balancer attributes during attribute sync") return nil, err } @@ -1191,7 +1191,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala // Update attributes if they're dirty if !reflect.DeepEqual(loadBalancerAttributes, foundAttributes) { - glog.V(2).Infof("Updating load-balancer attributes for %q", loadBalancerName) + klog.V(2).Infof("Updating load-balancer attributes for %q", loadBalancerName) modifyAttributesRequest := &elb.ModifyLoadBalancerAttributesInput{} modifyAttributesRequest.LoadBalancerName = aws.String(loadBalancerName) @@ -1207,7 +1207,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala if dirty { loadBalancer, err = c.describeLoadBalancer(loadBalancerName) if err != nil { - glog.Warning("Unable to retrieve load balancer after 
creation/update") + klog.Warning("Unable to retrieve load balancer after creation/update") return nil, err } } @@ -1352,7 +1352,7 @@ func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances if err != nil { return err } - glog.V(1).Infof("Instances added to load-balancer %s", loadBalancerName) + klog.V(1).Infof("Instances added to load-balancer %s", loadBalancerName) } if len(removeInstances) > 0 { @@ -1363,7 +1363,7 @@ func (c *Cloud) ensureLoadBalancerInstances(loadBalancerName string, lbInstances if err != nil { return err } - glog.V(1).Infof("Instances removed from load-balancer %s", loadBalancerName) + klog.V(1).Infof("Instances removed from load-balancer %s", loadBalancerName) } return nil @@ -1382,7 +1382,7 @@ func (c *Cloud) getLoadBalancerTLSPorts(loadBalancer *elb.LoadBalancerDescriptio } func (c *Cloud) ensureSSLNegotiationPolicy(loadBalancer *elb.LoadBalancerDescription, policyName string) error { - glog.V(2).Info("Describing load balancer policies on load balancer") + klog.V(2).Info("Describing load balancer policies on load balancer") result, err := c.elb.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{ LoadBalancerName: loadBalancer.LoadBalancerName, PolicyNames: []*string{ @@ -1403,7 +1403,7 @@ func (c *Cloud) ensureSSLNegotiationPolicy(loadBalancer *elb.LoadBalancerDescrip return nil } - glog.V(2).Infof("Creating SSL negotiation policy '%s' on load balancer", fmt.Sprintf(SSLNegotiationPolicyNameFormat, policyName)) + klog.V(2).Infof("Creating SSL negotiation policy '%s' on load balancer", fmt.Sprintf(SSLNegotiationPolicyNameFormat, policyName)) // there is an upper limit of 98 policies on an ELB, we're pretty safe from // running into it _, err = c.elb.CreateLoadBalancerPolicy(&elb.CreateLoadBalancerPolicyInput{ @@ -1432,7 +1432,7 @@ func (c *Cloud) setSSLNegotiationPolicy(loadBalancerName, sslPolicyName string, aws.String(policyName), }, } - glog.V(2).Infof("Setting SSL negotiation policy '%s' on load balancer", policyName) + klog.V(2).Infof("Setting SSL negotiation policy '%s' on load balancer", policyName) _, err := c.elb.SetLoadBalancerPoliciesOfListener(request) if err != nil { return fmt.Errorf("error setting SSL negotiation policy '%s' on load balancer: %q", policyName, err) @@ -1452,7 +1452,7 @@ func (c *Cloud) createProxyProtocolPolicy(loadBalancerName string) error { }, }, } - glog.V(2).Info("Creating proxy protocol policy on load balancer") + klog.V(2).Info("Creating proxy protocol policy on load balancer") _, err := c.elb.CreateLoadBalancerPolicy(request) if err != nil { return fmt.Errorf("error creating proxy protocol policy on load balancer: %q", err) @@ -1468,9 +1468,9 @@ func (c *Cloud) setBackendPolicies(loadBalancerName string, instancePort int64, PolicyNames: policies, } if len(policies) > 0 { - glog.V(2).Infof("Adding AWS loadbalancer backend policies on node port %d", instancePort) + klog.V(2).Infof("Adding AWS loadbalancer backend policies on node port %d", instancePort) } else { - glog.V(2).Infof("Removing AWS loadbalancer backend policies on node port %d", instancePort) + klog.V(2).Infof("Removing AWS loadbalancer backend policies on node port %d", instancePort) } _, err := c.elb.SetLoadBalancerPoliciesForBackendServer(request) if err != nil { diff --git a/pkg/cloudprovider/providers/aws/aws_routes.go b/pkg/cloudprovider/providers/aws/aws_routes.go index 658f6e898573f..2827596dce49b 100644 --- a/pkg/cloudprovider/providers/aws/aws_routes.go +++ b/pkg/cloudprovider/providers/aws/aws_routes.go @@ -22,7 +22,7 
diff --git a/pkg/cloudprovider/providers/aws/aws_routes.go b/pkg/cloudprovider/providers/aws/aws_routes.go
index 658f6e898573f..2827596dce49b 100644
--- a/pkg/cloudprovider/providers/aws/aws_routes.go
+++ b/pkg/cloudprovider/providers/aws/aws_routes.go
@@ -22,7 +22,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/golang/glog"
+	"k8s.io/klog"
 	cloudprovider "k8s.io/cloud-provider"
 )
@@ -117,7 +117,7 @@ func (c *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpro
 				route.TargetNode = mapInstanceToNodeName(instance)
 				routes = append(routes, route)
 			} else {
-				glog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID)
+				klog.Warningf("unable to find instance ID %s in the list of instances being routed to", instanceID)
 			}
 		}
 	}
@@ -172,7 +172,7 @@ func (c *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint st
 	}
 	if deleteRoute != nil {
-		glog.Infof("deleting blackholed route: %s", aws.StringValue(deleteRoute.DestinationCidrBlock))
+		klog.Infof("deleting blackholed route: %s", aws.StringValue(deleteRoute.DestinationCidrBlock))
 		request := &ec2.DeleteRouteInput{}
 		request.DestinationCidrBlock = deleteRoute.DestinationCidrBlock
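The V(4)/V(6) call sites in the caching code below still build their format arguments before Infof's verbosity check; where that formatting is expensive, callers can guard on the boolean that klog.V returns. An illustrative stand-alone sketch (not code from this patch):

package main

import (
	"fmt"
	"time"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)

	start := time.Now()
	// klog.V(n) returns a boolean, so guarding skips building expensive
	// arguments entirely when -v is below the threshold.
	if klog.V(6) {
		klog.Infof("cache state: %s", fmt.Sprintf("computed at %s", start))
	}
}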
than MaxAge=%s", criteria.MaxAge) return false } } @@ -244,7 +244,7 @@ func (s *allInstancesSnapshot) MeetsCriteria(criteria cacheCriteria) bool { if len(criteria.HasInstances) != 0 { for _, id := range criteria.HasInstances { if nil == s.instances[id] { - glog.V(6).Infof("instanceCache snapshot cannot be used as does not contain instance %s", id) + klog.V(6).Infof("instanceCache snapshot cannot be used as does not contain instance %s", id) return false } } diff --git a/pkg/cloudprovider/providers/aws/log_handler.go b/pkg/cloudprovider/providers/aws/log_handler.go index 86aa30628db10..9328fd284ac7a 100644 --- a/pkg/cloudprovider/providers/aws/log_handler.go +++ b/pkg/cloudprovider/providers/aws/log_handler.go @@ -18,23 +18,23 @@ package aws import ( "github.com/aws/aws-sdk-go/aws/request" - "github.com/golang/glog" + "k8s.io/klog" ) // Handler for aws-sdk-go that logs all requests func awsHandlerLogger(req *request.Request) { service, name := awsServiceAndName(req) - glog.V(4).Infof("AWS request: %s %s", service, name) + klog.V(4).Infof("AWS request: %s %s", service, name) } func awsSendHandlerLogger(req *request.Request) { service, name := awsServiceAndName(req) - glog.V(4).Infof("AWS API Send: %s %s %v %v", service, name, req.Operation, req.Params) + klog.V(4).Infof("AWS API Send: %s %s %v %v", service, name, req.Operation, req.Params) } func awsValidateResponseHandlerLogger(req *request.Request) { service, name := awsServiceAndName(req) - glog.V(4).Infof("AWS API ValidateResponse: %s %s %v %v %s", service, name, req.Operation, req.Params, req.HTTPResponse.Status) + klog.V(4).Infof("AWS API ValidateResponse: %s %s %v %v %s", service, name, req.Operation, req.Params, req.HTTPResponse.Status) } func awsServiceAndName(req *request.Request) (string, string) { diff --git a/pkg/cloudprovider/providers/aws/regions.go b/pkg/cloudprovider/providers/aws/regions.go index 74d64c68de0c4..f19bab6eb550d 100644 --- a/pkg/cloudprovider/providers/aws/regions.go +++ b/pkg/cloudprovider/providers/aws/regions.go @@ -19,7 +19,7 @@ package aws import ( "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" awscredentialprovider "k8s.io/kubernetes/pkg/credentialprovider/aws" @@ -72,11 +72,11 @@ func recognizeRegion(region string) { } if awsRegions.Has(region) { - glog.V(6).Infof("found AWS region %q again - ignoring", region) + klog.V(6).Infof("found AWS region %q again - ignoring", region) return } - glog.V(4).Infof("found AWS region %q", region) + klog.V(4).Infof("found AWS region %q", region) awscredentialprovider.RegisterCredentialsProvider(region) diff --git a/pkg/cloudprovider/providers/aws/retry_handler.go b/pkg/cloudprovider/providers/aws/retry_handler.go index d6b382ccc354c..0fe6c2a575342 100644 --- a/pkg/cloudprovider/providers/aws/retry_handler.go +++ b/pkg/cloudprovider/providers/aws/retry_handler.go @@ -24,7 +24,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -52,7 +52,7 @@ func (c *CrossRequestRetryDelay) BeforeSign(r *request.Request) { now := time.Now() delay := c.backoff.ComputeDelayForRequest(now) if delay > 0 { - glog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s", + klog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s", describeRequest(r), delay.String()) if sleepFn := r.Config.SleepDelay; sleepFn != nil { @@ -96,7 +96,7 @@ func (c *CrossRequestRetryDelay) 
diff --git a/pkg/cloudprovider/providers/aws/regions.go b/pkg/cloudprovider/providers/aws/regions.go
index 74d64c68de0c4..f19bab6eb550d 100644
--- a/pkg/cloudprovider/providers/aws/regions.go
+++ b/pkg/cloudprovider/providers/aws/regions.go
@@ -19,7 +19,7 @@ package aws
 import (
 	"sync"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 	"k8s.io/apimachinery/pkg/util/sets"
 	awscredentialprovider "k8s.io/kubernetes/pkg/credentialprovider/aws"
@@ -72,11 +72,11 @@ func recognizeRegion(region string) {
 	}
 
 	if awsRegions.Has(region) {
-		glog.V(6).Infof("found AWS region %q again - ignoring", region)
+		klog.V(6).Infof("found AWS region %q again - ignoring", region)
 		return
 	}
 
-	glog.V(4).Infof("found AWS region %q", region)
+	klog.V(4).Infof("found AWS region %q", region)
 
 	awscredentialprovider.RegisterCredentialsProvider(region)
diff --git a/pkg/cloudprovider/providers/aws/retry_handler.go b/pkg/cloudprovider/providers/aws/retry_handler.go
index d6b382ccc354c..0fe6c2a575342 100644
--- a/pkg/cloudprovider/providers/aws/retry_handler.go
+++ b/pkg/cloudprovider/providers/aws/retry_handler.go
@@ -24,7 +24,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 const (
@@ -52,7 +52,7 @@ func (c *CrossRequestRetryDelay) BeforeSign(r *request.Request) {
 	now := time.Now()
 	delay := c.backoff.ComputeDelayForRequest(now)
 	if delay > 0 {
-		glog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s",
+		klog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s",
 			describeRequest(r), delay.String())
 
 		if sleepFn := r.Config.SleepDelay; sleepFn != nil {
@@ -96,7 +96,7 @@ func (c *CrossRequestRetryDelay) AfterRetry(r *request.Request) {
 		if awsError.Code() == "RequestLimitExceeded" {
 			c.backoff.ReportError()
 			recordAWSThrottlesMetric(operationName(r))
-			glog.Warningf("Got RequestLimitExceeded error on AWS request (%s)",
+			klog.Warningf("Got RequestLimitExceeded error on AWS request (%s)",
 				describeRequest(r))
 		}
 	}
Error was %q", err) lastErr = err return false, nil }) diff --git a/pkg/cloudprovider/providers/aws/volumes.go b/pkg/cloudprovider/providers/aws/volumes.go index d7c046c5cd05e..7031c5a52c141 100644 --- a/pkg/cloudprovider/providers/aws/volumes.go +++ b/pkg/cloudprovider/providers/aws/volumes.go @@ -24,7 +24,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" ) @@ -121,7 +121,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type info, err := disk.describeVolume() if err != nil { - glog.Warningf("Error describing volume %s with %v", diskName, err) + klog.Warningf("Error describing volume %s with %v", diskName, err) awsDiskInfo.volumeState = "unknown" return awsDiskInfo, false, err } @@ -138,7 +138,7 @@ func (c *Cloud) checkIfAttachedToNode(diskName KubernetesVolumeID, nodeName type // has been deleted if err != nil { fetchErr := fmt.Errorf("Error fetching instance %s for volume %s", instanceID, diskName) - glog.Warning(fetchErr) + klog.Warning(fetchErr) return awsDiskInfo, false, fetchErr } diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index d49de26e7f5d2..d4116a63893e9 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -66,9 +66,9 @@ go_library( "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/rubiojr/go-vhd/vhd:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/pkg/cloudprovider/providers/azure/auth/BUILD b/pkg/cloudprovider/providers/azure/auth/BUILD index cc733d385aa0f..a0eca412154ef 100644 --- a/pkg/cloudprovider/providers/azure/auth/BUILD +++ b/pkg/cloudprovider/providers/azure/auth/BUILD @@ -8,8 +8,8 @@ go_library( deps = [ "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/crypto/pkcs12:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/cloudprovider/providers/azure/auth/azure_auth.go b/pkg/cloudprovider/providers/azure/auth/azure_auth.go index 08f894d72167b..6a651eb05c0bd 100644 --- a/pkg/cloudprovider/providers/azure/auth/azure_auth.go +++ b/pkg/cloudprovider/providers/azure/auth/azure_auth.go @@ -24,8 +24,8 @@ import ( "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" - "github.com/golang/glog" "golang.org/x/crypto/pkcs12" + "k8s.io/klog" ) // AzureAuthConfig holds auth related part of cloud config @@ -55,18 +55,18 @@ type AzureAuthConfig struct { // GetServicePrincipalToken creates a new service principal token based on the configuration func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) { if config.UseManagedIdentityExtension { - glog.V(2).Infoln("azure: using managed identity extension to retrieve access token") + klog.V(2).Infoln("azure: using managed identity extension to retrieve access token") msiEndpoint, err := 
diff --git a/pkg/cloudprovider/providers/azure/auth/azure_auth.go b/pkg/cloudprovider/providers/azure/auth/azure_auth.go
index 08f894d72167b..6a651eb05c0bd 100644
--- a/pkg/cloudprovider/providers/azure/auth/azure_auth.go
+++ b/pkg/cloudprovider/providers/azure/auth/azure_auth.go
@@ -24,8 +24,8 @@ import (
 	"github.com/Azure/go-autorest/autorest/adal"
 	"github.com/Azure/go-autorest/autorest/azure"
-	"github.com/golang/glog"
 	"golang.org/x/crypto/pkcs12"
+	"k8s.io/klog"
 )
 
 // AzureAuthConfig holds auth related part of cloud config
@@ -55,18 +55,18 @@ type AzureAuthConfig struct {
 // GetServicePrincipalToken creates a new service principal token based on the configuration
 func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
 	if config.UseManagedIdentityExtension {
-		glog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
+		klog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
 		msiEndpoint, err := adal.GetMSIVMEndpoint()
 		if err != nil {
 			return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
 		}
 		if len(config.UserAssignedIdentityID) > 0 {
-			glog.V(4).Info("azure: using User Assigned MSI ID to retrieve access token")
+			klog.V(4).Info("azure: using User Assigned MSI ID to retrieve access token")
 			return adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, env.ServiceManagementEndpoint, config.UserAssignedIdentityID)
 		}
-		glog.V(4).Info("azure: using System Assigned MSI to retrieve access token")
+		klog.V(4).Info("azure: using System Assigned MSI to retrieve access token")
 		return adal.NewServicePrincipalTokenFromMSI(
 			msiEndpoint,
 			env.ServiceManagementEndpoint)
@@ -78,7 +78,7 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (
 	}
 
 	if len(config.AADClientSecret) > 0 {
-		glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
+		klog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
 		return adal.NewServicePrincipalToken(
 			*oauthConfig,
 			config.AADClientID,
@@ -87,7 +87,7 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (
 	}
 
 	if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
-		glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
+		klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
 		certData, err := ioutil.ReadFile(config.AADClientCertPath)
 		if err != nil {
 			return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err)
diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go
index c61117929d878..e2205c37fbe43 100644
--- a/pkg/cloudprovider/providers/azure/azure.go
+++ b/pkg/cloudprovider/providers/azure/azure.go
@@ -41,7 +41,7 @@ import (
 	"github.com/Azure/go-autorest/autorest"
 	"github.com/Azure/go-autorest/autorest/azure"
-	"github.com/golang/glog"
+	"k8s.io/klog"
 	"sigs.k8s.io/yaml"
 )
@@ -255,11 +255,11 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
 			config.CloudProviderRateLimitQPSWrite,
 			config.CloudProviderRateLimitBucketWrite)
 
-		glog.V(2).Infof("Azure cloudprovider (read ops) using rate limit config: QPS=%g, bucket=%d",
+		klog.V(2).Infof("Azure cloudprovider (read ops) using rate limit config: QPS=%g, bucket=%d",
 			config.CloudProviderRateLimitQPS,
 			config.CloudProviderRateLimitBucket)
 
-		glog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d",
+		klog.V(2).Infof("Azure cloudprovider (write ops) using rate limit config: QPS=%g, bucket=%d",
 			config.CloudProviderRateLimitQPSWrite,
 			config.CloudProviderRateLimitBucketWrite)
 	}
@@ -321,7 +321,7 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
 			Duration: time.Duration(az.CloudProviderBackoffDuration) * time.Second,
 			Jitter:   az.CloudProviderBackoffJitter,
 		}
-		glog.V(2).Infof("Azure cloudprovider using try backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
+		klog.V(2).Infof("Azure cloudprovider using try backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
 			az.CloudProviderBackoffRetries,
 			az.CloudProviderBackoffExponent,
 			az.CloudProviderBackoffDuration,
@@ -479,7 +479,7 @@ func initDiskControllers(az *Cloud) error {
 
 // SetInformers sets informers for Azure cloud provider.
 func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
-	glog.Infof("Setting up informers for Azure cloud provider")
+	klog.Infof("Setting up informers for Azure cloud provider")
 	nodeInformer := informerFactory.Core().V1().Nodes().Informer()
 	nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
@@ -502,12 +502,12 @@ func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
 			if !isNode {
 				deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
 				if !ok {
-					glog.Errorf("Received unexpected object: %v", obj)
+					klog.Errorf("Received unexpected object: %v", obj)
 					return
 				}
 				node, ok = deletedState.Obj.(*v1.Node)
 				if !ok {
-					glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
+					klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
 					return
 				}
 			}
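The DeleteFunc hunk above has to unwrap cache.DeletedFinalStateUnknown tombstones, which informers deliver when a watch missed the actual delete event. Condensed into a stand-alone helper, the pattern is (names here are illustrative, not from this patch):

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog"
)

// deletedNode unwraps the object handed to a DeleteFunc, which may be a
// tombstone rather than the object itself.
func deletedNode(obj interface{}) (*v1.Node, bool) {
	if node, ok := obj.(*v1.Node); ok {
		return node, true
	}
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		klog.Errorf("Received unexpected object: %v", obj)
		return nil, false
	}
	node, ok := tombstone.Obj.(*v1.Node)
	if !ok {
		klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", tombstone.Obj)
		return nil, false
	}
	return node, true
}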
diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go
index ab268d311ea72..c0a6f4a34ee05 100644
--- a/pkg/cloudprovider/providers/azure/azure_backoff.go
+++ b/pkg/cloudprovider/providers/azure/azure_backoff.go
@@ -23,7 +23,7 @@ import (
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute"
 	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network"
-	"github.com/golang/glog"
+	"k8s.io/klog"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -64,10 +64,10 @@ func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.Virtua
 			return true, cloudprovider.InstanceNotFound
 		}
 		if retryErr != nil {
-			glog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
+			klog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
 			return false, nil
 		}
-		glog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
+		klog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
 		return true, nil
 	})
 	if err == wait.ErrWaitTimeout {
@@ -86,12 +86,12 @@ func (az *Cloud) VirtualMachineClientListWithRetry(resourceGroup string) ([]comp
 		defer cancel()
 		allNodes, retryErr = az.VirtualMachinesClient.List(ctx, resourceGroup)
 		if retryErr != nil {
-			glog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
+			klog.Errorf("VirtualMachinesClient.List(%v) - backoff: failure, will retry,err=%v",
 				resourceGroup,
 				retryErr)
 			return false, retryErr
 		}
-		glog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", resourceGroup)
+		klog.V(2).Infof("VirtualMachinesClient.List(%v) - backoff: success", resourceGroup)
 		return true, nil
 	})
 	if err != nil {
@@ -108,10 +108,10 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string,
 		var retryErr error
 		ip, publicIP, retryErr = az.getIPForMachine(name)
 		if retryErr != nil {
-			glog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
+			klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
 			return false, nil
 		}
-		glog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
+		klog.V(2).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
 		return true, nil
 	})
 	return ip, publicIP, err
@@ -124,7 +124,7 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(service *v1.Service, sg network.Secur
 		defer cancel()
 
 		resp, err := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.ResourceGroup, *sg.Name, sg)
-		glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
+		klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
 		done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateSecurityGroup", resp, err)
 		if done && err == nil {
 			// Invalidate the cache right after updating
@@ -141,7 +141,7 @@ func (az *Cloud) CreateOrUpdateLBWithRetry(service *v1.Service, lb network.LoadB
 		defer cancel()
 
 		resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb)
-		glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
+		klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
 		done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateLoadBalancer", resp, err)
 		if done && err == nil {
 			// Invalidate the cache right after updating
@@ -163,12 +163,12 @@ func (az *Cloud) ListLBWithRetry(service *v1.Service) ([]network.LoadBalancer, e
 		allLBs, retryErr = az.LoadBalancerClient.List(ctx, az.ResourceGroup)
 		if retryErr != nil {
 			az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", retryErr.Error())
-			glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
+			klog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, will retry,err=%v",
 				az.ResourceGroup,
 				retryErr)
 			return false, retryErr
 		}
-		glog.V(2).Infof("LoadBalancerClient.List(%v) - backoff: success", az.ResourceGroup)
+		klog.V(2).Infof("LoadBalancerClient.List(%v) - backoff: success", az.ResourceGroup)
 		return true, nil
 	})
 	if err != nil {
@@ -190,12 +190,12 @@ func (az *Cloud) ListPIPWithRetry(service *v1.Service, pipResourceGroup string)
 		allPIPs, retryErr = az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
 		if retryErr != nil {
 			az.Event(service, v1.EventTypeWarning, "ListPublicIPs", retryErr.Error())
-			glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
+			klog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v",
 				pipResourceGroup,
 				retryErr)
 			return false, retryErr
 		}
-		glog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup)
+		klog.V(2).Infof("PublicIPAddressesClient.List(%v) - backoff: success", pipResourceGroup)
 		return true, nil
 	})
 	if err != nil {
@@ -212,7 +212,7 @@ func (az *Cloud) CreateOrUpdatePIPWithRetry(service *v1.Service, pipResourceGrou
 		defer cancel()
 
 		resp, err := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, *pip.Name, pip)
-		glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
+		klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
 		return az.processHTTPRetryResponse(service, "CreateOrUpdatePublicIPAddress", resp, err)
 	})
 }
@@ -224,7 +224,7 @@ func (az *Cloud) CreateOrUpdateInterfaceWithRetry(service *v1.Service, nic netwo
 		defer cancel()
 
 		resp, err := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic)
-		glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
+		klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
 		return az.processHTTPRetryResponse(service, "CreateOrUpdateInterface", resp, err)
 	})
 }
@@ -274,7 +274,7 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error {
 		defer cancel()
 
 		resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route)
-		glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
+		klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
 		return az.processHTTPRetryResponse(nil, "", resp, err)
 	})
 }
@@ -286,7 +286,7 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error {
 		defer cancel()
 
 		resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName)
-		glog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName)
+		klog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName)
 		return az.processHTTPRetryResponse(nil, "", resp, err)
 	})
 }
@@ -298,7 +298,7 @@ func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM c
 		defer cancel()
 
 		resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, resourceGroup, vmName, newVM)
-		glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
+		klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
 		return az.processHTTPRetryResponse(nil, "", resp, err)
 	})
 }
@@ -307,7 +307,7 @@ func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM c
 func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) error {
 	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
 		resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
-		glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
+		klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID)
 		return az.processHTTPRetryResponse(nil, "", resp, err)
 	})
 }
@@ -345,10 +345,10 @@ func (az *Cloud) processHTTPRetryResponse(service *v1.Service, reason string, re
 	if shouldRetryHTTPRequest(resp, err) {
 		if err != nil {
 			az.Event(service, v1.EventTypeWarning, reason, err.Error())
-			glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, err=%v", err)
+			klog.Errorf("processHTTPRetryResponse: backoff failure, will retry, err=%v", err)
 		} else {
 			az.Event(service, v1.EventTypeWarning, reason, fmt.Sprintf("Azure HTTP response %d", resp.StatusCode))
-			glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d", resp.StatusCode)
+			klog.Errorf("processHTTPRetryResponse: backoff failure, will retry, HTTP response=%d", resp.StatusCode)
 		}
 
 		// suppress the error object so that backoff process continues
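All of the *WithRetry helpers above share one shape: a single ARM call inside wait.ExponentialBackoff, with failures logged via klog.Errorf and success at V(2). A generic sketch of that shape, under assumed backoff values (op stands in for any retried call):

package main

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog"
)

// withRetry runs op under exponential backoff, logging each attempt the way
// the azure_backoff.go helpers do. The backoff parameters are illustrative.
func withRetry(name string, op func() error) error {
	backoff := wait.Backoff{Duration: time.Second, Factor: 2.0, Jitter: 0.1, Steps: 5}
	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		if err := op(); err != nil {
			klog.Errorf("%s: backoff failure, will retry, err=%v", name, err)
			return false, nil // swallow the error so the backoff continues
		}
		klog.V(2).Infof("%s: backoff success", name)
		return true, nil
	})
}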
klog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI) return diskName, diskURI, requestGB, err } // DeleteVolume deletes a VHD blob func (c *BlobDiskController) DeleteVolume(diskURI string) error { - glog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI) + klog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI) accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI) if err != nil { return fmt.Errorf("failed to parse vhd URI %v", err) @@ -114,7 +114,7 @@ func (c *BlobDiskController) DeleteVolume(diskURI string) error { } err = c.common.cloud.deleteVhdBlob(accountName, key, blob) if err != nil { - glog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err) + klog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err) detail := err.Error() if strings.Contains(detail, errLeaseIDMissing) { // disk is still being used @@ -123,7 +123,7 @@ func (c *BlobDiskController) DeleteVolume(diskURI string) error { } return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err) } - glog.V(4).Infof("azureDisk - blob %s deleted", diskURI) + klog.V(4).Infof("azureDisk - blob %s deleted", diskURI) return nil } @@ -153,7 +153,7 @@ func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageC tags := make(map[string]string) tags["createdby"] = "k8sAzureDataDisk" - glog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName) + klog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName) blob := container.GetBlobReference(vhdName) blob.Properties.ContentLength = vhdSize @@ -185,7 +185,7 @@ func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageC End: uint64(vhdSize - 1), } if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil { - glog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n", + klog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n", vhdName, containerName, accountName, err.Error()) return "", "", err } @@ -215,7 +215,7 @@ func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName str //CreateBlobDisk : create a blob disk in a node func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) { - glog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType) + klog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType) storageAccountName, err := c.findSANameForDisk(storageAccountType) if err != nil { @@ -247,7 +247,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error { _, ok := c.accounts[storageAccountName] if !ok { // the storage account is specified by user - glog.V(4).Infof("azureDisk - deleting volume %s", diskURI) + klog.V(4).Infof("azureDisk - deleting volume %s", diskURI) return c.DeleteVolume(diskURI) } @@ -256,7 +256,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error { return err } - glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName) + klog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, 
storageAccountName, vhdContainerName) container := blobSvc.GetContainerReference(vhdContainerName) blob := container.GetBlobReference(vhdName) @@ -266,7 +266,7 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error { if diskCount, err := c.getDiskCount(storageAccountName); err != nil { c.accounts[storageAccountName].diskCount = int32(diskCount) } else { - glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName) + klog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName) return nil // we have failed to acquire a new count. not an error condition } } @@ -291,7 +291,7 @@ func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) for _, v := range *listKeysResult.Keys { if v.Value != nil && *v.Value == "key1" { if _, ok := c.accounts[SAName]; !ok { - glog.Warningf("azureDisk - account %s was not cached while getting keys", SAName) + klog.Warningf("azureDisk - account %s was not cached while getting keys", SAName) return *v.Value, nil } } @@ -366,7 +366,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e _, provisionState, err := c.getStorageAccountState(storageAccountName) if err != nil { - glog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error()) + klog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error()) return false, nil // error performing the query - retryable } @@ -374,7 +374,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e return true, nil } - glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName) + klog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName) return false, nil // back off and see if the account becomes ready on next retry }) // we have failed to ensure that account is ready for us to create @@ -397,7 +397,7 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e return err } if bCreated { - glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName) + klog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName) } // flag so we no longer have to check on ARM @@ -429,7 +429,7 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) { if err != nil { return 0, err } - glog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs)) + klog.V(4).Infof("azure-Disk - refreshed data count for account %s and found %v", SAName, len(response.Blobs)) c.accounts[SAName].diskCount = int32(len(response.Blobs)) return int(c.accounts[SAName].diskCount), nil @@ -449,13 +449,13 @@ func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccount accounts := make(map[string]*storageAccountState) for _, v := range *accountListResult.Value { if v.Name == nil || v.Sku == nil { - glog.Info("azureDisk - accountListResult Name or Sku is nil") + klog.Info("azureDisk - accountListResult Name or Sku is nil") continue } if !strings.HasPrefix(*v.Name, sharedDiskAccountNamePrefix) { continue } - glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name) + klog.Infof("azureDisk - identified account %s as 
part of shared PVC accounts", *v.Name) sastate := &storageAccountState{ name: *v.Name, @@ -486,7 +486,7 @@ func (c *BlobDiskController) createStorageAccount(storageAccountName string, sto return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts) } - glog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType)) + klog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType)) cp := storage.AccountCreateParameters{ Sku: &storage.Sku{Name: storageAccountType}, @@ -542,7 +542,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam countAccounts = countAccounts + 1 // empty account if dCount == 0 { - glog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name) + klog.V(2).Infof("azureDisk - account %s identified for a new disk is because it has 0 allocated disks", v.name) return v.name, nil // short circuit, avg is good and no need to adjust } // if this account is less allocated @@ -555,7 +555,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam // if we failed to find storageaccount if SAName == "" { - glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account") + klog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account") SAName = generateStorageAccountName(sharedDiskAccountNamePrefix) err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true) if err != nil { @@ -571,7 +571,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam // avg are not create and we should create more accounts if we can if aboveAvg && countAccounts < maxStorageAccounts { - glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing) + klog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing) SAName = generateStorageAccountName(sharedDiskAccountNamePrefix) err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true) if err != nil { @@ -582,7 +582,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam // averages are not ok and we are at capacity (max storage accounts allowed) if aboveAvg && countAccounts == maxStorageAccounts { - glog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts", + klog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). 
+		klog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
 			avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
 	}
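Every client wrapper in azure_client.go below brackets its call with start/end traces at V(10) via defer. Reduced to its core, the pattern is (the traced helper is hypothetical, not part of this patch):

package main

import "k8s.io/klog"

// traced brackets fn with start/end logs at v=10, the shape used by the
// azure_client.go wrappers; operation is any label for the call.
func traced(operation string, fn func()) {
	klog.V(10).Infof("%s: start", operation)
	// klog.V(10) is evaluated now; Infof runs when the function returns,
	// so every start line is paired with an end line even on panic.
	defer klog.V(10).Infof("%s: end", operation)
	fn()
}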
klog.V(10).Infof("azInterfacesClient.Get(%q,%q): end", resourceGroupName, networkInterfaceName) }() mc := newMetricContext("interfaces", "get", resourceGroupName, az.client.SubscriptionID) @@ -310,9 +310,9 @@ func (az *azInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx cont return } - glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) + klog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) defer func() { - glog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) + klog.V(10).Infof("azInterfacesClient.GetVirtualMachineScaleSetNetworkInterface(%q,%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName) }() mc := newMetricContext("interfaces", "get_vmss_ni", resourceGroupName, az.client.SubscriptionID) @@ -349,9 +349,9 @@ func (az *azLoadBalancersClient) CreateOrUpdate(ctx context.Context, resourceGro return nil, err } - glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): start", resourceGroupName, loadBalancerName) + klog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): start", resourceGroupName, loadBalancerName) defer func() { - glog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName) + klog.V(10).Infof("azLoadBalancersClient.CreateOrUpdate(%q,%q): end", resourceGroupName, loadBalancerName) }() mc := newMetricContext("load_balancers", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -373,9 +373,9 @@ func (az *azLoadBalancersClient) Delete(ctx context.Context, resourceGroupName s return nil, err } - glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): start", resourceGroupName, loadBalancerName) + klog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): start", resourceGroupName, loadBalancerName) defer func() { - glog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName) + klog.V(10).Infof("azLoadBalancersClient.Delete(%q,%q): end", resourceGroupName, loadBalancerName) }() mc := newMetricContext("load_balancers", "delete", resourceGroupName, az.client.SubscriptionID) @@ -396,9 +396,9 @@ func (az *azLoadBalancersClient) Get(ctx context.Context, resourceGroupName stri return } - glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): start", resourceGroupName, loadBalancerName) + klog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): start", resourceGroupName, loadBalancerName) defer func() { - glog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName) + klog.V(10).Infof("azLoadBalancersClient.Get(%q,%q): end", resourceGroupName, loadBalancerName) }() mc := newMetricContext("load_balancers", "get", resourceGroupName, az.client.SubscriptionID) @@ -413,9 +413,9 @@ func (az *azLoadBalancersClient) List(ctx context.Context, resourceGroupName str return nil, err } - glog.V(10).Infof("azLoadBalancersClient.List(%q): start", resourceGroupName) + klog.V(10).Infof("azLoadBalancersClient.List(%q): start", resourceGroupName) defer func() { - glog.V(10).Infof("azLoadBalancersClient.List(%q): end", resourceGroupName) + klog.V(10).Infof("azLoadBalancersClient.List(%q): end", 
resourceGroupName) }() mc := newMetricContext("load_balancers", "list", resourceGroupName, az.client.SubscriptionID) @@ -465,9 +465,9 @@ func (az *azPublicIPAddressesClient) CreateOrUpdate(ctx context.Context, resourc return nil, err } - glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, publicIPAddressName) + klog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, publicIPAddressName) defer func() { - glog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName) + klog.V(10).Infof("azPublicIPAddressesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, publicIPAddressName) }() mc := newMetricContext("public_ip_addresses", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -489,9 +489,9 @@ func (az *azPublicIPAddressesClient) Delete(ctx context.Context, resourceGroupNa return nil, err } - glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): start", resourceGroupName, publicIPAddressName) + klog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): start", resourceGroupName, publicIPAddressName) defer func() { - glog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName) + klog.V(10).Infof("azPublicIPAddressesClient.Delete(%q,%q): end", resourceGroupName, publicIPAddressName) }() mc := newMetricContext("public_ip_addresses", "delete", resourceGroupName, az.client.SubscriptionID) @@ -512,9 +512,9 @@ func (az *azPublicIPAddressesClient) Get(ctx context.Context, resourceGroupName return } - glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): start", resourceGroupName, publicIPAddressName) + klog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): start", resourceGroupName, publicIPAddressName) defer func() { - glog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName) + klog.V(10).Infof("azPublicIPAddressesClient.Get(%q,%q): end", resourceGroupName, publicIPAddressName) }() mc := newMetricContext("public_ip_addresses", "get", resourceGroupName, az.client.SubscriptionID) @@ -528,9 +528,9 @@ func (az *azPublicIPAddressesClient) List(ctx context.Context, resourceGroupName return nil, createRateLimitErr(false, "PublicIPList") } - glog.V(10).Infof("azPublicIPAddressesClient.List(%q): start", resourceGroupName) + klog.V(10).Infof("azPublicIPAddressesClient.List(%q): start", resourceGroupName) defer func() { - glog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName) + klog.V(10).Infof("azPublicIPAddressesClient.List(%q): end", resourceGroupName) }() mc := newMetricContext("public_ip_addresses", "list", resourceGroupName, az.client.SubscriptionID) @@ -580,9 +580,9 @@ func (az *azSubnetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName return } - glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) + klog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) defer func() { - glog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) + klog.V(10).Infof("azSubnetsClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() mc := newMetricContext("subnets", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -604,9 +604,9 @@ func (az *azSubnetsClient) Delete(ctx context.Context, resourceGroupName 
string, return } - glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) + klog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) defer func() { - glog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) + klog.V(10).Infof("azSubnetsClient.Delete(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() mc := newMetricContext("subnets", "delete", resourceGroupName, az.client.SubscriptionID) @@ -627,9 +627,9 @@ func (az *azSubnetsClient) Get(ctx context.Context, resourceGroupName string, vi return } - glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) + klog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): start", resourceGroupName, virtualNetworkName, subnetName) defer func() { - glog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) + klog.V(10).Infof("azSubnetsClient.Get(%q,%q,%q): end", resourceGroupName, virtualNetworkName, subnetName) }() mc := newMetricContext("subnets", "get", resourceGroupName, az.client.SubscriptionID) @@ -643,9 +643,9 @@ func (az *azSubnetsClient) List(ctx context.Context, resourceGroupName string, v return nil, createRateLimitErr(false, "SubnetList") } - glog.V(10).Infof("azSubnetsClient.List(%q,%q): start", resourceGroupName, virtualNetworkName) + klog.V(10).Infof("azSubnetsClient.List(%q,%q): start", resourceGroupName, virtualNetworkName) defer func() { - glog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName) + klog.V(10).Infof("azSubnetsClient.List(%q,%q): end", resourceGroupName, virtualNetworkName) }() mc := newMetricContext("subnets", "list", resourceGroupName, az.client.SubscriptionID) @@ -695,9 +695,9 @@ func (az *azSecurityGroupsClient) CreateOrUpdate(ctx context.Context, resourceGr return } - glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkSecurityGroupName) + klog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, networkSecurityGroupName) defer func() { - glog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName) + klog.V(10).Infof("azSecurityGroupsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() mc := newMetricContext("security_groups", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -719,9 +719,9 @@ func (az *azSecurityGroupsClient) Delete(ctx context.Context, resourceGroupName return } - glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): start", resourceGroupName, networkSecurityGroupName) + klog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): start", resourceGroupName, networkSecurityGroupName) defer func() { - glog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName) + klog.V(10).Infof("azSecurityGroupsClient.Delete(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() mc := newMetricContext("security_groups", "delete", resourceGroupName, az.client.SubscriptionID) @@ -742,9 +742,9 @@ func (az *azSecurityGroupsClient) Get(ctx context.Context, resourceGroupName str return } - glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): start", resourceGroupName, networkSecurityGroupName) + klog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): start", resourceGroupName, 
networkSecurityGroupName) defer func() { - glog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName) + klog.V(10).Infof("azSecurityGroupsClient.Get(%q,%q): end", resourceGroupName, networkSecurityGroupName) }() mc := newMetricContext("security_groups", "get", resourceGroupName, az.client.SubscriptionID) @@ -758,9 +758,9 @@ func (az *azSecurityGroupsClient) List(ctx context.Context, resourceGroupName st return nil, createRateLimitErr(false, "NSGList") } - glog.V(10).Infof("azSecurityGroupsClient.List(%q): start", resourceGroupName) + klog.V(10).Infof("azSecurityGroupsClient.List(%q): start", resourceGroupName) defer func() { - glog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName) + klog.V(10).Infof("azSecurityGroupsClient.List(%q): end", resourceGroupName) }() mc := newMetricContext("security_groups", "list", resourceGroupName, az.client.SubscriptionID) @@ -810,9 +810,9 @@ func (az *azVirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, r return } - glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): start", resourceGroupName, VMScaleSetName) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.CreateOrUpdate(%q,%q): end", resourceGroupName, VMScaleSetName) }() mc := newMetricContext("vmss", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -833,9 +833,9 @@ func (az *azVirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGrou return } - glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): start", resourceGroupName, VMScaleSetName) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.Get(%q,%q): end", resourceGroupName, VMScaleSetName) }() mc := newMetricContext("vmss", "get", resourceGroupName, az.client.SubscriptionID) @@ -850,9 +850,9 @@ func (az *azVirtualMachineScaleSetsClient) List(ctx context.Context, resourceGro return } - glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): start", resourceGroupName) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.List(%q): end", resourceGroupName) }() mc := newMetricContext("vmss", "list", resourceGroupName, az.client.SubscriptionID) @@ -881,9 +881,9 @@ func (az *azVirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, return } - glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): start", resourceGroupName, VMScaleSetName, VMInstanceIDs) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): start", resourceGroupName, VMScaleSetName, VMInstanceIDs) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): end", resourceGroupName, VMScaleSetName, VMInstanceIDs) + klog.V(10).Infof("azVirtualMachineScaleSetsClient.UpdateInstances(%q,%q,%v): end", resourceGroupName, VMScaleSetName, VMInstanceIDs) }() mc := newMetricContext("vmss", "update_instances", 
resourceGroupName, az.client.SubscriptionID) @@ -925,9 +925,9 @@ func (az *azVirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGr return } - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Get(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() mc := newMetricContext("vmssvm", "get", resourceGroupName, az.client.SubscriptionID) @@ -942,9 +942,9 @@ func (az *azVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context return } - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.GetInstanceView(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() mc := newMetricContext("vmssvm", "get_instance_view", resourceGroupName, az.client.SubscriptionID) @@ -959,9 +959,9 @@ func (az *azVirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceG return } - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): start", resourceGroupName, virtualMachineScaleSetName, filter) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.List(%q,%q,%q): end", resourceGroupName, virtualMachineScaleSetName, filter) }() mc := newMetricContext("vmssvm", "list", resourceGroupName, az.client.SubscriptionID) @@ -989,9 +989,9 @@ func (az *azVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourc return } - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): start", resourceGroupName, VMScaleSetName, instanceID) defer func() { - glog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) + klog.V(10).Infof("azVirtualMachineScaleSetVMsClient.Update(%q,%q,%q): end", resourceGroupName, VMScaleSetName, instanceID) }() mc := newMetricContext("vmssvm", "update", resourceGroupName, az.client.SubscriptionID) @@ -1034,9 +1034,9 @@ func (az *azRoutesClient) CreateOrUpdate(ctx context.Context, resourceGroupName return } - glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, routeTableName, routeName) + klog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): start", resourceGroupName, routeTableName, routeName) defer func() { - glog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) + klog.V(10).Infof("azRoutesClient.CreateOrUpdate(%q,%q,%q): end", resourceGroupName, routeTableName, 
routeName) }() mc := newMetricContext("routes", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -1058,9 +1058,9 @@ func (az *azRoutesClient) Delete(ctx context.Context, resourceGroupName string, return } - glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): start", resourceGroupName, routeTableName, routeName) + klog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): start", resourceGroupName, routeTableName, routeName) defer func() { - glog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) + klog.V(10).Infof("azRoutesClient.Delete(%q,%q,%q): end", resourceGroupName, routeTableName, routeName) }() mc := newMetricContext("routes", "delete", resourceGroupName, az.client.SubscriptionID) @@ -1103,9 +1103,9 @@ func (az *azRouteTablesClient) CreateOrUpdate(ctx context.Context, resourceGroup return } - glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, routeTableName) + klog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): start", resourceGroupName, routeTableName) defer func() { - glog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName) + klog.V(10).Infof("azRouteTablesClient.CreateOrUpdate(%q,%q): end", resourceGroupName, routeTableName) }() mc := newMetricContext("route_tables", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -1126,9 +1126,9 @@ func (az *azRouteTablesClient) Get(ctx context.Context, resourceGroupName string return } - glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): start", resourceGroupName, routeTableName) + klog.V(10).Infof("azRouteTablesClient.Get(%q,%q): start", resourceGroupName, routeTableName) defer func() { - glog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName) + klog.V(10).Infof("azRouteTablesClient.Get(%q,%q): end", resourceGroupName, routeTableName) }() mc := newMetricContext("route_tables", "get", resourceGroupName, az.client.SubscriptionID) @@ -1164,9 +1164,9 @@ func (az *azStorageAccountClient) Create(ctx context.Context, resourceGroupName return } - glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): start", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.Create(%q,%q): start", resourceGroupName, accountName) defer func() { - glog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.Create(%q,%q): end", resourceGroupName, accountName) }() mc := newMetricContext("storage_account", "create", resourceGroupName, az.client.SubscriptionID) @@ -1186,9 +1186,9 @@ func (az *azStorageAccountClient) Delete(ctx context.Context, resourceGroupName return } - glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): start", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): start", resourceGroupName, accountName) defer func() { - glog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.Delete(%q,%q): end", resourceGroupName, accountName) }() mc := newMetricContext("storage_account", "delete", resourceGroupName, az.client.SubscriptionID) @@ -1203,9 +1203,9 @@ func (az *azStorageAccountClient) ListKeys(ctx context.Context, resourceGroupNam return } - glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): start", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): start", resourceGroupName, accountName) 
defer func() { - glog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.ListKeys(%q,%q): end", resourceGroupName, accountName) }() mc := newMetricContext("storage_account", "list_keys", resourceGroupName, az.client.SubscriptionID) @@ -1220,9 +1220,9 @@ func (az *azStorageAccountClient) ListByResourceGroup(ctx context.Context, resou return } - glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): start", resourceGroupName) + klog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): start", resourceGroupName) defer func() { - glog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName) + klog.V(10).Infof("azStorageAccountClient.ListByResourceGroup(%q): end", resourceGroupName) }() mc := newMetricContext("storage_account", "list_by_resource_group", resourceGroupName, az.client.SubscriptionID) @@ -1237,9 +1237,9 @@ func (az *azStorageAccountClient) GetProperties(ctx context.Context, resourceGro return } - glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): start", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): start", resourceGroupName, accountName) defer func() { - glog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName) + klog.V(10).Infof("azStorageAccountClient.GetProperties(%q,%q): end", resourceGroupName, accountName) }() mc := newMetricContext("storage_account", "get_properties", resourceGroupName, az.client.SubscriptionID) @@ -1275,9 +1275,9 @@ func (az *azDisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName s return } - glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): start", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): start", resourceGroupName, diskName) defer func() { - glog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.CreateOrUpdate(%q,%q): end", resourceGroupName, diskName) }() mc := newMetricContext("disks", "create_or_update", resourceGroupName, az.client.SubscriptionID) @@ -1299,9 +1299,9 @@ func (az *azDisksClient) Delete(ctx context.Context, resourceGroupName string, d return } - glog.V(10).Infof("azDisksClient.Delete(%q,%q): start", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.Delete(%q,%q): start", resourceGroupName, diskName) defer func() { - glog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.Delete(%q,%q): end", resourceGroupName, diskName) }() mc := newMetricContext("disks", "delete", resourceGroupName, az.client.SubscriptionID) @@ -1322,9 +1322,9 @@ func (az *azDisksClient) Get(ctx context.Context, resourceGroupName string, disk return } - glog.V(10).Infof("azDisksClient.Get(%q,%q): start", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.Get(%q,%q): start", resourceGroupName, diskName) defer func() { - glog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName) + klog.V(10).Infof("azDisksClient.Get(%q,%q): end", resourceGroupName, diskName) }() mc := newMetricContext("disks", "get", resourceGroupName, az.client.SubscriptionID) @@ -1360,9 +1360,9 @@ func (az *azVirtualMachineSizesClient) List(ctx context.Context, location string return } - glog.V(10).Infof("azVirtualMachineSizesClient.List(%q): start", location) + klog.V(10).Infof("azVirtualMachineSizesClient.List(%q): start", location) 
defer func() { - glog.V(10).Infof("azVirtualMachineSizesClient.List(%q): end", location) + klog.V(10).Infof("azVirtualMachineSizesClient.List(%q): end", location) }() mc := newMetricContext("vmsizes", "list", "", az.client.SubscriptionID) diff --git a/pkg/cloudprovider/providers/azure/azure_controller_common.go b/pkg/cloudprovider/providers/azure/azure_controller_common.go index 9be915cfc37c4..7109ea73bd473 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_common.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_common.go @@ -21,7 +21,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" kwait "k8s.io/apimachinery/pkg/util/wait" @@ -119,7 +119,7 @@ func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName) ([]compute. func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) { disks, err := c.getNodeDataDisks(nodeName) if err != nil { - glog.Errorf("error of getting data disks for node %q: %v", nodeName, err) + klog.Errorf("error of getting data disks for node %q: %v", nodeName, err) return -1, err } @@ -128,7 +128,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) || (disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) { // found the disk - glog.V(2).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI) + klog.V(2).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI) return *disk.Lun, nil } } @@ -139,7 +139,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) { disks, err := c.getNodeDataDisks(nodeName) if err != nil { - glog.Errorf("error of getting data disks for node %q: %v", nodeName, err) + klog.Errorf("error of getting data disks for node %q: %v", nodeName, err) return -1, err } @@ -168,7 +168,7 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N if err != nil { if err == cloudprovider.InstanceNotFound { // if host doesn't exist, no need to detach - glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.", + klog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.", nodeName, diskNames) return attached, nil } diff --git a/pkg/cloudprovider/providers/azure/azure_controller_standard.go b/pkg/cloudprovider/providers/azure/azure_controller_standard.go index 9c62e2877dfab..505b19af8ecc0 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_standard.go @@ -21,7 +21,7 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" ) @@ -73,19 +73,19 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri }, }, } - glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, vmName, diskName) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, vmName, diskName) ctx, cancel := getContextWithCancel() defer cancel() if _, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, 
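// From here the hunks move through the disk-controller files but stay just as
// mechanical: klog mirrors glog's exported surface (V, Infof, Warningf,
// Errorf, Fatalf), so call sites change only in the import path. The one
// behavioral difference is flag registration: glog registered -v,
// -logtostderr, etc. in an init(), while klog requires an explicit InitFlags
// call in the binary. A minimal converted main, as an illustration:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // nil = register klog's flags on flag.CommandLine
	flag.Parse()
	defer klog.Flush()

	klog.V(2).Infof("verbosity-gated logging works exactly as under glog")
}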
nodeResourceGroup, vmName, newVM); err != nil { - glog.Errorf("azureDisk - attach disk(%s) failed, err: %v", diskName, err) + klog.Errorf("azureDisk - attach disk(%s) failed, err: %v", diskName, err) detail := err.Error() if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error - glog.V(2).Infof("azureDisk - err %v, try detach disk(%s)", err, diskName) + klog.V(2).Infof("azureDisk - err %v, try detach disk(%s)", err, diskName) as.DetachDiskByName(diskName, diskURI, nodeName) } } else { - glog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName) + klog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName) // Invalidate the cache right after updating as.cloud.vmCache.Delete(vmName) } @@ -98,7 +98,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t vm, err := as.getVirtualMachine(nodeName) if err != nil { // if host doesn't exist, no need to detach - glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName) + klog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName) return nil } @@ -115,7 +115,7 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) || (disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) { // found the disk - glog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI) + klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI) disks = append(disks[:i], disks[i+1:]...) bFoundDisk = true break @@ -134,13 +134,13 @@ func (as *availabilitySet) DetachDiskByName(diskName, diskURI string, nodeName t }, }, } - glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, vmName, diskName) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, vmName, diskName) ctx, cancel := getContextWithCancel() defer cancel() if _, err := as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM); err != nil { - glog.Errorf("azureDisk - detach disk(%s) failed, err: %v", diskName, err) + klog.Errorf("azureDisk - detach disk(%s) failed, err: %v", diskName, err) } else { - glog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName) + klog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName) // Invalidate the cache right after updating as.cloud.vmCache.Delete(vmName) } diff --git a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go index d65b1be47295d..53bbde77a995e 100644 --- a/pkg/cloudprovider/providers/azure/azure_controller_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_controller_vmss.go @@ -21,7 +21,7 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" ) @@ -71,16 +71,16 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod ctx, cancel := getContextWithCancel() defer cancel() - glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, nodeName, diskName) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s)", nodeResourceGroup, nodeName, diskName) if _, err := 
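// Both attach paths (availabilitySet above, scaleSet below) share a
// compensation idiom: when the VM update fails, the error text is matched
// against known lease/blob markers and the disk is detached immediately so the
// node is not left with a half-attached LUN, while the original error is still
// returned. The classification step in isolation; the two constant values are
// assumptions, the real ones are defined elsewhere in this package:

import "strings"

const (
	errLeaseFailedSketch      = "AcquireDiskLeaseFailed" // assumed marker text
	errDiskBlobNotFoundSketch = "DiskBlobNotFound"       // assumed marker text
)

// shouldDetachOnError mirrors the strings.Contains checks used above.
func shouldDetachOnError(err error) bool {
	detail := err.Error()
	return strings.Contains(detail, errLeaseFailedSketch) ||
		strings.Contains(detail, errDiskBlobNotFoundSketch)
}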
ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm); err != nil { detail := err.Error() if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) { // if lease cannot be acquired or disk not found, immediately detach the disk and return the original error - glog.Infof("azureDisk - err %s, try detach disk(%s)", detail, diskName) + klog.Infof("azureDisk - err %s, try detach disk(%s)", detail, diskName) ss.DetachDiskByName(diskName, diskURI, nodeName) } } else { - glog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName) + klog.V(2).Infof("azureDisk - attach disk(%s) succeeded", diskName) // Invalidate the cache right after updating key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) ss.vmssVMCache.Delete(key) @@ -112,7 +112,7 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) || (disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) { // found the disk - glog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI) + klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI) disks = append(disks[:i], disks[i+1:]...) bFoundDisk = true break @@ -126,11 +126,11 @@ func (ss *scaleSet) DetachDiskByName(diskName, diskURI string, nodeName types.No vm.StorageProfile.DataDisks = &disks ctx, cancel := getContextWithCancel() defer cancel() - glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, nodeName, diskName) + klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s)", nodeResourceGroup, nodeName, diskName) if _, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, vm); err != nil { - glog.Errorf("azureDisk - detach disk(%s) from %s failed, err: %v", diskName, nodeName, err) + klog.Errorf("azureDisk - detach disk(%s) from %s failed, err: %v", diskName, nodeName, err) } else { - glog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName) + klog.V(2).Infof("azureDisk - detach disk(%s) succeeded", diskName) // Invalidate the cache right after updating key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) ss.vmssVMCache.Delete(key) diff --git a/pkg/cloudprovider/providers/azure/azure_file.go b/pkg/cloudprovider/providers/azure/azure_file.go index f5bec74cb25d4..ab87cf3625ff6 100644 --- a/pkg/cloudprovider/providers/azure/azure_file.go +++ b/pkg/cloudprovider/providers/azure/azure_file.go @@ -21,7 +21,7 @@ import ( azs "github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/go-autorest/autorest/azure" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -82,7 +82,7 @@ func (f *azureFileClient) resizeFileShare(accountName, accountKey, name string, } share := fileClient.GetShareReference(name) if share.Properties.Quota >= sizeGiB { - glog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s", + klog.Warningf("file share size(%dGi) is already greater or equal than requested size(%dGi), accountName: %s, shareName: %s", share.Properties.Quota, sizeGiB, accountName, name) return nil } @@ -90,7 +90,7 @@ func (f *azureFileClient) resizeFileShare(accountName, accountKey, name string, if err = share.SetProperties(nil); err != nil { return fmt.Errorf("failed to set quota on file share %s, err: %v", name, err) } - glog.V(4).Infof("resize 
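// Note the pattern after every successful attach/detach in these hunks: the VM
// entry is deleted from vmCache/vmssVMCache rather than patched in place, so
// the next read refetches current state from ARM and a stale DataDisks list
// cannot linger. Sketch with an assumed map-backed cache and key layout
// standing in for the package's real timed cache:

import "sync"

type vmssVMCacheSketch struct {
	mu      sync.Mutex
	entries map[string]interface{}
}

func (c *vmssVMCacheSketch) Delete(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.entries, key) // invalidate-on-write: the next Get repopulates from the API
}

func buildVmssCacheKeySketch(resourceGroup, vmssVMName string) string {
	return resourceGroup + "/" + vmssVMName // assumed key layout
}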
file share completed, accountName: %s, shareName: %s, sizeGiB: %d", accountName, name, sizeGiB) + klog.V(4).Infof("resize file share completed, accountName: %s, shareName: %s, sizeGiB: %d", accountName, name, sizeGiB) return nil } diff --git a/pkg/cloudprovider/providers/azure/azure_instances.go b/pkg/cloudprovider/providers/azure/azure_instances.go index d966b21b3ebfe..158ffb976ca86 100644 --- a/pkg/cloudprovider/providers/azure/azure_instances.go +++ b/pkg/cloudprovider/providers/azure/azure_instances.go @@ -25,8 +25,8 @@ import ( "k8s.io/api/core/v1" cloudprovider "k8s.io/cloud-provider" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" ) const ( @@ -43,14 +43,14 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N return nil, err } if unmanaged { - glog.V(4).Infof("NodeAddresses: omitting unmanaged node %q", name) + klog.V(4).Infof("NodeAddresses: omitting unmanaged node %q", name) return nil, nil } addressGetter := func(nodeName types.NodeName) ([]v1.NodeAddress, error) { ip, publicIP, err := az.GetIPForMachineWithRetry(nodeName) if err != nil { - glog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err) + klog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err) return nil, err } @@ -132,7 +132,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { // Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them. if az.IsNodeUnmanagedByProviderID(providerID) { - glog.V(4).Infof("NodeAddressesByProviderID: omitting unmanaged node %q", providerID) + klog.V(4).Infof("NodeAddressesByProviderID: omitting unmanaged node %q", providerID) return nil, nil } @@ -149,7 +149,7 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { // Returns true for unmanaged nodes because azure cloud provider always assumes them exists. if az.IsNodeUnmanagedByProviderID(providerID) { - glog.V(4).Infof("InstanceExistsByProviderID: assuming unmanaged node %q exists", providerID) + klog.V(4).Infof("InstanceExistsByProviderID: assuming unmanaged node %q exists", providerID) return true, nil } @@ -180,7 +180,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st if err != nil { return false, err } - glog.V(5).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName) + klog.V(5).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName) return strings.ToLower(powerStatus) == vmPowerStateStopped || strings.ToLower(powerStatus) == vmPowerStateDeallocated, nil } @@ -210,7 +210,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e } if unmanaged { // InstanceID is same with nodeName for unmanaged nodes. - glog.V(4).Infof("InstanceID: getting ID %q for unmanaged node %q", name, name) + klog.V(4).Infof("InstanceID: getting ID %q for unmanaged node %q", name, name) return nodeName, nil } @@ -264,7 +264,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) { // Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them. 
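// The azure_instances.go hunks above and below give "unmanaged" nodes (cluster
// members not backed by Azure VMs) a uniform short-circuit: address and type
// lookups log at V(4) and return empty results, while existence checks return
// true so the node controller never garbage-collects them. A condensed sketch
// of the gate, with the unmanaged-check helper passed in since its name is
// outside the quoted context:

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog"
)

func nodeAddressesSketch(isUnmanaged func(string) (bool, error), name types.NodeName) ([]v1.NodeAddress, error) {
	unmanaged, err := isUnmanaged(string(name))
	if err != nil {
		return nil, err
	}
	if unmanaged {
		klog.V(4).Infof("NodeAddresses: omitting unmanaged node %q", name)
		return nil, nil // no addresses and no error: not ours to describe
	}
	// ... fall through to the ARM-backed address lookup ...
	return nil, nil
}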
if az.IsNodeUnmanagedByProviderID(providerID) { - glog.V(4).Infof("InstanceTypeByProviderID: omitting unmanaged node %q", providerID) + klog.V(4).Infof("InstanceTypeByProviderID: omitting unmanaged node %q", providerID) return "", nil } @@ -287,7 +287,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, return "", err } if unmanaged { - glog.V(4).Infof("InstanceType: omitting unmanaged node %q", name) + klog.V(4).Infof("InstanceType: omitting unmanaged node %q", name) return "", nil } diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 506a64dd2f73d..77bfdd20de266 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -31,7 +31,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -101,7 +101,7 @@ func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, servic } if !exists { serviceName := getServiceName(service) - glog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName) + klog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName) return nil, false, nil } return status, true, nil @@ -120,7 +120,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser // the service may be switched from an internal LB to a public one, or vise versa. // Here we'll firstly ensure service do not lie in the opposite LB. serviceName := getServiceName(service) - glog.V(5).Infof("ensureloadbalancer(%s): START clusterName=%q", serviceName, clusterName) + klog.V(5).Infof("ensureloadbalancer(%s): START clusterName=%q", serviceName, clusterName) lb, err := az.reconcileLoadBalancer(clusterName, service, nodes, true /* wantLb */) if err != nil { @@ -136,7 +136,7 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser if lbStatus != nil && len(lbStatus.Ingress) > 0 { serviceIP = &lbStatus.Ingress[0].IP } - glog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP)) + klog.V(2).Infof("EnsureLoadBalancer: reconciling security group for service %q with IP %q, wantLb = true", serviceName, logSafe(serviceIP)) if _, err := az.reconcileSecurityGroup(clusterName, service, serviceIP, true /* wantLb */); err != nil { return nil, err } @@ -169,14 +169,14 @@ func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, ser func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) - glog.V(5).Infof("delete(%s): START clusterName=%q", serviceName, clusterName) + klog.V(5).Infof("delete(%s): START clusterName=%q", serviceName, clusterName) serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service, isInternal) if err != nil { return err } - glog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup) + klog.V(2).Infof("EnsureLoadBalancerDeleted: reconciling security group for service %q with IP %q, wantLb = false", serviceName, serviceIPToCleanup) if _, err := az.reconcileSecurityGroup(clusterName, service, 
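// EnsureLoadBalancer above passes the service IP through logSafe(...) instead
// of dereferencing the *string inline. The helper's body is outside these
// hunks; one plausible shape, stated purely as an assumption, is a nil-safe
// formatter so a missing IP can never panic inside a log statement:

func logSafeSketch(s *string) string {
	if s == nil {
		return "(nil)"
	}
	return *s
}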
&serviceIPToCleanup, false /* wantLb */); err != nil { return err } @@ -189,7 +189,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri return err } - glog.V(2).Infof("delete(%s): FINISH", serviceName) + klog.V(2).Infof("delete(%s): FINISH", serviceName) return nil } @@ -279,13 +279,13 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, existingLBs *[]network.LoadBalancer, nodes []*v1.Node) (selectedLB *network.LoadBalancer, existsLb bool, err error) { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) - glog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal) + klog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal) vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes) if err != nil { - glog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err) + klog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err) return nil, false, err } - glog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames) + klog.Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, *vmSetNames) mapExistingLBs := map[string]network.LoadBalancer{} for _, lb := range *existingLBs { @@ -320,13 +320,13 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi if selectedLB == nil { err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - unable to find load balancer for selected VM sets %v", clusterName, serviceName, isInternal, *vmSetNames) - glog.Error(err) + klog.Error(err) return nil, false, err } // validate if the selected LB has not exceeded the MaximumLoadBalancerRuleCount if az.Config.MaximumLoadBalancerRuleCount != 0 && selectedLBRuleCount >= az.Config.MaximumLoadBalancerRuleCount { err = fmt.Errorf("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - all available load balancers have exceeded maximum rule limit %d, vmSetNames (%v)", clusterName, serviceName, isInternal, selectedLBRuleCount, *vmSetNames) - glog.Error(err) + klog.Error(err) return selectedLB, existsLb, err } @@ -335,11 +335,11 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.LoadBalancer) (status *v1.LoadBalancerStatus, err error) { if lb == nil { - glog.V(10).Info("getServiceLoadBalancerStatus: lb is nil") + klog.V(10).Info("getServiceLoadBalancerStatus: lb is nil") return nil, nil } if lb.FrontendIPConfigurations == nil || *lb.FrontendIPConfigurations == nil { - glog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil") + klog.V(10).Info("getServiceLoadBalancerStatus: lb.FrontendIPConfigurations is nil") return nil, nil } isInternal := requiresInternalLoadBalancer(service) @@ -371,7 +371,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L } } - glog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", to.String(lbIP), lbFrontendIPConfigName, serviceName) + 
klog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", to.String(lbIP), lbFrontendIPConfigName, serviceName) return &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: to.String(lbIP)}}}, nil } } @@ -434,11 +434,11 @@ func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, s return "", err } if !existsLb { - glog.V(2).Infof("Expected to find an IP address for service %s but did not. Assuming it has been removed", service.Name) + klog.V(2).Infof("Expected to find an IP address for service %s but did not. Assuming it has been removed", service.Name) return "", nil } if len(lbStatus.Ingress) < 1 { - glog.V(2).Infof("Expected to find an IP address for service %s but it had no ingresses. Assuming it has been removed", service.Name) + klog.V(2).Infof("Expected to find an IP address for service %s but it had no ingresses. Assuming it has been removed", service.Name) return "", nil } @@ -473,14 +473,14 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai } } - glog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name) - glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name) + klog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name) + klog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name) err = az.CreateOrUpdatePIPWithRetry(service, pipResourceGroup, pip) if err != nil { - glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name) + klog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name) return nil, err } - glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name) + klog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): end", pipResourceGroup, *pip.Name) ctx, cancel := getContextWithCancel() defer cancel() @@ -570,14 +570,14 @@ func (az *Cloud) isFrontendIPChanged(clusterName string, config network.Frontend func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*network.LoadBalancer, error) { isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) - glog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb) + klog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb) lb, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb) if err != nil { - glog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err) + klog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err) return nil, err } lbName := *lb.Name - glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb) + klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) wantLb(%t) resolved load balancer name", serviceName, lbName, wantLb) lbFrontendIPConfigName := az.getFrontendIPConfigName(service, subnet(service)) lbFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbFrontendIPConfigName) lbBackendPoolName := getBackendPoolName(clusterName) @@ -600,18 +600,18 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, foundBackendPool := false for _, bp := range newBackendPools { if 
strings.EqualFold(*bp.Name, lbBackendPoolName) { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb) foundBackendPool = true break } else { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found other backendpool %s", serviceName, wantLb, *bp.Name) } } if !foundBackendPool { newBackendPools = append(newBackendPools, network.BackendAddressPool{ Name: to.StringPtr(lbBackendPoolName), }) - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - adding backendpool", serviceName, wantLb) dirtyLb = true lb.BackendAddressPools = &newBackendPools @@ -629,7 +629,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for i := len(newConfigs) - 1; i >= 0; i-- { config := newConfigs[i] if az.serviceOwnsFrontendIP(config, service) { - glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName) + klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, lbFrontendIPConfigName) newConfigs = append(newConfigs[:i], newConfigs[i+1:]...) dirtyConfigs = true } @@ -642,7 +642,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, return nil, err } if isFipChanged { - glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name) + klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name) newConfigs = append(newConfigs[:i], newConfigs[i+1:]...) 
dirtyConfigs = true } @@ -705,7 +705,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, Name: to.StringPtr(lbFrontendIPConfigName), FrontendIPConfigurationPropertiesFormat: fipConfigurationProperties, }) - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigName) dirtyConfigs = true } } @@ -726,15 +726,15 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for i := len(updatedProbes) - 1; i >= 0; i-- { existingProbe := updatedProbes[i] if az.serviceOwnsRule(service, *existingProbe.Name) { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name) keepProbe := false if findProbe(expectedProbes, existingProbe) { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name) keepProbe = true } if !keepProbe { updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...) - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name) dirtyProbes = true } } @@ -743,11 +743,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for _, expectedProbe := range expectedProbes { foundProbe := false if findProbe(updatedProbes, expectedProbe) { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name) foundProbe = true } if !foundProbe { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name) updatedProbes = append(updatedProbes, expectedProbe) dirtyProbes = true } @@ -768,13 +768,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, existingRule := updatedRules[i] if az.serviceOwnsRule(service, *existingRule.Name) { keepRule := false - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) if findRule(expectedRules, existingRule) { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) keepRule = true } if !keepRule { - glog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - 
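// Probes here and rules just below follow the same two-pass reconcile: first
// evict entries this service owns that are no longer expected (iterating
// backwards so index removal stays valid), then append expected entries that
// are missing, each pass tracking its own dirty flag. Condensed over probes,
// with the ownership and comparison predicates passed in to stay
// self-contained (findProbe above compares by name and properties):

func reconcileProbesSketch(existing, expected []network.Probe, owns func(string) bool,
	findProbe func([]network.Probe, network.Probe) bool) ([]network.Probe, bool) {
	dirty := false
	for i := len(existing) - 1; i >= 0; i-- { // backwards: safe in-place removal
		if owns(*existing[i].Name) && !findProbe(expected, existing[i]) {
			existing = append(existing[:i], existing[i+1:]...) // drop stale probe
			dirty = true
		}
	}
	for _, p := range expected {
		if !findProbe(existing, p) {
			existing = append(existing, p) // add missing probe
			dirty = true
		}
	}
	return existing, dirty
}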
dropping", serviceName, wantLb, *existingRule.Name) + klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name) updatedRules = append(updatedRules[:i], updatedRules[i+1:]...) dirtyRules = true } @@ -784,11 +784,11 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, for _, expectedRule := range expectedRules { foundRule := false if findRule(updatedRules, expectedRule) { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) foundRule = true } if !foundRule { - glog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name) + klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name) updatedRules = append(updatedRules, expectedRule) dirtyRules = true } @@ -805,31 +805,31 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 { // When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself, // because an Azure load balancer cannot have an empty FrontendIPConfigurations collection - glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) + klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB. vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) - glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName) + klog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName) err := az.vmSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools) if err != nil { - glog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err) + klog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err) return nil, err } - glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName) + klog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): end", lbBackendPoolID, vmSetName) // Remove the LB. 
- glog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName) + klog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName) err = az.DeleteLBWithRetry(service, lbName) if err != nil { - glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) + klog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) return nil, err } - glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName) + klog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName) } else { - glog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName) + klog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName) err := az.CreateOrUpdateLBWithRetry(service, *lb) if err != nil { - glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName) + klog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName) return nil, err } @@ -837,7 +837,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, // Refresh updated lb which will be used later in other places. newLB, exist, err := az.getAzureLoadBalancer(lbName) if err != nil { - glog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err) + klog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err) return nil, err } if !exist { @@ -857,7 +857,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, } } - glog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName) + klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName) return lb, nil } @@ -881,7 +881,7 @@ func (az *Cloud) reconcileLoadBalancerRule( for _, port := range ports { lbRuleName := az.getLoadBalancerRuleName(service, port, subnet(service)) - glog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName) + klog.V(2).Infof("reconcileLoadBalancerRule lb name (%s) rule name (%s)", lbName, lbRuleName) transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol) if err != nil { @@ -956,12 +956,12 @@ func (az *Cloud) reconcileLoadBalancerRule( // This entails adding required, missing SecurityRules and removing stale rules. 
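// The *WithRetry helpers invoked above and below (DeleteLBWithRetry,
// CreateOrUpdateLBWithRetry, CreateOrUpdateSGWithRetry, ...) wrap one ARM call
// in the provider's configured exponential backoff; their bodies are outside
// these hunks. A hedged sketch of the wrapper shape using apimachinery's
// wait.ExponentialBackoff:

import (
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog"
)

func withRetrySketch(backoff wait.Backoff, op string, call func() (retriable bool, err error)) error {
	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		retriable, err := call()
		if err != nil && retriable {
			klog.V(2).Infof("%s backing off: %v", op, err)
			return false, nil // not done: retry after the next backoff step
		}
		return true, err // done: success, or a terminal error
	})
}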
func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, lbIP *string, wantLb bool) (*network.SecurityGroup, error) { serviceName := getServiceName(service) - glog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName) + klog.V(5).Infof("reconcileSecurityGroup(%s): START clusterName=%q", serviceName, clusterName) ports := service.Spec.Ports if ports == nil { if useSharedSecurityRule(service) { - glog.V(2).Infof("Attempting to reconcile security group for service %s, but service uses shared rule and we don't know which port it's for", service.Name) + klog.V(2).Infof("Attempting to reconcile security group for service %s, but service uses shared rule and we don't know which port it's for", service.Name) return nil, fmt.Errorf("No port info for reconciling shared rule for service %s", service.Name) } ports = []v1.ServicePort{} @@ -1034,7 +1034,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, } for _, r := range expectedSecurityRules { - glog.V(10).Infof("Expecting security rule for %s: %s:%s -> %s:%s", service.Name, *r.SourceAddressPrefix, *r.SourcePortRange, *r.DestinationAddressPrefix, *r.DestinationPortRange) + klog.V(10).Infof("Expecting security rule for %s: %s:%s -> %s:%s", service.Name, *r.SourceAddressPrefix, *r.SourcePortRange, *r.DestinationAddressPrefix, *r.DestinationPortRange) } // update security rules @@ -1045,7 +1045,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, } for _, r := range updatedRules { - glog.V(10).Infof("Existing security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) + klog.V(10).Infof("Existing security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) } // update security rules: remove unwanted rules that belong privately @@ -1053,14 +1053,14 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, for i := len(updatedRules) - 1; i >= 0; i-- { existingRule := updatedRules[i] if az.serviceOwnsRule(service, *existingRule.Name) { - glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) + klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) keepRule := false if findSecurityRule(expectedSecurityRules, existingRule) { - glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) + klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) keepRule = true } if !keepRule { - glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name) + klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - dropping", serviceName, wantLb, *existingRule.Name) updatedRules = append(updatedRules[:i], updatedRules[i+1:]...) 
dirtySg = true } @@ -1074,17 +1074,17 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, sharedRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefix) sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName) if !sharedRuleFound { - glog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name) + klog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name) return nil, fmt.Errorf("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name) } if sharedRule.DestinationAddressPrefixes == nil { - glog.V(4).Infof("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name) + klog.V(4).Infof("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name) return nil, fmt.Errorf("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name) } existingPrefixes := *sharedRule.DestinationAddressPrefixes addressIndex, found := findIndex(existingPrefixes, destinationIPAddress) if !found { - glog.V(4).Infof("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name) + klog.V(4).Infof("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name) return nil, fmt.Errorf("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name) } if len(existingPrefixes) == 1 { @@ -1114,7 +1114,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, for _, expectedRule := range expectedSecurityRules { foundRule := false if findSecurityRule(updatedRules, expectedRule) { - glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) + klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) foundRule = true } if foundRule && allowsConsolidation(expectedRule) { @@ -1123,7 +1123,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, dirtySg = true } if !foundRule { - glog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name) + klog.V(10).Infof("reconcile(%s)(%t): sg rule(%s) - adding", serviceName, wantLb, *expectedRule.Name) nextAvailablePriority, err := getNextAvailablePriority(updatedRules) if err != nil { @@ -1137,16 +1137,16 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, } for _, r := range updatedRules { - glog.V(10).Infof("Updated security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) + klog.V(10).Infof("Updated security rule while processing %s: %s:%s -> %s:%s", service.Name, logSafe(r.SourceAddressPrefix), logSafe(r.SourcePortRange), logSafeCollection(r.DestinationAddressPrefix, r.DestinationAddressPrefixes), logSafe(r.DestinationPortRange)) } if dirtySg { sg.SecurityRules = &updatedRules - glog.V(2).Infof("reconcileSecurityGroup 
for service(%s): sg(%s) - updating", serviceName, *sg.Name) - glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name) + klog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name) + klog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name) err := az.CreateOrUpdateSGWithRetry(service, sg) if err != nil { - glog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name) + klog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name) // TODO (Nov 2017): remove when augmented security rules are out of preview // we could try to parse the response but it's not worth it for bridging a preview errorDescription := err.Error() @@ -1157,7 +1157,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, // END TODO return nil, err } - glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): end", *sg.Name) + klog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): end", *sg.Name) } return &sg, nil } @@ -1331,13 +1331,13 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lb * // This is the only case we should preserve the // Public ip resource with match service tag } else { - glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName) + klog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName) err := az.safeDeletePublicIP(service, pipResourceGroup, &pip, lb) if err != nil { - glog.Errorf("safeDeletePublicIP(%s) failed with error: %v", pipName, err) + klog.Errorf("safeDeletePublicIP(%s) failed with error: %v", pipName, err) return nil, err } - glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - finished", serviceName, pipName) + klog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - finished", serviceName, pipName) } } @@ -1416,21 +1416,21 @@ func (az *Cloud) safeDeletePublicIP(service *v1.Service, pipResourceGroup string if frontendIPConfigUpdated || loadBalancerRuleUpdated { err := az.CreateOrUpdateLBWithRetry(service, *lb) if err != nil { - glog.Errorf("safeDeletePublicIP for service(%s) failed with error: %v", getServiceName(service), err) + klog.Errorf("safeDeletePublicIP for service(%s) failed with error: %v", getServiceName(service), err) return err } } } pipName := to.String(pip.Name) - glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName) + klog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName) err := az.DeletePublicIPWithRetry(service, pipResourceGroup, pipName) if err != nil { if err = ignoreStatusNotFoundFromError(err); err != nil { return err } } - glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): end", pipResourceGroup, pipName) + klog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): end", pipResourceGroup, pipName) return nil } diff --git a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go index 1bbd65ac19ada..110636531d35e 100644 --- a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go +++ b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go @@ -25,7 +25,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -75,7 +75,7 @@ func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, 
//CreateManagedDisk : create managed disk func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) (string, error) { var err error - glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) + klog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) var createZones *[]string if len(options.AvailabilityZone) > 0 { @@ -171,9 +171,9 @@ func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) ( }) if err != nil { - glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType, options.SizeGB) + klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType, options.SizeGB) } else { - glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) + klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB) } return diskID, nil @@ -197,7 +197,7 @@ func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error { // We don't need poll here, k8s will immediately stop referencing the disk // the disk will be eventually deleted - cleanly - by ARM - glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI) + klog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI) return nil } @@ -244,7 +244,7 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan requestGiB := int32(util.RoundUpSize(requestBytes, 1024*1024*1024)) newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB)) - glog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize) + klog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize) // If disk already of greater or equal size than requested we return if *result.DiskProperties.DiskSizeGB >= requestGiB { return newSizeQuant, nil @@ -258,7 +258,7 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan return oldSize, err } - glog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB) + klog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB) return newSizeQuant, nil } @@ -295,7 +295,7 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) { diskName := path.Base(diskURI) resourceGroup, err := getResourceGroupFromDiskURI(diskURI) if err != nil { - glog.Errorf("Failed to get resource group for AzureDisk %q: %v", diskName, err) + klog.Errorf("Failed to get resource group for AzureDisk %q: %v", diskName, err) return nil, err } @@ -304,13 +304,13 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) { defer cancel() disk, err := c.DisksClient.Get(ctx, resourceGroup, diskName) if err != nil { - glog.Errorf("Failed to get information for AzureDisk %q: %v", diskName, err) + klog.Errorf("Failed to get information for AzureDisk %q: %v", diskName, err) return nil, err } // Check whether availability zone is specified. 
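// ResizeDisk above rounds the requested byte count up to whole GiB before
// calling ARM, then reports the rounded size back as a resource.Quantity so
// the volume layer records what was actually provisioned. The arithmetic in
// isolation, with kubernetes' util.RoundUpSize inlined for self-containment:

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func roundUpToGiB(requestBytes int64) (int32, resource.Quantity) {
	const giB = 1024 * 1024 * 1024
	requestGiB := int32((requestBytes + giB - 1) / giB) // round up, never down
	return requestGiB, resource.MustParse(fmt.Sprintf("%dGi", requestGiB))
}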
if disk.Zones == nil || len(*disk.Zones) == 0 { - glog.V(4).Infof("Azure disk %q is not zoned", diskName) + klog.V(4).Infof("Azure disk %q is not zoned", diskName) return nil, nil } @@ -321,7 +321,7 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) { } zone := c.makeZone(zoneID) - glog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName) + klog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName) labels := map[string]string{ kubeletapis.LabelZoneRegion: c.Location, kubeletapis.LabelZoneFailureDomain: zone, diff --git a/pkg/cloudprovider/providers/azure/azure_routes.go b/pkg/cloudprovider/providers/azure/azure_routes.go index a737de6c7f565..36219b2f917a1 100644 --- a/pkg/cloudprovider/providers/azure/azure_routes.go +++ b/pkg/cloudprovider/providers/azure/azure_routes.go @@ -24,13 +24,13 @@ import ( "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" ) // ListRoutes lists all managed routes that belong to the specified clusterName func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { - glog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName) + klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName) routeTable, existsRouteTable, err := az.getRouteTable() routes, err := processRoutes(routeTable, existsRouteTable, err) if err != nil { @@ -72,7 +72,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl for i, route := range *routeTable.Routes { instance := mapRouteNameToNodeName(*route.Name) cidr := *route.AddressPrefix - glog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr) + klog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr) kubeRoutes[i] = &cloudprovider.Route{ Name: *route.Name, @@ -82,13 +82,13 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl } } - glog.V(10).Info("ListRoutes: FINISH") + klog.V(10).Info("ListRoutes: FINISH") return kubeRoutes, nil } func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error { if _, existsRouteTable, err := az.getRouteTable(); err != nil { - glog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) return err } else if existsRouteTable { return nil @@ -103,17 +103,17 @@ func (az *Cloud) createRouteTable() error { RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{}, } - glog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName) + klog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName) ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable) - glog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName) + klog.V(10).Infof("RouteTablesClient.CreateOrUpdate(%q): end", az.RouteTableName) if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. 
routeTableName=%q", az.RouteTableName) + klog.V(2).Infof("createRouteTableIfNotExists backing off: creating routetable. routeTableName=%q", az.RouteTableName) retryErr := az.CreateOrUpdateRouteTableWithRetry(routeTable) if retryErr != nil { err = retryErr - glog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName) + klog.V(2).Infof("createRouteTableIfNotExists abort backoff: creating routetable. routeTableName=%q", az.RouteTableName) } } if err != nil { @@ -136,14 +136,14 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s return err } if unmanaged { - glog.V(2).Infof("CreateRoute: omitting unmanaged node %q", kubeRoute.TargetNode) + klog.V(2).Infof("CreateRoute: omitting unmanaged node %q", kubeRoute.TargetNode) az.routeCIDRsLock.Lock() defer az.routeCIDRsLock.Unlock() az.routeCIDRs[nodeName] = kubeRoute.DestinationCIDR return nil } - glog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("CreateRoute: creating route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) if err := az.createRouteTableIfNotExists(clusterName, kubeRoute); err != nil { return err } @@ -162,24 +162,24 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint s }, } - glog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(3).Infof("CreateRoute: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route) - glog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName) + klog.V(10).Infof("RoutesClient.CreateOrUpdate(%q): end", az.RouteTableName) if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("CreateRoute backing off: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) retryErr := az.CreateOrUpdateRouteWithRetry(route) if retryErr != nil { err = retryErr - glog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("CreateRoute abort backoff: creating route: instance=%q cidr=%q", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) } } if err != nil { return err } - glog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) return nil } @@ -193,34 +193,34 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute return err } if unmanaged { - glog.V(2).Infof("DeleteRoute: omitting unmanaged node %q", kubeRoute.TargetNode) + klog.V(2).Infof("DeleteRoute: omitting unmanaged node %q", kubeRoute.TargetNode) az.routeCIDRsLock.Lock() defer az.routeCIDRsLock.Unlock() delete(az.routeCIDRs, nodeName) return nil } - glog.V(2).Infof("DeleteRoute: deleting route. 
clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) ctx, cancel := getContextWithCancel() defer cancel() routeName := mapNodeNameToRouteName(kubeRoute.TargetNode) resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName) - glog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName) + klog.V(10).Infof("RoutesClient.Delete(%q): end", az.RouteTableName) if az.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("DeleteRoute backing off: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) retryErr := az.DeleteRouteWithRetry(routeName) if retryErr != nil { err = retryErr - glog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("DeleteRoute abort backoff: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) } } if err != nil { return err } - glog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + klog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) return nil } diff --git a/pkg/cloudprovider/providers/azure/azure_standard.go b/pkg/cloudprovider/providers/azure/azure_standard.go index dc565276b1b11..95126f58b0f79 100644 --- a/pkg/cloudprovider/providers/azure/azure_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_standard.go @@ -32,11 +32,11 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/klog" ) const ( @@ -333,10 +333,10 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) } if err != nil { if as.CloudProviderBackoff { - glog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name) + klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name) machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name)) if err != nil { - glog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name) + klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name) return "", err } } else { @@ -381,7 +381,7 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) { machine, err := as.getVirtualMachine(types.NodeName(name)) if err != nil { - glog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err) + klog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err) return "", err } @@ -433,7 +433,7 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error) ipConfig, 
err := getPrimaryIPConfig(nic) if err != nil { - glog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err) + klog.Errorf("as.GetIPByNodeName(%s) failed: getPrimaryIPConfig(%v), err=%v", name, nic, err) return "", "", err } @@ -462,7 +462,7 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error) func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentPoolAvailabilitySets *[]string, err error) { vms, err := as.VirtualMachineClientListWithRetry(as.ResourceGroup) if err != nil { - glog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err) + klog.Errorf("as.getNodeAvailabilitySet - VirtualMachineClientListWithRetry failed, err=%v", err) return nil, err } vmNameToAvailabilitySetID := make(map[string]string, len(vms)) @@ -481,7 +481,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP } asID, ok := vmNameToAvailabilitySetID[nodeName] if !ok { - glog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName) + klog.Errorf("as.getNodeAvailabilitySet - Node(%s) has no availability sets", nodeName) return nil, fmt.Errorf("Node (%s) - has no availability sets", nodeName) } if availabilitySetIDs.Has(asID) { @@ -490,7 +490,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP } asName, err := getLastSegment(asID) if err != nil { - glog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err) + klog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err) return nil, err } // AvailabilitySet ID is currently upper cased in a indeterministic way @@ -516,11 +516,11 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) } availabilitySetNames, err = as.getAgentPoolAvailabiliySets(nodes) if err != nil { - glog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err) + klog.Errorf("as.GetVMSetNames - getAgentPoolAvailabiliySets failed err=(%v)", err) return nil, err } if len(*availabilitySetNames) == 0 { - glog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes)) + klog.Errorf("as.GetVMSetNames - No availability sets found for nodes in the cluster, node count(%d)", len(nodes)) return nil, fmt.Errorf("No availability sets found for nodes, node count(%d)", len(nodes)) } // sort the list to have deterministic selection @@ -540,7 +540,7 @@ func (as *availabilitySet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) } } if !found { - glog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx]) + klog.Errorf("as.GetVMSetNames - Availability set (%s) in service annotation not found", serviceAvailabilitySetNames[sasx]) return nil, fmt.Errorf("availability set (%s) - not found", serviceAvailabilitySetNames[sasx]) } } @@ -581,7 +581,7 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName)) if err != nil { - glog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName) + klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName) return network.Interface{}, err } @@ -608,7 +608,7 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri if vmSetName != "" && !as.useStandardLoadBalancer() { 
expectedAvailabilitySetName := as.getAvailabilitySetID(nodeResourceGroup, vmSetName) if machine.AvailabilitySet == nil || !strings.EqualFold(*machine.AvailabilitySet.ID, expectedAvailabilitySetName) { - glog.V(3).Infof( + klog.V(3).Infof( "GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName) return network.Interface{}, errNotInVMSet } @@ -637,16 +637,16 @@ func (as *availabilitySet) ensureHostInPool(service *v1.Service, nodeName types. nic, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName) if err != nil { if err == errNotInVMSet { - glog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName) + klog.V(3).Infof("ensureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName) return nil } - glog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err) + klog.Errorf("error: az.ensureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err) return err } if nic.ProvisioningState != nil && *nic.ProvisioningState == nicFailedState { - glog.V(3).Infof("ensureHostInPool skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name) + klog.V(3).Infof("ensureHostInPool skips node %s because its primary nic %s is in Failed state", nodeName, *nic.Name) return nil } @@ -679,7 +679,7 @@ func (as *availabilitySet) ensureHostInPool(service *v1.Service, nodeName types. if len(matches) == 2 { lbName := matches[1] if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal { - glog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName) + klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, lbName) return nil } } @@ -694,17 +694,17 @@ func (as *availabilitySet) ensureHostInPool(service *v1.Service, nodeName types. 
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools nicName := *nic.Name - glog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName) + klog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName) ctx, cancel := getContextWithCancel() defer cancel() resp, err := as.InterfacesClient.CreateOrUpdate(ctx, as.ResourceGroup, *nic.Name, nic) - glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name) + klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name) if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err) + klog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err) retryErr := as.CreateOrUpdateInterfaceWithRetry(service, nic) if retryErr != nil { err = retryErr - glog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName) + klog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - updating", serviceName, nicName) } } if err != nil { @@ -721,12 +721,12 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No for _, node := range nodes { localNodeName := node.Name if as.useStandardLoadBalancer() && as.excludeMasterNodesFromStandardLB() && isMasterNode(node) { - glog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID) + klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID) continue } if as.ShouldNodeExcludedFromLoadBalancer(node) { - glog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) + klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) continue } diff --git a/pkg/cloudprovider/providers/azure/azure_storage.go b/pkg/cloudprovider/providers/azure/azure_storage.go index 7c95487be2685..19b91fc99bacb 100644 --- a/pkg/cloudprovider/providers/azure/azure_storage.go +++ b/pkg/cloudprovider/providers/azure/azure_storage.go @@ -20,7 +20,7 @@ import ( "fmt" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -46,7 +46,7 @@ func (az *Cloud) CreateFileShare(shareName, accountName, accountType, accountKin if err := az.createFileShare(account, key, shareName, requestGiB); err != nil { return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareName, account, err) } - glog.V(4).Infof("created share %s in account %s", shareName, account) + klog.V(4).Infof("created share %s in account %s", shareName, account) return account, key, nil } @@ -55,7 +55,7 @@ func (az *Cloud) DeleteFileShare(accountName, accountKey, shareName string) erro if err := az.deleteFileShare(accountName, accountKey, shareName); err != nil { return err } - glog.V(4).Infof("share %s deleted", shareName) + klog.V(4).Infof("share %s deleted", shareName) return nil } diff --git a/pkg/cloudprovider/providers/azure/azure_storageaccount.go b/pkg/cloudprovider/providers/azure/azure_storageaccount.go index f0d5009147468..34871a1119851 100644 --- a/pkg/cloudprovider/providers/azure/azure_storageaccount.go +++ b/pkg/cloudprovider/providers/azure/azure_storageaccount.go @@ -22,7 +22,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" + "k8s.io/klog" ) type accountWithLocation struct { @@ -100,7 +100,7 
@@ func (az *Cloud) ensureStorageAccount(accountName, accountType, accountKind, res if len(accounts) > 0 { accountName = accounts[0].Name - glog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location) + klog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location) } if len(accountName) == 0 { @@ -118,7 +118,7 @@ func (az *Cloud) ensureStorageAccount(accountName, accountType, accountKind, res if accountKind != "" { kind = storage.Kind(accountKind) } - glog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s, accountKind: %s", + klog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s, accountKind: %s", accountName, resourceGroup, location, accountType, kind) cp := storage.AccountCreateParameters{ Sku: &storage.Sku{Name: storage.SkuName(accountType)}, diff --git a/pkg/cloudprovider/providers/azure/azure_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go index ca09f46e88d64..4d5df40462869 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss.go @@ -27,7 +27,7 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest/to" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -113,7 +113,7 @@ func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm co return "", "", vm, err } - glog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName) + klog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName) key := buildVmssCacheKey(resourceGroup, ss.makeVmssVMName(ssName, instanceID)) cachedVM, err := ss.vmssVMCache.Get(key) if err != nil { @@ -121,7 +121,7 @@ func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm co } if cachedVM == nil { - glog.Errorf("Can't find node (%q) in any scale sets", nodeName) + klog.Errorf("Can't find node (%q) in any scale sets", nodeName) return ssName, instanceID, vm, cloudprovider.InstanceNotFound } @@ -159,7 +159,7 @@ func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceI } if cachedVM == nil { - glog.Errorf("couldn't find vmss virtual machine by scaleSetName (%s) and instanceID (%s)", scaleSetName, instanceID) + klog.Errorf("couldn't find vmss virtual machine by scaleSetName (%s) and instanceID (%s)", scaleSetName, instanceID) return vm, cloudprovider.InstanceNotFound } @@ -172,7 +172,7 @@ func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceI func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) { managedByAS, err := ss.isNodeManagedByAvailabilitySet(name) if err != nil { - glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) + klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) return "", err } if managedByAS { @@ -193,7 +193,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, // NodeName is not part of providerID for vmss instances. 
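The hunks on both sides of this point keep every original verbosity level (V(2) for state changes, V(3)/V(4) for detail, V(10) for begin/end tracing), so output is unchanged provided the binary registers klog's flags. Unlike glog, klog does not install its flags as an import side effect; a minimal sketch of the wiring a caller needs, using only the public klog API:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Register -v, -logtostderr, etc.; passing nil targets flag.CommandLine.
	// Without this call the flags simply do not exist.
	klog.InitFlags(nil)
	flag.Set("v", "4") // same effect as passing -v=4 on the command line
	flag.Parse()

	klog.V(2).Infof("visible at -v=2 and above")
	klog.V(10).Infof("suppressed unless -v=10 or higher")
	klog.Flush() // klog buffers output; flush before exiting
}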
scaleSetName, err := extractScaleSetNameByProviderID(providerID) if err != nil { - glog.V(4).Infof("Can not extract scale set name from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err) + klog.V(4).Infof("Cannot extract scale set name from providerID (%s), assuming it is managed by availability set: %v", providerID, err) return ss.availabilitySet.GetNodeNameByProviderID(providerID) } @@ -204,7 +204,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, instanceID, err := getLastSegment(providerID) if err != nil { - glog.V(4).Infof("Can not extract instanceID from providerID (%s), assuming it is mananaged by availability set: %v", providerID, err) + klog.V(4).Infof("Cannot extract instanceID from providerID (%s), assuming it is managed by availability set: %v", providerID, err) return ss.availabilitySet.GetNodeNameByProviderID(providerID) } @@ -225,7 +225,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) { managedByAS, err := ss.isNodeManagedByAvailabilitySet(name) if err != nil { - glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) + klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) return "", err } if managedByAS { @@ -250,7 +250,7 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) { func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { managedByAS, err := ss.isNodeManagedByAvailabilitySet(name) if err != nil { - glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) + klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) return cloudprovider.Zone{}, err } if managedByAS { @@ -294,13 +294,13 @@ func (ss *scaleSet) GetPrimaryVMSetName() string { func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) { nic, err := ss.GetPrimaryInterface(nodeName) if err != nil { - glog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err) + klog.Errorf("error: ss.GetIPByNodeName(%s), GetPrimaryInterface(%q), err=%v", nodeName, nodeName, err) return "", "", err } ipConfig, err := getPrimaryIPConfig(nic) if err != nil { - glog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err) + klog.Errorf("error: ss.GetIPByNodeName(%s), getPrimaryIPConfig(%v), err=%v", nodeName, nic, err) return "", "", err } @@ -390,7 +390,7 @@ func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) { allScaleSets, err := ss.VirtualMachineScaleSetsClient.List(ctx, resourceGroup) if err != nil { - glog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err) + klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", err) return nil, err } @@ -410,7 +410,7 @@ func (ss *scaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compu allVMs, err := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, "", "", string(compute.InstanceView)) if err != nil { - glog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err) + klog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", err) return nil, err } @@ -437,7 +437,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) { } if ssName == "" { - glog.V(3).Infof("Node %q is not belonging to any known scale sets", + klog.V(3).Infof("Node %q does not belong to any known scale sets",
nodeName) continue } @@ -461,11 +461,11 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN scaleSetNames, err := ss.getAgentPoolScaleSets(nodes) if err != nil { - glog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err) + klog.Errorf("ss.GetVMSetNames - getAgentPoolScaleSets failed err=(%v)", err) return nil, err } if len(*scaleSetNames) == 0 { - glog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes)) + klog.Errorf("ss.GetVMSetNames - No scale sets found for nodes in the cluster, node count(%d)", len(nodes)) return nil, fmt.Errorf("No scale sets found for nodes, node count(%d)", len(nodes)) } @@ -487,7 +487,7 @@ func (ss *scaleSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (vmSetN } } if !found { - glog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx]) + klog.Errorf("ss.GetVMSetNames - scale set (%s) in service annotation not found", serviceVMSetNames[sasx]) return nil, fmt.Errorf("scale set (%s) - not found", serviceVMSetNames[sasx]) } } @@ -511,7 +511,7 @@ func extractResourceGroupByVMSSNicID(nicID string) (string, error) { func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) { managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName) if err != nil { - glog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) + klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) return network.Interface{}, err } if managedByAS { @@ -526,19 +526,19 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err return ss.availabilitySet.GetPrimaryInterface(nodeName) } - glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err) + klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getVmssVM(%s), err=%v", nodeName, nodeName, err) return network.Interface{}, err } primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm) if err != nil { - glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err) + klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err) return network.Interface{}, err } nicName, err := getLastSegment(primaryInterfaceID) if err != nil { - glog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err) + klog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err) return network.Interface{}, err } resourceGroup, err := extractResourceGroupByVMSSNicID(primaryInterfaceID) @@ -550,7 +550,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err defer cancel() nic, err := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroup, ssName, instanceID, nicName, "") if err != nil { - glog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, resourceGroup, ssName, nicName, err) + klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, resourceGroup, ssName, nicName, err) return network.Interface{}, err } @@ -572,10 +572,10 @@ func (ss *scaleSet) getScaleSetWithRetry(service *v1.Service, name string) (comp cached, retryErr := ss.vmssCache.Get(name) if retryErr != nil { ss.Event(service, v1.EventTypeWarning, "GetVirtualMachineScaleSet", 
retryErr.Error()) - glog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr) + klog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr) return false, nil } - glog.V(4).Infof("backoff: success for scale set %q", name) + klog.V(4).Infof("backoff: success for scale set %q", name) if cached != nil { exists = true @@ -627,7 +627,7 @@ func (ss *scaleSet) createOrUpdateVMSSWithRetry(service *v1.Service, virtualMach ctx, cancel := getContextWithCancel() defer cancel() resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name) + klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name) return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSS", resp, err) }) } @@ -638,7 +638,7 @@ func (ss *scaleSet) updateVMSSInstancesWithRetry(service *v1.Service, scaleSetNa ctx, cancel := getContextWithCancel() defer cancel() resp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, scaleSetName, vmInstanceIDs) - glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName) + klog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName) return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSSInstance", resp, err) }) } @@ -650,18 +650,18 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String, for _, curNode := range nodes { if ss.useStandardLoadBalancer() && ss.excludeMasterNodesFromStandardLB() && isMasterNode(curNode) { - glog.V(4).Infof("Excluding master node %q from load balancer backendpool", curNode.Name) + klog.V(4).Infof("Excluding master node %q from load balancer backendpool", curNode.Name) continue } if ss.ShouldNodeExcludedFromLoadBalancer(curNode) { - glog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", curNode.Name) + klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", curNode.Name) continue } curScaleSetName, err := extractScaleSetNameByProviderID(curNode.Spec.ProviderID) if err != nil { - glog.V(4).Infof("Node %q is not belonging to any scale sets, assuming it is belong to availability sets", curNode.Name) + klog.V(4).Infof("Node %q does not belong to any scale sets, assuming it belongs to availability sets", curNode.Name) standardNodes = append(standardNodes, curNode) continue } @@ -672,7 +672,7 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String, instanceID, err := getLastSegment(curNode.Spec.ProviderID) if err != nil { - glog.Errorf("Failed to get instance ID for node %q: %v", curNode.Spec.ProviderID, err) + klog.Errorf("Failed to get instance ID for node %q: %v", curNode.Spec.ProviderID, err) return nil, nil, err } @@ -685,16 +685,16 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String, // ensureHostsInVMSetPool ensures the given Node's primary IP configurations are // participating in the vmSet's LoadBalancer Backend Pool.
func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error { - glog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID) + klog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID) serviceName := getServiceName(service) virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, vmSetName) if err != nil { - glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err) + klog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err) return err } if !exists { errorMessage := fmt.Errorf("Scale set %q not found", vmSetName) - glog.Errorf("%v", errorMessage) + klog.Errorf("%v", errorMessage) return errorMessage } @@ -735,7 +735,7 @@ func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID st if len(matches) == 2 { lbName := matches[1] if strings.HasSuffix(lbName, InternalLoadBalancerNameSuffix) == isInternal { - glog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName) + klog.V(4).Infof("vmss %q has already been added to LB %q, omit adding it to a new one", vmSetName, lbName) return nil } } @@ -750,15 +750,15 @@ func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID st ctx, cancel := getContextWithCancel() defer cancel() - glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName) + klog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating", serviceName, vmSetName) resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, vmSetName, virtualMachineScaleSet) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) + klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) + klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet) if retryErr != nil { err = retryErr - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName) + klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName) } } if err != nil { @@ -773,13 +773,13 @@ func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID st ctx, cancel := getContextWithCancel() defer cancel() instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, vmSetName, vmInstanceIDs) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) + klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) + 
klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) retryErr := ss.updateVMSSInstancesWithRetry(service, vmSetName, vmInstanceIDs) if retryErr != nil { err = retryErr - glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName) + klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName) } } if err != nil { @@ -795,7 +795,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac serviceName := getServiceName(service) scalesets, standardNodes, err := ss.getNodesScaleSets(nodes) if err != nil { - glog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err) + klog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err) return err } @@ -807,14 +807,14 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac if instanceIDs.Len() == 0 { // This may happen when scaling a vmss capacity to 0. - glog.V(3).Infof("scale set %q has 0 nodes, adding it to load balancer anyway", ssName) + klog.V(3).Infof("scale set %q has 0 nodes, adding it to load balancer anyway", ssName) // InstanceIDs is required to update vmss, use * instead here since there are no nodes actually. instanceIDs.Insert("*") } err := ss.ensureHostsInVMSetPool(service, backendPoolID, ssName, instanceIDs.List(), isInternal) if err != nil { - glog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err) + klog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err) return err } } @@ -822,7 +822,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac if ss.useStandardLoadBalancer() && len(standardNodes) > 0 { err := ss.availabilitySet.EnsureHostsInPool(service, standardNodes, backendPoolID, "", isInternal) if err != nil { - glog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err) + klog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err) return err } } @@ -832,14 +832,14 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac // ensureScaleSetBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified scaleset. 
func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(service *v1.Service, poolID, ssName string) error { - glog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName) + klog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName) virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, ssName) if err != nil { - glog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err) + klog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err) return err } if !exists { - glog.V(2).Infof("ss.ensureScaleSetBackendPoolDeleted(%s, %s), scale set %s has already been non-exist", poolID, ssName, ssName) + klog.V(2).Infof("ss.ensureScaleSetBackendPoolDeleted(%s, %s), scale set %s does not exist", poolID, ssName, ssName) return nil } @@ -866,7 +866,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(service *v1.Service, poolID for i := len(existingBackendPools) - 1; i >= 0; i-- { curPool := existingBackendPools[i] if strings.EqualFold(poolID, *curPool.ID) { - glog.V(10).Infof("ensureScaleSetBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, ssName) + klog.V(10).Infof("ensureScaleSetBackendPoolDeleted gets unwanted backend pool %q for scale set %q", poolID, ssName) foundPool = true newBackendPools = append(existingBackendPools[:i], existingBackendPools[i+1:]...) } @@ -878,17 +878,17 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(service *v1.Service, poolID // Update scale set with backoff. primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools - glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", ssName) + klog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating", ssName) ctx, cancel := getContextWithCancel() defer cancel() resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, ssName, virtualMachineScaleSet) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName) + klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err) + klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err) retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet) if retryErr != nil { err = retryErr - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName) + klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName) } } if err != nil { @@ -903,13 +903,13 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(service *v1.Service, poolID instanceCtx, instanceCancel := getContextWithCancel() defer instanceCancel() instanceResp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(instanceCtx, ss.ResourceGroup, ssName, vmInstanceIDs) - glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName) + klog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v",
ssName, err) + klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err) retryErr := ss.updateVMSSInstancesWithRetry(service, ssName, vmInstanceIDs) if retryErr != nil { err = retryErr - glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName) + klog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName) } } if err != nil { @@ -921,14 +921,14 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(service *v1.Service, poolID if len(newBackendPools) == 0 { updateCtx, updateCancel := getContextWithCancel() defer updateCancel() - glog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", ssName) + klog.V(3).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating second time", ssName) resp, err = ss.VirtualMachineScaleSetsClient.CreateOrUpdate(updateCtx, ss.ResourceGroup, ssName, virtualMachineScaleSet) - glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName) + klog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err) + klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err) retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet) if retryErr != nil { - glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName) + klog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName) } } } @@ -952,7 +952,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetN ssName, err := extractScaleSetNameByProviderID(*ipConfigurations.ID) if err != nil { - glog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it", *ipConfigurations.ID) + klog.V(4).Infof("backend IP configuration %q is not belonging to any vmss, omit it", *ipConfigurations.ID) continue } @@ -970,7 +970,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetN err := ss.ensureScaleSetBackendPoolDeleted(service, poolID, ssName) if err != nil { - glog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err) + klog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err) return err } } diff --git a/pkg/cloudprovider/providers/azure/azure_vmss_cache.go b/pkg/cloudprovider/providers/azure/azure_vmss_cache.go index 73bc59ea4e777..a9a46ba703bfc 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss_cache.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss_cache.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" ) @@ -50,7 +50,7 @@ func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string { func extractVmssVMName(name string) (string, string, error) { split := strings.SplitAfter(name, vmssNameSeparator) if len(split) < 2 { - glog.V(3).Infof("Failed to extract vmssVMName %q", name) + klog.V(3).Infof("Failed to extract vmssVMName %q", name) return "", "", ErrorNotVmssInstance } @@ -74,7 +74,7 @@ func (ss *scaleSet) newVmssCache() (*timedCache, error) { } if !exists { - glog.V(2).Infof("Virtual 
machine scale set %q not found with message: %q", key, message) + klog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message) return nil, nil } @@ -107,7 +107,7 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) { for _, vm := range vms { if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil { - glog.Warningf("failed to get computerName for vmssVM (%q)", ssName) + klog.Warningf("failed to get computerName for vmssVM (%q)", ssName) continue } @@ -195,7 +195,7 @@ func (ss *scaleSet) newVmssVMCache() (*timedCache, error) { } if !exists { - glog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) + klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) return nil, nil } @@ -210,7 +210,7 @@ func (ss *scaleSet) newVmssVMCache() (*timedCache, error) { return nil, realErr } if !exists { - glog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) + klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) return nil, nil } diff --git a/pkg/cloudprovider/providers/azure/azure_wrap.go b/pkg/cloudprovider/providers/azure/azure_wrap.go index e2fa8f83d1378..cf3632956ab0c 100644 --- a/pkg/cloudprovider/providers/azure/azure_wrap.go +++ b/pkg/cloudprovider/providers/azure/azure_wrap.go @@ -26,9 +26,9 @@ import ( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" "github.com/Azure/go-autorest/autorest" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" ) var ( @@ -117,7 +117,7 @@ func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (pi } if !exists { - glog.V(2).Infof("Public IP %q not found with message: %q", pipName, message) + klog.V(2).Infof("Public IP %q not found with message: %q", pipName, message) return pip, false, nil } @@ -144,7 +144,7 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet } if !exists { - glog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message) + klog.V(2).Infof("Subnet %q not found with message: %q", subnetName, message) return subnet, false, nil } @@ -204,7 +204,7 @@ func (az *Cloud) newVMCache() (*timedCache, error) { } if !exists { - glog.V(2).Infof("Virtual machine %q not found with message: %q", key, message) + klog.V(2).Infof("Virtual machine %q not found with message: %q", key, message) return nil, nil } @@ -226,7 +226,7 @@ func (az *Cloud) newLBCache() (*timedCache, error) { } if !exists { - glog.V(2).Infof("Load balancer %q not found with message: %q", key, message) + klog.V(2).Infof("Load balancer %q not found with message: %q", key, message) return nil, nil } @@ -247,7 +247,7 @@ func (az *Cloud) newNSGCache() (*timedCache, error) { } if !exists { - glog.V(2).Infof("Security group %q not found with message: %q", key, message) + klog.V(2).Infof("Security group %q not found with message: %q", key, message) return nil, nil } @@ -268,7 +268,7 @@ func (az *Cloud) newRouteTableCache() (*timedCache, error) { } if !exists { - glog.V(2).Infof("Route table %q not found with message: %q", key, message) + klog.V(2).Infof("Route table %q not found with message: %q", key, message) return nil, nil } diff --git a/pkg/cloudprovider/providers/azure/azure_zones.go b/pkg/cloudprovider/providers/azure/azure_zones.go index ca8b0b517633c..3c2d84571af88 
100644 --- a/pkg/cloudprovider/providers/azure/azure_zones.go +++ b/pkg/cloudprovider/providers/azure/azure_zones.go @@ -22,9 +22,9 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" ) // makeZone returns the zone value in format of <region>-<zoneID>. @@ -66,7 +66,7 @@ func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { } zone = az.makeZone(zoneID) } else { - glog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain") + klog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain") zone = metadata.Compute.FaultDomain } @@ -82,7 +82,7 @@ func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) { // Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them. if az.IsNodeUnmanagedByProviderID(providerID) { - glog.V(2).Infof("GetZoneByProviderID: omitting unmanaged node %q", providerID) + klog.V(2).Infof("GetZoneByProviderID: omitting unmanaged node %q", providerID) return cloudprovider.Zone{}, nil } @@ -104,7 +104,7 @@ func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) return cloudprovider.Zone{}, err } if unmanaged { - glog.V(2).Infof("GetZoneByNodeName: omitting unmanaged node %q", nodeName) + klog.V(2).Infof("GetZoneByNodeName: omitting unmanaged node %q", nodeName) return cloudprovider.Zone{}, nil } diff --git a/pkg/cloudprovider/providers/cloudstack/BUILD b/pkg/cloudprovider/providers/cloudstack/BUILD index 1e519fcde5868..65cd15f25721a 100644 --- a/pkg/cloudprovider/providers/cloudstack/BUILD +++ b/pkg/cloudprovider/providers/cloudstack/BUILD @@ -22,10 +22,10 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/github.com/d2g/dhcp4:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/kardianos/osext:go_default_library", "//vendor/github.com/xanzy/go-cloudstack/cloudstack:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ "//vendor/github.com/d2g/dhcp4client:go_default_library", diff --git a/pkg/cloudprovider/providers/cloudstack/cloudstack.go b/pkg/cloudprovider/providers/cloudstack/cloudstack.go index f0a08b77bd2cf..4769adb647f7f 100644 --- a/pkg/cloudprovider/providers/cloudstack/cloudstack.go +++ b/pkg/cloudprovider/providers/cloudstack/cloudstack.go @@ -24,12 +24,12 @@ import ( "os" "path/filepath" - "github.com/golang/glog" "github.com/kardianos/osext" "github.com/xanzy/go-cloudstack/cloudstack" "gopkg.in/gcfg.v1" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" ) // ProviderName is the name of this cloud provider. @@ -98,10 +98,10 @@ func newCSCloud(cfg *CSConfig) (*CSCloud, error) { // In CloudStack your metadata is always served by the DHCP server.
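For context on the file that begins here: the CloudStack provider reads its cloud config through gopkg.in/gcfg.v1 (imported above) before deciding whether to fall back to the DHCP-served metadata. A rough, self-contained sketch of that style of parsing; the section name and field tags below are illustrative guesses, not the provider's actual CSConfig:

package main

import (
	"fmt"

	"gopkg.in/gcfg.v1"
)

// config mimics a gcfg-backed cloud config; tags map INI-style keys to fields.
type config struct {
	Global struct {
		APIURL    string `gcfg:"api-url"`
		APIKey    string `gcfg:"api-key"`
		SecretKey string `gcfg:"secret-key"`
	}
}

func main() {
	ini := "[Global]\napi-url = https://cloud.example.com/client/api\napi-key = KEY\nsecret-key = SECRET\n"
	var cfg config
	if err := gcfg.ReadStringInto(&cfg, ini); err != nil {
		fmt.Println("cannot parse config:", err)
		return
	}
	fmt.Println(cfg.Global.APIURL) // unset keys are left at their zero values
}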
dhcpServer, err := findDHCPServer() if err == nil { - glog.V(4).Infof("Found metadata server: %v", dhcpServer) + klog.V(4).Infof("Found metadata server: %v", dhcpServer) cs.metadata = &metadata{dhcpServer: dhcpServer, zone: cs.zone} } else { - glog.Errorf("Error searching metadata server: %v", err) + klog.Errorf("Error searching metadata server: %v", err) } } @@ -111,7 +111,7 @@ func newCSCloud(cfg *CSConfig) (*CSCloud, error) { if cs.client == nil { if cs.metadata != nil { - glog.V(2).Infof("No API URL, key and secret are provided, so only using metadata!") + klog.V(2).Infof("No API URL, key and secret are provided, so only using metadata!") } else { return nil, errors.New("no cloud provider config given") } @@ -208,7 +208,7 @@ func (cs *CSCloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { cs.zone = instance.Zonename } - glog.V(2).Infof("Current zone is %v", cs.zone) + klog.V(2).Infof("Current zone is %v", cs.zone) zone.FailureDomain = cs.zone zone.Region = cs.zone @@ -230,7 +230,7 @@ func (cs *CSCloud) GetZoneByProviderID(ctx context.Context, providerID string) ( return zone, fmt.Errorf("error retrieving zone: %v", err) } - glog.V(2).Infof("Current zone is %v", cs.zone) + klog.V(2).Infof("Current zone is %v", cs.zone) zone.FailureDomain = instance.Zonename zone.Region = instance.Zonename @@ -252,7 +252,7 @@ func (cs *CSCloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeNam return zone, fmt.Errorf("error retrieving zone: %v", err) } - glog.V(2).Infof("Current zone is %v", cs.zone) + klog.V(2).Infof("Current zone is %v", cs.zone) zone.FailureDomain = instance.Zonename zone.Region = instance.Zonename diff --git a/pkg/cloudprovider/providers/cloudstack/cloudstack_instances.go b/pkg/cloudprovider/providers/cloudstack/cloudstack_instances.go index e318b7837f482..55a81bdaa6f7d 100644 --- a/pkg/cloudprovider/providers/cloudstack/cloudstack_instances.go +++ b/pkg/cloudprovider/providers/cloudstack/cloudstack_instances.go @@ -21,11 +21,11 @@ import ( "errors" "fmt" - "github.com/golang/glog" "github.com/xanzy/go-cloudstack/cloudstack" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" ) // NodeAddresses returns the addresses of the specified instance. @@ -78,7 +78,7 @@ func (cs *CSCloud) nodeAddresses(instance *cloudstack.VirtualMachine) ([]v1.Node } else { // Since there is no sane way to determine the external IP if the host isn't // using static NAT, we will just fire a log message and omit the external IP. - glog.V(4).Infof("Could not determine the public IP of host %v (%v)", instance.Name, instance.Id) + klog.V(4).Infof("Could not determine the public IP of host %v (%v)", instance.Name, instance.Id) } return addresses, nil diff --git a/pkg/cloudprovider/providers/cloudstack/cloudstack_loadbalancer.go b/pkg/cloudprovider/providers/cloudstack/cloudstack_loadbalancer.go index c561b4ab33379..64971d02cf237 100644 --- a/pkg/cloudprovider/providers/cloudstack/cloudstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/cloudstack/cloudstack_loadbalancer.go @@ -21,8 +21,8 @@ import ( "fmt" "strconv" - "github.com/golang/glog" "github.com/xanzy/go-cloudstack/cloudstack" + "k8s.io/klog" "k8s.io/api/core/v1" cloudprovider "k8s.io/cloud-provider" @@ -43,7 +43,7 @@ type loadBalancer struct { // GetLoadBalancer returns whether the specified load balancer exists, and if so, what its status is. 
func (cs *CSCloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) { - glog.V(4).Infof("GetLoadBalancer(%v, %v, %v)", clusterName, service.Namespace, service.Name) + klog.V(4).Infof("GetLoadBalancer(%v, %v, %v)", clusterName, service.Namespace, service.Name) // Get the load balancer details and existing rules. lb, err := cs.getLoadBalancer(service) @@ -56,7 +56,7 @@ func (cs *CSCloud) GetLoadBalancer(ctx context.Context, clusterName string, serv return nil, false, nil } - glog.V(4).Infof("Found a load balancer associated with IP %v", lb.ipAddr) + klog.V(4).Infof("Found a load balancer associated with IP %v", lb.ipAddr) status := &v1.LoadBalancerStatus{} status.Ingress = append(status.Ingress, v1.LoadBalancerIngress{IP: lb.ipAddr}) @@ -66,7 +66,7 @@ func (cs *CSCloud) GetLoadBalancer(ctx context.Context, clusterName string, serv // EnsureLoadBalancer creates a new load balancer, or updates the existing one. Returns the status of the balancer. func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (status *v1.LoadBalancerStatus, err error) { - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v)", clusterName, service.Namespace, service.Name, service.Spec.LoadBalancerIP, service.Spec.Ports, nodes) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v)", clusterName, service.Namespace, service.Name, service.Spec.LoadBalancerIP, service.Spec.Ports, nodes) if len(service.Spec.Ports) == 0 { return nil, fmt.Errorf("requested load balancer with no ports") @@ -104,14 +104,14 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s defer func(lb *loadBalancer) { if err != nil { if err := lb.releaseLoadBalancerIP(); err != nil { - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) } } }(lb) } } - glog.V(4).Infof("Load balancer %v is associated with IP %v", lb.name, lb.ipAddr) + klog.V(4).Infof("Load balancer %v is associated with IP %v", lb.name, lb.ipAddr) for _, port := range service.Spec.Ports { // All ports have their own load balancer rule, so add the port to lbName to keep the names unique. @@ -123,14 +123,14 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s return nil, err } if exists && !needsUpdate { - glog.V(4).Infof("Load balancer rule %v is up-to-date", lbRuleName) + klog.V(4).Infof("Load balancer rule %v is up-to-date", lbRuleName) // Delete the rule from the map, to prevent it being deleted. 
delete(lb.rules, lbRuleName) continue } if needsUpdate { - glog.V(4).Infof("Updating load balancer rule: %v", lbRuleName) + klog.V(4).Infof("Updating load balancer rule: %v", lbRuleName) if err := lb.updateLoadBalancerRule(lbRuleName); err != nil { return nil, err } @@ -139,13 +139,13 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s continue } - glog.V(4).Infof("Creating load balancer rule: %v", lbRuleName) + klog.V(4).Infof("Creating load balancer rule: %v", lbRuleName) lbRule, err := lb.createLoadBalancerRule(lbRuleName, port) if err != nil { return nil, err } - glog.V(4).Infof("Assigning hosts (%v) to load balancer rule: %v", lb.hostIDs, lbRuleName) + klog.V(4).Infof("Assigning hosts (%v) to load balancer rule: %v", lb.hostIDs, lbRuleName) if err = lb.assignHostsToRule(lbRule, lb.hostIDs); err != nil { return nil, err } @@ -154,7 +154,7 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s // Cleanup any rules that are now still in the rules map, as they are no longer needed. for _, lbRule := range lb.rules { - glog.V(4).Infof("Deleting obsolete load balancer rule: %v", lbRule.Name) + klog.V(4).Infof("Deleting obsolete load balancer rule: %v", lbRule.Name) if err := lb.deleteLoadBalancerRule(lbRule); err != nil { return nil, err } @@ -168,7 +168,7 @@ func (cs *CSCloud) EnsureLoadBalancer(ctx context.Context, clusterName string, s // UpdateLoadBalancer updates hosts under the specified load balancer. func (cs *CSCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error { - glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v)", clusterName, service.Namespace, service.Name, nodes) + klog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v)", clusterName, service.Namespace, service.Name, nodes) // Get the load balancer details and existing rules. lb, err := cs.getLoadBalancer(service) @@ -194,14 +194,14 @@ func (cs *CSCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, s assign, remove := symmetricDifference(lb.hostIDs, l.LoadBalancerRuleInstances) if len(assign) > 0 { - glog.V(4).Infof("Assigning new hosts (%v) to load balancer rule: %v", assign, lbRule.Name) + klog.V(4).Infof("Assigning new hosts (%v) to load balancer rule: %v", assign, lbRule.Name) if err := lb.assignHostsToRule(lbRule, assign); err != nil { return err } } if len(remove) > 0 { - glog.V(4).Infof("Removing old hosts (%v) from load balancer rule: %v", assign, lbRule.Name) + klog.V(4).Infof("Removing old hosts (%v) from load balancer rule: %v", remove, lbRule.Name) if err := lb.removeHostsFromRule(lbRule, remove); err != nil { return err } @@ -214,7 +214,7 @@ func (cs *CSCloud) UpdateLoadBalancer(ctx context.Context, clusterName string, s // EnsureLoadBalancerDeleted deletes the specified load balancer if it exists, returning // nil if the load balancer specified either didn't exist or was successfully deleted. func (cs *CSCloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { - glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v)", clusterName, service.Namespace, service.Name) + klog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v)", clusterName, service.Namespace, service.Name) // Get the load balancer details and existing rules.
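The EnsureLoadBalancer hunks above touch every step of a mark-and-sweep reconcile: getLoadBalancer loads all existing rules into lb.rules, each rule that is still wanted is deleted from that map as it is handled, and whatever remains afterwards is obsolete and removed. A compact sketch of that shape, with hypothetical names:

    // reconcileRules mirrors the mark-and-sweep above: existing is mutated,
    // and anything left in it after the desired pass is swept away.
    func reconcileRules(existing map[string]bool, desired []string,
        create func(string), remove func(string)) {
        for _, name := range desired {
            if existing[name] {
                delete(existing, name) // still wanted; protect from the sweep
                continue
            }
            create(name)
        }
        for name := range existing {
            remove(name) // no desired port references this rule any more
        }
    }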
lb, err := cs.getLoadBalancer(service) @@ -223,14 +223,14 @@ func (cs *CSCloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName st } for _, lbRule := range lb.rules { - glog.V(4).Infof("Deleting load balancer rule: %v", lbRule.Name) + klog.V(4).Infof("Deleting load balancer rule: %v", lbRule.Name) if err := lb.deleteLoadBalancerRule(lbRule); err != nil { return err } } if lb.ipAddr != "" && lb.ipAddr != service.Spec.LoadBalancerIP { - glog.V(4).Infof("Releasing load balancer IP: %v", lb.ipAddr) + klog.V(4).Infof("Releasing load balancer IP: %v", lb.ipAddr) if err := lb.releaseLoadBalancerIP(); err != nil { return err } @@ -270,14 +270,14 @@ func (cs *CSCloud) getLoadBalancer(service *v1.Service) (*loadBalancer, error) { lb.rules[lbRule.Name] = lbRule if lb.ipAddr != "" && lb.ipAddr != lbRule.Publicip { - glog.Warningf("Load balancer for service %v/%v has rules associated with different IP's: %v, %v", service.Namespace, service.Name, lb.ipAddr, lbRule.Publicip) + klog.Warningf("Load balancer for service %v/%v has rules associated with different IPs: %v, %v", service.Namespace, service.Name, lb.ipAddr, lbRule.Publicip) } lb.ipAddr = lbRule.Publicip lb.ipAddrID = lbRule.Publicipid } - glog.V(4).Infof("Load balancer %v contains %d rule(s)", lb.name, len(lb.rules)) + klog.V(4).Infof("Load balancer %v contains %d rule(s)", lb.name, len(lb.rules)) return lb, nil } @@ -335,7 +335,7 @@ func (lb *loadBalancer) getLoadBalancerIP(loadBalancerIP string) error { // getPublicIPAddressID retrieves the ID of the given IP, and sets the address and it's ID. func (lb *loadBalancer) getPublicIPAddress(loadBalancerIP string) error { - glog.V(4).Infof("Retrieve load balancer IP details: %v", loadBalancerIP) + klog.V(4).Infof("Retrieve load balancer IP details: %v", loadBalancerIP) p := lb.Address.NewListPublicIpAddressesParams() p.SetIpaddress(loadBalancerIP) @@ -362,7 +362,7 @@ func (lb *loadBalancer) getPublicIPAddress(loadBalancerIP string) error { // associatePublicIPAddress associates a new IP and sets the address and it's ID. func (lb *loadBalancer) associatePublicIPAddress() error { - glog.V(4).Infof("Allocate new IP for load balancer: %v", lb.name) + klog.V(4).Infof("Allocate new IP for load balancer: %v", lb.name) // If a network belongs to a VPC, the IP address needs to be associated with // the VPC instead of with the network.
network, count, err := lb.Network.GetNetworkByID(lb.networkID, cloudstack.WithProject(lb.projectID)) diff --git a/pkg/cloudprovider/providers/cloudstack/metadata.go b/pkg/cloudprovider/providers/cloudstack/metadata.go index 1c75b10794127..f2decccdf862b 100644 --- a/pkg/cloudprovider/providers/cloudstack/metadata.go +++ b/pkg/cloudprovider/providers/cloudstack/metadata.go @@ -25,10 +25,10 @@ import ( "net/http" "github.com/d2g/dhcp4" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" ) type metadata struct { @@ -143,7 +143,7 @@ func (m *metadata) GetZone(ctx context.Context) (cloudprovider.Zone, error) { m.zone = zoneName } - glog.V(2).Infof("Current zone is %v", zone) + klog.V(2).Infof("Current zone is %v", zone) zone.FailureDomain = m.zone zone.Region = m.zone diff --git a/pkg/cloudprovider/providers/gce/BUILD b/pkg/cloudprovider/providers/gce/BUILD index d648f30c13c6a..0e59056470744 100644 --- a/pkg/cloudprovider/providers/gce/BUILD +++ b/pkg/cloudprovider/providers/gce/BUILD @@ -80,7 +80,6 @@ go_library( "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/cloud.google.com/go/compute/metadata:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/golang.org/x/oauth2/google:go_default_library", @@ -91,6 +90,7 @@ go_library( "//vendor/google.golang.org/api/googleapi:go_default_library", "//vendor/google.golang.org/api/tpu/v1:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/cloudprovider/providers/gce/cloud/BUILD b/pkg/cloudprovider/providers/gce/cloud/BUILD index de46184e547a8..e3c289daa6fae 100644 --- a/pkg/cloudprovider/providers/gce/cloud/BUILD +++ b/pkg/cloudprovider/providers/gce/cloud/BUILD @@ -19,11 +19,11 @@ go_library( deps = [ "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library", "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/api/compute/v0.alpha:go_default_library", "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", "//vendor/google.golang.org/api/compute/v1:go_default_library", "//vendor/google.golang.org/api/googleapi:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/cloudprovider/providers/gce/cloud/filter/BUILD b/pkg/cloudprovider/providers/gce/cloud/filter/BUILD index a2e0c7941cf7f..0932666c50bd6 100644 --- a/pkg/cloudprovider/providers/gce/cloud/filter/BUILD +++ b/pkg/cloudprovider/providers/gce/cloud/filter/BUILD @@ -5,7 +5,7 @@ go_library( srcs = ["filter.go"], importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) go_test( diff --git a/pkg/cloudprovider/providers/gce/cloud/filter/filter.go b/pkg/cloudprovider/providers/gce/cloud/filter/filter.go index c08005726c879..b65ab6391a7d0 100644 --- a/pkg/cloudprovider/providers/gce/cloud/filter/filter.go +++ b/pkg/cloudprovider/providers/gce/cloud/filter/filter.go @@ -34,7 +34,7 @@ import ( "regexp" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ 
-221,7 +221,7 @@ func (fp *filterPredicate) String() string { func (fp *filterPredicate) match(o interface{}) bool { v, err := extractValue(fp.fieldName, o) - glog.V(6).Infof("extractValue(%q, %#v) = %v, %v", fp.fieldName, o, v, err) + klog.V(6).Infof("extractValue(%q, %#v) = %v, %v", fp.fieldName, o, v, err) if err != nil { return false } @@ -234,7 +234,7 @@ func (fp *filterPredicate) match(o interface{}) bool { } re, err := regexp.Compile(*fp.s) if err != nil { - glog.Errorf("Match regexp %q is invalid: %v", *fp.s, err) + klog.Errorf("Match regexp %q is invalid: %v", *fp.s, err) return false } match = re.Match([]byte(x)) diff --git a/pkg/cloudprovider/providers/gce/cloud/gen.go b/pkg/cloudprovider/providers/gce/cloud/gen.go index efbc4b7bae0ba..a3e7cd2c657a5 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen.go @@ -25,8 +25,8 @@ import ( "net/http" "sync" - "github.com/golang/glog" "google.golang.org/api/googleapi" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" @@ -649,7 +649,7 @@ func (m *MockAddressesObj) ToAlpha() *alpha.Address { // Convert the object via JSON copying to the type that was requested. ret := &alpha.Address{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.Address via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.Address via JSON: %v", m.Obj, err) } return ret } @@ -662,7 +662,7 @@ func (m *MockAddressesObj) ToBeta() *beta.Address { // Convert the object via JSON copying to the type that was requested. ret := &beta.Address{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.Address via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.Address via JSON: %v", m.Obj, err) } return ret } @@ -675,7 +675,7 @@ func (m *MockAddressesObj) ToGA() *ga.Address { // Convert the object via JSON copying to the type that was requested. ret := &ga.Address{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) } return ret } @@ -695,7 +695,7 @@ func (m *MockBackendServicesObj) ToAlpha() *alpha.BackendService { // Convert the object via JSON copying to the type that was requested. ret := &alpha.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -708,7 +708,7 @@ func (m *MockBackendServicesObj) ToBeta() *beta.BackendService { // Convert the object via JSON copying to the type that was requested. ret := &beta.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -721,7 +721,7 @@ func (m *MockBackendServicesObj) ToGA() *ga.BackendService { // Convert the object via JSON copying to the type that was requested. 
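The Mock*Obj.To{Alpha,Beta,GA} methods here and below all convert between API versions by round-tripping through JSON. A sketch of what such a helper looks like (convertViaJSON is a hypothetical stand-in for the package's copyViaJSON, whose body is not shown in this patch):

    import "encoding/json"

    // convertViaJSON marshals the source version of an object and unmarshals
    // it into the destination version. Fields whose JSON names match carry
    // over; fields that exist in only one API version are silently dropped.
    func convertViaJSON(dest, src interface{}) error {
        b, err := json.Marshal(src)
        if err != nil {
            return err
        }
        return json.Unmarshal(b, dest)
    }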
ret := &ga.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -741,7 +741,7 @@ func (m *MockDisksObj) ToGA() *ga.Disk { // Convert the object via JSON copying to the type that was requested. ret := &ga.Disk{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err) } return ret } @@ -761,7 +761,7 @@ func (m *MockFirewallsObj) ToGA() *ga.Firewall { // Convert the object via JSON copying to the type that was requested. ret := &ga.Firewall{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Firewall via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Firewall via JSON: %v", m.Obj, err) } return ret } @@ -781,7 +781,7 @@ func (m *MockForwardingRulesObj) ToAlpha() *alpha.ForwardingRule { // Convert the object via JSON copying to the type that was requested. ret := &alpha.ForwardingRule{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.ForwardingRule via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.ForwardingRule via JSON: %v", m.Obj, err) } return ret } @@ -794,7 +794,7 @@ func (m *MockForwardingRulesObj) ToGA() *ga.ForwardingRule { // Convert the object via JSON copying to the type that was requested. ret := &ga.ForwardingRule{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) } return ret } @@ -814,7 +814,7 @@ func (m *MockGlobalAddressesObj) ToGA() *ga.Address { // Convert the object via JSON copying to the type that was requested. ret := &ga.Address{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Address via JSON: %v", m.Obj, err) } return ret } @@ -834,7 +834,7 @@ func (m *MockGlobalForwardingRulesObj) ToGA() *ga.ForwardingRule { // Convert the object via JSON copying to the type that was requested. ret := &ga.ForwardingRule{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.ForwardingRule via JSON: %v", m.Obj, err) } return ret } @@ -854,7 +854,7 @@ func (m *MockHealthChecksObj) ToAlpha() *alpha.HealthCheck { // Convert the object via JSON copying to the type that was requested. ret := &alpha.HealthCheck{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.HealthCheck via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.HealthCheck via JSON: %v", m.Obj, err) } return ret } @@ -867,7 +867,7 @@ func (m *MockHealthChecksObj) ToBeta() *beta.HealthCheck { // Convert the object via JSON copying to the type that was requested. 
ret := &beta.HealthCheck{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.HealthCheck via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.HealthCheck via JSON: %v", m.Obj, err) } return ret } @@ -880,7 +880,7 @@ func (m *MockHealthChecksObj) ToGA() *ga.HealthCheck { // Convert the object via JSON copying to the type that was requested. ret := &ga.HealthCheck{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.HealthCheck via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.HealthCheck via JSON: %v", m.Obj, err) } return ret } @@ -900,7 +900,7 @@ func (m *MockHttpHealthChecksObj) ToGA() *ga.HttpHealthCheck { // Convert the object via JSON copying to the type that was requested. ret := &ga.HttpHealthCheck{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.HttpHealthCheck via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.HttpHealthCheck via JSON: %v", m.Obj, err) } return ret } @@ -920,7 +920,7 @@ func (m *MockHttpsHealthChecksObj) ToGA() *ga.HttpsHealthCheck { // Convert the object via JSON copying to the type that was requested. ret := &ga.HttpsHealthCheck{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.HttpsHealthCheck via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.HttpsHealthCheck via JSON: %v", m.Obj, err) } return ret } @@ -940,7 +940,7 @@ func (m *MockInstanceGroupsObj) ToGA() *ga.InstanceGroup { // Convert the object via JSON copying to the type that was requested. ret := &ga.InstanceGroup{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.InstanceGroup via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.InstanceGroup via JSON: %v", m.Obj, err) } return ret } @@ -960,7 +960,7 @@ func (m *MockInstancesObj) ToAlpha() *alpha.Instance { // Convert the object via JSON copying to the type that was requested. ret := &alpha.Instance{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.Instance via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.Instance via JSON: %v", m.Obj, err) } return ret } @@ -973,7 +973,7 @@ func (m *MockInstancesObj) ToBeta() *beta.Instance { // Convert the object via JSON copying to the type that was requested. ret := &beta.Instance{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.Instance via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.Instance via JSON: %v", m.Obj, err) } return ret } @@ -986,7 +986,7 @@ func (m *MockInstancesObj) ToGA() *ga.Instance { // Convert the object via JSON copying to the type that was requested. ret := &ga.Instance{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Instance via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Instance via JSON: %v", m.Obj, err) } return ret } @@ -1006,7 +1006,7 @@ func (m *MockNetworkEndpointGroupsObj) ToAlpha() *alpha.NetworkEndpointGroup { // Convert the object via JSON copying to the type that was requested. 
ret := &alpha.NetworkEndpointGroup{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.NetworkEndpointGroup via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.NetworkEndpointGroup via JSON: %v", m.Obj, err) } return ret } @@ -1019,7 +1019,7 @@ func (m *MockNetworkEndpointGroupsObj) ToBeta() *beta.NetworkEndpointGroup { // Convert the object via JSON copying to the type that was requested. ret := &beta.NetworkEndpointGroup{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.NetworkEndpointGroup via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.NetworkEndpointGroup via JSON: %v", m.Obj, err) } return ret } @@ -1039,7 +1039,7 @@ func (m *MockProjectsObj) ToGA() *ga.Project { // Convert the object via JSON copying to the type that was requested. ret := &ga.Project{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Project via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Project via JSON: %v", m.Obj, err) } return ret } @@ -1059,7 +1059,7 @@ func (m *MockRegionBackendServicesObj) ToAlpha() *alpha.BackendService { // Convert the object via JSON copying to the type that was requested. ret := &alpha.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *alpha.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -1072,7 +1072,7 @@ func (m *MockRegionBackendServicesObj) ToGA() *ga.BackendService { // Convert the object via JSON copying to the type that was requested. ret := &ga.BackendService{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.BackendService via JSON: %v", m.Obj, err) } return ret } @@ -1092,7 +1092,7 @@ func (m *MockRegionDisksObj) ToGA() *ga.Disk { // Convert the object via JSON copying to the type that was requested. ret := &ga.Disk{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Disk via JSON: %v", m.Obj, err) } return ret } @@ -1112,7 +1112,7 @@ func (m *MockRegionsObj) ToGA() *ga.Region { // Convert the object via JSON copying to the type that was requested. ret := &ga.Region{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Region via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Region via JSON: %v", m.Obj, err) } return ret } @@ -1132,7 +1132,7 @@ func (m *MockRoutesObj) ToGA() *ga.Route { // Convert the object via JSON copying to the type that was requested. ret := &ga.Route{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Route via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Route via JSON: %v", m.Obj, err) } return ret } @@ -1152,7 +1152,7 @@ func (m *MockSecurityPoliciesObj) ToBeta() *beta.SecurityPolicy { // Convert the object via JSON copying to the type that was requested. 
ret := &beta.SecurityPolicy{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *beta.SecurityPolicy via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *beta.SecurityPolicy via JSON: %v", m.Obj, err) } return ret } @@ -1172,7 +1172,7 @@ func (m *MockSslCertificatesObj) ToGA() *ga.SslCertificate { // Convert the object via JSON copying to the type that was requested. ret := &ga.SslCertificate{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.SslCertificate via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.SslCertificate via JSON: %v", m.Obj, err) } return ret } @@ -1192,7 +1192,7 @@ func (m *MockTargetHttpProxiesObj) ToGA() *ga.TargetHttpProxy { // Convert the object via JSON copying to the type that was requested. ret := &ga.TargetHttpProxy{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.TargetHttpProxy via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.TargetHttpProxy via JSON: %v", m.Obj, err) } return ret } @@ -1212,7 +1212,7 @@ func (m *MockTargetHttpsProxiesObj) ToGA() *ga.TargetHttpsProxy { // Convert the object via JSON copying to the type that was requested. ret := &ga.TargetHttpsProxy{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.TargetHttpsProxy via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.TargetHttpsProxy via JSON: %v", m.Obj, err) } return ret } @@ -1232,7 +1232,7 @@ func (m *MockTargetPoolsObj) ToGA() *ga.TargetPool { // Convert the object via JSON copying to the type that was requested. ret := &ga.TargetPool{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.TargetPool via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.TargetPool via JSON: %v", m.Obj, err) } return ret } @@ -1252,7 +1252,7 @@ func (m *MockUrlMapsObj) ToGA() *ga.UrlMap { // Convert the object via JSON copying to the type that was requested. ret := &ga.UrlMap{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.UrlMap via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.UrlMap via JSON: %v", m.Obj, err) } return ret } @@ -1272,7 +1272,7 @@ func (m *MockZonesObj) ToGA() *ga.Zone { // Convert the object via JSON copying to the type that was requested. 
ret := &ga.Zone{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *ga.Zone via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *ga.Zone via JSON: %v", m.Obj, err) } return ret } @@ -1332,7 +1332,7 @@ type MockAddresses struct { func (m *MockAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -1344,12 +1344,12 @@ func (m *MockAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, er defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -1357,7 +1357,7 @@ func (m *MockAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, er Code: http.StatusNotFound, Message: fmt.Sprintf("MockAddresses %v not found", key), } - glog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -1365,7 +1365,7 @@ func (m *MockAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, er func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -1375,7 +1375,7 @@ func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) ( if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -1391,7 +1391,7 @@ func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) ( objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -1399,7 +1399,7 @@ func (m *MockAddresses) List(ctx context.Context, region string, fl *filter.F) ( func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -1411,7 +1411,7 @@ func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addre defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return 
err } if _, ok := m.Objects[*key]; ok { @@ -1419,7 +1419,7 @@ func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addre Code: http.StatusConflict, Message: fmt.Sprintf("MockAddresses %v exists", key), } - glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -1428,7 +1428,7 @@ func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addre obj.SelfLink = SelfLink(meta.VersionGA, projectID, "addresses", key) m.Objects[*key] = &MockAddressesObj{obj} - glog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -1436,7 +1436,7 @@ func (m *MockAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addre func (m *MockAddresses) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -1448,7 +1448,7 @@ func (m *MockAddresses) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -1456,12 +1456,12 @@ func (m *MockAddresses) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockAddresses %v not found", key), } - glog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAddresses.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAddresses.Delete(%v, %v) = nil", ctx, key) return nil } @@ -1477,10 +1477,10 @@ type GCEAddresses struct { // Get the Address named by key. func (g *GCEAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) { - glog.V(5).Infof("GCEAddresses.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAddresses.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") @@ -1490,21 +1490,21 @@ func (g *GCEAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, err Version: meta.Version("ga"), Service: "Addresses", } - glog.V(5).Infof("GCEAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Addresses.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Address objects. 
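Each Mock* method above follows the same shape: an optional per-method hook gets the first chance to intercept the call (useful for injecting errors or canned objects in tests), and only then does the default map-backed implementation run. Reduced to its skeleton, with hypothetical types:

    // mockStore sketches the hook pattern used by the Mock* types: a hook,
    // when set and when it reports intercept, fully replaces the default path.
    type mockStore struct {
        getHook func(key string) (value string, intercept bool)
        objects map[string]string
    }

    func (m *mockStore) Get(key string) (string, bool) {
        if m.getHook != nil {
            if v, intercept := m.getHook(key); intercept {
                return v, true // hook handled the call entirely
            }
        }
        v, ok := m.objects[key]
        return v, ok
    }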
func (g *GCEAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Address, error) { - glog.V(5).Infof("GCEAddresses.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEAddresses.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") rk := &RateLimitKey{ ProjectID: projectID, @@ -1515,30 +1515,30 @@ func (g *GCEAddresses) List(ctx context.Context, region string, fl *filter.F) ([ if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.GA.Addresses.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Address f := func(l *ga.AddressList) error { - glog.V(5).Infof("GCEAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -1546,9 +1546,9 @@ func (g *GCEAddresses) List(ctx context.Context, region string, fl *filter.F) ([ // Insert Address with key of value obj. func (g *GCEAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error { - glog.V(5).Infof("GCEAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") @@ -1558,9 +1558,9 @@ func (g *GCEAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addres Version: meta.Version("ga"), Service: "Addresses", } - glog.V(5).Infof("GCEAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -1569,20 +1569,20 @@ func (g *GCEAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Addres op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAddresses.Insert(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Address referenced by key. func (g *GCEAddresses) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAddresses.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAddresses.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Addresses") @@ -1592,9 +1592,9 @@ func (g *GCEAddresses) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Addresses", } - glog.V(5).Infof("GCEAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Addresses.Delete(projectID, key.Region, key.Name) @@ -1602,12 +1602,12 @@ func (g *GCEAddresses) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -1666,7 +1666,7 @@ type MockAlphaAddresses struct { func (m *MockAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Address, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -1678,12 +1678,12 @@ func (m *MockAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Add defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -1691,7 +1691,7 @@ func (m *MockAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Add Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaAddresses %v not found", key), } - glog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -1699,7 +1699,7 @@ func (m *MockAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Add func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) { if m.ListHook != nil { if intercept, 
objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -1709,7 +1709,7 @@ func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -1725,7 +1725,7 @@ func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockAlphaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -1733,7 +1733,7 @@ func (m *MockAlphaAddresses) List(ctx context.Context, region string, fl *filter func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alpha.Address) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -1745,7 +1745,7 @@ func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alp defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -1753,7 +1753,7 @@ func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alp Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaAddresses %v exists", key), } - glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -1762,7 +1762,7 @@ func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alp obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "addresses", key) m.Objects[*key] = &MockAddressesObj{obj} - glog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -1770,7 +1770,7 @@ func (m *MockAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alp func (m *MockAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -1782,7 +1782,7 @@ func (m *MockAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -1790,12 +1790,12 @@ func (m *MockAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { Code: 
http.StatusNotFound, Message: fmt.Sprintf("MockAlphaAddresses %v not found", key), } - glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaAddresses.Delete(%v, %v) = nil", ctx, key) return nil } @@ -1811,10 +1811,10 @@ type GCEAlphaAddresses struct { // Get the Address named by key. func (g *GCEAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Address, error) { - glog.V(5).Infof("GCEAlphaAddresses.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaAddresses.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") @@ -1824,21 +1824,21 @@ func (g *GCEAlphaAddresses) Get(ctx context.Context, key *meta.Key) (*alpha.Addr Version: meta.Version("alpha"), Service: "Addresses", } - glog.V(5).Infof("GCEAlphaAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.Addresses.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Address objects. func (g *GCEAlphaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.Address, error) { - glog.V(5).Infof("GCEAlphaAddresses.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEAlphaAddresses.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") rk := &RateLimitKey{ ProjectID: projectID, @@ -1849,30 +1849,30 @@ func (g *GCEAlphaAddresses) List(ctx context.Context, region string, fl *filter. if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.Alpha.Addresses.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.Address f := func(l *alpha.AddressList) error { - glog.V(5).Infof("GCEAlphaAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -1880,9 +1880,9 @@ func (g *GCEAlphaAddresses) List(ctx context.Context, region string, fl *filter. // Insert Address with key of value obj. func (g *GCEAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alpha.Address) error { - glog.V(5).Infof("GCEAlphaAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") @@ -1892,9 +1892,9 @@ func (g *GCEAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alph Version: meta.Version("alpha"), Service: "Addresses", } - glog.V(5).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -1903,20 +1903,20 @@ func (g *GCEAlphaAddresses) Insert(ctx context.Context, key *meta.Key, obj *alph op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Address referenced by key. 
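One quirk worth noting in the generated List methods: klog.V(n), like glog.V(n), is true whenever the effective verbosity is at least n, so in the `if klog.V(4) { ... } else if klog.V(5) { ... }` chains above the V(5) branch can never run (at -v=5, V(4) already matches). A sketch with the guards ordered most-verbose-first, which is presumably what was intended:

    import (
        "fmt"

        "k8s.io/klog"
    )

    // logAddresses gates the expensive per-item formatting on -v=5 and falls
    // back to a cheap count at -v=4; checking V(5) first keeps it reachable.
    func logAddresses(all []interface{}) {
        if klog.V(5) {
            var asStr []string
            for _, o := range all {
                asStr = append(asStr, fmt.Sprintf("%+v", o))
            }
            klog.V(5).Infof("List(...) = %v", asStr)
        } else if klog.V(4) {
            klog.V(4).Infof("List(...) = [%d items]", len(all))
        }
    }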
func (g *GCEAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaAddresses.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaAddresses.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Addresses") @@ -1926,9 +1926,9 @@ func (g *GCEAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("alpha"), Service: "Addresses", } - glog.V(5).Infof("GCEAlphaAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.Addresses.Delete(projectID, key.Region, key.Name) @@ -1936,12 +1936,12 @@ func (g *GCEAlphaAddresses) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -2000,7 +2000,7 @@ type MockBetaAddresses struct { func (m *MockBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Address, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -2012,12 +2012,12 @@ func (m *MockBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Addre defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -2025,7 +2025,7 @@ func (m *MockBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Addre Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaAddresses %v not found", key), } - glog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -2033,7 +2033,7 @@ func (m *MockBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Addre func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = 
[%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -2043,7 +2043,7 @@ func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter. if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -2059,7 +2059,7 @@ func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter. objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockBetaAddresses.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -2067,7 +2067,7 @@ func (m *MockBetaAddresses) List(ctx context.Context, region string, fl *filter. func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta.Address) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -2079,7 +2079,7 @@ func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -2087,7 +2087,7 @@ func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaAddresses %v exists", key), } - glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -2096,7 +2096,7 @@ func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "addresses", key) m.Objects[*key] = &MockAddressesObj{obj} - glog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -2104,7 +2104,7 @@ func (m *MockBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta func (m *MockBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -2116,7 +2116,7 @@ func (m *MockBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -2124,12 +2124,12 @@ func (m *MockBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaAddresses %v not found", key), } - glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } 
delete(m.Objects, *key) - glog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaAddresses.Delete(%v, %v) = nil", ctx, key) return nil } @@ -2145,10 +2145,10 @@ type GCEBetaAddresses struct { // Get the Address named by key. func (g *GCEBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Address, error) { - glog.V(5).Infof("GCEBetaAddresses.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaAddresses.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") @@ -2158,21 +2158,21 @@ func (g *GCEBetaAddresses) Get(ctx context.Context, key *meta.Key) (*beta.Addres Version: meta.Version("beta"), Service: "Addresses", } - glog.V(5).Infof("GCEBetaAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.Addresses.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Address objects. func (g *GCEBetaAddresses) List(ctx context.Context, region string, fl *filter.F) ([]*beta.Address, error) { - glog.V(5).Infof("GCEBetaAddresses.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEBetaAddresses.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") rk := &RateLimitKey{ ProjectID: projectID, @@ -2183,30 +2183,30 @@ func (g *GCEBetaAddresses) List(ctx context.Context, region string, fl *filter.F if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEBetaAddresses.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.Beta.Addresses.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.Address f := func(l *beta.AddressList) error { - glog.V(5).Infof("GCEBetaAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -2214,9 +2214,9 @@ func (g *GCEBetaAddresses) List(ctx context.Context, region string, fl *filter.F // Insert Address with key of value obj. func (g *GCEBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta.Address) error { - glog.V(5).Infof("GCEBetaAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") @@ -2226,9 +2226,9 @@ func (g *GCEBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta. Version: meta.Version("beta"), Service: "Addresses", } - glog.V(5).Infof("GCEBetaAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -2237,20 +2237,20 @@ func (g *GCEBetaAddresses) Insert(ctx context.Context, key *meta.Key, obj *beta. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Address referenced by key. 
func (g *GCEBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaAddresses.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaAddresses.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Addresses") @@ -2260,9 +2260,9 @@ func (g *GCEBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("beta"), Service: "Addresses", } - glog.V(5).Infof("GCEBetaAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.Addresses.Delete(projectID, key.Region, key.Name) @@ -2270,12 +2270,12 @@ func (g *GCEBetaAddresses) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -2334,7 +2334,7 @@ type MockGlobalAddresses struct { func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -2346,12 +2346,12 @@ func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addre defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -2359,7 +2359,7 @@ func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addre Code: http.StatusNotFound, Message: fmt.Sprintf("MockGlobalAddresses %v not found", key), } - glog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -2367,7 +2367,7 @@ func (m *MockGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addre func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], %v", ctx, fl, 
len(objs), err) return objs, err } } @@ -2377,7 +2377,7 @@ func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Add if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -2390,7 +2390,7 @@ func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Add objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockGlobalAddresses.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -2398,7 +2398,7 @@ func (m *MockGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Add func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -2410,7 +2410,7 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -2418,7 +2418,7 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga Code: http.StatusConflict, Message: fmt.Sprintf("MockGlobalAddresses %v exists", key), } - glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -2427,7 +2427,7 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga obj.SelfLink = SelfLink(meta.VersionGA, projectID, "addresses", key) m.Objects[*key] = &MockGlobalAddressesObj{obj} - glog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockGlobalAddresses.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -2435,7 +2435,7 @@ func (m *MockGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga func (m *MockGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -2447,7 +2447,7 @@ func (m *MockGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -2455,12 +2455,12 @@ func (m *MockGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockGlobalAddresses %v not found", key), } - glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - 
glog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockGlobalAddresses.Delete(%v, %v) = nil", ctx, key) return nil } @@ -2476,10 +2476,10 @@ type GCEGlobalAddresses struct { // Get the Address named by key. func (g *GCEGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Address, error) { - glog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEGlobalAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalAddresses.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") @@ -2489,21 +2489,21 @@ func (g *GCEGlobalAddresses) Get(ctx context.Context, key *meta.Key) (*ga.Addres Version: meta.Version("ga"), Service: "GlobalAddresses", } - glog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalAddresses.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.GlobalAddresses.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEGlobalAddresses.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Address objects. func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Address, error) { - glog.V(5).Infof("GCEGlobalAddresses.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEGlobalAddresses.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") rk := &RateLimitKey{ ProjectID: projectID, @@ -2514,30 +2514,30 @@ func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Addr if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEGlobalAddresses.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEGlobalAddresses.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.GlobalAddresses.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Address f := func(l *ga.AddressList) error { - glog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEGlobalAddresses.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEGlobalAddresses.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -2545,9 +2545,9 @@ func (g *GCEGlobalAddresses) List(ctx context.Context, fl *filter.F) ([]*ga.Addr // Insert Address with key of value obj. func (g *GCEGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga.Address) error { - glog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") @@ -2557,9 +2557,9 @@ func (g *GCEGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga. Version: meta.Version("ga"), Service: "GlobalAddresses", } - glog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -2568,20 +2568,20 @@ func (g *GCEGlobalAddresses) Insert(ctx context.Context, key *meta.Key, obj *ga. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEGlobalAddresses.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Address referenced by key. 
func (g *GCEGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEGlobalAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEGlobalAddresses.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalAddresses") @@ -2591,9 +2591,9 @@ func (g *GCEGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "GlobalAddresses", } - glog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEGlobalAddresses.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.GlobalAddresses.Delete(projectID, key.Name) @@ -2602,12 +2602,12 @@ func (g *GCEGlobalAddresses) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEGlobalAddresses.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -2672,7 +2672,7 @@ type MockBackendServices struct { func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -2684,12 +2684,12 @@ func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backe defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -2697,7 +2697,7 @@ func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backe Code: http.StatusNotFound, Message: fmt.Sprintf("MockBackendServices %v not found", key), } - glog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -2705,7 +2705,7 @@ func (m *MockBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backe func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + 
klog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -2715,7 +2715,7 @@ func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Bac if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -2728,7 +2728,7 @@ func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Bac objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -2736,7 +2736,7 @@ func (m *MockBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Bac func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -2748,7 +2748,7 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -2756,7 +2756,7 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga Code: http.StatusConflict, Message: fmt.Sprintf("MockBackendServices %v exists", key), } - glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -2765,7 +2765,7 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga obj.SelfLink = SelfLink(meta.VersionGA, projectID, "backendServices", key) m.Objects[*key] = &MockBackendServicesObj{obj} - glog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -2773,7 +2773,7 @@ func (m *MockBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga func (m *MockBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -2785,7 +2785,7 @@ func (m *MockBackendServices) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -2793,12 +2793,12 @@ func (m *MockBackendServices) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockBackendServices %v not found", key), } - glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) + 
klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -2838,10 +2838,10 @@ type GCEBackendServices struct { // Get the BackendService named by key. func (g *GCEBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { - glog.V(5).Infof("GCEBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -2851,21 +2851,21 @@ func (g *GCEBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.Backen Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.BackendServices.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.BackendService, error) { - glog.V(5).Infof("GCEBackendServices.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBackendServices.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -2876,30 +2876,30 @@ func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Back if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.BackendServices.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.BackendService f := func(l *ga.BackendServiceList) error { - glog.V(5).Infof("GCEBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -2907,9 +2907,9 @@ func (g *GCEBackendServices) List(ctx context.Context, fl *filter.F) ([]*ga.Back // Insert BackendService with key of value obj. func (g *GCEBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { - glog.V(5).Infof("GCEBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -2919,9 +2919,9 @@ func (g *GCEBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga. Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -2930,20 +2930,20 @@ func (g *GCEBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
func (g *GCEBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -2953,9 +2953,9 @@ func (g *GCEBackendServices) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.BackendServices.Delete(projectID, key.Name) @@ -2964,21 +2964,21 @@ func (g *GCEBackendServices) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // GetHealth is a method on GCEBackendServices. func (g *GCEBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { - glog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -2988,25 +2988,25 @@ func (g *GCEBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.BackendServices.GetHealth(projectID, key.Name, arg0) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } // Patch is a method on GCEBackendServices. 
func (g *GCEBackendServices) Patch(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - glog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -3016,30 +3016,30 @@ func (g *GCEBackendServices) Patch(ctx context.Context, key *meta.Key, arg0 *ga. Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.BackendServices.Patch(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Patch(%v, %v, ...) = %+v", ctx, key, err) return err } // Update is a method on GCEBackendServices. func (g *GCEBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - glog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "BackendServices") @@ -3049,21 +3049,21 @@ func (g *GCEBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga Version: meta.Version("ga"), Service: "BackendServices", } - glog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.BackendServices.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -3126,7 +3126,7 @@ type MockBetaBackendServices struct { func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -3138,12 +3138,12 @@ func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -3151,7 +3151,7 @@ func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaBackendServices %v not found", key), } - glog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -3159,7 +3159,7 @@ func (m *MockBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*beta.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -3169,7 +3169,7 @@ func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*be if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -3182,7 +3182,7 @@ func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*be objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -3190,7 +3190,7 @@ func (m *MockBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*be func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -3202,7 +3202,7 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := 
m.Objects[*key]; ok { @@ -3210,7 +3210,7 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaBackendServices %v exists", key), } - glog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -3219,7 +3219,7 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "backendServices", key) m.Objects[*key] = &MockBackendServicesObj{obj} - glog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -3227,7 +3227,7 @@ func (m *MockBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj func (m *MockBetaBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -3239,7 +3239,7 @@ func (m *MockBetaBackendServices) Delete(ctx context.Context, key *meta.Key) err defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -3247,12 +3247,12 @@ func (m *MockBetaBackendServices) Delete(ctx context.Context, key *meta.Key) err Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaBackendServices %v not found", key), } - glog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -3284,10 +3284,10 @@ type GCEBetaBackendServices struct { // Get the BackendService named by key. func (g *GCEBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta.BackendService, error) { - glog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3297,21 +3297,21 @@ func (g *GCEBetaBackendServices) Get(ctx context.Context, key *meta.Key) (*beta. 
Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.BackendServices.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCEBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*beta.BackendService, error) { - glog.V(5).Infof("GCEBetaBackendServices.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -3322,30 +3322,30 @@ func (g *GCEBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*bet if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Beta.BackendServices.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.BackendService f := func(l *beta.BackendServiceList) error { - glog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -3353,9 +3353,9 @@ func (g *GCEBetaBackendServices) List(ctx context.Context, fl *filter.F) ([]*bet // Insert BackendService with key of value obj. 
func (g *GCEBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *beta.BackendService) error { - glog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3365,9 +3365,9 @@ func (g *GCEBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -3376,20 +3376,20 @@ func (g *GCEBetaBackendServices) Insert(ctx context.Context, key *meta.Key, obj op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
func (g *GCEBetaBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3399,9 +3399,9 @@ func (g *GCEBetaBackendServices) Delete(ctx context.Context, key *meta.Key) erro Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.BackendServices.Delete(projectID, key.Name) @@ -3410,21 +3410,21 @@ func (g *GCEBetaBackendServices) Delete(ctx context.Context, key *meta.Key) erro op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // SetSecurityPolicy is a method on GCEBetaBackendServices. func (g *GCEBetaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyReference) error { - glog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3434,30 +3434,30 @@ func (g *GCEBetaBackendServices) SetSecurityPolicy(ctx context.Context, key *met Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.BackendServices.SetSecurityPolicy(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } // Update is a method on GCEBetaBackendServices. func (g *GCEBetaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *beta.BackendService) error { - glog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "BackendServices") @@ -3467,21 +3467,21 @@ func (g *GCEBetaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("beta"), Service: "BackendServices", } - glog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.BackendServices.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -3544,7 +3544,7 @@ type MockAlphaBackendServices struct { func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -3556,12 +3556,12 @@ func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alp defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -3569,7 +3569,7 @@ func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alp Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), } - glog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -3577,7 +3577,7 @@ func (m *MockAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alp func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -3587,7 +3587,7 @@ func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*a if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -3600,7 +3600,7 @@ func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*a objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockAlphaBackendServices.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -3608,7 +3608,7 @@ func (m *MockAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*a func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -3620,7 +3620,7 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return 
err } if _, ok := m.Objects[*key]; ok { @@ -3628,7 +3628,7 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaBackendServices %v exists", key), } - glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -3637,7 +3637,7 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "backendServices", key) m.Objects[*key] = &MockBackendServicesObj{obj} - glog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -3645,7 +3645,7 @@ func (m *MockAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, ob func (m *MockAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -3657,7 +3657,7 @@ func (m *MockAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) er defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -3665,12 +3665,12 @@ func (m *MockAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) er Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaBackendServices %v not found", key), } - glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -3702,10 +3702,10 @@ type GCEAlphaBackendServices struct { // Get the BackendService named by key. 
func (g *GCEAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { - glog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3715,21 +3715,21 @@ func (g *GCEAlphaBackendServices) Get(ctx context.Context, key *meta.Key) (*alph Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.BackendServices.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*alpha.BackendService, error) { - glog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -3740,30 +3740,30 @@ func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*al if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Alpha.BackendServices.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.BackendService f := func(l *alpha.BackendServiceList) error { - glog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -3771,9 +3771,9 @@ func (g *GCEAlphaBackendServices) List(ctx context.Context, fl *filter.F) ([]*al // Insert BackendService with key of value obj. func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { - glog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3783,9 +3783,9 @@ func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -3794,20 +3794,20 @@ func (g *GCEAlphaBackendServices) Insert(ctx context.Context, key *meta.Key, obj op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
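The List methods above gate their output on klog.V(4) and klog.V(5) because rendering every object with %+v is expensive, while the V check itself is cheap. One subtlety worth noting: V(4) is also true whenever -v=5 is set, so a guard that tests V(4) first, as the generated code above does, never actually reaches its V(5) branch. A sketch of the pattern with the levels ordered most-verbose first (names are illustrative):

package main

import (
	"fmt"

	"k8s.io/klog"
)

// logItems pays for the per-object %+v dump only when -v=5 is active,
// and falls back to a cheap count at -v=4.
func logItems(all []interface{}) {
	if klog.V(5) {
		var asStr []string
		for _, o := range all {
			asStr = append(asStr, fmt.Sprintf("%+v", o)) // costly full dump
		}
		klog.V(5).Infof("items = %v", asStr)
	} else if klog.V(4) {
		klog.V(4).Infof("items = [%v items]", len(all)) // cheap summary
	}
}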
func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3817,9 +3817,9 @@ func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) err Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.BackendServices.Delete(projectID, key.Name) @@ -3828,21 +3828,21 @@ func (g *GCEAlphaBackendServices) Delete(ctx context.Context, key *meta.Key) err op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // SetSecurityPolicy is a method on GCEAlphaBackendServices. func (g *GCEAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *meta.Key, arg0 *alpha.SecurityPolicyReference) error { - glog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3852,30 +3852,30 @@ func (g *GCEAlphaBackendServices) SetSecurityPolicy(ctx context.Context, key *me Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.BackendServices.SetSecurityPolicy(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.SetSecurityPolicy(%v, %v, ...) = %+v", ctx, key, err) return err } // Update is a method on GCEAlphaBackendServices. func (g *GCEAlphaBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { - glog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "BackendServices") @@ -3885,21 +3885,21 @@ func (g *GCEAlphaBackendServices) Update(ctx context.Context, key *meta.Key, arg Version: meta.Version("alpha"), Service: "BackendServices", } - glog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.BackendServices.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -3962,7 +3962,7 @@ type MockRegionBackendServices struct { func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -3974,12 +3974,12 @@ func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -3987,7 +3987,7 @@ func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga Code: http.StatusNotFound, Message: fmt.Sprintf("MockRegionBackendServices %v not found", key), } - glog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -3995,7 +3995,7 @@ func (m *MockRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*ga.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -4005,7 +4005,7 @@ func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -4021,7 +4021,7 @@ func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -4029,7 +4029,7 @@ func (m *MockRegionBackendServices) List(ctx context.Context, region string, fl func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -4041,7 +4041,7 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + 
klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -4049,7 +4049,7 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o Code: http.StatusConflict, Message: fmt.Sprintf("MockRegionBackendServices %v exists", key), } - glog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -4058,7 +4058,7 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o obj.SelfLink = SelfLink(meta.VersionGA, projectID, "backendServices", key) m.Objects[*key] = &MockRegionBackendServicesObj{obj} - glog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -4066,7 +4066,7 @@ func (m *MockRegionBackendServices) Insert(ctx context.Context, key *meta.Key, o func (m *MockRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -4078,7 +4078,7 @@ func (m *MockRegionBackendServices) Delete(ctx context.Context, key *meta.Key) e defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -4086,12 +4086,12 @@ func (m *MockRegionBackendServices) Delete(ctx context.Context, key *meta.Key) e Code: http.StatusNotFound, Message: fmt.Sprintf("MockRegionBackendServices %v not found", key), } - glog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockRegionBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -4123,10 +4123,10 @@ type GCERegionBackendServices struct { // Get the BackendService named by key. func (g *GCERegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga.BackendService, error) { - glog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4136,21 +4136,21 @@ func (g *GCERegionBackendServices) Get(ctx context.Context, key *meta.Key) (*ga. 
Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.RegionBackendServices.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCERegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*ga.BackendService, error) { - glog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -4161,30 +4161,30 @@ func (g *GCERegionBackendServices) List(ctx context.Context, region string, fl * if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.GA.RegionBackendServices.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.BackendService f := func(l *ga.BackendServiceList) error { - glog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCERegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -4192,9 +4192,9 @@ func (g *GCERegionBackendServices) List(ctx context.Context, region string, fl * // Insert BackendService with key of value obj. 
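Every live wrapper above calls g.s.RateLimiter.Accept(ctx, rk) before touching the API, passing a RateLimitKey that identifies project, operation, version, and service. The call sites imply an interface with a single Accept method; a minimal always-accept implementation (a sketch for illustration, not the package's shipped limiter) makes the contract concrete. It assumes this file's package, where RateLimitKey is in scope:

// acceptAllRateLimiter satisfies the interface implied by the call sites in
// this file: Accept blocks until the call described by key may proceed, or
// returns an error to abort it.
type acceptAllRateLimiter struct{}

func (*acceptAllRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // honor cancellation rather than issuing the call
	default:
		return nil // no throttling: every call proceeds immediately
	}
}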
func (g *GCERegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *ga.BackendService) error { - glog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4204,9 +4204,9 @@ func (g *GCERegionBackendServices) Insert(ctx context.Context, key *meta.Key, ob Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -4215,20 +4215,20 @@ func (g *GCERegionBackendServices) Insert(ctx context.Context, key *meta.Key, ob op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCERegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
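On a miss, the mocks above hand back a *googleapi.Error with Code http.StatusNotFound, and the live client surfaces the API's 404 the same way, so callers can detect "not found" uniformly against either implementation. A small helper, assuming the usual google.golang.org/api/googleapi types:

import (
	"net/http"

	"google.golang.org/api/googleapi"
)

// isNotFound reports whether err is a GCE 404, the shape both the mocks and
// the real client use for a missing object.
func isNotFound(err error) bool {
	apiErr, ok := err.(*googleapi.Error)
	return ok && apiErr.Code == http.StatusNotFound
}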
func (g *GCERegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4238,9 +4238,9 @@ func (g *GCERegionBackendServices) Delete(ctx context.Context, key *meta.Key) er Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.RegionBackendServices.Delete(projectID, key.Region, key.Name) @@ -4248,21 +4248,21 @@ func (g *GCERegionBackendServices) Delete(ctx context.Context, key *meta.Key) er op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // GetHealth is a method on GCERegionBackendServices. func (g *GCERegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *ga.ResourceGroupReference) (*ga.BackendServiceGroupHealth, error) { - glog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4272,25 +4272,25 @@ func (g *GCERegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...) 
= %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } // Update is a method on GCERegionBackendServices. func (g *GCERegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *ga.BackendService) error { - glog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionBackendServices") @@ -4300,21 +4300,21 @@ func (g *GCERegionBackendServices) Update(ctx context.Context, key *meta.Key, ar Version: meta.Version("ga"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -4377,7 +4377,7 @@ type MockAlphaRegionBackendServices struct { func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -4389,12 +4389,12 @@ func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -4402,7 +4402,7 @@ func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key), } - glog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -4410,7 +4410,7 @@ func (m *MockAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -4420,7 +4420,7 @@ func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -4436,7 +4436,7 @@ func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockAlphaRegionBackendServices.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -4444,7 +4444,7 @@ func (m *MockAlphaRegionBackendServices) List(ctx context.Context, region string func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -4456,7 +4456,7 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok 
:= m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -4464,7 +4464,7 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaRegionBackendServices %v exists", key), } - glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -4473,7 +4473,7 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "backendServices", key) m.Objects[*key] = &MockRegionBackendServicesObj{obj} - glog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaRegionBackendServices.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -4481,7 +4481,7 @@ func (m *MockAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.K func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -4493,7 +4493,7 @@ func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.K defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -4501,12 +4501,12 @@ func (m *MockAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.K Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaRegionBackendServices %v not found", key), } - glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaRegionBackendServices.Delete(%v, %v) = nil", ctx, key) return nil } @@ -4538,10 +4538,10 @@ type GCEAlphaRegionBackendServices struct { // Get the BackendService named by key. 
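The *Hook fields consulted at the top of each mock method are the test seam: the hook receives the method's arguments plus the mock itself, and when its first result is true the canned map-backed behavior is skipped entirely. A test might force a transient failure like this (the hook signature is inferred from the call sites above; the 503 is illustrative):

func TestGetUnavailable(t *testing.T) {
	mock := &MockAlphaRegionBackendServices{}
	// Returning true from the hook short-circuits the map lookup below it.
	mock.GetHook = func(ctx context.Context, key *meta.Key, m *MockAlphaRegionBackendServices) (bool, *alpha.BackendService, error) {
		return true, nil, &googleapi.Error{Code: http.StatusServiceUnavailable}
	}

	_, err := mock.Get(context.Background(), meta.RegionalKey("be-1", "us-central1"))
	if apiErr, ok := err.(*googleapi.Error); !ok || apiErr.Code != http.StatusServiceUnavailable {
		t.Fatalf("Get() = %v; want a 503 googleapi.Error", err)
	}
}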
func (g *GCEAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) (*alpha.BackendService, error) { - glog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4551,21 +4551,21 @@ func (g *GCEAlphaRegionBackendServices) Get(ctx context.Context, key *meta.Key) Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.RegionBackendServices.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all BackendService objects. func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.BackendService, error) { - glog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") rk := &RateLimitKey{ ProjectID: projectID, @@ -4576,30 +4576,30 @@ func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.Alpha.RegionBackendServices.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.BackendService f := func(l *alpha.BackendServiceList) error { - glog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaRegionBackendServices.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -4607,9 +4607,9 @@ func (g *GCEAlphaRegionBackendServices) List(ctx context.Context, region string, // Insert BackendService with key of value obj. func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Key, obj *alpha.BackendService) error { - glog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4619,9 +4619,9 @@ func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -4630,20 +4630,20 @@ func (g *GCEAlphaRegionBackendServices) Insert(ctx context.Context, key *meta.Ke op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the BackendService referenced by key. 
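Note that Insert, Delete, and Update above do not return the operation: they call WaitForCompletion on it and only then return, so a nil error means the mutation has fully landed and an immediate Get will see it. A usage sketch, assuming a Cloud value exposing the AlphaRegionBackendServices accessor this generated file defines (names and region are illustrative):

func createAndRead(ctx context.Context, c Cloud) (*alpha.BackendService, error) {
	key := meta.RegionalKey("my-backend", "us-central1")
	bs := &alpha.BackendService{Protocol: "TCP"}
	// Insert blocks until the GCE operation completes, so no polling is needed here.
	if err := c.AlphaRegionBackendServices().Insert(ctx, key, bs); err != nil {
		return nil, err
	}
	return c.AlphaRegionBackendServices().Get(ctx, key)
}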
func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4653,9 +4653,9 @@ func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.RegionBackendServices.Delete(projectID, key.Region, key.Name) @@ -4663,21 +4663,21 @@ func (g *GCEAlphaRegionBackendServices) Delete(ctx context.Context, key *meta.Ke op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Delete(%v, %v) = %v", ctx, key, err) return err } // GetHealth is a method on GCEAlphaRegionBackendServices. 
func (g *GCEAlphaRegionBackendServices) GetHealth(ctx context.Context, key *meta.Key, arg0 *alpha.ResourceGroupReference) (*alpha.BackendServiceGroupHealth, error) { - glog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4687,25 +4687,25 @@ func (g *GCEAlphaRegionBackendServices) GetHealth(ctx context.Context, key *meta Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.RegionBackendServices.GetHealth(projectID, key.Region, key.Name, arg0) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.GetHealth(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } // Update is a method on GCEAlphaRegionBackendServices. func (g *GCEAlphaRegionBackendServices) Update(ctx context.Context, key *meta.Key, arg0 *alpha.BackendService) error { - glog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "RegionBackendServices") @@ -4715,21 +4715,21 @@ func (g *GCEAlphaRegionBackendServices) Update(ctx context.Context, key *meta.Ke Version: meta.Version("alpha"), Service: "RegionBackendServices", } - glog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.RegionBackendServices.Update(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaRegionBackendServices.Update(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -4790,7 +4790,7 @@ type MockDisks struct { func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -4802,12 +4802,12 @@ func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -4815,7 +4815,7 @@ func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { Code: http.StatusNotFound, Message: fmt.Sprintf("MockDisks %v not found", key), } - glog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -4823,7 +4823,7 @@ func (m *MockDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -4833,7 +4833,7 @@ func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga. if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockDisks.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -4849,7 +4849,7 @@ func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga. objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockDisks.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -4857,7 +4857,7 @@ func (m *MockDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga. 
func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -4869,7 +4869,7 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -4877,7 +4877,7 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err Code: http.StatusConflict, Message: fmt.Sprintf("MockDisks %v exists", key), } - glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -4886,7 +4886,7 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err obj.SelfLink = SelfLink(meta.VersionGA, projectID, "disks", key) m.Objects[*key] = &MockDisksObj{obj} - glog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -4894,7 +4894,7 @@ func (m *MockDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) err func (m *MockDisks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -4906,7 +4906,7 @@ func (m *MockDisks) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -4914,12 +4914,12 @@ func (m *MockDisks) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockDisks %v not found", key), } - glog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockDisks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockDisks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -4943,10 +4943,10 @@ type GCEDisks struct { // Get the Disk named by key. 
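Disks are zonal, so their keys carry a zone as well as a name, and the key.Valid() check at the top of every wrapper rejects a key built for the wrong scope. The meta package provides one constructor per scope; for reference (resource names are illustrative):

func exampleKeys() []*meta.Key {
	return []*meta.Key{
		meta.ZonalKey("pd-1", "us-central1-a"),  // zone-scoped: Disks, instances, ...
		meta.RegionalKey("bs-1", "us-central1"), // region-scoped: RegionBackendServices, ...
		meta.GlobalKey("bs-2"),                  // global: BackendServices, ...
	}
}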
func (g *GCEDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { - glog.V(5).Infof("GCEDisks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEDisks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") @@ -4956,21 +4956,21 @@ func (g *GCEDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { Version: meta.Version("ga"), Service: "Disks", } - glog.V(5).Infof("GCEDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Disks.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Disk objects. func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Disk, error) { - glog.V(5).Infof("GCEDisks.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEDisks.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") rk := &RateLimitKey{ ProjectID: projectID, @@ -4981,30 +4981,30 @@ func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.D if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.GA.Disks.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Disk f := func(l *ga.DiskList) error { - glog.V(5).Infof("GCEDisks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEDisks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -5012,9 +5012,9 @@ func (g *GCEDisks) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.D // Insert Disk with key of value obj. 
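List hands fl.String() to the API's Filter parameter whenever fl is not filter.None, so the filtering above happens server-side rather than in the returned slice. Assuming the sibling filter package's Regexp constructor, a caller can scope a zone listing by name (zone and pattern are illustrative):

func listClusterDisks(ctx context.Context, c Cloud) ([]*ga.Disk, error) {
	// filter.Regexp builds a "field matches regexp" filter; pass filter.None
	// to disable filtering entirely.
	return c.Disks().List(ctx, "us-central1-a", filter.Regexp("name", "k8s-.*"))
}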
func (g *GCEDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { - glog.V(5).Infof("GCEDisks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEDisks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") @@ -5024,9 +5024,9 @@ func (g *GCEDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) erro Version: meta.Version("ga"), Service: "Disks", } - glog.V(5).Infof("GCEDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -5035,20 +5035,20 @@ func (g *GCEDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) erro op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Disk referenced by key. func (g *GCEDisks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEDisks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEDisks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") @@ -5058,9 +5058,9 @@ func (g *GCEDisks) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Disks", } - glog.V(5).Infof("GCEDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Disks.Delete(projectID, key.Zone, key.Name) @@ -5068,21 +5068,21 @@ func (g *GCEDisks) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Delete(%v, %v) = %v", ctx, key, err) return err } // Resize is a method on GCEDisks. 
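Resize follows the same synchronous shape as the other mutations: rate-limit, issue the call, then block in WaitForCompletion. A sketch of growing a disk (the SizeGb field is per the GA compute API; name, zone, and size are illustrative):

func growDisk(ctx context.Context, c Cloud) error {
	key := meta.ZonalKey("pd-1", "us-central1-a")
	// Returns only once the resize operation has completed (or failed).
	return c.Disks().Resize(ctx, key, &ga.DisksResizeRequest{SizeGb: 200})
}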
func (g *GCEDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResizeRequest) error { - glog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Disks") @@ -5092,21 +5092,21 @@ func (g *GCEDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.DisksResi Version: meta.Version("ga"), Service: "Disks", } - glog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Disks.Resize(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -5167,7 +5167,7 @@ type MockRegionDisks struct { func (m *MockRegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -5179,12 +5179,12 @@ func (m *MockRegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, err defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -5192,7 +5192,7 @@ func (m *MockRegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, err Code: http.StatusNotFound, Message: fmt.Sprintf("MockRegionDisks %v not found", key), } - glog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -5200,7 +5200,7 @@ func (m *MockRegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, err func (m *MockRegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -5210,7 +5210,7 @@ func (m *MockRegionDisks) List(ctx context.Context, region string, 
fl *filter.F) if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -5226,7 +5226,7 @@ func (m *MockRegionDisks) List(ctx context.Context, region string, fl *filter.F) objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockRegionDisks.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -5234,7 +5234,7 @@ func (m *MockRegionDisks) List(ctx context.Context, region string, fl *filter.F) func (m *MockRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -5246,7 +5246,7 @@ func (m *MockRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Dis defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -5254,7 +5254,7 @@ func (m *MockRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Dis Code: http.StatusConflict, Message: fmt.Sprintf("MockRegionDisks %v exists", key), } - glog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -5263,7 +5263,7 @@ func (m *MockRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Dis obj.SelfLink = SelfLink(meta.VersionGA, projectID, "disks", key) m.Objects[*key] = &MockRegionDisksObj{obj} - glog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockRegionDisks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -5271,7 +5271,7 @@ func (m *MockRegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Dis func (m *MockRegionDisks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -5283,7 +5283,7 @@ func (m *MockRegionDisks) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -5291,12 +5291,12 @@ func (m *MockRegionDisks) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockRegionDisks %v not found", key), } - glog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockRegionDisks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -5320,10 +5320,10 @@ type GCERegionDisks struct { // Get 
the Disk named by key. func (g *GCERegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, error) { - glog.V(5).Infof("GCERegionDisks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERegionDisks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") @@ -5333,21 +5333,21 @@ func (g *GCERegionDisks) Get(ctx context.Context, key *meta.Key) (*ga.Disk, erro Version: meta.Version("ga"), Service: "RegionDisks", } - glog.V(5).Infof("GCERegionDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.RegionDisks.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERegionDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegionDisks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Disk objects. func (g *GCERegionDisks) List(ctx context.Context, region string, fl *filter.F) ([]*ga.Disk, error) { - glog.V(5).Infof("GCERegionDisks.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCERegionDisks.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") rk := &RateLimitKey{ ProjectID: projectID, @@ -5358,30 +5358,30 @@ func (g *GCERegionDisks) List(ctx context.Context, region string, fl *filter.F) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCERegionDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCERegionDisks.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.GA.RegionDisks.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Disk f := func(l *ga.DiskList) error { - glog.V(5).Infof("GCERegionDisks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCERegionDisks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCERegionDisks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERegionDisks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -5389,9 +5389,9 @@ func (g *GCERegionDisks) List(ctx context.Context, region string, fl *filter.F) // Insert Disk with key of value obj. 
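The List methods only push a server-side filter when the caller supplies one: `fl != filter.None` guards the `call.Filter(fl.String())` step. A hypothetical usage sketch follows, assuming the `filter` subpackage exposes a `Regexp` constructor and that `F.String()` renders the GCE filter expression (both assumptions; neither is confirmed by this hunk):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter"
)

func main() {
	// filter.None means "no server-side filtering"; generated List
	// methods skip call.Filter entirely in that case.
	var fl *filter.F = filter.None
	if fl != filter.None {
		fmt.Println("would send:", fl.String())
	}

	// Assumed constructor: match disks whose name starts with "k8s-".
	fl = filter.Regexp("name", "k8s-.*")
	if fl != filter.None {
		fmt.Println("would send:", fl.String()) // e.g. name eq k8s-.*
	}
}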
func (g *GCERegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk) error { - glog.V(5).Infof("GCERegionDisks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCERegionDisks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCERegionDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") @@ -5401,9 +5401,9 @@ func (g *GCERegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk Version: meta.Version("ga"), Service: "RegionDisks", } - glog.V(5).Infof("GCERegionDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -5412,20 +5412,20 @@ func (g *GCERegionDisks) Insert(ctx context.Context, key *meta.Key, obj *ga.Disk op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERegionDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCERegionDisks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Disk referenced by key. func (g *GCERegionDisks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCERegionDisks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERegionDisks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") @@ -5435,9 +5435,9 @@ func (g *GCERegionDisks) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "RegionDisks", } - glog.V(5).Infof("GCERegionDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.RegionDisks.Delete(projectID, key.Region, key.Name) @@ -5445,21 +5445,21 @@ func (g *GCERegionDisks) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Delete(%v, %v) = %v", ctx, key, err) return err } // Resize is a method on GCERegionDisks. 
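Before every call, the generated code asks the service's RateLimiter for admission with a fully qualified RateLimitKey (project, operation, version, service), which is what makes per-operation throttling and test-side call accounting possible. The sketch below mirrors that contract with local types so it stays self-contained; the real RateLimitKey and RateLimiter definitions live in this package:

package main

import (
	"context"
	"fmt"
)

// Local stand-ins for the shapes used by the generated code.
type RateLimitKey struct {
	ProjectID string
	Operation string
	Version   string
	Service   string
}

type RateLimiter interface {
	Accept(ctx context.Context, key *RateLimitKey) error
}

// NopRateLimiter admits every call immediately, the behavior a test
// usually wants when call pacing is irrelevant.
type NopRateLimiter struct{}

func (NopRateLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
	// Honor context cancellation even when not throttling.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}

func main() {
	var rl RateLimiter = NopRateLimiter{}
	err := rl.Accept(context.Background(), &RateLimitKey{
		ProjectID: "proj", Operation: "Get", Version: "ga", Service: "Disks",
	})
	fmt.Println(err) // <nil>
}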
func (g *GCERegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.RegionDisksResizeRequest) error { - glog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegionDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegionDisks.Resize(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "RegionDisks") @@ -5469,21 +5469,21 @@ func (g *GCERegionDisks) Resize(ctx context.Context, key *meta.Key, arg0 *ga.Reg Version: meta.Version("ga"), Service: "RegionDisks", } - glog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegionDisks.Resize(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.RegionDisks.Resize(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERegionDisks.Resize(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -5544,7 +5544,7 @@ type MockFirewalls struct { func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -5556,12 +5556,12 @@ func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, e defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -5569,7 +5569,7 @@ func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, e Code: http.StatusNotFound, Message: fmt.Sprintf("MockFirewalls %v not found", key), } - glog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -5577,7 +5577,7 @@ func (m *MockFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, e func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -5587,7 +5587,7 @@ func (m *MockFirewalls) 
List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockFirewalls.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockFirewalls.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -5600,7 +5600,7 @@ func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockFirewalls.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -5608,7 +5608,7 @@ func (m *MockFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewall) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -5620,7 +5620,7 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -5628,7 +5628,7 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew Code: http.StatusConflict, Message: fmt.Sprintf("MockFirewalls %v exists", key), } - glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -5637,7 +5637,7 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew obj.SelfLink = SelfLink(meta.VersionGA, projectID, "firewalls", key) m.Objects[*key] = &MockFirewallsObj{obj} - glog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockFirewalls.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -5645,7 +5645,7 @@ func (m *MockFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firew func (m *MockFirewalls) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -5657,7 +5657,7 @@ func (m *MockFirewalls) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -5665,12 +5665,12 @@ func (m *MockFirewalls) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockFirewalls %v not found", key), } - glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockFirewalls.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockFirewalls.Delete(%v, %v) = nil", ctx, key) return nil } @@ -5694,10 +5694,10 @@ type GCEFirewalls struct { // Get the Firewall named by key. 
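The Mock types follow one template throughout this file: an optional per-method hook that can intercept a call entirely, per-key error-injection maps, and a lock-guarded Objects map as the fallback store, which is why every branch above logs its result and returns early. A self-contained miniature of the same shape, using local types rather than the generated ones:

package main

import (
	"context"
	"fmt"
	"sync"
)

type key struct{ Name string }
type firewall struct{ Name string }

// mockFirewalls mirrors the generated MockFirewalls layout.
type mockFirewalls struct {
	lock     sync.Mutex
	objects  map[key]*firewall
	getError map[key]error
	getHook  func(ctx context.Context, k key, m *mockFirewalls) (bool, *firewall, error)
}

func (m *mockFirewalls) Get(ctx context.Context, k key) (*firewall, error) {
	// A hook returning intercept=true short-circuits everything else.
	if m.getHook != nil {
		if intercept, obj, err := m.getHook(ctx, k, m); intercept {
			return obj, err
		}
	}
	m.lock.Lock()
	defer m.lock.Unlock()
	if err, ok := m.getError[k]; ok {
		return nil, err // injected per-key failure
	}
	if obj, ok := m.objects[k]; ok {
		return obj, nil
	}
	return nil, fmt.Errorf("mockFirewalls %v not found", k)
}

func main() {
	m := &mockFirewalls{objects: map[key]*firewall{{Name: "allow-http"}: {Name: "allow-http"}}}
	// The hook intercepts one key; everything else falls through to the map.
	m.getHook = func(ctx context.Context, k key, m *mockFirewalls) (bool, *firewall, error) {
		if k.Name == "flaky" {
			return true, nil, fmt.Errorf("injected failure")
		}
		return false, nil, nil
	}
	fmt.Println(m.Get(context.Background(), key{Name: "allow-http"}))
	fmt.Println(m.Get(context.Background(), key{Name: "flaky"}))
}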
func (g *GCEFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, error) { - glog.V(5).Infof("GCEFirewalls.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEFirewalls.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEFirewalls.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") @@ -5707,21 +5707,21 @@ func (g *GCEFirewalls) Get(ctx context.Context, key *meta.Key) (*ga.Firewall, er Version: meta.Version("ga"), Service: "Firewalls", } - glog.V(5).Infof("GCEFirewalls.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEFirewalls.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Firewalls.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEFirewalls.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEFirewalls.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Firewall objects. func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, error) { - glog.V(5).Infof("GCEFirewalls.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEFirewalls.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") rk := &RateLimitKey{ ProjectID: projectID, @@ -5732,30 +5732,30 @@ func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEFirewalls.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEFirewalls.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.Firewalls.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Firewall f := func(l *ga.FirewallList) error { - glog.V(5).Infof("GCEFirewalls.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEFirewalls.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEFirewalls.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEFirewalls.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -5763,9 +5763,9 @@ func (g *GCEFirewalls) List(ctx context.Context, fl *filter.F) ([]*ga.Firewall, // Insert Firewall with key of value obj. 
func (g *GCEFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewall) error { - glog.V(5).Infof("GCEFirewalls.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEFirewalls.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEFirewalls.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") @@ -5775,9 +5775,9 @@ func (g *GCEFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewa Version: meta.Version("ga"), Service: "Firewalls", } - glog.V(5).Infof("GCEFirewalls.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -5786,20 +5786,20 @@ func (g *GCEFirewalls) Insert(ctx context.Context, key *meta.Key, obj *ga.Firewa op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEFirewalls.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEFirewalls.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Firewall referenced by key. func (g *GCEFirewalls) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEFirewalls.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEFirewalls.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEFirewalls.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") @@ -5809,9 +5809,9 @@ func (g *GCEFirewalls) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Firewalls", } - glog.V(5).Infof("GCEFirewalls.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEFirewalls.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Firewalls.Delete(projectID, key.Name) @@ -5820,21 +5820,21 @@ func (g *GCEFirewalls) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEFirewalls. 
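Mutating methods (Insert, Delete, Update) share a two-phase shape: `call.Do()` starts a GCE operation, then `g.s.WaitForCompletion(ctx, op)` blocks until it finishes, and the final log line reports the combined outcome. The sketch below shows the polling contract such a wait implies; the `operation` interface here is a local stand-in, since the real one (in this package's op.go) is not part of this hunk:

package main

import (
	"context"
	"fmt"
	"time"
)

// operation is a local stand-in: a way to ask whether the remote
// operation has finished, refreshing state as needed.
type operation interface {
	isDone(ctx context.Context) (bool, error)
}

// waitForCompletion polls until the operation reports done, the poll
// errors, or the context is cancelled — the contract the generated
// mutators rely on after call.Do().
func waitForCompletion(ctx context.Context, op operation) error {
	tick := time.NewTicker(100 * time.Millisecond)
	defer tick.Stop()
	for {
		done, err := op.isDone(ctx)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-tick.C:
		}
	}
}

// fakeOp completes after a fixed number of polls.
type fakeOp struct{ remaining int }

func (f *fakeOp) isDone(ctx context.Context) (bool, error) {
	if f.remaining <= 0 {
		return true, nil
	}
	f.remaining--
	return false, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := waitForCompletion(ctx, &fakeOp{remaining: 3}); err != nil {
		fmt.Println("wait failed:", err)
		return
	}
	fmt.Println("operation complete")
}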
func (g *GCEFirewalls) Update(ctx context.Context, key *meta.Key, arg0 *ga.Firewall) error { - glog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEFirewalls.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEFirewalls.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Firewalls") @@ -5844,21 +5844,21 @@ func (g *GCEFirewalls) Update(ctx context.Context, key *meta.Key, arg0 *ga.Firew Version: meta.Version("ga"), Service: "Firewalls", } - glog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEFirewalls.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Firewalls.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEFirewalls.Update(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -5917,7 +5917,7 @@ type MockForwardingRules struct { func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -5929,12 +5929,12 @@ func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwa defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -5942,7 +5942,7 @@ func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwa Code: http.StatusNotFound, Message: fmt.Sprintf("MockForwardingRules %v not found", key), } - glog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -5950,7 +5950,7 @@ func (m *MockForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwa func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, 
len(objs), err) return objs, err } } @@ -5960,7 +5960,7 @@ func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filte if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -5976,7 +5976,7 @@ func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filte objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -5984,7 +5984,7 @@ func (m *MockForwardingRules) List(ctx context.Context, region string, fl *filte func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -5996,7 +5996,7 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -6004,7 +6004,7 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga Code: http.StatusConflict, Message: fmt.Sprintf("MockForwardingRules %v exists", key), } - glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -6013,7 +6013,7 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga obj.SelfLink = SelfLink(meta.VersionGA, projectID, "forwardingRules", key) m.Objects[*key] = &MockForwardingRulesObj{obj} - glog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -6021,7 +6021,7 @@ func (m *MockForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga func (m *MockForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -6033,7 +6033,7 @@ func (m *MockForwardingRules) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -6041,12 +6041,12 @@ func (m *MockForwardingRules) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockForwardingRules %v not found", key), } - glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = %v", ctx, key, err) 
return err } delete(m.Objects, *key) - glog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } @@ -6062,10 +6062,10 @@ type GCEForwardingRules struct { // Get the ForwardingRule named by key. func (g *GCEForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) { - glog.V(5).Infof("GCEForwardingRules.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") @@ -6075,21 +6075,21 @@ func (g *GCEForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.Forwar Version: meta.Version("ga"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.ForwardingRules.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all ForwardingRule objects. func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*ga.ForwardingRule, error) { - glog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, @@ -6100,30 +6100,30 @@ func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.GA.ForwardingRules.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.ForwardingRule f := func(l *ga.ForwardingRuleList) error { - glog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -6131,9 +6131,9 @@ func (g *GCEForwardingRules) List(ctx context.Context, region string, fl *filter // Insert ForwardingRule with key of value obj. func (g *GCEForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error { - glog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") @@ -6143,9 +6143,9 @@ func (g *GCEForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga. Version: meta.Version("ga"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -6154,20 +6154,20 @@ func (g *GCEForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the ForwardingRule referenced by key. 
func (g *GCEForwardingRules) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "ForwardingRules") @@ -6177,9 +6177,9 @@ func (g *GCEForwardingRules) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.ForwardingRules.Delete(projectID, key.Region, key.Name) @@ -6187,12 +6187,12 @@ func (g *GCEForwardingRules) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -6251,7 +6251,7 @@ type MockAlphaForwardingRules struct { func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -6263,12 +6263,12 @@ func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alp defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToAlpha() - glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -6276,7 +6276,7 @@ func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alp Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), } - glog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -6284,7 +6284,7 @@ func (m *MockAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alp func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - 
glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -6294,7 +6294,7 @@ func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl * if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -6310,7 +6310,7 @@ func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl * objs = append(objs, obj.ToAlpha()) } - glog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockAlphaForwardingRules.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -6318,7 +6318,7 @@ func (m *MockAlphaForwardingRules) List(ctx context.Context, region string, fl * func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -6330,7 +6330,7 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -6338,7 +6338,7 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob Code: http.StatusConflict, Message: fmt.Sprintf("MockAlphaForwardingRules %v exists", key), } - glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -6347,7 +6347,7 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "forwardingRules", key) m.Objects[*key] = &MockForwardingRulesObj{obj} - glog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockAlphaForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -6355,7 +6355,7 @@ func (m *MockAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, ob func (m *MockAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -6367,7 +6367,7 @@ func (m *MockAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) er defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -6375,12 +6375,12 @@ func (m 
*MockAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) er Code: http.StatusNotFound, Message: fmt.Sprintf("MockAlphaForwardingRules %v not found", key), } - glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockAlphaForwardingRules.Delete(%v, %v) = nil", ctx, key) return nil } @@ -6396,10 +6396,10 @@ type GCEAlphaForwardingRules struct { // Get the ForwardingRule named by key. func (g *GCEAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alpha.ForwardingRule, error) { - glog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") @@ -6409,21 +6409,21 @@ func (g *GCEAlphaForwardingRules) Get(ctx context.Context, key *meta.Key) (*alph Version: meta.Version("alpha"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.ForwardingRules.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all ForwardingRule objects. func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *filter.F) ([]*alpha.ForwardingRule, error) { - glog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") rk := &RateLimitKey{ ProjectID: projectID, @@ -6434,30 +6434,30 @@ func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *f if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.Alpha.ForwardingRules.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*alpha.ForwardingRule f := func(l *alpha.ForwardingRuleList) error { - glog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -6465,9 +6465,9 @@ func (g *GCEAlphaForwardingRules) List(ctx context.Context, region string, fl *f // Insert ForwardingRule with key of value obj. func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *alpha.ForwardingRule) error { - glog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules") @@ -6477,9 +6477,9 @@ func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj Version: meta.Version("alpha"), Service: "ForwardingRules", } - glog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -6488,20 +6488,20 @@ func (g *GCEAlphaForwardingRules) Insert(ctx context.Context, key *meta.Key, obj op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEAlphaForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the ForwardingRule referenced by key. 
 func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) error {
-	glog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "ForwardingRules")
@@ -6511,9 +6511,9 @@ func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) err
 		Version:   meta.Version("alpha"),
 		Service:   "ForwardingRules",
 	}
-	glog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.Alpha.ForwardingRules.Delete(projectID, key.Region, key.Name)
@@ -6521,12 +6521,12 @@ func (g *GCEAlphaForwardingRules) Delete(ctx context.Context, key *meta.Key) err
 
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 
 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(4).Infof("GCEAlphaForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
 	return err
 }
@@ -6587,7 +6587,7 @@ type MockGlobalForwardingRules struct {
 func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) {
 	if m.GetHook != nil {
 		if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
 			return obj, err
 		}
 	}
@@ -6599,12 +6599,12 @@ func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga
 	defer m.Lock.Unlock()
 
 	if err, ok := m.GetError[*key]; ok {
-		glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err)
+		klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err)
 		return nil, err
 	}
 	if obj, ok := m.Objects[*key]; ok {
 		typedObj := obj.ToGA()
-		glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
+		klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
 		return typedObj, nil
 	}
 
@@ -6612,7 +6612,7 @@ func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga
 		Code:    http.StatusNotFound,
 		Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key),
 	}
-	glog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err)
+	klog.V(5).Infof("MockGlobalForwardingRules.Get(%v, %s) = nil, %v", ctx, key, err)
 	return nil, err
 }
 
@@ -6620,7 +6620,7 @@ func (m *MockGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga
 func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) {
 	if m.ListHook != nil {
 		if intercept, objs, err := m.ListHook(ctx, fl, m); intercept {
-			glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
+			klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
 			return objs, err
 		}
 	}
@@ -6630,7 +6630,7 @@ func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*
 
 	if m.ListError != nil {
 		err := *m.ListError
-		glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = nil, %v", ctx, fl, err)
+		klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = nil, %v", ctx, fl, err)
 		return nil, *m.ListError
 	}
 
@@ -6643,7 +6643,7 @@ func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*
 		objs = append(objs, obj.ToGA())
 	}
 
-	glog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
+	klog.V(5).Infof("MockGlobalForwardingRules.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
 	return objs, nil
 }
 
@@ -6651,7 +6651,7 @@ func (m *MockGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*
 func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error {
 	if m.InsertHook != nil {
 		if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
-			glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 			return err
 		}
 	}
@@ -6663,7 +6663,7 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o
 	defer m.Lock.Unlock()
 
 	if err, ok := m.InsertError[*key]; ok {
-		glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 		return err
 	}
 	if _, ok := m.Objects[*key]; ok {
@@ -6671,7 +6671,7 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o
 			Code:    http.StatusConflict,
 			Message: fmt.Sprintf("MockGlobalForwardingRules %v exists", key),
 		}
-		glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 		return err
 	}
 
@@ -6680,7 +6680,7 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o
 	obj.SelfLink = SelfLink(meta.VersionGA, projectID, "forwardingRules", key)
 	m.Objects[*key] = &MockGlobalForwardingRulesObj{obj}
-	glog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj)
+	klog.V(5).Infof("MockGlobalForwardingRules.Insert(%v, %v, %+v) = nil", ctx, key, obj)
 	return nil
 }
 
@@ -6688,7 +6688,7 @@ func (m *MockGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, o
 func (m *MockGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error {
 	if m.DeleteHook != nil {
 		if intercept, err := m.DeleteHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
+			klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
 			return err
 		}
 	}
@@ -6700,7 +6700,7 @@ func (m *MockGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) e
 	defer m.Lock.Unlock()
 
 	if err, ok := m.DeleteError[*key]; ok {
-		glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 	if _, ok := m.Objects[*key]; !ok {
@@ -6708,12 +6708,12 @@ func (m *MockGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) e
 			Code:    http.StatusNotFound,
 			Message: fmt.Sprintf("MockGlobalForwardingRules %v not found", key),
 		}
-		glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 
 	delete(m.Objects, *key)
-	glog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = nil", ctx, key)
+	klog.V(5).Infof("MockGlobalForwardingRules.Delete(%v, %v) = nil", ctx, key)
 	return nil
 }
 
@@ -6737,10 +6737,10 @@ type GCEGlobalForwardingRules struct {
 
 // Get the ForwardingRule named by key.
 func (g *GCEGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.ForwardingRule, error) {
-	glog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEGlobalForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEGlobalForwardingRules.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return nil, fmt.Errorf("invalid GCE key (%#v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules")
@@ -6750,21 +6750,21 @@ func (g *GCEGlobalForwardingRules) Get(ctx context.Context, key *meta.Key) (*ga.
 		Version:   meta.Version("ga"),
 		Service:   "GlobalForwardingRules",
 	}
-	glog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEGlobalForwardingRules.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return nil, err
 	}
 	call := g.s.GA.GlobalForwardingRules.Get(projectID, key.Name)
 	call.Context(ctx)
 	v, err := call.Do()
-	glog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err)
+	klog.V(4).Infof("GCEGlobalForwardingRules.Get(%v, %v) = %+v, %v", ctx, key, v, err)
 	return v, err
 }
 
 // List all ForwardingRule objects.
 func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*ga.ForwardingRule, error) {
-	glog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v) called", ctx, fl)
+	klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v) called", ctx, fl)
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules")
 	rk := &RateLimitKey{
 		ProjectID: projectID,
@@ -6775,30 +6775,30 @@ func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*g
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
 		return nil, err
 	}
-	glog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
+	klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
 	call := g.s.GA.GlobalForwardingRules.List(projectID)
 	if fl != filter.None {
 		call.Filter(fl.String())
 	}
 	var all []*ga.ForwardingRule
 	f := func(l *ga.ForwardingRuleList) error {
-		glog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l)
+		klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v): page %+v", ctx, fl, l)
 		all = append(all, l.Items...)
 		return nil
 	}
 	if err := call.Pages(ctx, f); err != nil {
-		glog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
+		klog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
 		return nil, err
 	}
 
-	if glog.V(4) {
-		glog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
-	} else if glog.V(5) {
+	if klog.V(4) {
+		klog.V(4).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
+	} else if klog.V(5) {
 		var asStr []string
 		for _, o := range all {
 			asStr = append(asStr, fmt.Sprintf("%+v", o))
 		}
-		glog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
+		klog.V(5).Infof("GCEGlobalForwardingRules.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
 	}
 
 	return all, nil
@@ -6806,9 +6806,9 @@ func (g *GCEGlobalForwardingRules) List(ctx context.Context, fl *filter.F) ([]*g
 
 // Insert ForwardingRule with key of value obj.
 func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, obj *ga.ForwardingRule) error {
-	glog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj)
+	klog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v): called", ctx, key, obj)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules")
@@ -6818,9 +6818,9 @@ func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, ob
 		Version:   meta.Version("ga"),
 		Service:   "GlobalForwardingRules",
 	}
-	glog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	obj.Name = key.Name
@@ -6829,20 +6829,20 @@ func (g *GCEGlobalForwardingRules) Insert(ctx context.Context, key *meta.Key, ob
 
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}
 
 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
+	klog.V(4).Infof("GCEGlobalForwardingRules.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
 	return err
 }
 
 // Delete the ForwardingRule referenced by key.
 func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) error {
-	glog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEGlobalForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEGlobalForwardingRules.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules")
@@ -6852,9 +6852,9 @@ func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) er
 		Version:   meta.Version("ga"),
 		Service:   "GlobalForwardingRules",
 	}
-	glog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEGlobalForwardingRules.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.GA.GlobalForwardingRules.Delete(projectID, key.Name)
@@ -6863,21 +6863,21 @@ func (g *GCEGlobalForwardingRules) Delete(ctx context.Context, key *meta.Key) er
 
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 
 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(4).Infof("GCEGlobalForwardingRules.Delete(%v, %v) = %v", ctx, key, err)
 	return err
 }
 
 // SetTarget is a method on GCEGlobalForwardingRules.
 func (g *GCEGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key, arg0 *ga.TargetReference) error {
-	glog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): called", ctx, key)
+	klog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "GlobalForwardingRules")
@@ -6887,21 +6887,21 @@ func (g *GCEGlobalForwardingRules) SetTarget(ctx context.Context, key *meta.Key,
 		Version:   meta.Version("ga"),
 		Service:   "GlobalForwardingRules",
 	}
-	glog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.GA.GlobalForwardingRules.SetTarget(projectID, key.Name, arg0)
 	call.Context(ctx)
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}
 
 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err)
+	klog.V(4).Infof("GCEGlobalForwardingRules.SetTarget(%v, %v, ...) = %+v", ctx, key, err)
 	return err
 }
 
@@ -6962,7 +6962,7 @@ type MockHealthChecks struct {
 func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCheck, error) {
 	if m.GetHook != nil {
 		if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
 			return obj, err
 		}
 	}
@@ -6974,12 +6974,12 @@ func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCh
 	defer m.Lock.Unlock()
 
 	if err, ok := m.GetError[*key]; ok {
-		glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
+		klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
 		return nil, err
 	}
 	if obj, ok := m.Objects[*key]; ok {
 		typedObj := obj.ToGA()
-		glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
+		klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
 		return typedObj, nil
 	}
 
@@ -6987,7 +6987,7 @@ func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCh
 		Code:    http.StatusNotFound,
 		Message: fmt.Sprintf("MockHealthChecks %v not found", key),
 	}
-	glog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
+	klog.V(5).Infof("MockHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
 	return nil, err
 }
 
@@ -6995,7 +6995,7 @@ func (m *MockHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCh
 func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) {
 	if m.ListHook != nil {
 		if intercept, objs, err := m.ListHook(ctx, fl, m); intercept {
-			glog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
+			klog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
 			return objs, err
 		}
 	}
@@ -7005,7 +7005,7 @@ func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Health
 
 	if m.ListError != nil {
 		err := *m.ListError
-		glog.V(5).Infof("MockHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err)
+		klog.V(5).Infof("MockHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err)
 		return nil, *m.ListError
 	}
 
@@ -7018,7 +7018,7 @@ func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Health
 		objs = append(objs, obj.ToGA())
 	}
 
-	glog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
+	klog.V(5).Infof("MockHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
 	return objs, nil
 }
 
@@ -7026,7 +7026,7 @@ func (m *MockHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Health
 func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HealthCheck) error {
 	if m.InsertHook != nil {
 		if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
-			glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 			return err
 		}
 	}
@@ -7038,7 +7038,7 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He
 	defer m.Lock.Unlock()
 
 	if err, ok := m.InsertError[*key]; ok {
-		glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 		return err
 	}
 	if _, ok := m.Objects[*key]; ok {
@@ -7046,7 +7046,7 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He
 			Code:    http.StatusConflict,
 			Message: fmt.Sprintf("MockHealthChecks %v exists", key),
 		}
-		glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 		return err
 	}
 
@@ -7055,7 +7055,7 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He
 	obj.SelfLink = SelfLink(meta.VersionGA, projectID, "healthChecks", key)
 	m.Objects[*key] = &MockHealthChecksObj{obj}
-	glog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
+	klog.V(5).Infof("MockHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
 	return nil
 }
 
@@ -7063,7 +7063,7 @@ func (m *MockHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.He
 func (m *MockHealthChecks) Delete(ctx context.Context, key *meta.Key) error {
 	if m.DeleteHook != nil {
 		if intercept, err := m.DeleteHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+			klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 			return err
 		}
 	}
@@ -7075,7 +7075,7 @@ func (m *MockHealthChecks) Delete(ctx context.Context, key *meta.Key) error {
 	defer m.Lock.Unlock()
 
 	if err, ok := m.DeleteError[*key]; ok {
-		glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 	if _, ok := m.Objects[*key]; !ok {
@@ -7083,12 +7083,12 @@ func (m *MockHealthChecks) Delete(ctx context.Context, key *meta.Key) error {
 			Code:    http.StatusNotFound,
 			Message: fmt.Sprintf("MockHealthChecks %v not found", key),
 		}
-		glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 
 	delete(m.Objects, *key)
-	glog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = nil", ctx, key)
+	klog.V(5).Infof("MockHealthChecks.Delete(%v, %v) = nil", ctx, key)
 	return nil
 }
 
@@ -7112,10 +7112,10 @@ type GCEHealthChecks struct {
 
 // Get the HealthCheck named by key.
 func (g *GCEHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthCheck, error) {
-	glog.V(5).Infof("GCEHealthChecks.Get(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEHealthChecks.Get(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return nil, fmt.Errorf("invalid GCE key (%#v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks")
@@ -7125,21 +7125,21 @@ func (g *GCEHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HealthChe
 		Version:   meta.Version("ga"),
 		Service:   "HealthChecks",
 	}
-	glog.V(5).Infof("GCEHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return nil, err
 	}
 	call := g.s.GA.HealthChecks.Get(projectID, key.Name)
 	call.Context(ctx)
 	v, err := call.Do()
-	glog.V(4).Infof("GCEHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err)
+	klog.V(4).Infof("GCEHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err)
 	return v, err
 }
 
 // List all HealthCheck objects.
 func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthCheck, error) {
-	glog.V(5).Infof("GCEHealthChecks.List(%v, %v) called", ctx, fl)
+	klog.V(5).Infof("GCEHealthChecks.List(%v, %v) called", ctx, fl)
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks")
 	rk := &RateLimitKey{
 		ProjectID: projectID,
@@ -7150,30 +7150,30 @@ func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthC
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
 		return nil, err
 	}
-	glog.V(5).Infof("GCEHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
+	klog.V(5).Infof("GCEHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
 	call := g.s.GA.HealthChecks.List(projectID)
 	if fl != filter.None {
 		call.Filter(fl.String())
 	}
 	var all []*ga.HealthCheck
 	f := func(l *ga.HealthCheckList) error {
-		glog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l)
+		klog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l)
 		all = append(all, l.Items...)
 		return nil
 	}
 	if err := call.Pages(ctx, f); err != nil {
-		glog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
+		klog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
 		return nil, err
 	}
 
-	if glog.V(4) {
-		glog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
-	} else if glog.V(5) {
+	if klog.V(4) {
+		klog.V(4).Infof("GCEHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
+	} else if klog.V(5) {
 		var asStr []string
 		for _, o := range all {
 			asStr = append(asStr, fmt.Sprintf("%+v", o))
 		}
-		glog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
+		klog.V(5).Infof("GCEHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
 	}
 
 	return all, nil
@@ -7181,9 +7181,9 @@ func (g *GCEHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HealthC
 
 // Insert HealthCheck with key of value obj.
 func (g *GCEHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HealthCheck) error {
-	glog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj)
+	klog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks")
@@ -7193,9 +7193,9 @@ func (g *GCEHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.Hea
 		Version:   meta.Version("ga"),
 		Service:   "HealthChecks",
 	}
-	glog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	obj.Name = key.Name
@@ -7204,20 +7204,20 @@ func (g *GCEHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.Hea
 
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}
 
 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
+	klog.V(4).Infof("GCEHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
 	return err
 }
 
 // Delete the HealthCheck referenced by key.
 func (g *GCEHealthChecks) Delete(ctx context.Context, key *meta.Key) error {
-	glog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks")
@@ -7227,9 +7227,9 @@ func (g *GCEHealthChecks) Delete(ctx context.Context, key *meta.Key) error {
 		Version:   meta.Version("ga"),
 		Service:   "HealthChecks",
 	}
-	glog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.GA.HealthChecks.Delete(projectID, key.Name)
@@ -7238,21 +7238,21 @@ func (g *GCEHealthChecks) Delete(ctx context.Context, key *meta.Key) error {
 
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 
 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(4).Infof("GCEHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 	return err
 }
 
 // Update is a method on GCEHealthChecks.
 func (g *GCEHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HealthCheck) error {
-	glog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): called", ctx, key)
+	klog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HealthChecks")
@@ -7262,21 +7262,21 @@ func (g *GCEHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.He
 		Version:   meta.Version("ga"),
 		Service:   "HealthChecks",
 	}
-	glog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.GA.HealthChecks.Update(projectID, key.Name, arg0)
 	call.Context(ctx)
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}
 
 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err)
+	klog.V(4).Infof("GCEHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err)
 	return err
 }
 
@@ -7337,7 +7337,7 @@ type MockBetaHealthChecks struct {
 func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) {
 	if m.GetHook != nil {
 		if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
 			return obj, err
 		}
 	}
@@ -7349,12 +7349,12 @@ func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.He
 	defer m.Lock.Unlock()
 
 	if err, ok := m.GetError[*key]; ok {
-		glog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
+		klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
 		return nil, err
 	}
 	if obj, ok := m.Objects[*key]; ok {
 		typedObj := obj.ToBeta()
-		glog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
+		klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
 		return typedObj, nil
 	}
 
@@ -7362,7 +7362,7 @@ func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.He
 		Code:    http.StatusNotFound,
 		Message: fmt.Sprintf("MockBetaHealthChecks %v not found", key),
 	}
-	glog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
+	klog.V(5).Infof("MockBetaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
 	return nil, err
 }
 
@@ -7370,7 +7370,7 @@ func (m *MockBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.He
 func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.HealthCheck, error) {
 	if m.ListHook != nil {
 		if intercept, objs, err := m.ListHook(ctx, fl, m); intercept {
-			glog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
+			klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
 			return objs, err
 		}
 	}
@@ -7380,7 +7380,7 @@ func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.
 
 	if m.ListError != nil {
 		err := *m.ListError
-		glog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err)
+		klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err)
 		return nil, *m.ListError
 	}
 
@@ -7393,7 +7393,7 @@ func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.
 		objs = append(objs, obj.ToBeta())
 	}
 
-	glog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
+	klog.V(5).Infof("MockBetaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
 	return objs, nil
 }
 
@@ -7401,7 +7401,7 @@ func (m *MockBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.
 func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error {
 	if m.InsertHook != nil {
 		if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
-			glog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 			return err
 		}
 	}
@@ -7413,7 +7413,7 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b
 	defer m.Lock.Unlock()
 
 	if err, ok := m.InsertError[*key]; ok {
-		glog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 		return err
 	}
 	if _, ok := m.Objects[*key]; ok {
@@ -7421,7 +7421,7 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b
 			Code:    http.StatusConflict,
 			Message: fmt.Sprintf("MockBetaHealthChecks %v exists", key),
 		}
-		glog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 		return err
 	}
 
@@ -7430,7 +7430,7 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b
 	obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "healthChecks", key)
 	m.Objects[*key] = &MockHealthChecksObj{obj}
-	glog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
+	klog.V(5).Infof("MockBetaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
 	return nil
 }
 
@@ -7438,7 +7438,7 @@ func (m *MockBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *b
 func (m *MockBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error {
 	if m.DeleteHook != nil {
 		if intercept, err := m.DeleteHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+			klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 			return err
 		}
 	}
@@ -7450,7 +7450,7 @@ func (m *MockBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error
 	defer m.Lock.Unlock()
 
 	if err, ok := m.DeleteError[*key]; ok {
-		glog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 	if _, ok := m.Objects[*key]; !ok {
@@ -7458,12 +7458,12 @@ func (m *MockBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error
 			Code:    http.StatusNotFound,
 			Message: fmt.Sprintf("MockBetaHealthChecks %v not found", key),
 		}
-		glog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 
 	delete(m.Objects, *key)
-	glog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = nil", ctx, key)
+	klog.V(5).Infof("MockBetaHealthChecks.Delete(%v, %v) = nil", ctx, key)
 	return nil
 }
 
@@ -7487,10 +7487,10 @@ type GCEBetaHealthChecks struct {
 
 // Get the HealthCheck named by key.
 func (g *GCEBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.HealthCheck, error) {
-	glog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return nil, fmt.Errorf("invalid GCE key (%#v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks")
@@ -7500,21 +7500,21 @@ func (g *GCEBetaHealthChecks) Get(ctx context.Context, key *meta.Key) (*beta.Hea
 		Version:   meta.Version("beta"),
 		Service:   "HealthChecks",
 	}
-	glog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return nil, err
 	}
 	call := g.s.Beta.HealthChecks.Get(projectID, key.Name)
 	call.Context(ctx)
 	v, err := call.Do()
-	glog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err)
+	klog.V(4).Infof("GCEBetaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err)
 	return v, err
 }
 
 // List all HealthCheck objects.
 func (g *GCEBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.HealthCheck, error) {
-	glog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v) called", ctx, fl)
+	klog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v) called", ctx, fl)
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks")
 	rk := &RateLimitKey{
 		ProjectID: projectID,
@@ -7525,30 +7525,30 @@ func (g *GCEBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.H
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
 		return nil, err
 	}
-	glog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
+	klog.V(5).Infof("GCEBetaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
 	call := g.s.Beta.HealthChecks.List(projectID)
 	if fl != filter.None {
 		call.Filter(fl.String())
 	}
 	var all []*beta.HealthCheck
 	f := func(l *beta.HealthCheckList) error {
-		glog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l)
+		klog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l)
 		all = append(all, l.Items...)
 		return nil
 	}
 	if err := call.Pages(ctx, f); err != nil {
-		glog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
+		klog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
 		return nil, err
 	}
 
-	if glog.V(4) {
-		glog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
-	} else if glog.V(5) {
+	if klog.V(4) {
+		klog.V(4).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
+	} else if klog.V(5) {
 		var asStr []string
 		for _, o := range all {
 			asStr = append(asStr, fmt.Sprintf("%+v", o))
 		}
-		glog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
+		klog.V(5).Infof("GCEBetaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
 	}
 
 	return all, nil
@@ -7556,9 +7556,9 @@ func (g *GCEBetaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*beta.H
 
 // Insert HealthCheck with key of value obj.
 func (g *GCEBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *beta.HealthCheck) error {
-	glog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj)
+	klog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks")
@@ -7568,9 +7568,9 @@ func (g *GCEBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *be
 		Version:   meta.Version("beta"),
 		Service:   "HealthChecks",
 	}
-	glog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	obj.Name = key.Name
@@ -7579,20 +7579,20 @@ func (g *GCEBetaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *be
 
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}
 
 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
+	klog.V(4).Infof("GCEBetaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
 	return err
 }
 
 // Delete the HealthCheck referenced by key.
 func (g *GCEBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error {
-	glog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks")
@@ -7602,9 +7602,9 @@ func (g *GCEBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error {
 		Version:   meta.Version("beta"),
 		Service:   "HealthChecks",
 	}
-	glog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.Beta.HealthChecks.Delete(projectID, key.Name)
@@ -7613,21 +7613,21 @@ func (g *GCEBetaHealthChecks) Delete(ctx context.Context, key *meta.Key) error {
 
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 
 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(4).Infof("GCEBetaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 	return err
 }
 
 // Update is a method on GCEBetaHealthChecks.
 func (g *GCEBetaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *beta.HealthCheck) error {
-	glog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): called", ctx, key)
+	klog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "HealthChecks")
@@ -7637,21 +7637,21 @@ func (g *GCEBetaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *b
 		Version:   meta.Version("beta"),
 		Service:   "HealthChecks",
 	}
-	glog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.Beta.HealthChecks.Update(projectID, key.Name, arg0)
 	call.Context(ctx)
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}
 
 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err)
+	klog.V(4).Infof("GCEBetaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err)
 	return err
 }
 
@@ -7712,7 +7712,7 @@ type MockAlphaHealthChecks struct {
 func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) {
 	if m.GetHook != nil {
 		if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
 			return obj, err
 		}
 	}
@@ -7724,12 +7724,12 @@ func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.
 	defer m.Lock.Unlock()
 
 	if err, ok := m.GetError[*key]; ok {
-		glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
+		klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
 		return nil, err
 	}
 	if obj, ok := m.Objects[*key]; ok {
 		typedObj := obj.ToAlpha()
-		glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
+		klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
 		return typedObj, nil
 	}
 
@@ -7737,7 +7737,7 @@ func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.
 		Code:    http.StatusNotFound,
 		Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key),
 	}
-	glog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
+	klog.V(5).Infof("MockAlphaHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err)
 	return nil, err
 }
 
@@ -7745,7 +7745,7 @@ func (m *MockAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.
 func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) {
 	if m.ListHook != nil {
 		if intercept, objs, err := m.ListHook(ctx, fl, m); intercept {
-			glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
+			klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
 			return objs, err
 		}
 	}
@@ -7755,7 +7755,7 @@ func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alph
 
 	if m.ListError != nil {
 		err := *m.ListError
-		glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err)
+		klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err)
 		return nil, *m.ListError
 	}
 
@@ -7768,7 +7768,7 @@ func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alph
 		objs = append(objs, obj.ToAlpha())
 	}
 
-	glog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
+	klog.V(5).Infof("MockAlphaHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs))
 	return objs, nil
 }
 
@@ -7776,7 +7776,7 @@ func (m *MockAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alph
 func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error {
 	if m.InsertHook != nil {
 		if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
-			glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 			return err
 		}
 	}
@@ -7788,7 +7788,7 @@ func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *
 	defer m.Lock.Unlock()
 
 	if err, ok := m.InsertError[*key]; ok {
-		glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 		return err
 	}
 	if _, ok := m.Objects[*key]; ok {
@@ -7796,7 +7796,7 @@ func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *
 			Code:    http.StatusConflict,
 			Message: fmt.Sprintf("MockAlphaHealthChecks %v exists", key),
 		}
-		glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 		return err
 	}
 
@@ -7805,7 +7805,7 @@ func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *
 	obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "healthChecks", key)
 	m.Objects[*key] = &MockHealthChecksObj{obj}
-	glog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
+	klog.V(5).Infof("MockAlphaHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj)
 	return nil
 }
 
@@ -7813,7 +7813,7 @@ func (m *MockAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *
 func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error {
 	if m.DeleteHook != nil {
 		if intercept, err := m.DeleteHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+			klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 			return err
 		}
 	}
@@ -7825,7 +7825,7 @@ func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error
 	defer m.Lock.Unlock()
 
 	if err, ok := m.DeleteError[*key]; ok {
-		glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 	if _, ok := m.Objects[*key]; !ok {
@@ -7833,12 +7833,12 @@ func (m *MockAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error
 			Code:    http.StatusNotFound,
 			Message: fmt.Sprintf("MockAlphaHealthChecks %v not found", key),
 		}
-		glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 
 	delete(m.Objects, *key)
-	glog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = nil", ctx, key)
+	klog.V(5).Infof("MockAlphaHealthChecks.Delete(%v, %v) = nil", ctx, key)
 	return nil
 }
 
@@ -7862,10 +7862,10 @@ type GCEAlphaHealthChecks struct {
 
 // Get the HealthCheck named by key.
 func (g *GCEAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.HealthCheck, error) {
-	glog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return nil, fmt.Errorf("invalid GCE key (%#v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks")
@@ -7875,21 +7875,21 @@ func (g *GCEAlphaHealthChecks) Get(ctx context.Context, key *meta.Key) (*alpha.H
 		Version:   meta.Version("alpha"),
 		Service:   "HealthChecks",
 	}
-	glog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return nil, err
 	}
 	call := g.s.Alpha.HealthChecks.Get(projectID, key.Name)
 	call.Context(ctx)
 	v, err := call.Do()
-	glog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err)
+	klog.V(4).Infof("GCEAlphaHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err)
 	return v, err
 }
 
 // List all HealthCheck objects.
 func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha.HealthCheck, error) {
-	glog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v) called", ctx, fl)
+	klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v) called", ctx, fl)
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks")
 	rk := &RateLimitKey{
 		ProjectID: projectID,
@@ -7900,30 +7900,30 @@ func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
 		return nil, err
 	}
-	glog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
+	klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk)
 	call := g.s.Alpha.HealthChecks.List(projectID)
 	if fl != filter.None {
 		call.Filter(fl.String())
 	}
 	var all []*alpha.HealthCheck
 	f := func(l *alpha.HealthCheckList) error {
-		glog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l)
+		klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l)
 		all = append(all, l.Items...)
 		return nil
 	}
 	if err := call.Pages(ctx, f); err != nil {
-		glog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
+		klog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
 		return nil, err
 	}
 
-	if glog.V(4) {
-		glog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
-	} else if glog.V(5) {
+	if klog.V(4) {
+		klog.V(4).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
+	} else if klog.V(5) {
 		var asStr []string
 		for _, o := range all {
 			asStr = append(asStr, fmt.Sprintf("%+v", o))
 		}
-		glog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
+		klog.V(5).Infof("GCEAlphaHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
 	}
 
 	return all, nil
@@ -7931,9 +7931,9 @@ func (g *GCEAlphaHealthChecks) List(ctx context.Context, fl *filter.F) ([]*alpha
 
 // Insert HealthCheck with key of value obj.
 func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *alpha.HealthCheck) error {
-	glog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj)
+	klog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks")
@@ -7943,9 +7943,9 @@ func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *a
 		Version:   meta.Version("alpha"),
 		Service:   "HealthChecks",
 	}
-	glog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	obj.Name = key.Name
@@ -7954,20 +7954,20 @@ func (g *GCEAlphaHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *a
 
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}
 
 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
+	klog.V(4).Infof("GCEAlphaHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
 	return err
 }
 
 // Delete the HealthCheck referenced by key.
func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") @@ -7977,9 +7977,9 @@ func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error Version: meta.Version("alpha"), Service: "HealthChecks", } - glog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.HealthChecks.Delete(projectID, key.Name) @@ -7988,21 +7988,21 @@ func (g *GCEAlphaHealthChecks) Delete(ctx context.Context, key *meta.Key) error op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEAlphaHealthChecks. func (g *GCEAlphaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *alpha.HealthCheck) error { - glog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "HealthChecks") @@ -8012,21 +8012,21 @@ func (g *GCEAlphaHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 * Version: meta.Version("alpha"), Service: "HealthChecks", } - glog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.HealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaHealthChecks.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -8087,7 +8087,7 @@ type MockHttpHealthChecks struct { func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpHealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -8099,12 +8099,12 @@ func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -8112,7 +8112,7 @@ func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http Code: http.StatusNotFound, Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key), } - glog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -8120,7 +8120,7 @@ func (m *MockHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -8130,7 +8130,7 @@ func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -8143,7 +8143,7 @@ func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockHttpHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -8151,7 +8151,7 @@ func (m *MockHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -8163,7 +8163,7 @@ func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -8171,7 +8171,7 @@ func (m *MockHttpHealthChecks) Insert(ctx 
context.Context, key *meta.Key, obj *g Code: http.StatusConflict, Message: fmt.Sprintf("MockHttpHealthChecks %v exists", key), } - glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -8180,7 +8180,7 @@ func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g obj.SelfLink = SelfLink(meta.VersionGA, projectID, "httpHealthChecks", key) m.Objects[*key] = &MockHttpHealthChecksObj{obj} - glog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockHttpHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -8188,7 +8188,7 @@ func (m *MockHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g func (m *MockHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -8200,7 +8200,7 @@ func (m *MockHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -8208,12 +8208,12 @@ func (m *MockHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockHttpHealthChecks %v not found", key), } - glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockHttpHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -8237,10 +8237,10 @@ type GCEHttpHealthChecks struct { // Get the HttpHealthCheck named by key. 
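Aside from the mechanical glog-to-klog swap, these hunks show the shape of the generated mocks: every method first consults an optional hook (GetHook, ListHook, InsertHook, DeleteHook) and only then falls back to the in-memory Objects map, logging the outcome at V(5) either way. Purely as a sketch of how a test might use this, where the hook signature is read off the generated Get above but the import paths and surrounding scaffolding are assumptions, not part of this patch:

package example

import (
	"context"
	"fmt"

	ga "google.golang.org/api/compute/v1" // same alias gen.go uses; path assumed

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud"      // assumed
	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" // assumed
)

// failOneKey makes Get fail for a single health check; every other call
// falls through (intercept == false) to the mock's in-memory map.
func failOneKey(m *cloud.MockHttpHealthChecks) {
	m.GetHook = func(ctx context.Context, key *meta.Key, m *cloud.MockHttpHealthChecks) (bool, *ga.HttpHealthCheck, error) {
		if key.Name == "flaky" {
			return true, nil, fmt.Errorf("injected failure for %v", key)
		}
		return false, nil, nil
	}
}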
func (g *GCEHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpHealthCheck, error) { - glog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") @@ -8250,21 +8250,21 @@ func (g *GCEHttpHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpH Version: meta.Version("ga"), Service: "HttpHealthChecks", } - glog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.HttpHealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEHttpHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all HttpHealthCheck objects. func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpHealthCheck, error) { - glog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, @@ -8275,30 +8275,30 @@ func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Htt if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.HttpHealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.HttpHealthCheck f := func(l *ga.HttpHealthCheckList) error { - glog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEHttpHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -8306,9 +8306,9 @@ func (g *GCEHttpHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Htt // Insert HttpHealthCheck with key of value obj. func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpHealthCheck) error { - glog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") @@ -8318,9 +8318,9 @@ func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga Version: meta.Version("ga"), Service: "HttpHealthChecks", } - glog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -8329,20 +8329,20 @@ func (g *GCEHttpHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEHttpHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the HttpHealthCheck referenced by key. 
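One detail worth noticing in the List hunks: the conversion keeps glog's boolean-style verbosity checks, which the vendored klog still supports (klog.V returns a Verbose value usable directly in an if). As written, though, the else-if klog.V(5) arm appears unreachable, since V(4) is already true whenever the verbosity is 5 or higher; the per-object dump would only run if the levels were checked most-verbose-first. A minimal sketch of that ordering, with illustrative names:

package example

import (
	"fmt"

	"k8s.io/klog"
)

// logListing checks the higher verbosity first so that -v=5 actually reaches
// the detailed dump; -v=4 still gets the cheap item count.
func logListing(all []interface{}) {
	if klog.V(5) {
		var asStr []string
		for _, o := range all {
			asStr = append(asStr, fmt.Sprintf("%+v", o)) // expensive: formats every object
		}
		klog.V(5).Infof("List = %v", asStr)
	} else if klog.V(4) {
		klog.V(4).Infof("List = [%d items]", len(all))
	}
}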
func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") @@ -8352,9 +8352,9 @@ func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "HttpHealthChecks", } - glog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HttpHealthChecks.Delete(projectID, key.Name) @@ -8363,21 +8363,21 @@ func (g *GCEHttpHealthChecks) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEHttpHealthChecks. func (g *GCEHttpHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpHealthCheck) error { - glog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpHealthChecks") @@ -8387,21 +8387,21 @@ func (g *GCEHttpHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *g Version: meta.Version("ga"), Service: "HttpHealthChecks", } - glog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HttpHealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpHealthChecks.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -8462,7 +8462,7 @@ type MockHttpsHealthChecks struct { func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpsHealthCheck, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -8474,12 +8474,12 @@ func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Htt defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -8487,7 +8487,7 @@ func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Htt Code: http.StatusNotFound, Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), } - glog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -8495,7 +8495,7 @@ func (m *MockHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Htt func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -8505,7 +8505,7 @@ func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.H if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -8518,7 +8518,7 @@ func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.H objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockHttpsHealthChecks.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -8526,7 +8526,7 @@ func (m *MockHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.H func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -8538,7 +8538,7 @@ func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -8546,7 +8546,7 @@ func (m 
*MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * Code: http.StatusConflict, Message: fmt.Sprintf("MockHttpsHealthChecks %v exists", key), } - glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -8555,7 +8555,7 @@ func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * obj.SelfLink = SelfLink(meta.VersionGA, projectID, "httpsHealthChecks", key) m.Objects[*key] = &MockHttpsHealthChecksObj{obj} - glog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockHttpsHealthChecks.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -8563,7 +8563,7 @@ func (m *MockHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj * func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -8575,7 +8575,7 @@ func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -8583,12 +8583,12 @@ func (m *MockHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockHttpsHealthChecks %v not found", key), } - glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockHttpsHealthChecks.Delete(%v, %v) = nil", ctx, key) return nil } @@ -8612,10 +8612,10 @@ type GCEHttpsHealthChecks struct { // Get the HttpsHealthCheck named by key. 
func (g *GCEHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.HttpsHealthCheck, error) { - glog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpsHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpsHealthChecks.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") @@ -8625,21 +8625,21 @@ func (g *GCEHttpsHealthChecks) Get(ctx context.Context, key *meta.Key) (*ga.Http Version: meta.Version("ga"), Service: "HttpsHealthChecks", } - glog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.HttpsHealthChecks.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all HttpsHealthCheck objects. func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.HttpsHealthCheck, error) { - glog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") rk := &RateLimitKey{ ProjectID: projectID, @@ -8650,30 +8650,30 @@ func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.HttpsHealthChecks.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.HttpsHealthCheck f := func(l *ga.HttpsHealthCheckList) error { - glog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEHttpsHealthChecks.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -8681,9 +8681,9 @@ func (g *GCEHttpsHealthChecks) List(ctx context.Context, fl *filter.F) ([]*ga.Ht // Insert HttpsHealthCheck with key of value obj. func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *ga.HttpsHealthCheck) error { - glog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") @@ -8693,9 +8693,9 @@ func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g Version: meta.Version("ga"), Service: "HttpsHealthChecks", } - glog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -8704,20 +8704,20 @@ func (g *GCEHttpsHealthChecks) Insert(ctx context.Context, key *meta.Key, obj *g op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the HttpsHealthCheck referenced by key. 
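The generated code applies one logging convention uniformly, and the conversion preserves it: V(5) for call tracing ("called", projectID and rate-limit key values, per-page callbacks), V(4) for results, errors, and rate-limiter rejections, and V(2) for caller bugs such as an invalid key. klog keeps glog's flag semantics, so all of these are selected with the standard -v flag; a self-contained sketch with illustrative messages:

package example

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog registers its flags on the FlagSet you hand it; nil means
	// flag.CommandLine, so the usual -v / -logtostderr flags appear there.
	klog.InitFlags(nil)
	flag.Parse()

	klog.V(2).Infof("invalid input")          // visible at -v=2 and above
	klog.V(4).Infof("call result or error")   // visible at -v=4 and above
	klog.V(5).Infof("entry/argument tracing") // visible at -v=5 and above
}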
func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpsHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpsHealthChecks.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") @@ -8727,9 +8727,9 @@ func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error Version: meta.Version("ga"), Service: "HttpsHealthChecks", } - glog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HttpsHealthChecks.Delete(projectID, key.Name) @@ -8738,21 +8738,21 @@ func (g *GCEHttpsHealthChecks) Delete(ctx context.Context, key *meta.Key) error op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEHttpsHealthChecks. func (g *GCEHttpsHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 *ga.HttpsHealthCheck) error { - glog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "HttpsHealthChecks") @@ -8762,21 +8762,21 @@ func (g *GCEHttpsHealthChecks) Update(ctx context.Context, key *meta.Key, arg0 * Version: meta.Version("ga"), Service: "HttpsHealthChecks", } - glog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.HttpsHealthChecks.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEHttpsHealthChecks.Update(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -8843,7 +8843,7 @@ type MockInstanceGroups struct { func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.InstanceGroup, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -8855,12 +8855,12 @@ func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instan defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -8868,7 +8868,7 @@ func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instan Code: http.StatusNotFound, Message: fmt.Sprintf("MockInstanceGroups %v not found", key), } - glog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -8876,7 +8876,7 @@ func (m *MockInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instan func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -8886,7 +8886,7 @@ func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -8902,7 +8902,7 @@ func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockInstanceGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -8910,7 +8910,7 @@ func (m *MockInstanceGroups) List(ctx context.Context, zone string, fl *filter.F func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -8922,7 +8922,7 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. 
defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -8930,7 +8930,7 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. Code: http.StatusConflict, Message: fmt.Sprintf("MockInstanceGroups %v exists", key), } - glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -8939,7 +8939,7 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. obj.SelfLink = SelfLink(meta.VersionGA, projectID, "instanceGroups", key) m.Objects[*key] = &MockInstanceGroupsObj{obj} - glog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockInstanceGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -8947,7 +8947,7 @@ func (m *MockInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga. func (m *MockInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -8959,7 +8959,7 @@ func (m *MockInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -8967,12 +8967,12 @@ func (m *MockInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockInstanceGroups %v not found", key), } - glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockInstanceGroups.Delete(%v, %v) = nil", ctx, key) return nil } @@ -9020,10 +9020,10 @@ type GCEInstanceGroups struct { // Get the InstanceGroup named by key. 
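Every GCE-backed method above runs the same preamble before touching the API: resolve a projectID via ProjectRouter, build a RateLimitKey, and ask RateLimiter.Accept for permission, logging a V(4) line when Accept rejects. The interface is small enough that a sketch conveys it; everything below except the Accept signature and the ProjectID/Version/Service fields visible in the hunks is an assumption (the real key presumably also identifies the operation, but that field falls outside these hunks):

package example

import (
	"context"

	"golang.org/x/time/rate" // assumed helper; any token-bucket limiter works
)

// RateLimitKey mirrors the fields visible in the generated code.
type RateLimitKey struct {
	ProjectID string
	Version   string
	Service   string
}

// perServiceLimiter is a hypothetical Accept implementation: one token
// bucket per service, blocking until a token arrives or ctx is cancelled.
type perServiceLimiter struct {
	buckets map[string]*rate.Limiter
}

func (l *perServiceLimiter) Accept(ctx context.Context, key *RateLimitKey) error {
	b, ok := l.buckets[key.Service]
	if !ok {
		return nil // no limit configured for this service
	}
	return b.Wait(ctx) // returns ctx.Err() on cancellation
}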
func (g *GCEInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.InstanceGroup, error) { - glog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9033,21 +9033,21 @@ func (g *GCEInstanceGroups) Get(ctx context.Context, key *meta.Key) (*ga.Instanc Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.InstanceGroups.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEInstanceGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEInstanceGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all InstanceGroup objects. func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.InstanceGroup, error) { - glog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") rk := &RateLimitKey{ ProjectID: projectID, @@ -9058,30 +9058,30 @@ func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.GA.InstanceGroups.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.InstanceGroup f := func(l *ga.InstanceGroupList) error { - glog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEInstanceGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEInstanceGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -9089,9 +9089,9 @@ func (g *GCEInstanceGroups) List(ctx context.Context, zone string, fl *filter.F) // Insert InstanceGroup with key of value obj. func (g *GCEInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.InstanceGroup) error { - glog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9101,9 +9101,9 @@ func (g *GCEInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.I Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -9112,20 +9112,20 @@ func (g *GCEInstanceGroups) Insert(ctx context.Context, key *meta.Key, obj *ga.I op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEInstanceGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the InstanceGroup referenced by key. 
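The List methods also show the package's pagination idiom: build the list call, apply a filter unless it is filter.None, then let the compute client's Pages drive a callback that appends each page's Items, with a V(5) line per page and a V(4) line on failure. The same shape, sketched against the public compute API (the ga alias matches gen.go; the import path is assumed):

package example

import (
	"context"

	ga "google.golang.org/api/compute/v1" // path assumed; alias as in gen.go
)

// listAllInstanceGroups accumulates every page of a zonal InstanceGroups
// listing, mirroring the generated List methods above.
func listAllInstanceGroups(ctx context.Context, svc *ga.Service, projectID, zone string) ([]*ga.InstanceGroup, error) {
	var all []*ga.InstanceGroup
	call := svc.InstanceGroups.List(projectID, zone)
	f := func(l *ga.InstanceGroupList) error {
		all = append(all, l.Items...) // one callback per page
		return nil
	}
	if err := call.Pages(ctx, f); err != nil {
		return nil, err
	}
	return all, nil
}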
func (g *GCEInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9135,9 +9135,9 @@ func (g *GCEInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.InstanceGroups.Delete(projectID, key.Zone, key.Name) @@ -9145,21 +9145,21 @@ func (g *GCEInstanceGroups) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.Delete(%v, %v) = %v", ctx, key, err) return err } // AddInstances is a method on GCEInstanceGroups. func (g *GCEInstanceGroups) AddInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsAddInstancesRequest) error { - glog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9169,30 +9169,30 @@ func (g *GCEInstanceGroups) AddInstances(ctx context.Context, key *meta.Key, arg Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.InstanceGroups.AddInstances(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.AddInstances(%v, %v, ...) = %+v", ctx, key, err) return err } // ListInstances is a method on GCEInstanceGroups. func (g *GCEInstanceGroups) ListInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsListInstancesRequest, fl *filter.F) ([]*ga.InstanceWithNamedPorts, error) { - glog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9202,41 +9202,41 @@ func (g *GCEInstanceGroups) ListInstances(ctx context.Context, key *meta.Key, ar Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.InstanceGroups.ListInstances(projectID, key.Zone, key.Name, arg0) var all []*ga.InstanceWithNamedPorts f := func(l *ga.InstanceGroupsListInstances) error { - glog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): page %+v", ctx, key, l) + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...): page %+v", ctx, key, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, nil, err) + klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + klog.V(5).Infof("GCEInstanceGroups.ListInstances(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) } return all, nil } // RemoveInstances is a method on GCEInstanceGroups. 
func (g *GCEInstanceGroups) RemoveInstances(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsRemoveInstancesRequest) error { - glog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9246,30 +9246,30 @@ func (g *GCEInstanceGroups) RemoveInstances(ctx context.Context, key *meta.Key, Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.InstanceGroups.RemoveInstances(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.RemoveInstances(%v, %v, ...) = %+v", ctx, key, err) return err } // SetNamedPorts is a method on GCEInstanceGroups. func (g *GCEInstanceGroups) SetNamedPorts(ctx context.Context, key *meta.Key, arg0 *ga.InstanceGroupsSetNamedPortsRequest) error { - glog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "InstanceGroups") @@ -9279,21 +9279,21 @@ func (g *GCEInstanceGroups) SetNamedPorts(ctx context.Context, key *meta.Key, ar Version: meta.Version("ga"), Service: "InstanceGroups", } - glog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.InstanceGroups.SetNamedPorts(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEInstanceGroups.SetNamedPorts(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -9356,7 +9356,7 @@ type MockInstances struct { func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -9368,12 +9368,12 @@ func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, e defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -9381,7 +9381,7 @@ func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, e Code: http.StatusNotFound, Message: fmt.Sprintf("MockInstances %v not found", key), } - glog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -9389,7 +9389,7 @@ func (m *MockInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, e func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -9399,7 +9399,7 @@ func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([] if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -9415,7 +9415,7 @@ func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([] objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -9423,7 +9423,7 @@ func (m *MockInstances) List(ctx context.Context, zone string, fl *filter.F) ([] func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instance) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -9435,7 +9435,7 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, 
obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -9443,7 +9443,7 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta Code: http.StatusConflict, Message: fmt.Sprintf("MockInstances %v exists", key), } - glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -9452,7 +9452,7 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta obj.SelfLink = SelfLink(meta.VersionGA, projectID, "instances", key) m.Objects[*key] = &MockInstancesObj{obj} - glog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -9460,7 +9460,7 @@ func (m *MockInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Insta func (m *MockInstances) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -9472,7 +9472,7 @@ func (m *MockInstances) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -9480,12 +9480,12 @@ func (m *MockInstances) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockInstances %v not found", key), } - glog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockInstances.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockInstances.Delete(%v, %v) = nil", ctx, key) return nil } @@ -9517,10 +9517,10 @@ type GCEInstances struct { // Get the Instance named by key. 
func (g *GCEInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, error) { - glog.V(5).Infof("GCEInstances.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEInstances.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") @@ -9530,21 +9530,21 @@ func (g *GCEInstances) Get(ctx context.Context, key *meta.Key) (*ga.Instance, er Version: meta.Version("ga"), Service: "Instances", } - glog.V(5).Infof("GCEInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Instances.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Instance objects. func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*ga.Instance, error) { - glog.V(5).Infof("GCEInstances.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEInstances.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances") rk := &RateLimitKey{ ProjectID: projectID, @@ -9555,30 +9555,30 @@ func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]* if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.GA.Instances.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Instance f := func(l *ga.InstanceList) error { - glog.V(5).Infof("GCEInstances.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEInstances.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -9586,9 +9586,9 @@ func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]* // Insert Instance with key of value obj. 
@@ -9586,9 +9586,9 @@ func (g *GCEInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*

 // Insert Instance with key of value obj.
 func (g *GCEInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instance) error {
-	glog.V(5).Infof("GCEInstances.Insert(%v, %v, %+v): called", ctx, key, obj)
+	klog.V(5).Infof("GCEInstances.Insert(%v, %v, %+v): called", ctx, key, obj)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances")
@@ -9598,9 +9598,9 @@ func (g *GCEInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instan
 		Version:   meta.Version("ga"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	obj.Name = key.Name
@@ -9609,20 +9609,20 @@ func (g *GCEInstances) Insert(ctx context.Context, key *meta.Key, obj *ga.Instan
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEInstances.Insert(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEInstances.Insert(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
+	klog.V(4).Infof("GCEInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
 	return err
 }

 // Delete the Instance referenced by key.
 func (g *GCEInstances) Delete(ctx context.Context, key *meta.Key) error {
-	glog.V(5).Infof("GCEInstances.Delete(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEInstances.Delete(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances")
@@ -9632,9 +9632,9 @@ func (g *GCEInstances) Delete(ctx context.Context, key *meta.Key) error {
 		Version:   meta.Version("ga"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.GA.Instances.Delete(projectID, key.Zone, key.Name)
@@ -9642,21 +9642,21 @@ func (g *GCEInstances) Delete(ctx context.Context, key *meta.Key) error {
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(4).Infof("GCEInstances.Delete(%v, %v) = %v", ctx, key, err)
 	return err
 }

 // AttachDisk is a method on GCEInstances.
 func (g *GCEInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *ga.AttachedDisk) error {
-	glog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): called", ctx, key)
+	klog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances")
@@ -9666,30 +9666,30 @@ func (g *GCEInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *ga.A
 		Version:   meta.Version("ga"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.GA.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0)
 	call.Context(ctx)
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err)
+	klog.V(4).Infof("GCEInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err)
 	return err
 }

 // DetachDisk is a method on GCEInstances.
 func (g *GCEInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error {
-	glog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): called", ctx, key)
+	klog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Instances")
@@ -9699,21 +9699,21 @@ func (g *GCEInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 strin
 		Version:   meta.Version("ga"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.GA.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0)
 	call.Context(ctx)
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err)
+	klog.V(4).Infof("GCEInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err)
 	return err
 }
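Callers address instances through a meta.Key rather than a raw URL; the generated wrapper resolves the project, applies the RateLimitKey, issues the call, and blocks on WaitForCompletion. A hedged usage sketch (c stands for any implementation of this package's Cloud interface, and the disk source path is a placeholder):

	key := meta.ZonalKey("vm-1", "us-central1-b")
	disk := &ga.AttachedDisk{Source: "projects/my-proj/zones/us-central1-b/disks/data-1"}
	if err := c.Instances().AttachDisk(ctx, key, disk); err != nil {
		klog.Errorf("AttachDisk(%v): %v", key, err) // surfaced from call.Do() or WaitForCompletion above
	}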
= %+v", ctx, key, err) return err } @@ -9778,7 +9778,7 @@ type MockBetaInstances struct { func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instance, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -9790,12 +9790,12 @@ func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Insta defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -9803,7 +9803,7 @@ func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Insta Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaInstances %v not found", key), } - glog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -9811,7 +9811,7 @@ func (m *MockBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Insta func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -9821,7 +9821,7 @@ func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -9837,7 +9837,7 @@ func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockBetaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -9845,7 +9845,7 @@ func (m *MockBetaInstances) List(ctx context.Context, zone string, fl *filter.F) func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta.Instance) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -9857,7 +9857,7 @@ func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -9865,7 +9865,7 @@ func (m *MockBetaInstances) Insert(ctx 
context.Context, key *meta.Key, obj *beta Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaInstances %v exists", key), } - glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -9874,7 +9874,7 @@ func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "instances", key) m.Objects[*key] = &MockInstancesObj{obj} - glog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -9882,7 +9882,7 @@ func (m *MockBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta func (m *MockBetaInstances) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -9894,7 +9894,7 @@ func (m *MockBetaInstances) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -9902,12 +9902,12 @@ func (m *MockBetaInstances) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaInstances %v not found", key), } - glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaInstances.Delete(%v, %v) = nil", ctx, key) return nil } @@ -9947,10 +9947,10 @@ type GCEBetaInstances struct { // Get the Instance named by key. 
 func (g *GCEBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instance, error) {
-	glog.V(5).Infof("GCEBetaInstances.Get(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEBetaInstances.Get(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return nil, fmt.Errorf("invalid GCE key (%#v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances")
@@ -9960,21 +9960,21 @@ func (g *GCEBetaInstances) Get(ctx context.Context, key *meta.Key) (*beta.Instan
 		Version:   meta.Version("beta"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEBetaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return nil, err
 	}
 	call := g.s.Beta.Instances.Get(projectID, key.Zone, key.Name)
 	call.Context(ctx)
 	v, err := call.Do()
-	glog.V(4).Infof("GCEBetaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err)
+	klog.V(4).Infof("GCEBetaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err)
 	return v, err
 }

 // List all Instance objects.
 func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.Instance, error) {
-	glog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v) called", ctx, zone, fl)
+	klog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v) called", ctx, zone, fl)
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances")
 	rk := &RateLimitKey{
 		ProjectID: projectID,
@@ -9985,30 +9985,30 @@ func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
 		return nil, err
 	}
-	glog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk)
+	klog.V(5).Infof("GCEBetaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk)
 	call := g.s.Beta.Instances.List(projectID, zone)
 	if fl != filter.None {
 		call.Filter(fl.String())
 	}
 	var all []*beta.Instance
 	f := func(l *beta.InstanceList) error {
-		glog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v): page %+v", ctx, fl, l)
+		klog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v): page %+v", ctx, fl, l)
 		all = append(all, l.Items...)
 		return nil
 	}
 	if err := call.Pages(ctx, f); err != nil {
-		glog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
+		klog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
 		return nil, err
 	}

-	if glog.V(4) {
-		glog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
-	} else if glog.V(5) {
+	if klog.V(4) {
+		klog.V(4).Infof("GCEBetaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
+	} else if klog.V(5) {
 		var asStr []string
 		for _, o := range all {
 			asStr = append(asStr, fmt.Sprintf("%+v", o))
 		}
-		glog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
+		klog.V(5).Infof("GCEBetaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
 	}

 	return all, nil
@@ -10016,9 +10016,9 @@ func (g *GCEBetaInstances) List(ctx context.Context, zone string, fl *filter.F)

 // Insert Instance with key of value obj.
 func (g *GCEBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta.Instance) error {
-	glog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, %+v): called", ctx, key, obj)
+	klog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, %+v): called", ctx, key, obj)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances")
@@ -10028,9 +10028,9 @@ func (g *GCEBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta.
 		Version:   meta.Version("beta"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	obj.Name = key.Name
@@ -10039,20 +10039,20 @@ func (g *GCEBetaInstances) Insert(ctx context.Context, key *meta.Key, obj *beta.
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
+	klog.V(4).Infof("GCEBetaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
 	return err
 }

 // Delete the Instance referenced by key.
 func (g *GCEBetaInstances) Delete(ctx context.Context, key *meta.Key) error {
-	glog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances")
@@ -10062,9 +10062,9 @@ func (g *GCEBetaInstances) Delete(ctx context.Context, key *meta.Ke
 		Version:   meta.Version("beta"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.Beta.Instances.Delete(projectID, key.Zone, key.Name)
@@ -10072,21 +10072,21 @@ func (g *GCEBetaInstances) Delete(ctx context.Context, key *meta.Key) error {
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(4).Infof("GCEBetaInstances.Delete(%v, %v) = %v", ctx, key, err)
 	return err
 }

 // AttachDisk is a method on GCEBetaInstances.
 func (g *GCEBetaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *beta.AttachedDisk) error {
-	glog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): called", ctx, key)
+	klog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances")
@@ -10096,30 +10096,30 @@ func (g *GCEBetaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *
 		Version:   meta.Version("beta"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.Beta.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0)
 	call.Context(ctx)
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err)
+	klog.V(4).Infof("GCEBetaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err)
 	return err
 }

 // DetachDisk is a method on GCEBetaInstances.
 func (g *GCEBetaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error {
-	glog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): called", ctx, key)
+	klog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances")
@@ -10129,30 +10129,30 @@ func (g *GCEBetaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 s
 		Version:   meta.Version("beta"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.Beta.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0)
 	call.Context(ctx)
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err)
+	klog.V(4).Infof("GCEBetaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err)
 	return err
 }

 // UpdateNetworkInterface is a method on GCEBetaInstances.
 func (g *GCEBetaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *beta.NetworkInterface) error {
-	glog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key)
+	klog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "Instances")
@@ -10162,21 +10162,21 @@ func (g *GCEBetaInstances) UpdateNetworkInterface(ctx context.Context, key *meta
 		Version:   meta.Version("beta"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.Beta.Instances.UpdateNetworkInterface(projectID, key.Zone, key.Name, arg0, arg1)
 	call.Context(ctx)
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err)
+	klog.V(4).Infof("GCEBetaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err)
 	return err
 }
@@ -10241,7 +10241,7 @@ type MockAlphaInstances struct {
 func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Instance, error) {
 	if m.GetHook != nil {
 		if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
 			return obj, err
 		}
 	}
@@ -10253,12 +10253,12 @@ func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Ins
 	defer m.Lock.Unlock()

 	if err, ok := m.GetError[*key]; ok {
-		glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err)
+		klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err)
 		return nil, err
 	}
 	if obj, ok := m.Objects[*key]; ok {
 		typedObj := obj.ToAlpha()
-		glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
+		klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
 		return typedObj, nil
 	}
@@ -10266,7 +10266,7 @@ func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Ins
 		Code:    http.StatusNotFound,
 		Message: fmt.Sprintf("MockAlphaInstances %v not found", key),
 	}
-	glog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err)
+	klog.V(5).Infof("MockAlphaInstances.Get(%v, %s) = nil, %v", ctx, key, err)
 	return nil, err
 }
@@ -10274,7 +10274,7 @@ func (m *MockAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Ins
 func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) {
 	if m.ListHook != nil {
 		if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept {
-			glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err)
+			klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err)
 			return objs, err
 		}
 	}
@@ -10284,7 +10284,7 @@ func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F

 	if m.ListError != nil {
 		err := *m.ListError
-		glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err)
+		klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err)
 		return nil, *m.ListError
 	}
@@ -10300,7 +10300,7 @@ func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F
 		objs = append(objs, obj.ToAlpha())
 	}

-	glog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs))
+	klog.V(5).Infof("MockAlphaInstances.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs))
 	return objs, nil
 }
@@ -10308,7 +10308,7 @@ func (m *MockAlphaInstances) List(ctx context.Context, zone string, fl *filter.F
 func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alpha.Instance) error {
 	if m.InsertHook != nil {
 		if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
-			glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 			return err
 		}
 	}
@@ -10320,7 +10320,7 @@ func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alp
 	defer m.Lock.Unlock()

 	if err, ok := m.InsertError[*key]; ok {
-		glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 		return err
 	}
 	if _, ok := m.Objects[*key]; ok {
@@ -10328,7 +10328,7 @@ func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alp
 		Code:    http.StatusConflict,
 		Message: fmt.Sprintf("MockAlphaInstances %v exists", key),
 	}
-	glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+	klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 	return err
 }
@@ -10337,7 +10337,7 @@ func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alp
 	obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "instances", key)
 	m.Objects[*key] = &MockInstancesObj{obj}
-	glog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj)
+	klog.V(5).Infof("MockAlphaInstances.Insert(%v, %v, %+v) = nil", ctx, key, obj)
 	return nil
 }
@@ -10345,7 +10345,7 @@ func (m *MockAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alp
 func (m *MockAlphaInstances) Delete(ctx context.Context, key *meta.Key) error {
 	if m.DeleteHook != nil {
 		if intercept, err := m.DeleteHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err)
+			klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err)
 			return err
 		}
 	}
@@ -10357,7 +10357,7 @@ func (m *MockAlphaInstances) Delete(ctx context.Context, key *meta.Key) error {
 	defer m.Lock.Unlock()

 	if err, ok := m.DeleteError[*key]; ok {
-		glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 	if _, ok := m.Objects[*key]; !ok {
@@ -10365,12 +10365,12 @@ func (m *MockAlphaInstances) Delete(ctx context.Context, key *meta.Key) error {
 		Code:    http.StatusNotFound,
 		Message: fmt.Sprintf("MockAlphaInstances %v not found", key),
 	}
-	glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = %v", ctx, key, err)
 	return err
 }

 	delete(m.Objects, *key)
-	glog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = nil", ctx, key)
+	klog.V(5).Infof("MockAlphaInstances.Delete(%v, %v) = nil", ctx, key)
 	return nil
 }
@@ -10410,10 +10410,10 @@ type GCEAlphaInstances struct {
 // Get the Instance named by key.
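Note that the GA, Beta, and Alpha instance mocks all store *MockInstancesObj and convert on read (ToGA/ToBeta/ToAlpha), so a test can seed one map and see a consistent view across API versions. A sketch, assuming the NewMock* constructors defined alongside these types, which take a ProjectRouter and the shared object map:

	objs := map[meta.Key]*MockInstancesObj{}
	gaMock := NewMockInstances(pr, objs)
	betaMock := NewMockBetaInstances(pr, objs)

	_ = gaMock.Insert(ctx, meta.ZonalKey("vm-1", "us-central1-b"), &ga.Instance{})
	got, _ := betaMock.Get(ctx, meta.ZonalKey("vm-1", "us-central1-b")) // same object, converted via ToBeta()
	_ = got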
 func (g *GCEAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Instance, error) {
-	glog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaInstances.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return nil, fmt.Errorf("invalid GCE key (%#v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances")
@@ -10423,21 +10423,21 @@ func (g *GCEAlphaInstances) Get(ctx context.Context, key *meta.Key) (*alpha.Inst
 		Version:   meta.Version("alpha"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaInstances.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaInstances.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return nil, err
 	}
 	call := g.s.Alpha.Instances.Get(projectID, key.Zone, key.Name)
 	call.Context(ctx)
 	v, err := call.Do()
-	glog.V(4).Infof("GCEAlphaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err)
+	klog.V(4).Infof("GCEAlphaInstances.Get(%v, %v) = %+v, %v", ctx, key, v, err)
 	return v, err
 }

 // List all Instance objects.
 func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.Instance, error) {
-	glog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v) called", ctx, zone, fl)
+	klog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v) called", ctx, zone, fl)
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances")
 	rk := &RateLimitKey{
 		ProjectID: projectID,
@@ -10448,30 +10448,30 @@ func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
 		return nil, err
 	}
-	glog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk)
+	klog.V(5).Infof("GCEAlphaInstances.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk)
 	call := g.s.Alpha.Instances.List(projectID, zone)
 	if fl != filter.None {
 		call.Filter(fl.String())
 	}
 	var all []*alpha.Instance
 	f := func(l *alpha.InstanceList) error {
-		glog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v): page %+v", ctx, fl, l)
+		klog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v): page %+v", ctx, fl, l)
 		all = append(all, l.Items...)
 		return nil
 	}
 	if err := call.Pages(ctx, f); err != nil {
-		glog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
+		klog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
 		return nil, err
 	}

-	if glog.V(4) {
-		glog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
-	} else if glog.V(5) {
+	if klog.V(4) {
+		klog.V(4).Infof("GCEAlphaInstances.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
+	} else if klog.V(5) {
 		var asStr []string
 		for _, o := range all {
 			asStr = append(asStr, fmt.Sprintf("%+v", o))
 		}
-		glog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
+		klog.V(5).Infof("GCEAlphaInstances.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
 	}

 	return all, nil
@@ -10479,9 +10479,9 @@ func (g *GCEAlphaInstances) List(ctx context.Context, zone string, fl *filter.F)

 // Insert Instance with key of value obj.
 func (g *GCEAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alpha.Instance) error {
-	glog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, %+v): called", ctx, key, obj)
+	klog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, %+v): called", ctx, key, obj)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaInstances.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances")
@@ -10491,9 +10491,9 @@ func (g *GCEAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alph
 		Version:   meta.Version("alpha"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaInstances.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	obj.Name = key.Name
@@ -10502,20 +10502,20 @@ func (g *GCEAlphaInstances) Insert(ctx context.Context, key *meta.Key, obj *alph
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
+	klog.V(4).Infof("GCEAlphaInstances.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
 	return err
 }

 // Delete the Instance referenced by key.
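Every List method above only sends a server-side filter when fl != filter.None, serializing it with fl.String(); filter.None therefore means "list everything", and anything else narrows the result on the API side. A hedged sketch using this package's filter helpers (the Regexp constructor and the name field are the assumed parts):

	// Everything in the zone:
	all, err := c.AlphaInstances().List(ctx, "us-central1-b", filter.None)

	// Server-side name filter:
	nodes, err := c.AlphaInstances().List(ctx, "us-central1-b", filter.Regexp("name", "gke-.*"))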
 func (g *GCEAlphaInstances) Delete(ctx context.Context, key *meta.Key) error {
-	glog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaInstances.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances")
@@ -10525,9 +10525,9 @@ func (g *GCEAlphaInstances) Delete(ctx context.Context, key *meta.Key) error {
 		Version:   meta.Version("alpha"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaInstances.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.Alpha.Instances.Delete(projectID, key.Zone, key.Name)
@@ -10535,21 +10535,21 @@ func (g *GCEAlphaInstances) Delete(ctx context.Context, key *meta.Key) error {
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(4).Infof("GCEAlphaInstances.Delete(%v, %v) = %v", ctx, key, err)
 	return err
 }

 // AttachDisk is a method on GCEAlphaInstances.
 func (g *GCEAlphaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0 *alpha.AttachedDisk) error {
-	glog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): called", ctx, key)
+	klog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances")
@@ -10559,30 +10559,30 @@ func (g *GCEAlphaInstances) AttachDisk(ctx context.Context, key *meta.Key, arg0
 		Version:   meta.Version("alpha"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.Alpha.Instances.AttachDisk(projectID, key.Zone, key.Name, arg0)
 	call.Context(ctx)
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err)
+	klog.V(4).Infof("GCEAlphaInstances.AttachDisk(%v, %v, ...) = %+v", ctx, key, err)
 	return err
 }

 // DetachDisk is a method on GCEAlphaInstances.
 func (g *GCEAlphaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0 string) error {
-	glog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): called", ctx, key)
+	klog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances")
@@ -10592,30 +10592,30 @@ func (g *GCEAlphaInstances) DetachDisk(ctx context.Context, key *meta.Key, arg0
 		Version:   meta.Version("alpha"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.Alpha.Instances.DetachDisk(projectID, key.Zone, key.Name, arg0)
 	call.Context(ctx)
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err)
+	klog.V(4).Infof("GCEAlphaInstances.DetachDisk(%v, %v, ...) = %+v", ctx, key, err)
 	return err
 }

 // UpdateNetworkInterface is a method on GCEAlphaInstances.
 func (g *GCEAlphaInstances) UpdateNetworkInterface(ctx context.Context, key *meta.Key, arg0 string, arg1 *alpha.NetworkInterface) error {
-	glog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key)
+	klog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "Instances")
@@ -10625,21 +10625,21 @@ func (g *GCEAlphaInstances) UpdateNetworkInterface(ctx context.Context, key *met
 		Version:   meta.Version("alpha"),
 		Service:   "Instances",
 	}
-	glog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.Alpha.Instances.UpdateNetworkInterface(projectID, key.Zone, key.Name, arg0, arg1)
 	call.Context(ctx)
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err)
+	klog.V(4).Infof("GCEAlphaInstances.UpdateNetworkInterface(%v, %v, ...) = %+v", ctx, key, err)
 	return err
 }
@@ -10707,7 +10707,7 @@ type MockAlphaNetworkEndpointGroups struct {
 func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*alpha.NetworkEndpointGroup, error) {
 	if m.GetHook != nil {
 		if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err)
 			return obj, err
 		}
 	}
@@ -10719,12 +10719,12 @@ func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key)
 	defer m.Lock.Unlock()

 	if err, ok := m.GetError[*key]; ok {
-		glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err)
+		klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err)
 		return nil, err
 	}
 	if obj, ok := m.Objects[*key]; ok {
 		typedObj := obj.ToAlpha()
-		glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
+		klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj)
 		return typedObj, nil
 	}
@@ -10732,7 +10732,7 @@ func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key)
 		Code:    http.StatusNotFound,
 		Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key),
 	}
-	glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err)
+	klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err)
 	return nil, err
 }
@@ -10740,7 +10740,7 @@ func (m *MockAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key)
 func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) {
 	if m.ListHook != nil {
 		if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept {
-			glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err)
+			klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err)
 			return objs, err
 		}
 	}
@@ -10750,7 +10750,7 @@ func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string,

 	if m.ListError != nil {
 		err := *m.ListError
-		glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err)
+		klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err)
 		return nil, *m.ListError
 	}
@@ -10766,7 +10766,7 @@ func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string,
 		objs = append(objs, obj.ToAlpha())
 	}

-	glog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs))
+	klog.V(5).Infof("MockAlphaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs))
 	return objs, nil
 }
@@ -10774,7 +10774,7 @@ func (m *MockAlphaNetworkEndpointGroups) List(ctx context.Context, zone string,
 func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup) error {
 	if m.InsertHook != nil {
 		if intercept, err := m.InsertHook(ctx, key, obj, m); intercept {
-			glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+			klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 			return err
 		}
 	}
@@ -10786,7 +10786,7 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K
 	defer m.Lock.Unlock()

 	if err, ok := m.InsertError[*key]; ok {
-		glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+		klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 		return err
 	}
 	if _, ok := m.Objects[*key]; ok {
@@ -10794,7 +10794,7 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K
 		Code:    http.StatusConflict,
 		Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v exists", key),
 	}
-	glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
+	klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err)
 	return err
 }
@@ -10803,7 +10803,7 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K
 	obj.SelfLink = SelfLink(meta.VersionAlpha, projectID, "networkEndpointGroups", key)
 	m.Objects[*key] = &MockNetworkEndpointGroupsObj{obj}
-	glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj)
+	klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj)
 	return nil
 }
@@ -10811,7 +10811,7 @@ func (m *MockAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.K
 func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error {
 	if m.DeleteHook != nil {
 		if intercept, err := m.DeleteHook(ctx, key, m); intercept {
-			glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
+			klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
 			return err
 		}
 	}
@@ -10823,7 +10823,7 @@ func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.K
 	defer m.Lock.Unlock()

 	if err, ok := m.DeleteError[*key]; ok {
-		glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}
 	if _, ok := m.Objects[*key]; !ok {
@@ -10831,12 +10831,12 @@ func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.K
 		Code:    http.StatusNotFound,
 		Message: fmt.Sprintf("MockAlphaNetworkEndpointGroups %v not found", key),
 	}
-	glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
 	return err
 }

 	delete(m.Objects, *key)
-	glog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key)
+	klog.V(5).Infof("MockAlphaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key)
 	return nil
 }
@@ -10844,7 +10844,7 @@ func (m *MockAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.K
 func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) {
 	if m.AggregatedListHook != nil {
 		if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept {
-			glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
+			klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err)
 			return objs, err
 		}
 	}
@@ -10854,7 +10854,7 @@ func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl

 	if m.AggregatedListError != nil {
 		err := *m.AggregatedListError
-		glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err)
+		klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err)
 		return nil, err
 	}
@@ -10863,7 +10863,7 @@ func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl
 		res, err := ParseResourceURL(obj.ToAlpha().SelfLink)
 		location := res.Key.Zone
 		if err != nil {
-			glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err)
+			klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err)
 			return nil, err
 		}
 		if !fl.Match(obj.ToAlpha()) {
@@ -10871,7 +10871,7 @@ func (m *MockAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl
 		}
 		objs[location] = append(objs[location], obj.ToAlpha())
 	}
-	glog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs))
+	klog.V(5).Infof("MockAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs))
 	return objs, nil
 }
@@ -10911,10 +10911,10 @@ type GCEAlphaNetworkEndpointGroups struct {
 // Get the NetworkEndpointGroup named by key.
 func (g *GCEAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*alpha.NetworkEndpointGroup, error) {
-	glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return nil, fmt.Errorf("invalid GCE key (%#v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups")
@@ -10924,21 +10924,21 @@ func (g *GCEAlphaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key)
 		Version:   meta.Version("alpha"),
 		Service:   "NetworkEndpointGroups",
 	}
-	glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return nil, err
 	}
 	call := g.s.Alpha.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name)
 	call.Context(ctx)
 	v, err := call.Do()
-	glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err)
+	klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err)
 	return v, err
 }

 // List all NetworkEndpointGroup objects.
 func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*alpha.NetworkEndpointGroup, error) {
-	glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl)
+	klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl)
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups")
 	rk := &RateLimitKey{
 		ProjectID: projectID,
@@ -10949,30 +10949,30 @@ func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, f
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
 		return nil, err
 	}
-	glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk)
+	klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk)
 	call := g.s.Alpha.NetworkEndpointGroups.List(projectID, zone)
 	if fl != filter.None {
 		call.Filter(fl.String())
 	}
 	var all []*alpha.NetworkEndpointGroup
 	f := func(l *alpha.NetworkEndpointGroupList) error {
-		glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l)
+		klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l)
 		all = append(all, l.Items...)
 		return nil
 	}
 	if err := call.Pages(ctx, f); err != nil {
-		glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
+		klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err)
 		return nil, err
 	}

-	if glog.V(4) {
-		glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
-	} else if glog.V(5) {
+	if klog.V(4) {
+		klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil)
+	} else if klog.V(5) {
 		var asStr []string
 		for _, o := range all {
 			asStr = append(asStr, fmt.Sprintf("%+v", o))
 		}
-		glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
+		klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil)
 	}

 	return all, nil
@@ -10980,9 +10980,9 @@ func (g *GCEAlphaNetworkEndpointGroups) List(ctx context.Context, zone string, f

 // Insert NetworkEndpointGroup with key of value obj.
 func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *alpha.NetworkEndpointGroup) error {
-	glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj)
+	klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups")
@@ -10992,9 +10992,9 @@ func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke
 		Version:   meta.Version("alpha"),
 		Service:   "NetworkEndpointGroups",
 	}
-	glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	obj.Name = key.Name
@@ -11003,20 +11003,20 @@ func (g *GCEAlphaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
+	klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err)
 	return err
 }

 // Delete the NetworkEndpointGroup referenced by key.
 func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error {
-	glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key)
+	klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key)
 	if !key.Valid() {
-		glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
+		klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key)
 		return fmt.Errorf("invalid GCE key (%+v)", key)
 	}
 	projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups")
@@ -11026,9 +11026,9 @@ func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke
 		Version:   meta.Version("alpha"),
 		Service:   "NetworkEndpointGroups",
 	}
-	glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
+	klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk)
 	if err := g.s.RateLimiter.Accept(ctx, rk); err != nil {
-		glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err)
 		return err
 	}
 	call := g.s.Alpha.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name)
@@ -11036,18 +11036,18 @@ func (g *GCEAlphaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke
 	op, err := call.Do()
 	if err != nil {
-		glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
+		klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
 		return err
 	}

 	err = g.s.WaitForCompletion(ctx, op)
-	glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
+	klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err)
 	return err
 }

 // AggregatedList lists all resources of the given type across all locations.
func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*alpha.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") rk := &RateLimitKey{ @@ -11057,9 +11057,9 @@ func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) return nil, err } @@ -11072,33 +11072,33 @@ func (g *GCEAlphaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * all := map[string][]*alpha.NetworkEndpointGroup{} f := func(l *alpha.NetworkEndpointGroupAggregatedList) error { for k, v := range l.Items { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) all[k] = append(all[k], v.NetworkEndpointGroups...) } return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // AttachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. 
func (g *GCEAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsAttachEndpointsRequest) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -11108,30 +11108,30 @@ func (g *GCEAlphaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Conte Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } // DetachNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. 
func (g *GCEAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsDetachEndpointsRequest) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -11141,30 +11141,30 @@ func (g *GCEAlphaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Conte Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Alpha.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } // ListNetworkEndpoints is a method on GCEAlphaNetworkEndpointGroups. 
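Each of these wrappers validates its *meta.Key up front and logs rejects at V(2) before returning the invalid-key error. Keys carry a name plus an optional zone or region, and Valid() checks that the populated fields match the resource's scope. A short hedged illustration, assuming the meta package's usual constructors (ZonalKey and GlobalKey are my assumption here, not shown in this patch):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta"
)

func main() {
	zonal := meta.ZonalKey("neg-1", "us-central1-a") // name plus zone
	global := meta.GlobalKey("my-route")             // name only
	// Valid() requires the populated location fields to match the key's scope.
	fmt.Println(zonal.Valid(), global.Valid())
}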
func (g *GCEAlphaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *alpha.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*alpha.NetworkEndpointWithHealthStatus, error) { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "alpha", "NetworkEndpointGroups") @@ -11174,31 +11174,31 @@ func (g *GCEAlphaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context Version: meta.Version("alpha"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Alpha.NetworkEndpointGroups.ListNetworkEndpoints(projectID, key.Zone, key.Name, arg0) var all []*alpha.NetworkEndpointWithHealthStatus f := func(l *alpha.NetworkEndpointGroupsListNetworkEndpoints) error { - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + klog.V(5).Infof("GCEAlphaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) 
= %v, %v", ctx, key, asStr, nil) } return all, nil } @@ -11267,7 +11267,7 @@ type MockBetaNetworkEndpointGroups struct { func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*beta.NetworkEndpointGroup, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -11279,12 +11279,12 @@ func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -11292,7 +11292,7 @@ func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v not found", key), } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -11300,7 +11300,7 @@ func (m *MockBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.NetworkEndpointGroup, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) return objs, err } } @@ -11310,7 +11310,7 @@ func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, f if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) return nil, *m.ListError } @@ -11326,7 +11326,7 @@ func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, f objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) return objs, nil } @@ -11334,7 +11334,7 @@ func (m *MockBetaNetworkEndpointGroups) List(ctx context.Context, zone string, f func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -11346,7 +11346,7 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke defer m.Lock.Unlock() if 
err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -11354,7 +11354,7 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v exists", key), } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -11363,7 +11363,7 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "networkEndpointGroups", key) m.Objects[*key] = &MockNetworkEndpointGroupsObj{obj} - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -11371,7 +11371,7 @@ func (m *MockBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Ke func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -11383,7 +11383,7 @@ func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -11391,12 +11391,12 @@ func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaNetworkEndpointGroups %v not found", key), } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.Delete(%v, %v) = nil", ctx, key) return nil } @@ -11404,7 +11404,7 @@ func (m *MockBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Ke func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.NetworkEndpointGroup, error) { if m.AggregatedListHook != nil { if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -11414,7 +11414,7 @@ func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * if m.AggregatedListError != nil { err := *m.AggregatedListError - glog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) return nil, 
err } @@ -11423,7 +11423,7 @@ func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * res, err := ParseResourceURL(obj.ToBeta().SelfLink) location := res.Key.Zone if err != nil { - glog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) return nil, err } if !fl.Match(obj.ToBeta()) { @@ -11431,7 +11431,7 @@ func (m *MockBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl * } objs[location] = append(objs[location], obj.ToBeta()) } - glog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -11471,10 +11471,10 @@ type GCEBetaNetworkEndpointGroups struct { // Get the NetworkEndpointGroup named by key. func (g *GCEBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) (*beta.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11484,21 +11484,21 @@ func (g *GCEBetaNetworkEndpointGroups) Get(ctx context.Context, key *meta.Key) ( Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.NetworkEndpointGroups.Get(projectID, key.Zone, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all NetworkEndpointGroup objects. 
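All of the Mock types in these hunks share one template: an optional per-method hook may intercept the call; otherwise a per-key error map allows fault injection, and an in-memory Objects map behind a mutex serves the result, with every path logged at V(5). A stripped-down rendering of that shape (every name below is invented for the sketch, not a type from this package):

package main

import (
	"context"
	"fmt"
	"net/http"
	"sync"
)

type Object struct{ Name string }

// MockObjects mirrors the generated mocks' structure: hook first, then
// per-key error injection, then the in-memory store.
type MockObjects struct {
	Lock     sync.Mutex
	GetHook  func(ctx context.Context, key string, m *MockObjects) (bool, *Object, error)
	GetError map[string]error
	Objects  map[string]*Object
}

func (m *MockObjects) Get(ctx context.Context, key string) (*Object, error) {
	if m.GetHook != nil {
		if intercept, obj, err := m.GetHook(ctx, key, m); intercept {
			return obj, err
		}
	}
	m.Lock.Lock()
	defer m.Lock.Unlock()
	if err, ok := m.GetError[key]; ok {
		return nil, err
	}
	if obj, ok := m.Objects[key]; ok {
		return obj, nil
	}
	return nil, fmt.Errorf("HTTP %d: %q not found", http.StatusNotFound, key)
}

func main() {
	m := &MockObjects{Objects: map[string]*Object{"neg-1": {Name: "neg-1"}}}
	obj, err := m.Get(context.Background(), "neg-1")
	fmt.Println(obj.Name, err)
}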
func (g *GCEBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl *filter.F) ([]*beta.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v) called", ctx, zone, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") rk := &RateLimitKey{ ProjectID: projectID, @@ -11509,30 +11509,30 @@ func (g *GCEBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.Beta.NetworkEndpointGroups.List(projectID, zone) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.NetworkEndpointGroup f := func(l *beta.NetworkEndpointGroupList) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -11540,9 +11540,9 @@ func (g *GCEBetaNetworkEndpointGroups) List(ctx context.Context, zone string, fl // Insert NetworkEndpointGroup with key of value obj. 
func (g *GCEBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key, obj *beta.NetworkEndpointGroup) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11552,9 +11552,9 @@ func (g *GCEBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -11563,20 +11563,20 @@ func (g *GCEBetaNetworkEndpointGroups) Insert(ctx context.Context, key *meta.Key op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the NetworkEndpointGroup referenced by key. 
func (g *GCEBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11586,9 +11586,9 @@ func (g *GCEBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.NetworkEndpointGroups.Delete(projectID, key.Zone, key.Name) @@ -11596,18 +11596,18 @@ func (g *GCEBetaNetworkEndpointGroups) Delete(ctx context.Context, key *meta.Key op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.Delete(%v, %v) = %v", ctx, key, err) return err } // AggregatedList lists all resources of the given type across all locations. 
func (g *GCEBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*beta.NetworkEndpointGroup, error) { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") rk := &RateLimitKey{ @@ -11617,9 +11617,9 @@ func (g *GCEBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *f Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) return nil, err } @@ -11632,33 +11632,33 @@ func (g *GCEBetaNetworkEndpointGroups) AggregatedList(ctx context.Context, fl *f all := map[string][]*beta.NetworkEndpointGroup{} f := func(l *beta.NetworkEndpointGroupAggregatedList) error { for k, v := range l.Items { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) all[k] = append(all[k], v.NetworkEndpointGroups...) } return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } // AttachNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. 
func (g *GCEBetaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsAttachEndpointsRequest) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11668,30 +11668,30 @@ func (g *GCEBetaNetworkEndpointGroups) AttachNetworkEndpoints(ctx context.Contex Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.NetworkEndpointGroups.AttachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.AttachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } // DetachNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. 
func (g *GCEBetaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsDetachEndpointsRequest) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11701,30 +11701,30 @@ func (g *GCEBetaNetworkEndpointGroups) DetachNetworkEndpoints(ctx context.Contex Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.NetworkEndpointGroups.DetachNetworkEndpoints(projectID, key.Zone, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.DetachNetworkEndpoints(%v, %v, ...) = %+v", ctx, key, err) return err } // ListNetworkEndpoints is a method on GCEBetaNetworkEndpointGroups. 
func (g *GCEBetaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, key *meta.Key, arg0 *beta.NetworkEndpointGroupsListEndpointsRequest, fl *filter.F) ([]*beta.NetworkEndpointWithHealthStatus, error) { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "NetworkEndpointGroups") @@ -11734,31 +11734,31 @@ func (g *GCEBetaNetworkEndpointGroups) ListNetworkEndpoints(ctx context.Context, Version: meta.Version("beta"), Service: "NetworkEndpointGroups", } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.NetworkEndpointGroups.ListNetworkEndpoints(projectID, key.Zone, key.Name, arg0) var all []*beta.NetworkEndpointWithHealthStatus f := func(l *beta.NetworkEndpointGroupsListNetworkEndpoints) error { - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...): page %+v", ctx, key, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + klog.V(5).Infof("GCEBetaNetworkEndpointGroups.ListNetworkEndpoints(%v, %v, ...) 
= %v, %v", ctx, key, asStr, nil) } return all, nil } @@ -11859,7 +11859,7 @@ type MockRegions struct { func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -11871,12 +11871,12 @@ func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockRegions.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -11884,7 +11884,7 @@ func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error Code: http.StatusNotFound, Message: fmt.Sprintf("MockRegions %v not found", key), } - glog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRegions.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -11892,7 +11892,7 @@ func (m *MockRegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -11902,7 +11902,7 @@ func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, err if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockRegions.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockRegions.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -11915,7 +11915,7 @@ func (m *MockRegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, err objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockRegions.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -11931,10 +11931,10 @@ type GCERegions struct { // Get the Region named by key. 
func (g *GCERegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) { - glog.V(5).Infof("GCERegions.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERegions.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERegions.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERegions.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") @@ -11944,21 +11944,21 @@ func (g *GCERegions) Get(ctx context.Context, key *meta.Key) (*ga.Region, error) Version: meta.Version("ga"), Service: "Regions", } - glog.V(5).Infof("GCERegions.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERegions.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERegions.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERegions.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Regions.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERegions.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERegions.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Region objects. func (g *GCERegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, error) { - glog.V(5).Infof("GCERegions.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCERegions.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Regions") rk := &RateLimitKey{ ProjectID: projectID, @@ -11969,30 +11969,30 @@ func (g *GCERegions) List(ctx context.Context, fl *filter.F) ([]*ga.Region, erro if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCERegions.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCERegions.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.Regions.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Region f := func(l *ga.RegionList) error { - glog.V(5).Infof("GCERegions.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCERegions.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCERegions.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCERegions.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERegions.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -12053,7 +12053,7 @@ type MockRoutes struct { func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -12065,12 +12065,12 @@ func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockRoutes.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -12078,7 +12078,7 @@ func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) Code: http.StatusNotFound, Message: fmt.Sprintf("MockRoutes %v not found", key), } - glog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -12086,7 +12086,7 @@ func (m *MockRoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -12096,7 +12096,7 @@ func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockRoutes.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockRoutes.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -12109,7 +12109,7 @@ func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockRoutes.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -12117,7 +12117,7 @@ func (m *MockRoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRoutes.Insert(%v, %v, 
%+v) = %v", ctx, key, obj, err) return err } } @@ -12129,7 +12129,7 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -12137,7 +12137,7 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e Code: http.StatusConflict, Message: fmt.Sprintf("MockRoutes %v exists", key), } - glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -12146,7 +12146,7 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e obj.SelfLink = SelfLink(meta.VersionGA, projectID, "routes", key) m.Objects[*key] = &MockRoutesObj{obj} - glog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockRoutes.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -12154,7 +12154,7 @@ func (m *MockRoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) e func (m *MockRoutes) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -12166,7 +12166,7 @@ func (m *MockRoutes) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -12174,12 +12174,12 @@ func (m *MockRoutes) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockRoutes %v not found", key), } - glog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockRoutes.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockRoutes.Delete(%v, %v) = nil", ctx, key) return nil } @@ -12195,10 +12195,10 @@ type GCERoutes struct { // Get the Route named by key. 
func (g *GCERoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { - glog.V(5).Infof("GCERoutes.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERoutes.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERoutes.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERoutes.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") @@ -12208,21 +12208,21 @@ func (g *GCERoutes) Get(ctx context.Context, key *meta.Key) (*ga.Route, error) { Version: meta.Version("ga"), Service: "Routes", } - glog.V(5).Infof("GCERoutes.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERoutes.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERoutes.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Routes.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCERoutes.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCERoutes.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Route objects. func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) { - glog.V(5).Infof("GCERoutes.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCERoutes.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") rk := &RateLimitKey{ ProjectID: projectID, @@ -12233,30 +12233,30 @@ func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCERoutes.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCERoutes.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.Routes.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Route f := func(l *ga.RouteList) error { - glog.V(5).Infof("GCERoutes.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCERoutes.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCERoutes.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCERoutes.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCERoutes.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -12264,9 +12264,9 @@ func (g *GCERoutes) List(ctx context.Context, fl *filter.F) ([]*ga.Route, error) // Insert Route with key of value obj. 
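The converted List wrappers delegate iteration to the client library's Pages method, which invokes the callback once per response page until the pages are exhausted or the callback errors; the generated callback only appends page.Items and logs the page at V(5). The same idiom against google.golang.org/api/compute/v1 directly, assuming a recent client version with NewService and a hypothetical project ID:

package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// NewService authenticates via Application Default Credentials.
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	var all []*compute.Route
	// Pages calls the func once per page; returning an error stops iteration.
	err = svc.Routes.List("my-project").Pages(ctx, func(page *compute.RouteList) error {
		all = append(all, page.Items...)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("[%v items]\n", len(all))
}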
func (g *GCERoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) error { - glog.V(5).Infof("GCERoutes.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCERoutes.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCERoutes.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERoutes.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") @@ -12276,9 +12276,9 @@ func (g *GCERoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) er Version: meta.Version("ga"), Service: "Routes", } - glog.V(5).Infof("GCERoutes.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERoutes.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERoutes.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -12287,20 +12287,20 @@ func (g *GCERoutes) Insert(ctx context.Context, key *meta.Key, obj *ga.Route) er op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERoutes.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERoutes.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCERoutes.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the Route referenced by key. func (g *GCERoutes) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCERoutes.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCERoutes.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCERoutes.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCERoutes.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Routes") @@ -12310,9 +12310,9 @@ func (g *GCERoutes) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "Routes", } - glog.V(5).Infof("GCERoutes.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCERoutes.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCERoutes.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.Routes.Delete(projectID, key.Name) @@ -12321,12 +12321,12 @@ func (g *GCERoutes) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCERoutes.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -12395,7 +12395,7 @@ type MockBetaSecurityPolicies struct { func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta.SecurityPolicy, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - 
glog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -12407,12 +12407,12 @@ func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*bet defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToBeta() - glog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -12420,7 +12420,7 @@ func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*bet Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaSecurityPolicies %v not found", key), } - glog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -12428,7 +12428,7 @@ func (m *MockBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*bet func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -12438,7 +12438,7 @@ func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*b if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -12451,7 +12451,7 @@ func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*b objs = append(objs, obj.ToBeta()) } - glog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockBetaSecurityPolicies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -12459,7 +12459,7 @@ func (m *MockBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*b func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -12471,7 +12471,7 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -12479,7 +12479,7 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob Code: http.StatusConflict, Message: fmt.Sprintf("MockBetaSecurityPolicies %v exists", key), } - 
glog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -12488,7 +12488,7 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob obj.SelfLink = SelfLink(meta.VersionBeta, projectID, "securityPolicies", key) m.Objects[*key] = &MockSecurityPoliciesObj{obj} - glog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockBetaSecurityPolicies.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -12496,7 +12496,7 @@ func (m *MockBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, ob func (m *MockBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -12508,7 +12508,7 @@ func (m *MockBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) er defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -12516,12 +12516,12 @@ func (m *MockBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) er Code: http.StatusNotFound, Message: fmt.Sprintf("MockBetaSecurityPolicies %v not found", key), } - glog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockBetaSecurityPolicies.Delete(%v, %v) = nil", ctx, key) return nil } @@ -12577,10 +12577,10 @@ type GCEBetaSecurityPolicies struct { // Get the SecurityPolicy named by key. 
func (g *GCEBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta.SecurityPolicy, error) { - glog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12590,21 +12590,21 @@ func (g *GCEBetaSecurityPolicies) Get(ctx context.Context, key *meta.Key) (*beta Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.SecurityPolicies.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all SecurityPolicy objects. func (g *GCEBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*beta.SecurityPolicy, error) { - glog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") rk := &RateLimitKey{ ProjectID: projectID, @@ -12615,30 +12615,30 @@ func (g *GCEBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*be if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.Beta.SecurityPolicies.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*beta.SecurityPolicy f := func(l *beta.SecurityPolicyList) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEBetaSecurityPolicies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -12646,9 +12646,9 @@ func (g *GCEBetaSecurityPolicies) List(ctx context.Context, fl *filter.F) ([]*be // Insert SecurityPolicy with key of value obj. func (g *GCEBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj *beta.SecurityPolicy) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12658,9 +12658,9 @@ func (g *GCEBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -12669,20 +12669,20 @@ func (g *GCEBetaSecurityPolicies) Insert(ctx context.Context, key *meta.Key, obj op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the SecurityPolicy referenced by key. 
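The only source-level change in these hunks is the identifier: k8s.io/klog keeps glog's V(level) API, where V(n) returns a Verbose value usable both as a boolean guard and as a logger, so blocks like the if klog.V(4) { ... } else if klog.V(5) { ... } above convert mechanically. (The guard order is carried over unchanged; since -v=5 also satisfies V(4), the detailed V(5) branch stays effectively unreachable, as it was under glog.) A minimal runnable sketch of the pattern; the verbosity setting is hypothetical:

package main

import (
	"flag"
	"fmt"

	"k8s.io/klog"
)

func main() {
	// klog, unlike glog, does not register its flags in an init()
	// function; the caller wires them up explicitly.
	klog.InitFlags(nil)
	flag.Set("v", "5")            // hypothetical verbosity for this sketch
	flag.Set("logtostderr", "true") // make the output visible on stderr
	flag.Parse()
	defer klog.Flush()

	all := []string{"policy-a", "policy-b"}

	// klog.V(n) returns a Verbose (a bool), so it gates expensive
	// formatting exactly as glog.V(n) did in the generated List().
	if klog.V(4) {
		// -v=5 satisfies V(4) too, so this summary branch fires;
		// the per-item branch below never does.
		klog.V(4).Infof("List(...) = [%v items], %v", len(all), nil)
	} else if klog.V(5) {
		var asStr []string
		for _, o := range all {
			asStr = append(asStr, fmt.Sprintf("%+v", o))
		}
		klog.V(5).Infof("List(...) = %v, %v", asStr, nil)
	}
}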
func (g *GCEBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12692,9 +12692,9 @@ func (g *GCEBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) err Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.Delete(projectID, key.Name) @@ -12703,21 +12703,21 @@ func (g *GCEBetaSecurityPolicies) Delete(ctx context.Context, key *meta.Key) err op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Delete(%v, %v) = %v", ctx, key, err) return err } // AddRule is a method on GCEBetaSecurityPolicies. func (g *GCEBetaSecurityPolicies) AddRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12727,30 +12727,30 @@ func (g *GCEBetaSecurityPolicies) AddRule(ctx context.Context, key *meta.Key, ar Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.AddRule(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.AddRule(%v, %v, ...) = %+v", ctx, key, err) return err } // GetRule is a method on GCEBetaSecurityPolicies. func (g *GCEBetaSecurityPolicies) GetRule(ctx context.Context, key *meta.Key) (*beta.SecurityPolicyRule, error) { - glog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12760,25 +12760,25 @@ func (g *GCEBetaSecurityPolicies) GetRule(ctx context.Context, key *meta.Key) (* Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.Beta.SecurityPolicies.GetRule(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.GetRule(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err } // Patch is a method on GCEBetaSecurityPolicies. func (g *GCEBetaSecurityPolicies) Patch(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicy) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12788,30 +12788,30 @@ func (g *GCEBetaSecurityPolicies) Patch(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.Patch(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.Patch(%v, %v, ...) = %+v", ctx, key, err) return err } // PatchRule is a method on GCEBetaSecurityPolicies. func (g *GCEBetaSecurityPolicies) PatchRule(ctx context.Context, key *meta.Key, arg0 *beta.SecurityPolicyRule) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12821,30 +12821,30 @@ func (g *GCEBetaSecurityPolicies) PatchRule(ctx context.Context, key *meta.Key, Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.PatchRule(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.PatchRule(%v, %v, ...) = %+v", ctx, key, err) return err } // RemoveRule is a method on GCEBetaSecurityPolicies. 
func (g *GCEBetaSecurityPolicies) RemoveRule(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "beta", "SecurityPolicies") @@ -12854,21 +12854,21 @@ func (g *GCEBetaSecurityPolicies) RemoveRule(ctx context.Context, key *meta.Key) Version: meta.Version("beta"), Service: "SecurityPolicies", } - glog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.Beta.SecurityPolicies.RemoveRule(projectID, key.Name) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEBetaSecurityPolicies.RemoveRule(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -12927,7 +12927,7 @@ type MockSslCertificates struct { func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -12939,12 +12939,12 @@ func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCe defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -12952,7 +12952,7 @@ func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCe Code: http.StatusNotFound, Message: fmt.Sprintf("MockSslCertificates %v not found", key), } - glog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -12960,7 +12960,7 @@ func (m *MockSslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCe func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -12970,7 +12970,7 @@ func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.Ssl if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockSslCertificates.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockSslCertificates.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -12983,7 +12983,7 @@ func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.Ssl objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockSslCertificates.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -12991,7 +12991,7 @@ func (m *MockSslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.Ssl func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga.SslCertificate) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -13003,7 +13003,7 @@ func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -13011,7 +13011,7 @@ func (m *MockSslCertificates) Insert(ctx 
context.Context, key *meta.Key, obj *ga Code: http.StatusConflict, Message: fmt.Sprintf("MockSslCertificates %v exists", key), } - glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -13020,7 +13020,7 @@ func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga obj.SelfLink = SelfLink(meta.VersionGA, projectID, "sslCertificates", key) m.Objects[*key] = &MockSslCertificatesObj{obj} - glog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockSslCertificates.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -13028,7 +13028,7 @@ func (m *MockSslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga func (m *MockSslCertificates) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -13040,7 +13040,7 @@ func (m *MockSslCertificates) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -13048,12 +13048,12 @@ func (m *MockSslCertificates) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockSslCertificates %v not found", key), } - glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockSslCertificates.Delete(%v, %v) = nil", ctx, key) return nil } @@ -13069,10 +13069,10 @@ type GCESslCertificates struct { // Get the SslCertificate named by key. 
func (g *GCESslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCertificate, error) { - glog.V(5).Infof("GCESslCertificates.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCESslCertificates.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCESslCertificates.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCESslCertificates.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") @@ -13082,21 +13082,21 @@ func (g *GCESslCertificates) Get(ctx context.Context, key *meta.Key) (*ga.SslCer Version: meta.Version("ga"), Service: "SslCertificates", } - glog.V(5).Infof("GCESslCertificates.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCESslCertificates.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCESslCertificates.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.SslCertificates.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCESslCertificates.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCESslCertificates.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all SslCertificate objects. func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslCertificate, error) { - glog.V(5).Infof("GCESslCertificates.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCESslCertificates.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") rk := &RateLimitKey{ ProjectID: projectID, @@ -13107,30 +13107,30 @@ func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslC if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCESslCertificates.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCESslCertificates.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.SslCertificates.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.SslCertificate f := func(l *ga.SslCertificateList) error { - glog.V(5).Infof("GCESslCertificates.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCESslCertificates.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCESslCertificates.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCESslCertificates.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -13138,9 +13138,9 @@ func (g *GCESslCertificates) List(ctx context.Context, fl *filter.F) ([]*ga.SslC // Insert SslCertificate with key of value obj. func (g *GCESslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga.SslCertificate) error { - glog.V(5).Infof("GCESslCertificates.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCESslCertificates.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCESslCertificates.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCESslCertificates.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") @@ -13150,9 +13150,9 @@ func (g *GCESslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga. Version: meta.Version("ga"), Service: "SslCertificates", } - glog.V(5).Infof("GCESslCertificates.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCESslCertificates.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -13161,20 +13161,20 @@ func (g *GCESslCertificates) Insert(ctx context.Context, key *meta.Key, obj *ga. op, err := call.Do() if err != nil { - glog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCESslCertificates.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCESslCertificates.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the SslCertificate referenced by key. 
func (g *GCESslCertificates) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCESslCertificates.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCESslCertificates.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCESslCertificates.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCESslCertificates.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "SslCertificates") @@ -13184,9 +13184,9 @@ func (g *GCESslCertificates) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "SslCertificates", } - glog.V(5).Infof("GCESslCertificates.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCESslCertificates.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCESslCertificates.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.SslCertificates.Delete(projectID, key.Name) @@ -13195,12 +13195,12 @@ func (g *GCESslCertificates) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCESslCertificates.Delete(%v, %v) = %v", ctx, key, err) return err } @@ -13261,7 +13261,7 @@ type MockTargetHttpProxies struct { func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpProxy, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -13273,12 +13273,12 @@ func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -13286,7 +13286,7 @@ func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), } - glog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -13294,7 +13294,7 @@ func (m *MockTargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + 
klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -13304,7 +13304,7 @@ func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -13317,7 +13317,7 @@ func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockTargetHttpProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -13325,7 +13325,7 @@ func (m *MockTargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -13337,7 +13337,7 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -13345,7 +13345,7 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * Code: http.StatusConflict, Message: fmt.Sprintf("MockTargetHttpProxies %v exists", key), } - glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -13354,7 +13354,7 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetHttpProxies", key) m.Objects[*key] = &MockTargetHttpProxiesObj{obj} - glog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockTargetHttpProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -13362,7 +13362,7 @@ func (m *MockTargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj * func (m *MockTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -13374,7 +13374,7 @@ func (m *MockTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -13382,12 +13382,12 @@ func (m *MockTargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetHttpProxies %v not found", key), } - glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = 
%v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockTargetHttpProxies.Delete(%v, %v) = nil", ctx, key) return nil } @@ -13411,10 +13411,10 @@ type GCETargetHttpProxies struct { // Get the TargetHttpProxy named by key. func (g *GCETargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpProxy, error) { - glog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") @@ -13424,21 +13424,21 @@ func (g *GCETargetHttpProxies) Get(ctx context.Context, key *meta.Key) (*ga.Targ Version: meta.Version("ga"), Service: "TargetHttpProxies", } - glog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.TargetHttpProxies.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCETargetHttpProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all TargetHttpProxy objects. func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpProxy, error) { - glog.V(5).Infof("GCETargetHttpProxies.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCETargetHttpProxies.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") rk := &RateLimitKey{ ProjectID: projectID, @@ -13449,30 +13449,30 @@ func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.Ta if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCETargetHttpProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.TargetHttpProxies.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.TargetHttpProxy f := func(l *ga.TargetHttpProxyList) error { - glog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCETargetHttpProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCETargetHttpProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -13480,9 +13480,9 @@ func (g *GCETargetHttpProxies) List(ctx context.Context, fl *filter.F) ([]*ga.Ta // Insert TargetHttpProxy with key of value obj. func (g *GCETargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpProxy) error { - glog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") @@ -13492,9 +13492,9 @@ func (g *GCETargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *g Version: meta.Version("ga"), Service: "TargetHttpProxies", } - glog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -13503,20 +13503,20 @@ func (g *GCETargetHttpProxies) Insert(ctx context.Context, key *meta.Key, obj *g op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCETargetHttpProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the TargetHttpProxy referenced by key. 
func (g *GCETargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") @@ -13526,9 +13526,9 @@ func (g *GCETargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error Version: meta.Version("ga"), Service: "TargetHttpProxies", } - glog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpProxies.Delete(projectID, key.Name) @@ -13537,21 +13537,21 @@ func (g *GCETargetHttpProxies) Delete(ctx context.Context, key *meta.Key) error op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.Delete(%v, %v) = %v", ctx, key, err) return err } // SetUrlMap is a method on GCETargetHttpProxies. func (g *GCETargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { - glog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpProxies") @@ -13561,21 +13561,21 @@ func (g *GCETargetHttpProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg Version: meta.Version("ga"), Service: "TargetHttpProxies", } - glog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpProxies.SetUrlMap(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -13638,7 +13638,7 @@ type MockTargetHttpsProxies struct { func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpsProxy, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -13650,12 +13650,12 @@ func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Ta defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -13663,7 +13663,7 @@ func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Ta Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), } - glog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -13671,7 +13671,7 @@ func (m *MockTargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Ta func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -13681,7 +13681,7 @@ func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga. if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -13694,7 +13694,7 @@ func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga. objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockTargetHttpsProxies.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -13702,7 +13702,7 @@ func (m *MockTargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga. 
func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -13714,7 +13714,7 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -13722,7 +13722,7 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj Code: http.StatusConflict, Message: fmt.Sprintf("MockTargetHttpsProxies %v exists", key), } - glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -13731,7 +13731,7 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetHttpsProxies", key) m.Objects[*key] = &MockTargetHttpsProxiesObj{obj} - glog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockTargetHttpsProxies.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -13739,7 +13739,7 @@ func (m *MockTargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -13751,7 +13751,7 @@ func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) erro defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -13759,12 +13759,12 @@ func (m *MockTargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) erro Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetHttpsProxies %v not found", key), } - glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockTargetHttpsProxies.Delete(%v, %v) = nil", ctx, key) return nil } @@ -13796,10 +13796,10 @@ type GCETargetHttpsProxies struct { // Get the TargetHttpsProxy named by key. 
func (g *GCETargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.TargetHttpsProxy, error) { - glog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13809,21 +13809,21 @@ func (g *GCETargetHttpsProxies) Get(ctx context.Context, key *meta.Key) (*ga.Tar Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.TargetHttpsProxies.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCETargetHttpsProxies.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all TargetHttpsProxy objects. func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.TargetHttpsProxy, error) { - glog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") rk := &RateLimitKey{ ProjectID: projectID, @@ -13834,30 +13834,30 @@ func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.TargetHttpsProxies.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.TargetHttpsProxy f := func(l *ga.TargetHttpsProxyList) error { - glog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCETargetHttpsProxies.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -13865,9 +13865,9 @@ func (g *GCETargetHttpsProxies) List(ctx context.Context, fl *filter.F) ([]*ga.T // Insert TargetHttpsProxy with key of value obj. func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetHttpsProxy) error { - glog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13877,9 +13877,9 @@ func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj * Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -13888,20 +13888,20 @@ func (g *GCETargetHttpsProxies) Insert(ctx context.Context, key *meta.Key, obj * op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCETargetHttpsProxies.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the TargetHttpsProxy referenced by key. 
func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13911,9 +13911,9 @@ func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpsProxies.Delete(projectID, key.Name) @@ -13922,21 +13922,21 @@ func (g *GCETargetHttpsProxies) Delete(ctx context.Context, key *meta.Key) error op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.Delete(%v, %v) = %v", ctx, key, err) return err } // SetSslCertificates is a method on GCETargetHttpsProxies. func (g *GCETargetHttpsProxies) SetSslCertificates(ctx context.Context, key *meta.Key, arg0 *ga.TargetHttpsProxiesSetSslCertificatesRequest) error { - glog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13946,30 +13946,30 @@ func (g *GCETargetHttpsProxies) SetSslCertificates(ctx context.Context, key *met Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpsProxies.SetSslCertificates(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetSslCertificates(%v, %v, ...) = %+v", ctx, key, err) return err } // SetUrlMap is a method on GCETargetHttpsProxies. func (g *GCETargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, arg0 *ga.UrlMapReference) error { - glog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetHttpsProxies") @@ -13979,21 +13979,21 @@ func (g *GCETargetHttpsProxies) SetUrlMap(ctx context.Context, key *meta.Key, ar Version: meta.Version("ga"), Service: "TargetHttpsProxies", } - glog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetHttpsProxies.SetUrlMap(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetHttpsProxies.SetUrlMap(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -14056,7 +14056,7 @@ type MockTargetPools struct { func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -14068,12 +14068,12 @@ func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPoo defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -14081,7 +14081,7 @@ func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPoo Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetPools %v not found", key), } - glog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -14089,7 +14089,7 @@ func (m *MockTargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPoo func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) return objs, err } } @@ -14099,7 +14099,7 @@ func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) return nil, *m.ListError } @@ -14115,7 +14115,7 @@ func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("MockTargetPools.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) return objs, nil } @@ -14123,7 +14123,7 @@ func (m *MockTargetPools) List(ctx context.Context, region string, fl *filter.F) func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetPool) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -14135,7 +14135,7 @@ func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Tar defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -14143,7 +14143,7 @@ func (m *MockTargetPools) Insert(ctx context.Context, key 
*meta.Key, obj *ga.Tar Code: http.StatusConflict, Message: fmt.Sprintf("MockTargetPools %v exists", key), } - glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -14152,7 +14152,7 @@ func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Tar obj.SelfLink = SelfLink(meta.VersionGA, projectID, "targetPools", key) m.Objects[*key] = &MockTargetPoolsObj{obj} - glog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockTargetPools.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -14160,7 +14160,7 @@ func (m *MockTargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Tar func (m *MockTargetPools) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -14172,7 +14172,7 @@ func (m *MockTargetPools) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -14180,12 +14180,12 @@ func (m *MockTargetPools) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockTargetPools %v not found", key), } - glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockTargetPools.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockTargetPools.Delete(%v, %v) = nil", ctx, key) return nil } @@ -14217,10 +14217,10 @@ type GCETargetPools struct { // Get the TargetPool named by key. func (g *GCETargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool, error) { - glog.V(5).Infof("GCETargetPools.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetPools.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14230,21 +14230,21 @@ func (g *GCETargetPools) Get(ctx context.Context, key *meta.Key) (*ga.TargetPool Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.TargetPools.Get(projectID, key.Region, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCETargetPools.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCETargetPools.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all TargetPool objects. 
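Context for the hunk above: every generated wrapper performs the same pre-flight, building a RateLimitKey that names the project, operation, API version, and service, then blocking in g.s.RateLimiter.Accept before issuing the call; this patch only touches the logging around that flow. A rough sketch of the contract with stand-in types (the real definitions live in this package; these names are purely illustrative):

    package main

    import (
        "context"
        "fmt"
    )

    // Illustrative stand-ins for the package's RateLimitKey and limiter.
    type RateLimitKey struct {
        ProjectID, Operation, Version, Service string
    }

    type RateLimiter interface {
        // Accept blocks until the call may proceed, or returns an error,
        // for example when the context is cancelled.
        Accept(ctx context.Context, key *RateLimitKey) error
    }

    // acceptAll never throttles; it only honors context cancellation.
    type acceptAll struct{}

    func (acceptAll) Accept(ctx context.Context, key *RateLimitKey) error {
        return ctx.Err()
    }

    func main() {
        var rl RateLimiter = acceptAll{}
        rk := &RateLimitKey{ProjectID: "my-project", Operation: "Get", Version: "ga", Service: "TargetPools"}
        if err := rl.Accept(context.Background(), rk); err != nil {
            fmt.Println("RateLimiter error:", err) // the V(4) log above reports this case
            return
        }
        fmt.Printf("proceeding with %s.%s\n", rk.Service, rk.Operation)
    }
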
func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) ([]*ga.TargetPool, error) { - glog.V(5).Infof("GCETargetPools.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("GCETargetPools.List(%v, %v, %v) called", ctx, region, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") rk := &RateLimitKey{ ProjectID: projectID, @@ -14255,30 +14255,30 @@ func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCETargetPools.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("GCETargetPools.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.GA.TargetPools.List(projectID, region) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.TargetPool f := func(l *ga.TargetPoolList) error { - glog.V(5).Infof("GCETargetPools.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCETargetPools.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCETargetPools.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCETargetPools.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -14286,9 +14286,9 @@ func (g *GCETargetPools) List(ctx context.Context, region string, fl *filter.F) // Insert TargetPool with key of value obj. func (g *GCETargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.TargetPool) error { - glog.V(5).Infof("GCETargetPools.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCETargetPools.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14298,9 +14298,9 @@ func (g *GCETargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Targ Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -14309,20 +14309,20 @@ func (g *GCETargetPools) Insert(ctx context.Context, key *meta.Key, obj *ga.Targ op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetPools.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCETargetPools.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the TargetPool referenced by key. func (g *GCETargetPools) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCETargetPools.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCETargetPools.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14332,9 +14332,9 @@ func (g *GCETargetPools) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetPools.Delete(projectID, key.Region, key.Name) @@ -14342,21 +14342,21 @@ func (g *GCETargetPools) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.Delete(%v, %v) = %v", ctx, key, err) return err } // AddInstance is a method on GCETargetPools. 
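The mutating wrappers in this hunk (Insert, Delete, and resource methods such as AddInstance below) share a two-phase shape: call.Do() submits and returns a long-running operation, then g.s.WaitForCompletion polls it, so the V(4) line after the wait logs the final outcome rather than just the submission. A toy version of that wait, under assumed names:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // op stands in for a GCE long-running operation handle.
    type op struct{ done chan struct{} }

    // waitForCompletion plays the role of g.s.WaitForCompletion in the
    // generated code: block until the operation finishes or ctx expires.
    func waitForCompletion(ctx context.Context, o *op) error {
        select {
        case <-o.done:
            return nil
        case <-ctx.Done():
            return errors.New("timed out waiting for operation")
        }
    }

    func main() {
        o := &op{done: make(chan struct{})}
        go func() { time.Sleep(10 * time.Millisecond); close(o.done) }()

        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()
        // Log the end-to-end result, as the final V(4) line in each wrapper does.
        fmt.Println("Delete(...) =", waitForCompletion(ctx, o))
    }
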
func (g *GCETargetPools) AddInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsAddInstanceRequest) error { - glog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.AddInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.AddInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14366,30 +14366,30 @@ func (g *GCETargetPools) AddInstance(ctx context.Context, key *meta.Key, arg0 *g Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.AddInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetPools.AddInstance(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.AddInstance(%v, %v, ...) = %+v", ctx, key, err) return err } // RemoveInstance is a method on GCETargetPools. func (g *GCETargetPools) RemoveInstance(ctx context.Context, key *meta.Key, arg0 *ga.TargetPoolsRemoveInstanceRequest) error { - glog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "TargetPools") @@ -14399,21 +14399,21 @@ func (g *GCETargetPools) RemoveInstance(ctx context.Context, key *meta.Key, arg0 Version: meta.Version("ga"), Service: "TargetPools", } - glog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.TargetPools.RemoveInstance(projectID, key.Region, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) 
= %+v", ctx, key, err) + klog.V(4).Infof("GCETargetPools.RemoveInstance(%v, %v, ...) = %+v", ctx, key, err) return err } @@ -14474,7 +14474,7 @@ type MockUrlMaps struct { func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -14486,12 +14486,12 @@ func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -14499,7 +14499,7 @@ func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error Code: http.StatusNotFound, Message: fmt.Sprintf("MockUrlMaps %v not found", key), } - glog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -14507,7 +14507,7 @@ func (m *MockUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -14517,7 +14517,7 @@ func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, err if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockUrlMaps.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockUrlMaps.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -14530,7 +14530,7 @@ func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, err objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockUrlMaps.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -14538,7 +14538,7 @@ func (m *MockUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, err func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -14550,7 +14550,7 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -14558,7 +14558,7 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) Code: http.StatusConflict, Message: fmt.Sprintf("MockUrlMaps %v exists", key), } - 
glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -14567,7 +14567,7 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) obj.SelfLink = SelfLink(meta.VersionGA, projectID, "urlMaps", key) m.Objects[*key] = &MockUrlMapsObj{obj} - glog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("MockUrlMaps.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } @@ -14575,7 +14575,7 @@ func (m *MockUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) func (m *MockUrlMaps) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -14587,7 +14587,7 @@ func (m *MockUrlMaps) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -14595,12 +14595,12 @@ func (m *MockUrlMaps) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("MockUrlMaps %v not found", key), } - glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) - glog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("MockUrlMaps.Delete(%v, %v) = nil", ctx, key) return nil } @@ -14624,10 +14624,10 @@ type GCEUrlMaps struct { // Get the UrlMap named by key. func (g *GCEUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) { - glog.V(5).Infof("GCEUrlMaps.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEUrlMaps.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEUrlMaps.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") @@ -14637,21 +14637,21 @@ func (g *GCEUrlMaps) Get(ctx context.Context, key *meta.Key) (*ga.UrlMap, error) Version: meta.Version("ga"), Service: "UrlMaps", } - glog.V(5).Infof("GCEUrlMaps.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEUrlMaps.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.UrlMaps.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEUrlMaps.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEUrlMaps.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all UrlMap objects. 
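The Mock* types in these hunks keep all state in maps keyed by *meta.Key: per-key error injection via GetError, InsertError, and DeleteError, stored objects, and optional hooks that can intercept a call outright, which is what lets tests script failures without a real API behind them. A condensed sketch of the Get path, with hypothetical names:

    package main

    import (
        "errors"
        "fmt"
        "sync"
    )

    // key stands in for meta.Key, which is comparable and used as a map key.
    type key struct{ Name string }

    type mockMaps struct {
        lock     sync.Mutex
        getError map[key]error
        objects  map[key]string
    }

    // get mirrors the generated mock Get: injected error first, then the
    // stored object, then a not-found error.
    func (m *mockMaps) get(k key) (string, error) {
        m.lock.Lock()
        defer m.lock.Unlock()
        if err, ok := m.getError[k]; ok {
            return "", err
        }
        if obj, ok := m.objects[k]; ok {
            return obj, nil
        }
        return "", fmt.Errorf("mock %v not found", k)
    }

    func main() {
        m := &mockMaps{
            getError: map[key]error{{Name: "bad"}: errors.New("boom")},
            objects:  map[key]string{{Name: "um-1"}: "url-map"},
        }
        fmt.Println(m.get(key{Name: "um-1"}))    // stored object
        fmt.Println(m.get(key{Name: "bad"}))     // injected error
        fmt.Println(m.get(key{Name: "missing"})) // synthesized 404
    }
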
func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, error) { - glog.V(5).Infof("GCEUrlMaps.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEUrlMaps.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") rk := &RateLimitKey{ ProjectID: projectID, @@ -14662,30 +14662,30 @@ func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, erro if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEUrlMaps.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.UrlMaps.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.UrlMap f := func(l *ga.UrlMapList) error { - glog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEUrlMaps.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEUrlMaps.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -14693,9 +14693,9 @@ func (g *GCEUrlMaps) List(ctx context.Context, fl *filter.F) ([]*ga.UrlMap, erro // Insert UrlMap with key of value obj. func (g *GCEUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) error { - glog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("GCEUrlMaps.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") @@ -14705,9 +14705,9 @@ func (g *GCEUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) Version: meta.Version("ga"), Service: "UrlMaps", } - glog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -14716,20 +14716,20 @@ func (g *GCEUrlMaps) Insert(ctx context.Context, key *meta.Key, obj *ga.UrlMap) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, ...) 
= %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("GCEUrlMaps.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } // Delete the UrlMap referenced by key. func (g *GCEUrlMaps) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEUrlMaps.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") @@ -14739,9 +14739,9 @@ func (g *GCEUrlMaps) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("ga"), Service: "UrlMaps", } - glog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEUrlMaps.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.UrlMaps.Delete(projectID, key.Name) @@ -14750,21 +14750,21 @@ func (g *GCEUrlMaps) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Delete(%v, %v) = %v", ctx, key, err) return err } // Update is a method on GCEUrlMaps. func (g *GCEUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *ga.UrlMap) error { - glog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEUrlMaps.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEUrlMaps.Update(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "UrlMaps") @@ -14774,21 +14774,21 @@ func (g *GCEUrlMaps) Update(ctx context.Context, key *meta.Key, arg0 *ga.UrlMap) Version: meta.Version("ga"), Service: "UrlMaps", } - glog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEUrlMaps.Update(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } call := g.s.GA.UrlMaps.Update(projectID, key.Name, arg0) call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("GCEUrlMaps.Update(%v, %v, ...) 
= %+v", ctx, key, err) return err } @@ -14839,7 +14839,7 @@ type MockZones struct { func (m *MockZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("MockZones.Get(%v, %s) = %+v, %v", ctx, key, obj, err) + klog.V(5).Infof("MockZones.Get(%v, %s) = %+v, %v", ctx, key, obj, err) return obj, err } } @@ -14851,12 +14851,12 @@ func (m *MockZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.ToGA() - glog.V(5).Infof("MockZones.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("MockZones.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -14864,7 +14864,7 @@ func (m *MockZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { Code: http.StatusNotFound, Message: fmt.Sprintf("MockZones %v not found", key), } - glog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("MockZones.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } @@ -14872,7 +14872,7 @@ func (m *MockZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { func (m *MockZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { if m.ListHook != nil { if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("MockZones.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("MockZones.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -14882,7 +14882,7 @@ func (m *MockZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) if m.ListError != nil { err := *m.ListError - glog.V(5).Infof("MockZones.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("MockZones.List(%v, %v) = nil, %v", ctx, fl, err) return nil, *m.ListError } @@ -14895,7 +14895,7 @@ func (m *MockZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) objs = append(objs, obj.ToGA()) } - glog.V(5).Infof("MockZones.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("MockZones.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } @@ -14911,10 +14911,10 @@ type GCEZones struct { // Get the Zone named by key. 
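Zones are read-only in this surface, so GCEZones only gets Get and List; note that List attaches a server-side filter only when fl is not filter.None. A small illustration of that guard with a stand-in filter type (the pointer comparison against a shared sentinel mirrors fl != filter.None; the expression syntax is illustrative):

    package main

    import "fmt"

    // f stands in for *filter.F; none plays the role of filter.None.
    type f struct{ expr string }

    var none = &f{}

    func (x *f) String() string { return x.expr }

    // listCall mimics the generated List: only add a filter expression
    // when the caller supplied one.
    func listCall(fl *f) string {
        q := "GET /zones"
        if fl != none {
            q += "?filter=" + fl.String()
        }
        return q
    }

    func main() {
        fmt.Println(listCall(none))                            // unfiltered
        fmt.Println(listCall(&f{expr: `region = "us-east1"`})) // filtered
    }
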
func (g *GCEZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { - glog.V(5).Infof("GCEZones.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("GCEZones.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("GCEZones.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("GCEZones.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Zones") @@ -14924,21 +14924,21 @@ func (g *GCEZones) Get(ctx context.Context, key *meta.Key) (*ga.Zone, error) { Version: meta.Version("ga"), Service: "Zones", } - glog.V(5).Infof("GCEZones.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("GCEZones.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("GCEZones.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("GCEZones.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } call := g.s.GA.Zones.Get(projectID, key.Name) call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("GCEZones.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("GCEZones.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } // List all Zone objects. func (g *GCEZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { - glog.V(5).Infof("GCEZones.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("GCEZones.List(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "ga", "Zones") rk := &RateLimitKey{ ProjectID: projectID, @@ -14949,30 +14949,30 @@ func (g *GCEZones) List(ctx context.Context, fl *filter.F) ([]*ga.Zone, error) { if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { return nil, err } - glog.V(5).Infof("GCEZones.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("GCEZones.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.GA.Zones.List(projectID) if fl != filter.None { call.Filter(fl.String()) } var all []*ga.Zone f := func(l *ga.ZoneList) error { - glog.V(5).Infof("GCEZones.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("GCEZones.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) 
return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("GCEZones.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("GCEZones.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("GCEZones.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("GCEZones.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("GCEZones.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("GCEZones.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil diff --git a/pkg/cloudprovider/providers/gce/cloud/gen/main.go b/pkg/cloudprovider/providers/gce/cloud/gen/main.go index 6ff2383c46593..ff45ccb3e11df 100644 --- a/pkg/cloudprovider/providers/gce/cloud/gen/main.go +++ b/pkg/cloudprovider/providers/gce/cloud/gen/main.go @@ -99,7 +99,7 @@ import ( "sync" "google.golang.org/api/googleapi" - "github.com/golang/glog" + "k8s.io/klog" "{{.PackageRoot}}/filter" "{{.PackageRoot}}/meta" @@ -219,7 +219,7 @@ func (m *Mock{{.Service}}Obj) ToAlpha() *{{.Alpha.FQObjectType}} { // Convert the object via JSON copying to the type that was requested. ret := &{{.Alpha.FQObjectType}}{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *{{.Alpha.FQObjectType}} via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *{{.Alpha.FQObjectType}} via JSON: %v", m.Obj, err) } return ret } @@ -233,7 +233,7 @@ func (m *Mock{{.Service}}Obj) ToBeta() *{{.Beta.FQObjectType}} { // Convert the object via JSON copying to the type that was requested. ret := &{{.Beta.FQObjectType}}{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *{{.Beta.FQObjectType}} via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *{{.Beta.FQObjectType}} via JSON: %v", m.Obj, err) } return ret } @@ -247,7 +247,7 @@ func (m *Mock{{.Service}}Obj) ToGA() *{{.GA.FQObjectType}} { // Convert the object via JSON copying to the type that was requested. 
ret := &{{.GA.FQObjectType}}{} if err := copyViaJSON(ret, m.Obj); err != nil { - glog.Errorf("Could not convert %T to *{{.GA.FQObjectType}} via JSON: %v", m.Obj, err) + klog.Errorf("Could not convert %T to *{{.GA.FQObjectType}} via JSON: %v", m.Obj, err) } return ret } @@ -394,7 +394,7 @@ type {{.MockWrapType}} struct { func (m *{{.MockWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObjectType}}, error) { if m.GetHook != nil { if intercept, obj, err := m.GetHook(ctx, key, m); intercept { - glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = %+v, %v", ctx, key, obj ,err) + klog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = %+v, %v", ctx, key, obj ,err) return obj, err } } @@ -406,12 +406,12 @@ func (m *{{.MockWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObjec defer m.Lock.Unlock() if err, ok := m.GetError[*key]; ok { - glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } if obj, ok := m.Objects[*key]; ok { typedObj := obj.To{{.VersionTitle}}() - glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = %+v, nil", ctx, key, typedObj) + klog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = %+v, nil", ctx, key, typedObj) return typedObj, nil } @@ -419,7 +419,7 @@ func (m *{{.MockWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObjec Code: http.StatusNotFound, Message: fmt.Sprintf("{{.MockWrapType}} %v not found", key), } - glog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = nil, %v", ctx, key, err) + klog.V(5).Infof("{{.MockWrapType}}.Get(%v, %s) = nil, %v", ctx, key, err) return nil, err } {{- end}} @@ -440,15 +440,15 @@ func (m *{{.MockWrapType}}) List(ctx context.Context, zone string, fl *filter.F) if m.ListHook != nil { {{if .KeyIsGlobal -}} if intercept, objs, err := m.ListHook(ctx, fl, m); intercept { - glog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) {{- end -}} {{- if .KeyIsRegional -}} if intercept, objs, err := m.ListHook(ctx, region, fl, m); intercept { - glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) + klog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], %v", ctx, region, fl, len(objs), err) {{- end -}} {{- if .KeyIsZonal -}} if intercept, objs, err := m.ListHook(ctx, zone, fl, m); intercept { - glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) + klog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], %v", ctx, zone, fl, len(objs), err) {{- end}} return objs, err } @@ -460,13 +460,13 @@ func (m *{{.MockWrapType}}) List(ctx context.Context, zone string, fl *filter.F) if m.ListError != nil { err := *m.ListError {{if .KeyIsGlobal -}} - glog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = nil, %v", ctx, fl, err) {{- end -}} {{- if .KeyIsRegional -}} - glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) + klog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = nil, %v", ctx, region, fl, err) {{- end -}} {{- if .KeyIsZonal -}} - glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) + klog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = nil, %v", ctx, zone, fl, err) {{- end}} return nil, *m.ListError @@ -495,13 +495,13 @@ func (m *{{.MockWrapType}}) List(ctx 
context.Context, zone string, fl *filter.F) } {{if .KeyIsGlobal -}} - glog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("{{.MockWrapType}}.List(%v, %v) = [%v items], nil", ctx, fl, len(objs)) {{- end -}} {{- if .KeyIsRegional -}} - glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) + klog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], nil", ctx, region, fl, len(objs)) {{- end -}} {{- if .KeyIsZonal -}} - glog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) + klog.V(5).Infof("{{.MockWrapType}}.List(%v, %q, %v) = [%v items], nil", ctx, zone, fl, len(objs)) {{- end}} return objs, nil } @@ -512,7 +512,7 @@ func (m *{{.MockWrapType}}) List(ctx context.Context, zone string, fl *filter.F) func (m *{{.MockWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.FQObjectType}}) error { if m.InsertHook != nil { if intercept, err := m.InsertHook(ctx, key, obj, m); intercept { - glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } } @@ -524,7 +524,7 @@ func (m *{{.MockWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.F defer m.Lock.Unlock() if err, ok := m.InsertError[*key]; ok { - glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } if _, ok := m.Objects[*key]; ok { @@ -532,7 +532,7 @@ func (m *{{.MockWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.F Code: http.StatusConflict, Message: fmt.Sprintf("{{.MockWrapType}} %v exists", key), } - glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) + klog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = %v", ctx, key, obj, err) return err } @@ -541,7 +541,7 @@ func (m *{{.MockWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.F obj.SelfLink = SelfLink(meta.Version{{.VersionTitle}}, projectID, "{{.Resource}}", key) m.Objects[*key] = &Mock{{.Service}}Obj{obj} - glog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = nil", ctx, key, obj) + klog.V(5).Infof("{{.MockWrapType}}.Insert(%v, %v, %+v) = nil", ctx, key, obj) return nil } {{- end}} @@ -551,7 +551,7 @@ func (m *{{.MockWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.F func (m *{{.MockWrapType}}) Delete(ctx context.Context, key *meta.Key) error { if m.DeleteHook != nil { if intercept, err := m.DeleteHook(ctx, key, m); intercept { - glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err) return err } } @@ -563,7 +563,7 @@ func (m *{{.MockWrapType}}) Delete(ctx context.Context, key *meta.Key) error { defer m.Lock.Unlock() if err, ok := m.DeleteError[*key]; ok { - glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err) return err } if _, ok := m.Objects[*key]; !ok { @@ -571,12 +571,12 @@ func (m *{{.MockWrapType}}) Delete(ctx context.Context, key *meta.Key) error { Code: http.StatusNotFound, Message: fmt.Sprintf("{{.MockWrapType}} %v not found", key), } - glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err) + klog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = %v", ctx, key, err) return err } delete(m.Objects, *key) 
- glog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = nil", ctx, key) + klog.V(5).Infof("{{.MockWrapType}}.Delete(%v, %v) = nil", ctx, key) return nil } {{- end}} @@ -586,7 +586,7 @@ func (m *{{.MockWrapType}}) Delete(ctx context.Context, key *meta.Key) error { func (m *{{.MockWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*{{.FQObjectType}}, error) { if m.AggregatedListHook != nil { if intercept, objs, err := m.AggregatedListHook(ctx, fl, m); intercept { - glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) + klog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(objs), err) return objs, err } } @@ -596,7 +596,7 @@ func (m *{{.MockWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (m if m.AggregatedListError != nil { err := *m.AggregatedListError - glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) return nil, err } @@ -610,7 +610,7 @@ func (m *{{.MockWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (m location := res.Key.Zone {{- end}} if err != nil { - glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) + klog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = nil, %v", ctx, fl, err) return nil, err } if !fl.Match(obj.To{{.VersionTitle}}()) { @@ -618,7 +618,7 @@ func (m *{{.MockWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (m } objs[location] = append(objs[location], obj.To{{.VersionTitle}}()) } - glog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) + klog.V(5).Infof("{{.MockWrapType}}.AggregatedList(%v, %v) = [%v items], nil", ctx, fl, len(objs)) return objs, nil } {{- end}} @@ -659,10 +659,10 @@ type {{.GCEWrapType}} struct { {{- if .GenerateGet}} // Get the {{.Object}} named by key. 
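Everything in gen.go above, mocks included, is emitted from the text/template blocks in this file, which is why the glog to klog rename lands twice: once in the generated output and once here in the generator. A toy reduction of the same technique (the data model below is illustrative, not the generator's real one):

    package main

    import (
        "os"
        "text/template"
    )

    // svc is a tiny stand-in for the generator's per-service template data.
    type svc struct {
        Service, Object string
    }

    const tmpl = `// Get the {{.Object}} named by key.
    func (g *GCE{{.Service}}) Get(ctx context.Context, key *meta.Key) (*{{.Object}}, error) {
        klog.V(5).Infof("GCE{{.Service}}.Get(%v, %v): called", ctx, key)
        // ... generated body elided ...
    }
    `

    func main() {
        t := template.Must(template.New("wrap").Parse(tmpl))
        // Render one wrapper; the real generator loops over every
        // service and API version.
        if err := t.Execute(os.Stdout, svc{Service: "Zones", Object: "Zone"}); err != nil {
            panic(err)
        }
    }
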
func (g *{{.GCEWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObjectType}}, error) { - glog.V(5).Infof("{{.GCEWrapType}}.Get(%v, %v): called", ctx, key) + klog.V(5).Infof("{{.GCEWrapType}}.Get(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("{{.GCEWrapType}}.Get(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("{{.GCEWrapType}}.Get(%v, %v): key is invalid (%#v)", ctx, key, key) return nil, fmt.Errorf("invalid GCE key (%#v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") @@ -672,9 +672,9 @@ func (g *{{.GCEWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObject Version: meta.Version("{{.Version}}"), Service: "{{.Service}}", } - glog.V(5).Infof("{{.GCEWrapType}}.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("{{.GCEWrapType}}.Get(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("{{.GCEWrapType}}.Get(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("{{.GCEWrapType}}.Get(%v, %v): RateLimiter error: %v", ctx, key, err) return nil, err } {{- if .KeyIsGlobal}} @@ -688,7 +688,7 @@ func (g *{{.GCEWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObject {{- end}} call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("{{.GCEWrapType}}.Get(%v, %v) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("{{.GCEWrapType}}.Get(%v, %v) = %+v, %v", ctx, key, v, err) return v, err } {{- end}} @@ -697,15 +697,15 @@ func (g *{{.GCEWrapType}}) Get(ctx context.Context, key *meta.Key) (*{{.FQObject // List all {{.Object}} objects. {{- if .KeyIsGlobal}} func (g *{{.GCEWrapType}}) List(ctx context.Context, fl *filter.F) ([]*{{.FQObjectType}}, error) { - glog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v) called", ctx, fl) + klog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v) called", ctx, fl) {{- end -}} {{- if .KeyIsRegional}} func (g *{{.GCEWrapType}}) List(ctx context.Context, region string, fl *filter.F) ([]*{{.FQObjectType}}, error) { - glog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v) called", ctx, region, fl) + klog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v) called", ctx, region, fl) {{- end -}} {{- if .KeyIsZonal}} func (g *{{.GCEWrapType}}) List(ctx context.Context, zone string, fl *filter.F) ([]*{{.FQObjectType}}, error) { - glog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v) called", ctx, zone, fl) + klog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v) called", ctx, zone, fl) {{- end}} projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") rk := &RateLimitKey{ @@ -718,15 +718,15 @@ func (g *{{.GCEWrapType}}) List(ctx context.Context, zone string, fl *filter.F) return nil, err } {{- if .KeyIsGlobal}} - glog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) call := g.s.{{.VersionTitle}}.{{.Service}}.List(projectID) {{- end -}} {{- if .KeyIsRegional}} - glog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) + klog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, region, fl, projectID, rk) call := g.s.{{.VersionTitle}}.{{.Service}}.List(projectID, region) {{- end -}} {{- if .KeyIsZonal}} - glog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) + 
klog.V(5).Infof("{{.GCEWrapType}}.List(%v, %v, %v): projectID = %v, rk = %+v", ctx, zone, fl, projectID, rk) call := g.s.{{.VersionTitle}}.{{.Service}}.List(projectID, zone) {{- end}} if fl != filter.None { @@ -734,23 +734,23 @@ func (g *{{.GCEWrapType}}) List(ctx context.Context, zone string, fl *filter.F) } var all []*{{.FQObjectType}} f := func(l *{{.ObjectListType}}) error { - glog.V(5).Infof("{{.GCEWrapType}}.List(%v, ..., %v): page %+v", ctx, fl, l) + klog.V(5).Infof("{{.GCEWrapType}}.List(%v, ..., %v): page %+v", ctx, fl, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("{{.GCEWrapType}}.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("{{.GCEWrapType}}.List(%v, ..., %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("{{.GCEWrapType}}.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("{{.GCEWrapType}}.List(%v, ..., %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("{{.GCEWrapType}}.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("{{.GCEWrapType}}.List(%v, ..., %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil @@ -760,9 +760,9 @@ func (g *{{.GCEWrapType}}) List(ctx context.Context, zone string, fl *filter.F) {{- if .GenerateInsert}} // Insert {{.Object}} with key of value obj. func (g *{{.GCEWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.FQObjectType}}) error { - glog.V(5).Infof("{{.GCEWrapType}}.Insert(%v, %v, %+v): called", ctx, key, obj) + klog.V(5).Infof("{{.GCEWrapType}}.Insert(%v, %v, %+v): called", ctx, key, obj) if !key.Valid() { - glog.V(2).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") @@ -772,9 +772,9 @@ func (g *{{.GCEWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.FQ Version: meta.Version("{{.Version}}"), Service: "{{.Service}}", } - glog.V(5).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...): RateLimiter error: %v", ctx, key, err) return err } obj.Name = key.Name @@ -791,12 +791,12 @@ func (g *{{.GCEWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.FQ op, err := call.Do() if err != nil { - glog.V(4).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("{{.GCEWrapType}}.Insert(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("{{.GCEWrapType}}.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) + klog.V(4).Infof("{{.GCEWrapType}}.Insert(%v, %v, %+v) = %+v", ctx, key, obj, err) return err } {{- end}} @@ -804,9 +804,9 @@ func (g *{{.GCEWrapType}}) Insert(ctx context.Context, key *meta.Key, obj *{{.FQ {{- if .GenerateDelete}} // Delete the {{.Object}} referenced by key. 
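The KeyIsGlobal, KeyIsRegional, and KeyIsZonal branches exist because meta.Key carries the resource's scope, and both the List signature and its log lines change with it; the generated code above reads key.Region for regional resources and res.Key.Zone for zonal ones. A rough sketch of that dispatch, with the field semantics assumed from that usage:

    package main

    import "fmt"

    // k mirrors the shape of meta.Key as used above: a Name plus an
    // optional Region or Zone that determines the resource's scope.
    type k struct {
        Name, Region, Zone string
    }

    func scope(key k) string {
        switch {
        case key.Zone != "":
            return "zonal"
        case key.Region != "":
            return "regional"
        default:
            return "global"
        }
    }

    func main() {
        fmt.Println(scope(k{Name: "um-1"}))                      // global, like UrlMaps
        fmt.Println(scope(k{Name: "tp-1", Region: "us-east1"}))  // regional, like TargetPools
        fmt.Println(scope(k{Name: "mig-1", Zone: "us-east1-b"})) // zonal
    }
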
func (g *{{.GCEWrapType}}) Delete(ctx context.Context, key *meta.Key) error { - glog.V(5).Infof("{{.GCEWrapType}}.Delete(%v, %v): called", ctx, key) + klog.V(5).Infof("{{.GCEWrapType}}.Delete(%v, %v): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("{{.GCEWrapType}}.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("{{.GCEWrapType}}.Delete(%v, %v): key is invalid (%#v)", ctx, key, key) return fmt.Errorf("invalid GCE key (%+v)", key) } projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") @@ -816,9 +816,9 @@ func (g *{{.GCEWrapType}}) Delete(ctx context.Context, key *meta.Key) error { Version: meta.Version("{{.Version}}"), Service: "{{.Service}}", } - glog.V(5).Infof("{{.GCEWrapType}}.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("{{.GCEWrapType}}.Delete(%v, %v): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("{{.GCEWrapType}}.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("{{.GCEWrapType}}.Delete(%v, %v): RateLimiter error: %v", ctx, key, err) return err } {{- if .KeyIsGlobal}} @@ -834,12 +834,12 @@ func (g *{{.GCEWrapType}}) Delete(ctx context.Context, key *meta.Key) error { op, err := call.Do() if err != nil { - glog.V(4).Infof("{{.GCEWrapType}}.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("{{.GCEWrapType}}.Delete(%v, %v) = %v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("{{.GCEWrapType}}.Delete(%v, %v) = %v", ctx, key, err) + klog.V(4).Infof("{{.GCEWrapType}}.Delete(%v, %v) = %v", ctx, key, err) return err } {{end -}} @@ -847,7 +847,7 @@ func (g *{{.GCEWrapType}}) Delete(ctx context.Context, key *meta.Key) error { {{- if .AggregatedList}} // AggregatedList lists all resources of the given type across all locations. func (g *{{.GCEWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (map[string][]*{{.FQObjectType}}, error) { - glog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) called", ctx, fl) + klog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) called", ctx, fl) projectID := g.s.ProjectRouter.ProjectID(ctx, "{{.Version}}", "{{.Service}}") rk := &RateLimitKey{ @@ -857,9 +857,9 @@ func (g *{{.GCEWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (ma Service: "{{.Service}}", } - glog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) + klog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v): projectID = %v, rk = %+v", ctx, fl, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) + klog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v): RateLimiter error: %v", ctx, fl, err) return nil, err } @@ -872,23 +872,23 @@ func (g *{{.GCEWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (ma all := map[string][]*{{.FQObjectType}}{} f := func(l *{{.ObjectAggregatedListType}}) error { for k, v := range l.Items { - glog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) + klog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v): page[%v]%+v", ctx, fl, k, v) all[k] = append(all[k], v.{{.AggregatedListField}}...) 
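The generated List and AggregatedList wrappers above gate their most expensive logging behind klog.V. Note that because V(4) is already true whenever -v is 5 or higher, the generated else-if dump branch can never fire, a quirk carried over verbatim from the glog version; the standalone sketch below orders the checks most-verbose-first instead. The resource slice is hypothetical, not the generated code itself:

package main

import (
	"flag"
	"fmt"

	"k8s.io/klog"
)

// logList pays for per-item formatting only when the dump will be emitted:
// klog.V(n) returns a Verbose (a bool), so the guard skips the branch
// entirely when -v is below n.
func logList(all []string) {
	if klog.V(5) {
		// Full dump at -v>=5; building asStr is the expensive part,
		// so it stays behind the guard.
		var asStr []string
		for _, o := range all {
			asStr = append(asStr, fmt.Sprintf("%+v", o))
		}
		klog.V(5).Infof("List() = %v", asStr)
	} else if klog.V(4) {
		// Cheap one-line summary at -v=4.
		klog.V(4).Infof("List() = [%v items]", len(all))
	}
}

func main() {
	klog.InitFlags(nil) // klog registers its flags only when asked
	flag.Parse()
	logList([]string{"a", "b", "c"})
}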
} return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) + klog.V(4).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) = %v, %v", ctx, fl, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) = [%v items], %v", ctx, fl, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) + klog.V(5).Infof("{{.GCEWrapType}}.AggregatedList(%v, %v) = %v, %v", ctx, fl, asStr, nil) } return all, nil } @@ -898,10 +898,10 @@ func (g *{{.GCEWrapType}}) AggregatedList(ctx context.Context, fl *filter.F) (ma {{- range .}} // {{.Name}} is a method on {{.GCEWrapType}}. func (g *{{.GCEWrapType}}) {{.FcnArgs}} { - glog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): called", ctx, key) + klog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): called", ctx, key) if !key.Valid() { - glog.V(2).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): key is invalid (%#v)", ctx, key, key) + klog.V(2).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): key is invalid (%#v)", ctx, key, key) {{- if .IsOperation}} return fmt.Errorf("invalid GCE key (%+v)", key) {{- else if .IsGet}} @@ -917,10 +917,10 @@ func (g *{{.GCEWrapType}}) {{.FcnArgs}} { Version: meta.Version("{{.Version}}"), Service: "{{.Service}}", } - glog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) + klog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): projectID = %v, rk = %+v", ctx, key, projectID, rk) if err := g.s.RateLimiter.Accept(ctx, rk); err != nil { - glog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): RateLimiter error: %v", ctx, key, err) + klog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): RateLimiter error: %v", ctx, key, err) {{- if .IsOperation}} return err {{- else}} @@ -940,36 +940,36 @@ func (g *{{.GCEWrapType}}) {{.FcnArgs}} { call.Context(ctx) op, err := call.Do() if err != nil { - glog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %+v", ctx, key, err) return err } err = g.s.WaitForCompletion(ctx, op) - glog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %+v", ctx, key, err) + klog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %+v", ctx, key, err) return err {{- else if .IsGet}} call.Context(ctx) v, err := call.Do() - glog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %+v, %v", ctx, key, v, err) + klog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %+v, %v", ctx, key, v, err) return v, err {{- else if .IsPaged}} var all []*{{.Version}}.{{.ItemType}} f := func(l *{{.Version}}.{{.ReturnType}}) error { - glog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): page %+v", ctx, key, l) + klog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...): page %+v", ctx, key, l) all = append(all, l.Items...) return nil } if err := call.Pages(ctx, f); err != nil { - glog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %v, %v", ctx, key, nil, err) + klog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %v, %v", ctx, key, nil, err) return nil, err } - if glog.V(4) { - glog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) 
= [%v items], %v", ctx, key, len(all), nil) - } else if glog.V(5) { + if klog.V(4) { + klog.V(4).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = [%v items], %v", ctx, key, len(all), nil) + } else if klog.V(5) { var asStr []string for _, o := range all { asStr = append(asStr, fmt.Sprintf("%+v", o)) } - glog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) + klog.V(5).Infof("{{.GCEWrapType}}.{{.Name}}(%v, %v, ...) = %v, %v", ctx, key, asStr, nil) } return all, nil {{- end}} diff --git a/pkg/cloudprovider/providers/gce/cloud/op.go b/pkg/cloudprovider/providers/gce/cloud/op.go index 2933fe223b79c..eb45c769e4615 100644 --- a/pkg/cloudprovider/providers/gce/cloud/op.go +++ b/pkg/cloudprovider/providers/gce/cloud/op.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "github.com/golang/glog" + "k8s.io/klog" alpha "google.golang.org/api/compute/v0.alpha" beta "google.golang.org/api/compute/v0.beta" @@ -67,13 +67,13 @@ func (o *gaOperation) isDone(ctx context.Context) (bool, error) { switch o.key.Type() { case meta.Regional: op, err = o.s.GA.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("GA.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) + klog.V(5).Infof("GA.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) case meta.Zonal: op, err = o.s.GA.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("GA.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) + klog.V(5).Infof("GA.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) case meta.Global: op, err = o.s.GA.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("GA.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) + klog.V(5).Infof("GA.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) default: return false, fmt.Errorf("invalid key type: %#v", o.key) } @@ -124,13 +124,13 @@ func (o *alphaOperation) isDone(ctx context.Context) (bool, error) { switch o.key.Type() { case meta.Regional: op, err = o.s.Alpha.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Alpha.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) + klog.V(5).Infof("Alpha.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) case meta.Zonal: op, err = o.s.Alpha.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Alpha.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) + klog.V(5).Infof("Alpha.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) case meta.Global: op, err = o.s.Alpha.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Alpha.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) + klog.V(5).Infof("Alpha.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) default: return false, fmt.Errorf("invalid key type: %#v", o.key) } @@ -181,13 +181,13 @@ func (o *betaOperation) isDone(ctx context.Context) (bool, error) { switch o.key.Type() { case 
meta.Regional: op, err = o.s.Beta.RegionOperations.Get(o.projectID, o.key.Region, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Beta.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) + klog.V(5).Infof("Beta.RegionOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Region, o.key.Name, op, err, ctx) case meta.Zonal: op, err = o.s.Beta.ZoneOperations.Get(o.projectID, o.key.Zone, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Beta.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) + klog.V(5).Infof("Beta.ZoneOperations.Get(%v, %v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Zone, o.key.Name, op, err, ctx) case meta.Global: op, err = o.s.Beta.GlobalOperations.Get(o.projectID, o.key.Name).Context(ctx).Do() - glog.V(5).Infof("Beta.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) + klog.V(5).Infof("Beta.GlobalOperations.Get(%v, %v) = %+v, %v; ctx = %v", o.projectID, o.key.Name, op, err, ctx) default: return false, fmt.Errorf("invalid key type: %#v", o.key) } diff --git a/pkg/cloudprovider/providers/gce/cloud/service.go b/pkg/cloudprovider/providers/gce/cloud/service.go index 2f332dfff854a..4d7b4c557f2a8 100644 --- a/pkg/cloudprovider/providers/gce/cloud/service.go +++ b/pkg/cloudprovider/providers/gce/cloud/service.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "github.com/golang/glog" + "k8s.io/klog" alpha "google.golang.org/api/compute/v0.alpha" beta "google.golang.org/api/compute/v0.beta" @@ -69,7 +69,7 @@ func (s *Service) wrapOperation(anyOp interface{}) (operation, error) { func (s *Service) WaitForCompletion(ctx context.Context, genericOp interface{}) error { op, err := s.wrapOperation(genericOp) if err != nil { - glog.Errorf("wrapOperation(%+v) error: %v", genericOp, err) + klog.Errorf("wrapOperation(%+v) error: %v", genericOp, err) return err } @@ -86,18 +86,18 @@ func (s *Service) pollOperation(ctx context.Context, op operation) error { // returning ctx.Err(). 
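The pollOperation loop that follows checks for cancellation with a select that has a default arm, so the probe never blocks between polls. A self-contained sketch of the same loop shape, using only the standard library (poll and done are illustrative names, not the provider's API):

package main

import (
	"context"
	"fmt"
	"time"
)

// poll retries done() until it reports true or ctx is cancelled.
func poll(ctx context.Context, done func() bool) error {
	for {
		select {
		case <-ctx.Done():
			// Stop immediately; don't wait out another interval.
			return ctx.Err()
		default:
			// Not cancelled; fall through and poll again.
		}
		if done() {
			return nil
		}
		time.Sleep(50 * time.Millisecond) // stand-in for RateLimiter.Accept
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	fmt.Println(poll(ctx, func() bool { return false })) // context deadline exceeded
}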
select { case <-ctx.Done(): - glog.V(5).Infof("op.pollOperation(%v, %v) not completed, poll count = %d, ctx.Err = %v", ctx, op, pollCount, ctx.Err()) + klog.V(5).Infof("op.pollOperation(%v, %v) not completed, poll count = %d, ctx.Err = %v", ctx, op, pollCount, ctx.Err()) return ctx.Err() default: // ctx is not canceled, continue immediately } pollCount++ - glog.V(5).Infof("op.isDone(%v) waiting; op = %v, poll count = %d", ctx, op, pollCount) + klog.V(5).Infof("op.isDone(%v) waiting; op = %v, poll count = %d", ctx, op, pollCount) s.RateLimiter.Accept(ctx, op.rateLimitKey()) done, err := op.isDone(ctx) if err != nil { - glog.V(5).Infof("op.isDone(%v) error; op = %v, poll count = %d, err = %v, retrying", ctx, op, pollCount, err) + klog.V(5).Infof("op.isDone(%v) error; op = %v, poll count = %d, err = %v, retrying", ctx, op, pollCount, err) } if done { @@ -105,6 +105,6 @@ func (s *Service) pollOperation(ctx context.Context, op operation) error { } } - glog.V(5).Infof("op.isDone(%v) complete; op = %v, poll count = %d, op.err = %v", ctx, op, pollCount, op.error()) + klog.V(5).Infof("op.isDone(%v) complete; op = %v, poll count = %d, op.err = %v", ctx, op, pollCount, op.error()) return op.error() } diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index adba37d59b64a..616fe5f547750 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -30,13 +30,13 @@ import ( gcfg "gopkg.in/gcfg.v1" "cloud.google.com/go/compute/metadata" - "github.com/golang/glog" "golang.org/x/oauth2" "golang.org/x/oauth2/google" computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" container "google.golang.org/api/container/v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -262,7 +262,7 @@ func newGCECloud(config io.Reader) (gceCloud *Cloud, err error) { if err != nil { return nil, err } - glog.Infof("Using GCE provider config %+v", configFile) + klog.Infof("Using GCE provider config %+v", configFile) } cloudConfig, err = generateCloudConfig(configFile) @@ -275,7 +275,7 @@ func newGCECloud(config io.Reader) (gceCloud *Cloud, err error) { func readConfig(reader io.Reader) (*ConfigFile, error) { cfg := &ConfigFile{} if err := gcfg.FatalOnly(gcfg.ReadInto(cfg, reader)); err != nil { - glog.Errorf("Couldn't read config: %v", err) + klog.Errorf("Couldn't read config: %v", err) return nil, err } return cfg, nil @@ -466,7 +466,7 @@ func CreateGCECloud(config *CloudConfig) (*Cloud, error) { } else { // Other consumers may use the cloudprovider without utilizing the wrapped GCE API functions // or functions requiring network/subnetwork URLs (e.g. Kubelet). 
- glog.Warningf("No network name or URL specified.") + klog.Warningf("No network name or URL specified.") } if config.SubnetworkURL != "" { @@ -480,20 +480,20 @@ func CreateGCECloud(config *CloudConfig) (*Cloud, error) { if networkName := lastComponent(networkURL); networkName != "" { var n *compute.Network if n, err = getNetwork(service, netProjID, networkName); err != nil { - glog.Warningf("Could not retrieve network %q; err: %v", networkName, err) + klog.Warningf("Could not retrieve network %q; err: %v", networkName, err) } else { switch typeOfNetwork(n) { case netTypeLegacy: - glog.Infof("Network %q is type legacy - no subnetwork", networkName) + klog.Infof("Network %q is type legacy - no subnetwork", networkName) isLegacyNetwork = true case netTypeCustom: - glog.Warningf("Network %q is type custom - cannot auto select a subnetwork", networkName) + klog.Warningf("Network %q is type custom - cannot auto select a subnetwork", networkName) case netTypeAuto: subnetURL, err = determineSubnetURL(service, netProjID, networkName, config.Region) if err != nil { - glog.Warningf("Could not determine subnetwork for network %q and region %v; err: %v", networkName, config.Region, err) + klog.Warningf("Could not determine subnetwork for network %q and region %v; err: %v", networkName, config.Region, err) } else { - glog.Infof("Auto selecting subnetwork %q", subnetURL) + klog.Infof("Auto selecting subnetwork %q", subnetURL) } } } @@ -507,7 +507,7 @@ func CreateGCECloud(config *CloudConfig) (*Cloud, error) { } } if len(config.ManagedZones) > 1 { - glog.Infof("managing multiple zones: %v", config.ManagedZones) + klog.Infof("managing multiple zones: %v", config.ManagedZones) } operationPollRateLimiter := flowcontrol.NewTokenBucketRateLimiter(5, 5) // 5 qps, 5 burst. @@ -588,7 +588,7 @@ func tryConvertToProjectNames(configProject, configNetworkProject string, servic if isProjectNumber(projID) { projName, err := getProjectID(service, projID) if err != nil { - glog.Warningf("Failed to retrieve project %v while trying to retrieve its name. err %v", projID, err) + klog.Warningf("Failed to retrieve project %v while trying to retrieve its name. err %v", projID, err) } else { projID = projName } @@ -601,7 +601,7 @@ func tryConvertToProjectNames(configProject, configNetworkProject string, servic if isProjectNumber(netProjID) { netProjName, err := getProjectID(service, netProjID) if err != nil { - glog.Warningf("Failed to retrieve network project %v while trying to retrieve its name. err %v", netProjID, err) + klog.Warningf("Failed to retrieve network project %v while trying to retrieve its name. err %v", netProjID, err) } else { netProjID = netProjName } @@ -692,7 +692,7 @@ func (g *Cloud) IsLegacyNetwork() bool { // SetInformers sets up the zone handlers we need watching for node changes. 
func (g *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { - glog.Infof("Setting up informers for Cloud") + klog.Infof("Setting up informers for Cloud") nodeInformer := informerFactory.Core().V1().Nodes().Informer() nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { @@ -715,12 +715,12 @@ func (g *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { if !isNode { deletedState, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Received unexpected object: %v", obj) + klog.Errorf("Received unexpected object: %v", obj) return } node, ok = deletedState.Obj.(*v1.Node) if !ok { - glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) + klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) return } } @@ -871,12 +871,12 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) { oauth2.NoContext, compute.CloudPlatformScope, compute.ComputeScope) - glog.Infof("Using DefaultTokenSource %#v", tokenSource) + klog.Infof("Using DefaultTokenSource %#v", tokenSource) if err != nil { return nil, err } } else { - glog.Infof("Using existing Token Source %#v", tokenSource) + klog.Infof("Using existing Token Source %#v", tokenSource) } backoff := wait.Backoff{ @@ -887,7 +887,7 @@ func newOauthClient(tokenSource oauth2.TokenSource) (*http.Client, error) { } if err := wait.ExponentialBackoff(backoff, func() (bool, error) { if _, err := tokenSource.Token(); err != nil { - glog.Errorf("error fetching initial token: %v", err) + klog.Errorf("error fetching initial token: %v", err) return false, nil } return true, nil diff --git a/pkg/cloudprovider/providers/gce/gce_address_manager.go b/pkg/cloudprovider/providers/gce/gce_address_manager.go index 449b33a0d21ce..51b9bc5e718f7 100644 --- a/pkg/cloudprovider/providers/gce/gce_address_manager.go +++ b/pkg/cloudprovider/providers/gce/gce_address_manager.go @@ -22,7 +22,7 @@ import ( compute "google.golang.org/api/compute/v1" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" ) @@ -62,7 +62,7 @@ func (am *addressManager) HoldAddress() (string, error) { // could be reserving another address; therefore, it would need to be deleted. In the normal // case of using a controller address, retrieving the address by name results in the fewest API // calls since it indicates whether a Delete is necessary before Reserve. - glog.V(4).Infof("%v: attempting hold of IP %q Type %q", am.logPrefix, am.targetIP, am.addressType) + klog.V(4).Infof("%v: attempting hold of IP %q Type %q", am.logPrefix, am.targetIP, am.addressType) // Get the address in case it was orphaned earlier addr, err := am.svc.GetRegionAddress(am.name, am.region) if err != nil && !isNotFound(err) { @@ -73,20 +73,20 @@ func (am *addressManager) HoldAddress() (string, error) { // If address exists, check if the address had the expected attributes. validationError := am.validateAddress(addr) if validationError == nil { - glog.V(4).Infof("%v: address %q already reserves IP %q Type %q. No further action required.", am.logPrefix, addr.Name, addr.Address, addr.AddressType) + klog.V(4).Infof("%v: address %q already reserves IP %q Type %q. 
No further action required.", am.logPrefix, addr.Name, addr.Address, addr.AddressType) return addr.Address, nil } - glog.V(2).Infof("%v: deleting existing address because %v", am.logPrefix, validationError) + klog.V(2).Infof("%v: deleting existing address because %v", am.logPrefix, validationError) err := am.svc.DeleteRegionAddress(addr.Name, am.region) if err != nil { if isNotFound(err) { - glog.V(4).Infof("%v: address %q was not found. Ignoring.", am.logPrefix, addr.Name) + klog.V(4).Infof("%v: address %q was not found. Ignoring.", am.logPrefix, addr.Name) } else { return "", err } } else { - glog.V(4).Infof("%v: successfully deleted previous address %q", am.logPrefix, addr.Name) + klog.V(4).Infof("%v: successfully deleted previous address %q", am.logPrefix, addr.Name) } } @@ -96,23 +96,23 @@ func (am *addressManager) HoldAddress() (string, error) { // ReleaseAddress will release the address if it's owned by the controller. func (am *addressManager) ReleaseAddress() error { if !am.tryRelease { - glog.V(4).Infof("%v: not attempting release of address %q.", am.logPrefix, am.targetIP) + klog.V(4).Infof("%v: not attempting release of address %q.", am.logPrefix, am.targetIP) return nil } - glog.V(4).Infof("%v: releasing address %q named %q", am.logPrefix, am.targetIP, am.name) + klog.V(4).Infof("%v: releasing address %q named %q", am.logPrefix, am.targetIP, am.name) // Controller only ever tries to unreserve the address named with the load balancer's name. err := am.svc.DeleteRegionAddress(am.name, am.region) if err != nil { if isNotFound(err) { - glog.Warningf("%v: address %q was not found. Ignoring.", am.logPrefix, am.name) + klog.Warningf("%v: address %q was not found. Ignoring.", am.logPrefix, am.name) return nil } return err } - glog.V(4).Infof("%v: successfully released IP %q named %q", am.logPrefix, am.targetIP, am.name) + klog.V(4).Infof("%v: successfully released IP %q named %q", am.logPrefix, am.targetIP, am.name) return nil } @@ -130,7 +130,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) { reserveErr := am.svc.ReserveRegionAddress(newAddr, am.region) if reserveErr == nil { if newAddr.Address != "" { - glog.V(4).Infof("%v: successfully reserved IP %q with name %q", am.logPrefix, newAddr.Address, newAddr.Name) + klog.V(4).Infof("%v: successfully reserved IP %q with name %q", am.logPrefix, newAddr.Address, newAddr.Name) return newAddr.Address, nil } @@ -139,7 +139,7 @@ func (am *addressManager) ensureAddressReservation() (string, error) { return "", err } - glog.V(4).Infof("%v: successfully created address %q which reserved IP %q", am.logPrefix, addr.Name, addr.Address) + klog.V(4).Infof("%v: successfully created address %q which reserved IP %q", am.logPrefix, addr.Name, addr.Address) return addr.Address, nil } else if !isHTTPErrorCode(reserveErr, http.StatusConflict) && !isHTTPErrorCode(reserveErr, http.StatusBadRequest) { // If the IP is already reserved: @@ -169,10 +169,10 @@ func (am *addressManager) ensureAddressReservation() (string, error) { if am.isManagedAddress(addr) { // The address with this name is checked at the beginning of 'HoldAddress()', but for some reason // it was re-created by this point. May be possible that two controllers are running. 
- glog.Warningf("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP) + klog.Warningf("%v: address %q unexpectedly existed with IP %q.", am.logPrefix, addr.Name, am.targetIP) } else { // If the retrieved address is not named with the loadbalancer name, then the controller does not own it, but will allow use of it. - glog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description) + klog.V(4).Infof("%v: address %q was already reserved with name: %q, description: %q", am.logPrefix, am.targetIP, addr.Name, addr.Description) am.tryRelease = false } diff --git a/pkg/cloudprovider/providers/gce/gce_addresses.go b/pkg/cloudprovider/providers/gce/gce_addresses.go index b595ae6282538..044258f1b4e35 100644 --- a/pkg/cloudprovider/providers/gce/gce_addresses.go +++ b/pkg/cloudprovider/providers/gce/gce_addresses.go @@ -19,7 +19,7 @@ package gce import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" @@ -149,7 +149,7 @@ func (g *Cloud) GetRegionAddressByIP(region, ipAddress string) (*compute.Address } if len(addrs) > 1 { - glog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs)) + klog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs)) } for _, addr := range addrs { if addr.Address == ipAddress { @@ -173,7 +173,7 @@ func (g *Cloud) GetBetaRegionAddressByIP(region, ipAddress string) (*computebeta } if len(addrs) > 1 { - glog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs)) + klog.Warningf("More than one addresses matching the IP %q: %v", ipAddress, addrNames(addrs)) } for _, addr := range addrs { if addr.Address == ipAddress { diff --git a/pkg/cloudprovider/providers/gce/gce_annotations.go b/pkg/cloudprovider/providers/gce/gce_annotations.go index 4f3281c3e4763..39e632e0795f3 100644 --- a/pkg/cloudprovider/providers/gce/gce_annotations.go +++ b/pkg/cloudprovider/providers/gce/gce_annotations.go @@ -19,7 +19,7 @@ package gce import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" @@ -90,7 +90,7 @@ func GetLoadBalancerAnnotationBackendShare(service *v1.Service) bool { // Check for deprecated annotation key if l, exists := service.Annotations[deprecatedServiceAnnotationILBBackendShare]; exists && l == "true" { - glog.Warningf("Annotation %q is deprecated and replaced with an alpha-specific key: %q", deprecatedServiceAnnotationILBBackendShare, ServiceAnnotationILBBackendShare) + klog.Warningf("Annotation %q is deprecated and replaced with an alpha-specific key: %q", deprecatedServiceAnnotationILBBackendShare, ServiceAnnotationILBBackendShare) return true } diff --git a/pkg/cloudprovider/providers/gce/gce_clusterid.go b/pkg/cloudprovider/providers/gce/gce_clusterid.go index 23dd6cf5e214c..2f40167788d84 100644 --- a/pkg/cloudprovider/providers/gce/gce_clusterid.go +++ b/pkg/cloudprovider/providers/gce/gce_clusterid.go @@ -25,7 +25,6 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -33,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" + "k8s.io/klog" ) const ( @@ -77,7 +77,7 @@ func (g *Cloud) watchClusterID(stop <-chan 
struct{}) { AddFunc: func(obj interface{}) { m, ok := obj.(*v1.ConfigMap) if !ok || m == nil { - glog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", obj, ok) + klog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", obj, ok) return } if m.Namespace != UIDNamespace || @@ -85,13 +85,13 @@ func (g *Cloud) watchClusterID(stop <-chan struct{}) { return } - glog.V(4).Infof("Observed new configmap for clusteriD: %v, %v; setting local values", m.Name, m.Data) + klog.V(4).Infof("Observed new configmap for cluster ID: %v, %v; setting local values", m.Name, m.Data) g.ClusterID.update(m) }, UpdateFunc: func(old, cur interface{}) { m, ok := cur.(*v1.ConfigMap) if !ok || m == nil { - glog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", cur, ok) + klog.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", cur, ok) return } @@ -104,7 +104,7 @@ func (g *Cloud) watchClusterID(stop <-chan struct{}) { return } - glog.V(4).Infof("Observed updated configmap for clusteriD %v, %v; setting local values", m.Name, m.Data) + klog.V(4).Infof("Observed updated configmap for cluster ID %v, %v; setting local values", m.Name, m.Data) g.ClusterID.update(m) }, } @@ -185,7 +185,7 @@ func (ci *ClusterID) getOrInitialize() error { return err } - glog.V(4).Infof("Creating clusteriD: %v", newID) + klog.V(4).Infof("Creating cluster ID: %v", newID) cfg := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: UIDConfigMapName, @@ -198,11 +198,11 @@ func (ci *ClusterID) getOrInitialize() error { } if _, err := ci.client.CoreV1().ConfigMaps(UIDNamespace).Create(cfg); err != nil { - glog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err) + klog.Errorf("GCE cloud provider failed to create %v config map to store cluster id: %v", ci.cfgMapKey, err) return err } - glog.V(2).Infof("Created a config map containing clusteriD: %v", newID) + klog.V(2).Infof("Created a config map containing cluster ID: %v", newID) ci.update(cfg) return nil } @@ -219,7 +219,7 @@ func (ci *ClusterID) getConfigMap() (bool, error) { m, ok := item.(*v1.ConfigMap) if !ok || m == nil { err = fmt.Errorf("Expected v1.ConfigMap, item=%+v, typeIsOk=%v", item, ok) - glog.Error(err) + klog.Error(err) return false, err } ci.update(m) diff --git a/pkg/cloudprovider/providers/gce/gce_clusters.go b/pkg/cloudprovider/providers/gce/gce_clusters.go index 53295000c669e..379f5396a253f 100644 --- a/pkg/cloudprovider/providers/gce/gce_clusters.go +++ b/pkg/cloudprovider/providers/gce/gce_clusters.go @@ -20,8 +20,8 @@ import ( "context" "fmt" - "github.com/golang/glog" "google.golang.org/api/container/v1" + "k8s.io/klog" ) func newClustersMetricContext(request, zone string) *metricContext { @@ -97,7 +97,7 @@ func (g *Cloud) getClustersInLocation(zoneOrRegion string) ([]*container.Cluster return nil, mc.Observe(err) } if list.Header.Get("nextPageToken") != "" { - glog.Errorf("Failed to get all clusters for request, received next page token %s", list.Header.Get("nextPageToken")) + klog.Errorf("Failed to get all clusters for request, received next page token %s", list.Header.Get("nextPageToken")) } return list.Clusters, mc.Observe(nil) diff --git a/pkg/cloudprovider/providers/gce/gce_disks.go b/pkg/cloudprovider/providers/gce/gce_disks.go index b9068d42121ee..24dd76fc96dbc 100644 --- a/pkg/cloudprovider/providers/gce/gce_disks.go +++ b/pkg/cloudprovider/providers/gce/gce_disks.go @@ -33,10 +33,10 @@ import ( "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "github.com/golang/glog" compute
"google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" "k8s.io/kubernetes/pkg/features" @@ -400,7 +400,7 @@ func (manager *gceServiceManager) getRegionFromZone(zoneInfo zoneType) (string, region, err := GetGCERegion(zone) if err != nil { - glog.Warningf("failed to parse GCE region from zone %q: %v", zone, err) + klog.Warningf("failed to parse GCE region from zone %q: %v", zone, err) region = manager.gce.region } @@ -569,7 +569,7 @@ func (g *Cloud) DetachDisk(devicePath string, nodeName types.NodeName) error { if err != nil { if err == cloudprovider.InstanceNotFound { // If instance no longer exists, safe to assume volume is not attached. - glog.Warningf( + klog.Warningf( "Instance %q does not exist. DetachDisk will assume PD %q is not attached to it.", instanceName, devicePath) @@ -590,7 +590,7 @@ func (g *Cloud) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, if err != nil { if err == cloudprovider.InstanceNotFound { // If instance no longer exists, safe to assume volume is not attached. - glog.Warningf( + klog.Warningf( "Instance %q does not exist. DiskIsAttached will assume PD %q is not attached to it.", instanceName, diskName) @@ -622,7 +622,7 @@ func (g *Cloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (m if err != nil { if err == cloudprovider.InstanceNotFound { // If instance no longer exists, safe to assume volume is not attached. - glog.Warningf( + klog.Warningf( "Instance %q does not exist. DisksAreAttached will assume PD %v are not attached to it.", instanceName, diskNames) @@ -676,7 +676,7 @@ func (g *Cloud) CreateDisk( mc.Observe(err) if isGCEError(err, "alreadyExists") { - glog.Warningf("GCE PD %q already exists, reusing", name) + klog.Warningf("GCE PD %q already exists, reusing", name) return nil } return err @@ -717,7 +717,7 @@ func (g *Cloud) CreateRegionalDisk( mc.Observe(err) if isGCEError(err, "alreadyExists") { - glog.Warningf("GCE PD %q already exists, reusing", name) + klog.Warningf("GCE PD %q already exists, reusing", name) return nil } return err @@ -821,7 +821,7 @@ func (g *Cloud) GetAutoLabelsForPD(name string, zone string) (map[string]string, if utilfeature.DefaultFeatureGate.Enabled(features.GCERegionalPersistentDisk) { zoneSet, err := volumeutil.LabelZonesToSet(zone) if err != nil { - glog.Warningf("Failed to parse zone field: %q. Will use raw field.", zone) + klog.Warningf("Failed to parse zone field: %q. 
Will use raw field.", zone) } if len(zoneSet) > 1 { @@ -955,7 +955,7 @@ func (g *Cloud) GetDiskByNameUnknownZone(diskName string) (*Disk, error) { switch zoneInfo := disk.ZoneInfo.(type) { case multiZone: if zoneInfo.replicaZones.Has(zone) { - glog.Warningf("GCE PD name (%q) was found in multiple zones (%q), but ok because it is a RegionalDisk.", + klog.Warningf("GCE PD name (%q) was found in multiple zones (%q), but ok because it is a RegionalDisk.", diskName, zoneInfo.replicaZones) continue } @@ -969,7 +969,7 @@ func (g *Cloud) GetDiskByNameUnknownZone(diskName string) (*Disk, error) { if found != nil { return found, nil } - glog.Warningf("GCE persistent disk %q not found in managed zones (%s)", + klog.Warningf("GCE persistent disk %q not found in managed zones (%s)", diskName, strings.Join(g.managedZones, ",")) return nil, cloudprovider.DiskNotFound diff --git a/pkg/cloudprovider/providers/gce/gce_healthchecks.go b/pkg/cloudprovider/providers/gce/gce_healthchecks.go index 10e1f72a3592c..d314376db1214 100644 --- a/pkg/cloudprovider/providers/gce/gce_healthchecks.go +++ b/pkg/cloudprovider/providers/gce/gce_healthchecks.go @@ -17,7 +17,7 @@ limitations under the License. package gce import ( - "github.com/golang/glog" + "k8s.io/klog" computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" @@ -42,7 +42,7 @@ var ( func init() { if v, err := utilversion.ParseGeneric("1.7.2"); err != nil { - glog.Fatalf("Failed to parse version for minNodesHealthCheckVersion: %v", err) + klog.Fatalf("Failed to parse version for minNodesHealthCheckVersion: %v", err) } else { minNodesHealthCheckVersion = v } @@ -274,7 +274,7 @@ func GetNodesHealthCheckPath() string { func isAtLeastMinNodesHealthCheckVersion(vstring string) bool { version, err := utilversion.ParseGeneric(vstring) if err != nil { - glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) + klog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) return false } return version.AtLeast(minNodesHealthCheckVersion) diff --git a/pkg/cloudprovider/providers/gce/gce_instances.go b/pkg/cloudprovider/providers/gce/gce_instances.go index f97d89648f852..e8345abfae564 100644 --- a/pkg/cloudprovider/providers/gce/gce_instances.go +++ b/pkg/cloudprovider/providers/gce/gce_instances.go @@ -25,9 +25,9 @@ import ( "time" "cloud.google.com/go/compute/metadata" - "github.com/golang/glog" computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -97,7 +97,7 @@ func (g *Cloud) NodeAddresses(_ context.Context, _ types.NodeName) ([]v1.NodeAdd } if internalDNSFull, err := metadata.Get("instance/hostname"); err != nil { - glog.Warningf("couldn't get full internal DNS name: %v", err) + klog.Warningf("couldn't get full internal DNS name: %v", err) } else { addresses = append(addresses, v1.NodeAddress{Type: v1.NodeInternalDNS, Address: internalDNSFull}, @@ -234,7 +234,7 @@ func (g *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyDat return wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) { project, err := g.c.Projects().Get(ctx, g.projectID) if err != nil { - glog.Errorf("Could not get project: %v", err) + klog.Errorf("Could not get project: %v", err) return false, nil } keyString := fmt.Sprintf("%s:%s %s@%s", user, strings.TrimSpace(string(keyData)), user, user) @@ -243,7 +243,7 @@ func (g *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user 
string, keyDat if item.Key == "sshKeys" { if strings.Contains(*item.Value, keyString) { // We've already added the key - glog.Info("SSHKey already in project metadata") + klog.Info("SSHKey already in project metadata") return true, nil } value := *item.Value + "\n" + keyString @@ -254,7 +254,7 @@ func (g *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyDat } if !found { // This is super unlikely, so log. - glog.Infof("Failed to find sshKeys metadata, creating a new item") + klog.Infof("Failed to find sshKeys metadata, creating a new item") project.CommonInstanceMetadata.Items = append(project.CommonInstanceMetadata.Items, &compute.MetadataItems{ Key: "sshKeys", @@ -267,10 +267,10 @@ func (g *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyDat mc.Observe(err) if err != nil { - glog.Errorf("Could not Set Metadata: %v", err) + klog.Errorf("Could not Set Metadata: %v", err) return false, nil } - glog.Infof("Successfully added sshKey to project metadata") + klog.Infof("Successfully added sshKey to project metadata") return true, nil }) } @@ -278,7 +278,7 @@ func (g *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyDat // GetAllCurrentZones returns all the zones in which k8s nodes are currently running func (g *Cloud) GetAllCurrentZones() (sets.String, error) { if g.nodeInformerSynced == nil { - glog.Warningf("Cloud object does not have informers set, should only happen in E2E binary.") + klog.Warningf("Cloud object does not have informers set, should only happen in E2E binary.") return g.GetAllZonesFromCloudProvider() } g.nodeZonesLock.Lock() @@ -407,7 +407,7 @@ func (g *Cloud) AddAliasToInstance(nodeName types.NodeName, alias *net.IPNet) er return fmt.Errorf("instance %q has no network interfaces", nodeName) case 1: default: - glog.Warningf("Instance %q has more than one network interface, using only the first (%v)", + klog.Warningf("Instance %q has more than one network interface, using only the first (%v)", nodeName, instance.NetworkInterfaces) } @@ -437,7 +437,7 @@ func (g *Cloud) getInstancesByNames(names []string) ([]*gceInstance, error) { for _, name := range names { name = canonicalizeInstanceName(name) if !strings.HasPrefix(name, g.nodeInstancePrefix) { - glog.Warningf("Instance %q does not conform to prefix %q, removing filter", name, g.nodeInstancePrefix) + klog.Warningf("Instance %q does not conform to prefix %q, removing filter", name, g.nodeInstancePrefix) nodeInstancePrefix = "" } found[name] = nil @@ -459,7 +459,7 @@ func (g *Cloud) getInstancesByNames(names []string) ([]*gceInstance, error) { continue } if found[inst.Name] != nil { - glog.Errorf("Instance name %q was duplicated (in zone %q and %q)", inst.Name, zone, found[inst.Name].Zone) + klog.Errorf("Instance name %q was duplicated (in zone %q and %q)", inst.Name, zone, found[inst.Name].Zone) continue } found[inst.Name] = &gceInstance{ @@ -480,7 +480,7 @@ func (g *Cloud) getInstancesByNames(names []string) ([]*gceInstance, error) { failed = append(failed, k) } } - glog.Errorf("Failed to retrieve instances: %v", failed) + klog.Errorf("Failed to retrieve instances: %v", failed) return nil, cloudprovider.InstanceNotFound } @@ -501,7 +501,7 @@ func (g *Cloud) getInstanceByName(name string) (*gceInstance, error) { if isHTTPErrorCode(err, http.StatusNotFound) { continue } - glog.Errorf("getInstanceByName: failed to get instance %s in zone %s; err: %v", name, zone, err) + klog.Errorf("getInstanceByName: failed to get instance %s in zone %s; err: %v", name, zone, err) return 
nil, err } return instance, nil @@ -561,7 +561,7 @@ func (g *Cloud) isCurrentInstance(instanceID string) bool { currentInstanceID, err := getInstanceIDViaMetadata() if err != nil { // Log and swallow error - glog.Errorf("Failed to fetch instanceID via Metadata: %v", err) + klog.Errorf("Failed to fetch instanceID via Metadata: %v", err) return false } @@ -583,7 +583,7 @@ func (g *Cloud) computeHostTags(hosts []*gceInstance) ([]string, error) { nodeInstancePrefix := g.nodeInstancePrefix for _, host := range hosts { if !strings.HasPrefix(host.Name, g.nodeInstancePrefix) { - glog.Warningf("instance %v does not conform to prefix '%s', ignoring filter", host, g.nodeInstancePrefix) + klog.Warningf("instance %v does not conform to prefix '%s', ignoring filter", host, g.nodeInstancePrefix) nodeInstancePrefix = "" } diff --git a/pkg/cloudprovider/providers/gce/gce_loadbalancer.go b/pkg/cloudprovider/providers/gce/gce_loadbalancer.go index b6e55d6e9dd39..35a2c6952f4ac 100644 --- a/pkg/cloudprovider/providers/gce/gce_loadbalancer.go +++ b/pkg/cloudprovider/providers/gce/gce_loadbalancer.go @@ -24,7 +24,7 @@ import ( "sort" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" cloudprovider "k8s.io/cloud-provider" @@ -114,7 +114,7 @@ func (g *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, svc return nil, err } - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): ensure %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, desiredScheme) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): ensure %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, desiredScheme) existingFwdRule, err := g.GetRegionForwardingRule(loadBalancerName, g.region) if err != nil && !isNotFound(err) { @@ -126,14 +126,14 @@ func (g *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, svc // If the loadbalancer type changes between INTERNAL and EXTERNAL, the old load balancer should be deleted. if existingScheme != desiredScheme { - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): deleting existing %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, existingScheme) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): deleting existing %v loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, existingScheme) switch existingScheme { case cloud.SchemeInternal: err = g.ensureInternalLoadBalancerDeleted(clusterName, clusterID, svc) default: err = g.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc) } - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done deleting existing %v loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, existingScheme, err) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done deleting existing %v loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, existingScheme, err) if err != nil { return nil, err } @@ -150,7 +150,7 @@ func (g *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, svc default: status, err = g.ensureExternalLoadBalancer(clusterName, clusterID, svc, existingFwdRule, nodes) } - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done ensuring loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v): done ensuring loadbalancer. 
err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) return status, err } @@ -163,7 +163,7 @@ func (g *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, svc return err } - glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): updating with %d nodes", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, len(nodes)) + klog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): updating with %d nodes", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, len(nodes)) switch scheme { case cloud.SchemeInternal: @@ -171,7 +171,7 @@ func (g *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, svc default: err = g.updateExternalLoadBalancer(clusterName, svc, nodes) } - glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): done updating. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) + klog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v, %v, %v): done updating. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) return err } @@ -184,7 +184,7 @@ func (g *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin return err } - glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): deleting loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region) + klog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): deleting loadbalancer", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region) switch scheme { case cloud.SchemeInternal: @@ -192,7 +192,7 @@ func (g *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin default: err = g.ensureExternalLoadBalancerDeleted(clusterName, clusterID, svc) } - glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): done deleting loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) + klog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v, %v, %v, %v): done deleting loadbalancer. err: %v", clusterName, svc.Namespace, svc.Name, loadBalancerName, g.region, err) return err } diff --git a/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go b/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go index 62531716662e5..6b92e71ef77f5 100644 --- a/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go +++ b/pkg/cloudprovider/providers/gce/gce_loadbalancer_external.go @@ -31,9 +31,9 @@ import ( "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" netsets "k8s.io/kubernetes/pkg/util/net/sets" - "github.com/golang/glog" computealpha "google.golang.org/api/compute/v0.alpha" compute "google.golang.org/api/compute/v1" + "k8s.io/klog" ) // ensureExternalLoadBalancer is the external implementation of LoadBalancer.EnsureLoadBalancer. @@ -66,16 +66,16 @@ func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name} lbRefStr := fmt.Sprintf("%v(%v)", loadBalancerName, serviceName) - glog.V(2).Infof("ensureExternalLoadBalancer(%s, %v, %v, %v, %v, %v)", lbRefStr, g.region, requestedIP, portStr, hostNames, apiService.Annotations) + klog.V(2).Infof("ensureExternalLoadBalancer(%s, %v, %v, %v, %v, %v)", lbRefStr, g.region, requestedIP, portStr, hostNames, apiService.Annotations) // Check the current and the desired network tiers. If they do not match, // tear down the existing resources with the wrong tier. 
netTier, err := g.getServiceNetworkTier(apiService) if err != nil { - glog.Errorf("ensureExternalLoadBalancer(%s): Failed to get the desired network tier: %v.", lbRefStr, err) + klog.Errorf("ensureExternalLoadBalancer(%s): Failed to get the desired network tier: %v.", lbRefStr, err) return nil, err } - glog.V(4).Infof("ensureExternalLoadBalancer(%s): Desired network tier %q.", lbRefStr, netTier) + klog.V(4).Infof("ensureExternalLoadBalancer(%s): Desired network tier %q.", lbRefStr, netTier) if g.AlphaFeatureGate.Enabled(AlphaFeatureNetworkTiers) { g.deleteWrongNetworkTieredResources(loadBalancerName, lbRefStr, netTier) } @@ -86,7 +86,7 @@ func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, return nil, err } if !fwdRuleExists { - glog.V(2).Infof("ensureExternalLoadBalancer(%s): Forwarding rule %v doesn't exist.", lbRefStr, loadBalancerName) + klog.V(2).Infof("ensureExternalLoadBalancer(%s): Forwarding rule %v doesn't exist.", lbRefStr, loadBalancerName) } // Make sure we know which IP address will be used and have properly reserved @@ -121,14 +121,14 @@ func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, } if isSafeToReleaseIP { if err := g.DeleteRegionAddress(loadBalancerName, g.region); err != nil && !isNotFound(err) { - glog.Errorf("ensureExternalLoadBalancer(%s): Failed to release static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, g.region, err) + klog.Errorf("ensureExternalLoadBalancer(%s): Failed to release static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, g.region, err) } else if isNotFound(err) { - glog.V(2).Infof("ensureExternalLoadBalancer(%s): IP address %s is not reserved.", lbRefStr, ipAddressToUse) + klog.V(2).Infof("ensureExternalLoadBalancer(%s): IP address %s is not reserved.", lbRefStr, ipAddressToUse) } else { - glog.Infof("ensureExternalLoadBalancer(%s): Released static IP %s.", lbRefStr, ipAddressToUse) + klog.Infof("ensureExternalLoadBalancer(%s): Released static IP %s.", lbRefStr, ipAddressToUse) } } else { - glog.Warningf("ensureExternalLoadBalancer(%s): Orphaning static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, g.region, err) + klog.Warningf("ensureExternalLoadBalancer(%s): Orphaning static IP %s in region %v: %v.", lbRefStr, ipAddressToUse, g.region, err) } }() @@ -149,7 +149,7 @@ func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, if err != nil { return nil, fmt.Errorf("failed to ensure a static IP for load balancer (%s): %v", lbRefStr, err) } - glog.Infof("ensureExternalLoadBalancer(%s): Ensured IP address %s (tier: %s).", lbRefStr, ipAddr, netTier) + klog.Infof("ensureExternalLoadBalancer(%s): Ensured IP address %s (tier: %s).", lbRefStr, ipAddr, netTier) // If the IP was not owned by the user, but it already existed, it // could indicate that the previous update cycle failed. We can use // this IP and try to run through the process again, but we should @@ -177,17 +177,17 @@ func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, // Unlike forwarding rules and target pools, firewalls can be updated // without needing to be deleted and recreated. 
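The comment above draws the key distinction in this file: firewalls are mutable in place, while forwarding rules and target pools must be torn down and recreated, which is why only the latter carry a tpNeedsRecreation flag. A minimal sketch of the resulting ensure shape, with a hypothetical interface standing in for the GCE client:

package gceexample

// firewallClient is a hypothetical stand-in for the slice of the GCE API
// this idiom needs; the real provider calls methods on *Cloud instead.
type firewallClient interface {
	Exists(name string) (bool, error)
	Update(name, spec string) error
	Create(name, spec string) error
}

// ensureFirewall updates in place when the resource exists, because
// firewalls (unlike forwarding rules) support mutation without recreation.
func ensureFirewall(c firewallClient, name, spec string) error {
	exists, err := c.Exists(name)
	if err != nil {
		return err
	}
	if exists {
		return c.Update(name, spec)
	}
	return c.Create(name, spec)
}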
if firewallExists { - glog.Infof("ensureExternalLoadBalancer(%s): Updating firewall.", lbRefStr) + klog.Infof("ensureExternalLoadBalancer(%s): Updating firewall.", lbRefStr) if err := g.updateFirewall(apiService, MakeFirewallName(loadBalancerName), g.region, desc, sourceRanges, ports, hosts); err != nil { return nil, err } - glog.Infof("ensureExternalLoadBalancer(%s): Updated firewall.", lbRefStr) + klog.Infof("ensureExternalLoadBalancer(%s): Updated firewall.", lbRefStr) } else { - glog.Infof("ensureExternalLoadBalancer(%s): Creating firewall.", lbRefStr) + klog.Infof("ensureExternalLoadBalancer(%s): Creating firewall.", lbRefStr) if err := g.createFirewall(apiService, MakeFirewallName(loadBalancerName), g.region, desc, sourceRanges, ports, hosts); err != nil { return nil, err } - glog.Infof("ensureExternalLoadBalancer(%s): Created firewall.", lbRefStr) + klog.Infof("ensureExternalLoadBalancer(%s): Created firewall.", lbRefStr) } } @@ -196,7 +196,7 @@ func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, return nil, err } if !tpExists { - glog.Infof("ensureExternalLoadBalancer(%s): Target pool for service doesn't exist.", lbRefStr) + klog.Infof("ensureExternalLoadBalancer(%s): Target pool for service doesn't exist.", lbRefStr) } // Check which health check needs to create and which health check needs to delete. @@ -207,12 +207,12 @@ func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, return nil, fmt.Errorf("error checking HTTP health check for load balancer (%s): %v", lbRefStr, err) } if path, healthCheckNodePort := apiservice.GetServiceHealthCheckPathPort(apiService); path != "" { - glog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs local traffic health checks on: %d%s.", lbRefStr, healthCheckNodePort, path) + klog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs local traffic health checks on: %d%s.", lbRefStr, healthCheckNodePort, path) if hcLocalTrafficExisting == nil { // This logic exists to detect a transition for non-OnlyLocal to OnlyLocal service // turn on the tpNeedsRecreation flag to delete/recreate fwdrule/tpool updating the // target pool to use local traffic health check. - glog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from nodes health checks to local traffic health checks.", lbRefStr) + klog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from nodes health checks to local traffic health checks.", lbRefStr) if supportsNodesHealthCheck { hcToDelete = makeHTTPHealthCheck(MakeNodesHealthCheckName(clusterID), GetNodesHealthCheckPath(), GetNodesHealthCheckPort()) } @@ -220,12 +220,12 @@ func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, } hcToCreate = makeHTTPHealthCheck(loadBalancerName, path, healthCheckNodePort) } else { - glog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs nodes health checks.", lbRefStr) + klog.V(4).Infof("ensureExternalLoadBalancer(%s): Service needs nodes health checks.", lbRefStr) if hcLocalTrafficExisting != nil { // This logic exists to detect a transition from OnlyLocal to non-OnlyLocal service // and turn on the tpNeedsRecreation flag to delete/recreate fwdrule/tpool updating the // target pool to use nodes health check. 
- glog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from local traffic health checks to nodes health checks.", lbRefStr) + klog.V(2).Infof("ensureExternalLoadBalancer(%s): Updating from local traffic health checks to nodes health checks.", lbRefStr) hcToDelete = hcLocalTrafficExisting tpNeedsRecreation = true } @@ -248,7 +248,7 @@ func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, if err := g.DeleteRegionForwardingRule(loadBalancerName, g.region); err != nil && !isNotFound(err) { return nil, fmt.Errorf("failed to delete existing forwarding rule for load balancer (%s) update: %v", lbRefStr, err) } - glog.Infof("ensureExternalLoadBalancer(%s): Deleted forwarding rule.", lbRefStr) + klog.Infof("ensureExternalLoadBalancer(%s): Deleted forwarding rule.", lbRefStr) } if err := g.ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation, apiService, loadBalancerName, clusterID, ipAddressToUse, hosts, hcToCreate, hcToDelete); err != nil { @@ -256,7 +256,7 @@ func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, } if tpNeedsRecreation || fwdRuleNeedsUpdate { - glog.Infof("ensureExternalLoadBalancer(%s): Creating forwarding rule, IP %s (tier: %s).", lbRefStr, ipAddressToUse, netTier) + klog.Infof("ensureExternalLoadBalancer(%s): Creating forwarding rule, IP %s (tier: %s).", lbRefStr, ipAddressToUse, netTier) if err := createForwardingRule(g, loadBalancerName, serviceName.String(), g.region, ipAddressToUse, g.targetPoolURL(loadBalancerName), ports, netTier); err != nil { return nil, fmt.Errorf("failed to create forwarding rule for load balancer (%s): %v", lbRefStr, err) } @@ -265,7 +265,7 @@ func (g *Cloud) ensureExternalLoadBalancer(clusterName string, clusterID string, // of a user-requested IP, the "is user-owned" flag will be set, // preventing it from actually being released. isSafeToReleaseIP = true - glog.Infof("ensureExternalLoadBalancer(%s): Created forwarding rule, IP %s.", lbRefStr, ipAddressToUse) + klog.Infof("ensureExternalLoadBalancer(%s): Created forwarding rule, IP %s.", lbRefStr, ipAddressToUse) } status := &v1.LoadBalancerStatus{} @@ -295,7 +295,7 @@ func (g *Cloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID string, if path, _ := apiservice.GetServiceHealthCheckPathPort(service); path != "" { hcToDelete, err := g.GetHTTPHealthCheck(loadBalancerName) if err != nil && !isHTTPErrorCode(err, http.StatusNotFound) { - glog.Infof("ensureExternalLoadBalancerDeleted(%s): Failed to retrieve health check:%v.", lbRefStr, err) + klog.Infof("ensureExternalLoadBalancerDeleted(%s): Failed to retrieve health check:%v.", lbRefStr, err) return err } // If we got 'StatusNotFound' LB was already deleted and it's safe to ignore. @@ -313,11 +313,11 @@ func (g *Cloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID string, errs := utilerrors.AggregateGoroutines( func() error { - glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting firewall rule.", lbRefStr) + klog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting firewall rule.", lbRefStr) fwName := MakeFirewallName(loadBalancerName) err := ignoreNotFound(g.DeleteFirewall(fwName)) if isForbidden(err) && g.OnXPN() { - glog.V(4).Infof("ensureExternalLoadBalancerDeleted(%s): Do not have permission to delete firewall rule %v (on XPN). Raising event.", lbRefStr, fwName) + klog.V(4).Infof("ensureExternalLoadBalancerDeleted(%s): Do not have permission to delete firewall rule %v (on XPN). 
Raising event.", lbRefStr, fwName) g.raiseFirewallChangeNeededEvent(service, FirewallToGCloudDeleteCmd(fwName, g.NetworkProjectID())) return nil } @@ -327,17 +327,17 @@ func (g *Cloud) ensureExternalLoadBalancerDeleted(clusterName, clusterID string, // possible that EnsureLoadBalancer left one around in a failed // creation/update attempt, so make sure we clean it up here just in case. func() error { - glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting IP address.", lbRefStr) + klog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting IP address.", lbRefStr) return ignoreNotFound(g.DeleteRegionAddress(loadBalancerName, g.region)) }, func() error { - glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting forwarding rule.", lbRefStr) + klog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting forwarding rule.", lbRefStr) // The forwarding rule must be deleted before either the target pool can, // unfortunately, so we have to do these two serially. if err := ignoreNotFound(g.DeleteRegionForwardingRule(loadBalancerName, g.region)); err != nil { return err } - glog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting target pool.", lbRefStr) + klog.Infof("ensureExternalLoadBalancerDeleted(%s): Deleting target pool.", lbRefStr) if err := g.DeleteExternalTargetPoolAndChecks(service, loadBalancerName, g.region, clusterID, hcNames...); err != nil { return err } @@ -356,9 +356,9 @@ func (g *Cloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name, reg lbRefStr := fmt.Sprintf("%v(%v)", name, serviceName) if err := g.DeleteTargetPool(name, region); err != nil && isHTTPErrorCode(err, http.StatusNotFound) { - glog.Infof("DeleteExternalTargetPoolAndChecks(%v): Target pool already deleted. Continuing to delete other resources.", lbRefStr) + klog.Infof("DeleteExternalTargetPoolAndChecks(%v): Target pool already deleted. Continuing to delete other resources.", lbRefStr) } else if err != nil { - glog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete target pool, got error %s.", lbRefStr, err.Error()) + klog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete target pool, got error %s.", lbRefStr, err.Error()) return err } @@ -373,14 +373,14 @@ func (g *Cloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name, reg g.sharedResourceLock.Lock() defer g.sharedResourceLock.Unlock() } - glog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check %v.", lbRefStr, hcName) + klog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check %v.", lbRefStr, hcName) if err := g.DeleteHTTPHealthCheck(hcName); err != nil { // Delete nodes health checks will fail if any other target pool is using it. if isInUsedByError(err) { - glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is in used: %v.", lbRefStr, hcName, err) + klog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is in used: %v.", lbRefStr, hcName, err) return nil } else if !isHTTPErrorCode(err, http.StatusNotFound) { - glog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete health check %v: %v.", lbRefStr, hcName, err) + klog.Warningf("DeleteExternalTargetPoolAndChecks(%v): Failed to delete health check %v: %v.", lbRefStr, hcName, err) return err } // StatusNotFound could happen when: @@ -390,15 +390,15 @@ func (g *Cloud) DeleteExternalTargetPoolAndChecks(service *v1.Service, name, reg // - This is a retry and in previous round we failed to delete the healthcheck firewall // after deleted the healthcheck. 
// We continue to delete the healthcheck firewall to prevent leaking. - glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is already deleted.", lbRefStr, hcName) + klog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Health check %v is already deleted.", lbRefStr, hcName) } // If health check is deleted without error, it means no load-balancer is using it. // So we should delete the health check firewall as well. fwName := MakeHealthCheckFirewallName(clusterID, hcName, isNodesHealthCheck) - glog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check firewall %v.", lbRefStr, fwName) + klog.Infof("DeleteExternalTargetPoolAndChecks(%v): Deleting health check firewall %v.", lbRefStr, fwName) if err := ignoreNotFound(g.DeleteFirewall(fwName)); err != nil { if isForbidden(err) && g.OnXPN() { - glog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Do not have permission to delete firewall rule %v (on XPN). Raising event.", lbRefStr, fwName) + klog.V(4).Infof("DeleteExternalTargetPoolAndChecks(%v): Do not have permission to delete firewall rule %v (on XPN). Raising event.", lbRefStr, fwName) g.raiseFirewallChangeNeededEvent(service, FirewallToGCloudDeleteCmd(fwName, g.NetworkProjectID())) return nil } @@ -429,7 +429,7 @@ func verifyUserRequestedIP(s CloudAddressService, region, requestedIP, fwdRuleIP // case we shouldn't delete it anyway). existingAddress, err := s.GetRegionAddressByIP(region, requestedIP) if err != nil && !isNotFound(err) { - glog.Errorf("verifyUserRequestedIP: failed to check whether the requested IP %q for LB %s exists: %v", requestedIP, lbRef, err) + klog.Errorf("verifyUserRequestedIP: failed to check whether the requested IP %q for LB %s exists: %v", requestedIP, lbRef, err) return false, err } if err == nil { @@ -443,23 +443,23 @@ func verifyUserRequestedIP(s CloudAddressService, region, requestedIP, fwdRuleIP } netTier := cloud.NetworkTierGCEValueToType(netTierStr) if netTier != desiredNetTier { - glog.Errorf("verifyUserRequestedIP: requested static IP %q (name: %s) for LB %s has network tier %s, need %s.", requestedIP, existingAddress.Name, lbRef, netTier, desiredNetTier) + klog.Errorf("verifyUserRequestedIP: requested static IP %q (name: %s) for LB %s has network tier %s, need %s.", requestedIP, existingAddress.Name, lbRef, netTier, desiredNetTier) return false, fmt.Errorf("requrested IP %q belongs to the %s network tier; expected %s", requestedIP, netTier, desiredNetTier) } - glog.V(4).Infof("verifyUserRequestedIP: the requested static IP %q (name: %s, tier: %s) for LB %s exists.", requestedIP, existingAddress.Name, netTier, lbRef) + klog.V(4).Infof("verifyUserRequestedIP: the requested static IP %q (name: %s, tier: %s) for LB %s exists.", requestedIP, existingAddress.Name, netTier, lbRef) return true, nil } if requestedIP == fwdRuleIP { // The requested IP is not a static IP, but is currently assigned // to this forwarding rule, so we can just use it. - glog.V(4).Infof("verifyUserRequestedIP: the requested IP %q is not static, but is currently in use by for LB %s", requestedIP, lbRef) + klog.V(4).Infof("verifyUserRequestedIP: the requested IP %q is not static, but is currently in use by for LB %s", requestedIP, lbRef) return false, nil } // The requested IP is not static and it is not assigned to the // current forwarding rule. It might be attached to a different // rule or it might not be part of this project at all. Either // way, we can't use it. 
- glog.Errorf("verifyUserRequestedIP: requested IP %q for LB %s is neither static nor assigned to the LB", requestedIP, lbRef) + klog.Errorf("verifyUserRequestedIP: requested IP %q for LB %s is neither static nor assigned to the LB", requestedIP, lbRef) return false, fmt.Errorf("requested ip %q is neither static nor assigned to the LB", requestedIP) } @@ -476,7 +476,7 @@ func (g *Cloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation bool, if err := g.DeleteExternalTargetPoolAndChecks(svc, loadBalancerName, g.region, clusterID, hcNames...); err != nil { return fmt.Errorf("failed to delete existing target pool for load balancer (%s) update: %v", lbRefStr, err) } - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Deleted target pool.", lbRefStr) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Deleted target pool.", lbRefStr) } // Once we've deleted the resources (if necessary), build them back up (or for // the first time if they're new). @@ -489,23 +489,23 @@ func (g *Cloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation bool, return fmt.Errorf("failed to create target pool for load balancer (%s): %v", lbRefStr, err) } if hcToCreate != nil { - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Created health checks %v.", lbRefStr, hcToCreate.Name) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Created health checks %v.", lbRefStr, hcToCreate.Name) } if len(hosts) <= maxTargetPoolCreateInstances { - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Created target pool.", lbRefStr) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Created target pool.", lbRefStr) } else { - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Created initial target pool (now updating the remaining %d hosts).", lbRefStr, len(hosts)-maxTargetPoolCreateInstances) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Created initial target pool (now updating the remaining %d hosts).", lbRefStr, len(hosts)-maxTargetPoolCreateInstances) if err := g.updateTargetPool(loadBalancerName, hosts); err != nil { return fmt.Errorf("failed to update target pool for load balancer (%s): %v", lbRefStr, err) } - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts)-maxTargetPoolCreateInstances) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts)-maxTargetPoolCreateInstances) } } else if tpExists { // Ensure hosts are updated even if there is no other changes required on target pool. if err := g.updateTargetPool(loadBalancerName, hosts); err != nil { return fmt.Errorf("failed to update target pool for load balancer (%s): %v", lbRefStr, err) } - glog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts)) + klog.Infof("ensureTargetPoolAndHealthCheck(%s): Updated target pool (with %d hosts).", lbRefStr, len(hosts)) if hcToCreate != nil { if hc, err := g.ensureHTTPHealthCheck(hcToCreate.Name, hcToCreate.RequestPath, int32(hcToCreate.Port)); err != nil || hc == nil { return fmt.Errorf("Failed to ensure health check for %v port %d path %v: %v", loadBalancerName, hcToCreate.Port, hcToCreate.RequestPath, err) @@ -513,7 +513,7 @@ func (g *Cloud) ensureTargetPoolAndHealthCheck(tpExists, tpNeedsRecreation bool, } } else { // Panic worthy. 
- glog.Errorf("ensureTargetPoolAndHealthCheck(%s): target pool not exists and doesn't need to be created.", lbRefStr) + klog.Errorf("ensureTargetPoolAndHealthCheck(%s): target pool not exists and doesn't need to be created.", lbRefStr) } return nil } @@ -547,7 +547,7 @@ func (g *Cloud) createTargetPoolAndHealthCheck(svc *v1.Service, name, serviceNam for _, host := range hosts { instances = append(instances, host.makeComparableHostPath()) } - glog.Infof("Creating targetpool %v with %d healthchecks", name, len(hcLinks)) + klog.Infof("Creating targetpool %v with %d healthchecks", name, len(hcLinks)) pool := &compute.TargetPool{ Name: name, Description: fmt.Sprintf(`{"kubernetes.io/service-name":"%s"}`, serviceName), @@ -605,7 +605,7 @@ func (g *Cloud) updateTargetPool(loadBalancerName string, hosts []*gceInstance) return err } if len(updatedPool.Instances) != len(hosts) { - glog.Errorf("Unexpected number of instances (%d) in target pool %s after updating (expected %d). Instances in updated pool: %s", + klog.Errorf("Unexpected number of instances (%d) in target pool %s after updating (expected %d). Instances in updated pool: %s", len(updatedPool.Instances), loadBalancerName, len(hosts), strings.Join(updatedPool.Instances, ",")) return fmt.Errorf("Unexpected number of instances (%d) in target pool %s after update (expected %d)", len(updatedPool.Instances), loadBalancerName, len(hosts)) } @@ -665,28 +665,28 @@ func (g *Cloud) ensureHTTPHealthCheck(name, path string, port int32) (hc *comput newHC := makeHTTPHealthCheck(name, path, port) hc, err = g.GetHTTPHealthCheck(name) if hc == nil || err != nil && isHTTPErrorCode(err, http.StatusNotFound) { - glog.Infof("Did not find health check %v, creating port %v path %v", name, port, path) + klog.Infof("Did not find health check %v, creating port %v path %v", name, port, path) if err = g.CreateHTTPHealthCheck(newHC); err != nil { return nil, err } hc, err = g.GetHTTPHealthCheck(name) if err != nil { - glog.Errorf("Failed to get http health check %v", err) + klog.Errorf("Failed to get http health check %v", err) return nil, err } - glog.Infof("Created HTTP health check %v healthCheckNodePort: %d", name, port) + klog.Infof("Created HTTP health check %v healthCheckNodePort: %d", name, port) return hc, nil } // Validate health check fields - glog.V(4).Infof("Checking http health check params %s", name) + klog.V(4).Infof("Checking http health check params %s", name) if needToUpdateHTTPHealthChecks(hc, newHC) { - glog.Warningf("Health check %v exists but parameters have drifted - updating...", name) + klog.Warningf("Health check %v exists but parameters have drifted - updating...", name) newHC = mergeHTTPHealthChecks(hc, newHC) if err := g.UpdateHTTPHealthCheck(newHC); err != nil { - glog.Warningf("Failed to reconcile http health check %v parameters", name) + klog.Warningf("Failed to reconcile http health check %v parameters", name) return nil, err } - glog.V(4).Infof("Corrected health check %v parameters successful", name) + klog.V(4).Infof("Corrected health check %v parameters successful", name) hc, err = g.GetHTTPHealthCheck(name) if err != nil { return nil, err @@ -714,7 +714,7 @@ func (g *Cloud) forwardingRuleNeedsUpdate(name, region string, loadBalancerIP st // TODO: we report loadbalancer IP through status, so we want to verify if // that matches the forwarding rule as well. 
if loadBalancerIP != "" && loadBalancerIP != fwd.IPAddress { - glog.Infof("LoadBalancer ip for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.IPAddress, loadBalancerIP) + klog.Infof("LoadBalancer ip for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.IPAddress, loadBalancerIP) return true, true, fwd.IPAddress, nil } portRange, err := loadBalancerPortRange(ports) @@ -724,12 +724,12 @@ func (g *Cloud) forwardingRuleNeedsUpdate(name, region string, loadBalancerIP st return true, false, "", err } if portRange != fwd.PortRange { - glog.Infof("LoadBalancer port range for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.PortRange, portRange) + klog.Infof("LoadBalancer port range for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.PortRange, portRange) return true, true, fwd.IPAddress, nil } // The service controller verified all the protocols match on the ports, just check the first one if string(ports[0].Protocol) != fwd.IPProtocol { - glog.Infof("LoadBalancer protocol for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.IPProtocol, string(ports[0].Protocol)) + klog.Infof("LoadBalancer protocol for forwarding rule %v was expected to be %v, but was actually %v", fwd.Name, fwd.IPProtocol, string(ports[0].Protocol)) return true, true, fwd.IPAddress, nil } @@ -757,7 +757,7 @@ func (g *Cloud) targetPoolNeedsRecreation(name, region string, affinityType v1.S // target pool (which results in downtime). Fix this when we have formally // defined the defaults on either side. if tp.SessionAffinity != "" && translateAffinityType(affinityType) != tp.SessionAffinity { - glog.Infof("LoadBalancer target pool %v changed affinity from %v to %v", name, tp.SessionAffinity, affinityType) + klog.Infof("LoadBalancer target pool %v changed affinity from %v to %v", name, tp.SessionAffinity, affinityType) return true, true, nil } return true, false, nil @@ -814,7 +814,7 @@ func translateAffinityType(affinityType v1.ServiceAffinity) string { case v1.ServiceAffinityNone: return gceAffinityTypeNone default: - glog.Errorf("Unexpected affinity type: %v", affinityType) + klog.Errorf("Unexpected affinity type: %v", affinityType) return gceAffinityTypeNone } } @@ -846,7 +846,7 @@ func (g *Cloud) firewallNeedsUpdate(name, serviceName, region, ipAddress string, actualSourceRanges, err := netsets.ParseIPNets(fw.SourceRanges...) if err != nil { // This really shouldn't happen... GCE has returned something unexpected - glog.Warningf("Error parsing firewall SourceRanges: %v", fw.SourceRanges) + klog.Warningf("Error parsing firewall SourceRanges: %v", fw.SourceRanges) // We don't return the error, because we can hopefully recover from this by reconfiguring the firewall return true, true, nil } @@ -872,11 +872,11 @@ func (g *Cloud) ensureHTTPHealthCheckFirewall(svc *v1.Service, serviceName, ipAd if !isHTTPErrorCode(err, http.StatusNotFound) { return fmt.Errorf("error getting firewall for health checks: %v", err) } - glog.Infof("Creating firewall %v for health checks.", fwName) + klog.Infof("Creating firewall %v for health checks.", fwName) if err := g.createFirewall(svc, fwName, region, desc, sourceRanges, ports, hosts); err != nil { return err } - glog.Infof("Created firewall %v for health checks.", fwName) + klog.Infof("Created firewall %v for health checks.", fwName) return nil } // Validate firewall fields. 
@@ -885,12 +885,12 @@ func (g *Cloud) ensureHTTPHealthCheckFirewall(svc *v1.Service, serviceName, ipAd fw.Allowed[0].IPProtocol != string(ports[0].Protocol) || !equalStringSets(fw.Allowed[0].Ports, []string{strconv.Itoa(int(ports[0].Port))}) || !equalStringSets(fw.SourceRanges, sourceRanges.StringSlice()) { - glog.Warningf("Firewall %v exists but parameters have drifted - updating...", fwName) + klog.Warningf("Firewall %v exists but parameters have drifted - updating...", fwName) if err := g.updateFirewall(svc, fwName, region, desc, sourceRanges, ports, hosts); err != nil { - glog.Warningf("Failed to reconcile firewall %v parameters.", fwName) + klog.Warningf("Failed to reconcile firewall %v parameters.", fwName) return err } - glog.V(4).Infof("Corrected firewall %v parameters successful", fwName) + klog.V(4).Infof("Corrected firewall %v parameters successful", fwName) } return nil } @@ -943,7 +943,7 @@ func (g *Cloud) createFirewall(svc *v1.Service, name, region, desc string, sourc if isHTTPErrorCode(err, http.StatusConflict) { return nil } else if isForbidden(err) && g.OnXPN() { - glog.V(4).Infof("createFirewall(%v): do not have permission to create firewall rule (on XPN). Raising event.", firewall.Name) + klog.V(4).Infof("createFirewall(%v): do not have permission to create firewall rule (on XPN). Raising event.", firewall.Name) g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudCreateCmd(firewall, g.NetworkProjectID())) return nil } @@ -962,7 +962,7 @@ func (g *Cloud) updateFirewall(svc *v1.Service, name, region, desc string, sourc if isHTTPErrorCode(err, http.StatusConflict) { return nil } else if isForbidden(err) && g.OnXPN() { - glog.V(4).Infof("updateFirewall(%v): do not have permission to update firewall rule (on XPN). Raising event.", firewall.Name) + klog.V(4).Infof("updateFirewall(%v): do not have permission to update firewall rule (on XPN). Raising event.", firewall.Name) g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudUpdateCmd(firewall, g.NetworkProjectID())) return nil } @@ -1091,7 +1091,7 @@ func deleteFWDRuleWithWrongTier(s CloudForwardingRuleService, region, name, logP if existingTier == desiredNetTier { return nil } - glog.V(2).Infof("%s: Network tiers do not match; existing forwarding rule: %q, desired: %q. Deleting the forwarding rule", + klog.V(2).Infof("%s: Network tiers do not match; existing forwarding rule: %q, desired: %q. Deleting the forwarding rule", logPrefix, existingTier, desiredNetTier) err = s.DeleteRegionForwardingRule(name, region) return ignoreNotFound(err) @@ -1119,7 +1119,7 @@ func deleteAddressWithWrongTier(s CloudAddressService, region, name, logPrefix s if existingTier == desiredNetTier { return nil } - glog.V(2).Infof("%s: Network tiers do not match; existing address: %q, desired: %q. Deleting the address", + klog.V(2).Infof("%s: Network tiers do not match; existing address: %q, desired: %q. 
Deleting the address", logPrefix, existingTier, desiredNetTier) err = s.DeleteRegionAddress(name, region) return ignoreNotFound(err) diff --git a/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go b/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go index 49a2885520706..87d1be64b49e6 100644 --- a/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go +++ b/pkg/cloudprovider/providers/gce/gce_loadbalancer_internal.go @@ -22,11 +22,11 @@ import ( "strconv" "strings" - "github.com/golang/glog" compute "google.golang.org/api/compute/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" v1_service "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" ) @@ -102,7 +102,7 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v if err != nil { return nil, err } - glog.V(2).Infof("ensureInternalLoadBalancer(%v): reserved IP %q for the forwarding rule", loadBalancerName, ipToUse) + klog.V(2).Infof("ensureInternalLoadBalancer(%v): reserved IP %q for the forwarding rule", loadBalancerName, ipToUse) } // Ensure firewall rules if necessary @@ -130,7 +130,7 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v fwdRuleDeleted := false if existingFwdRule != nil && !fwdRuleEqual(existingFwdRule, expectedFwdRule) { - glog.V(2).Infof("ensureInternalLoadBalancer(%v): deleting existing forwarding rule with IP address %v", loadBalancerName, existingFwdRule.IPAddress) + klog.V(2).Infof("ensureInternalLoadBalancer(%v): deleting existing forwarding rule with IP address %v", loadBalancerName, existingFwdRule.IPAddress) if err = ignoreNotFound(g.DeleteRegionForwardingRule(loadBalancerName, g.region)); err != nil { return nil, err } @@ -145,11 +145,11 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v // If we previously deleted the forwarding rule or it never existed, finally create it. if fwdRuleDeleted || existingFwdRule == nil { - glog.V(2).Infof("ensureInternalLoadBalancer(%v): creating forwarding rule", loadBalancerName) + klog.V(2).Infof("ensureInternalLoadBalancer(%v): creating forwarding rule", loadBalancerName) if err = g.CreateRegionForwardingRule(expectedFwdRule, g.region); err != nil { return nil, err } - glog.V(2).Infof("ensureInternalLoadBalancer(%v): created forwarding rule", loadBalancerName) + klog.V(2).Infof("ensureInternalLoadBalancer(%v): created forwarding rule", loadBalancerName) } // Delete the previous internal load balancer resources if necessary @@ -160,7 +160,7 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v if addrMgr != nil { // Now that the controller knows the forwarding rule exists, we can release the address. if err := addrMgr.ReleaseAddress(); err != nil { - glog.Errorf("ensureInternalLoadBalancer: failed to release address reservation, possibly causing an orphan: %v", err) + klog.Errorf("ensureInternalLoadBalancer: failed to release address reservation, possibly causing an orphan: %v", err) } } @@ -178,9 +178,9 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v func (g *Cloud) clearPreviousInternalResources(svc *v1.Service, loadBalancerName string, existingBackendService *compute.BackendService, expectedBSName, expectedHCName string) { // If a new backend service was created, delete the old one. 
if existingBackendService.Name != expectedBSName { - glog.V(2).Infof("clearPreviousInternalResources(%v): expected backend service %q does not match previous %q - deleting backend service", loadBalancerName, expectedBSName, existingBackendService.Name) + klog.V(2).Infof("clearPreviousInternalResources(%v): expected backend service %q does not match previous %q - deleting backend service", loadBalancerName, expectedBSName, existingBackendService.Name) if err := g.teardownInternalBackendService(existingBackendService.Name); err != nil && !isNotFound(err) { - glog.Warningf("clearPreviousInternalResources: could not delete old backend service: %v, err: %v", existingBackendService.Name, err) + klog.Warningf("clearPreviousInternalResources: could not delete old backend service: %v, err: %v", existingBackendService.Name, err) } } @@ -188,13 +188,13 @@ func (g *Cloud) clearPreviousInternalResources(svc *v1.Service, loadBalancerName if len(existingBackendService.HealthChecks) == 1 { existingHCName := getNameFromLink(existingBackendService.HealthChecks[0]) if existingHCName != expectedHCName { - glog.V(2).Infof("clearPreviousInternalResources(%v): expected health check %q does not match previous %q - deleting health check", loadBalancerName, expectedHCName, existingHCName) + klog.V(2).Infof("clearPreviousInternalResources(%v): expected health check %q does not match previous %q - deleting health check", loadBalancerName, expectedHCName, existingHCName) if err := g.teardownInternalHealthCheckAndFirewall(svc, existingHCName); err != nil { - glog.Warningf("clearPreviousInternalResources: could not delete existing healthcheck: %v, err: %v", existingHCName, err) + klog.Warningf("clearPreviousInternalResources: could not delete existing healthcheck: %v, err: %v", existingHCName, err) } } } else if len(existingBackendService.HealthChecks) > 1 { - glog.Warningf("clearPreviousInternalResources(%v): more than one health check on the backend service %v, %v", loadBalancerName, existingBackendService.Name, existingBackendService.HealthChecks) + klog.Warningf("clearPreviousInternalResources(%v): more than one health check on the backend service %v, %v", loadBalancerName, existingBackendService.Name, existingBackendService.HealthChecks) } } @@ -229,24 +229,24 @@ func (g *Cloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string, g.sharedResourceLock.Lock() defer g.sharedResourceLock.Unlock() - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): attempting delete of region internal address", loadBalancerName) + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): attempting delete of region internal address", loadBalancerName) ensureAddressDeleted(g, loadBalancerName, g.region) - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region internal forwarding rule", loadBalancerName) + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region internal forwarding rule", loadBalancerName) if err := ignoreNotFound(g.DeleteRegionForwardingRule(loadBalancerName, g.region)); err != nil { return err } backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, sharedBackend, scheme, protocol, svc.Spec.SessionAffinity) - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region backend service %v", loadBalancerName, backendServiceName) + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting region backend service %v", loadBalancerName, backendServiceName) if err := g.teardownInternalBackendService(backendServiceName); err != nil { return 
err } - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting firewall for traffic", loadBalancerName) + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting firewall for traffic", loadBalancerName) if err := ignoreNotFound(g.DeleteFirewall(loadBalancerName)); err != nil { if isForbidden(err) && g.OnXPN() { - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): could not delete traffic firewall on XPN cluster. Raising event.", loadBalancerName) + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): could not delete traffic firewall on XPN cluster. Raising event.", loadBalancerName) g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(loadBalancerName, g.NetworkProjectID())) } else { return err @@ -254,7 +254,7 @@ func (g *Cloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string, } hcName := makeHealthCheckName(loadBalancerName, clusterID, sharedHealthCheck) - glog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting health check %v and its firewall", loadBalancerName, hcName) + klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): deleting health check %v and its firewall", loadBalancerName, hcName) if err := g.teardownInternalHealthCheckAndFirewall(svc, hcName); err != nil { return err } @@ -271,49 +271,49 @@ func (g *Cloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string, func (g *Cloud) teardownInternalBackendService(bsName string) error { if err := g.DeleteRegionBackendService(bsName, g.region); err != nil { if isNotFound(err) { - glog.V(2).Infof("teardownInternalBackendService(%v): backend service already deleted. err: %v", bsName, err) + klog.V(2).Infof("teardownInternalBackendService(%v): backend service already deleted. err: %v", bsName, err) return nil } else if isInUsedByError(err) { - glog.V(2).Infof("teardownInternalBackendService(%v): backend service in use.", bsName) + klog.V(2).Infof("teardownInternalBackendService(%v): backend service in use.", bsName) return nil } else { return fmt.Errorf("failed to delete backend service: %v, err: %v", bsName, err) } } - glog.V(2).Infof("teardownInternalBackendService(%v): backend service deleted", bsName) + klog.V(2).Infof("teardownInternalBackendService(%v): backend service deleted", bsName) return nil } func (g *Cloud) teardownInternalHealthCheckAndFirewall(svc *v1.Service, hcName string) error { if err := g.DeleteHealthCheck(hcName); err != nil { if isNotFound(err) { - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check does not exist.", hcName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check does not exist.", hcName) // Purposely do not early return - double check the firewall does not exist } else if isInUsedByError(err) { - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check in use.", hcName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check in use.", hcName) return nil } else { return fmt.Errorf("failed to delete health check: %v, err: %v", hcName, err) } } - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check deleted", hcName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check deleted", hcName) hcFirewallName := makeHealthCheckFirewallNameFromHC(hcName) if err := ignoreNotFound(g.DeleteFirewall(hcFirewallName)); err != nil { if isForbidden(err) && g.OnXPN() { - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): could not delete health check traffic firewall on XPN cluster. 
Raising Event.", hcName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): could not delete health check traffic firewall on XPN cluster. Raising Event.", hcName) g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudDeleteCmd(hcFirewallName, g.NetworkProjectID())) return nil } return fmt.Errorf("failed to delete health check firewall: %v, err: %v", hcFirewallName, err) } - glog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check firewall deleted", hcFirewallName) + klog.V(2).Infof("teardownInternalHealthCheckAndFirewall(%v): health check firewall deleted", hcFirewallName) return nil } func (g *Cloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, sourceRanges []string, ports []string, protocol v1.Protocol, nodes []*v1.Node) error { - glog.V(2).Infof("ensureInternalFirewall(%v): checking existing firewall", fwName) + klog.V(2).Infof("ensureInternalFirewall(%v): checking existing firewall", fwName) targetTags, err := g.GetNodeTags(nodeNames(nodes)) if err != nil { return err @@ -339,10 +339,10 @@ func (g *Cloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, s } if existingFirewall == nil { - glog.V(2).Infof("ensureInternalFirewall(%v): creating firewall", fwName) + klog.V(2).Infof("ensureInternalFirewall(%v): creating firewall", fwName) err = g.CreateFirewall(expectedFirewall) if err != nil && isForbidden(err) && g.OnXPN() { - glog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to create firewall rule (on XPN). Raising event.", fwName) + klog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to create firewall rule (on XPN). Raising event.", fwName) g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudCreateCmd(expectedFirewall, g.NetworkProjectID())) return nil } @@ -353,10 +353,10 @@ func (g *Cloud) ensureInternalFirewall(svc *v1.Service, fwName, fwDesc string, s return nil } - glog.V(2).Infof("ensureInternalFirewall(%v): updating firewall", fwName) + klog.V(2).Infof("ensureInternalFirewall(%v): updating firewall", fwName) err = g.UpdateFirewall(expectedFirewall) if err != nil && isForbidden(err) && g.OnXPN() { - glog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to update firewall rule (on XPN). Raising event.", fwName) + klog.V(2).Infof("ensureInternalFirewall(%v): do not have permission to update firewall rule (on XPN). 
Raising event.", fwName) g.raiseFirewallChangeNeededEvent(svc, FirewallToGCloudUpdateCmd(expectedFirewall, g.NetworkProjectID())) return nil } @@ -383,7 +383,7 @@ func (g *Cloud) ensureInternalFirewalls(loadBalancerName, ipAddress, clusterID s } func (g *Cloud) ensureInternalHealthCheck(name string, svcName types.NamespacedName, shared bool, path string, port int32) (*compute.HealthCheck, error) { - glog.V(2).Infof("ensureInternalHealthCheck(%v, %v, %v): checking existing health check", name, path, port) + klog.V(2).Infof("ensureInternalHealthCheck(%v, %v, %v): checking existing health check", name, path, port) expectedHC := newInternalLBHealthCheck(name, svcName, shared, path, port) hc, err := g.GetHealthCheck(name) @@ -392,27 +392,27 @@ func (g *Cloud) ensureInternalHealthCheck(name string, svcName types.NamespacedN } if hc == nil { - glog.V(2).Infof("ensureInternalHealthCheck: did not find health check %v, creating one with port %v path %v", name, port, path) + klog.V(2).Infof("ensureInternalHealthCheck: did not find health check %v, creating one with port %v path %v", name, port, path) if err = g.CreateHealthCheck(expectedHC); err != nil { return nil, err } hc, err = g.GetHealthCheck(name) if err != nil { - glog.Errorf("Failed to get http health check %v", err) + klog.Errorf("Failed to get http health check %v", err) return nil, err } - glog.V(2).Infof("ensureInternalHealthCheck: created health check %v", name) + klog.V(2).Infof("ensureInternalHealthCheck: created health check %v", name) return hc, nil } if needToUpdateHealthChecks(hc, expectedHC) { - glog.V(2).Infof("ensureInternalHealthCheck: health check %v exists but parameters have drifted - updating...", name) + klog.V(2).Infof("ensureInternalHealthCheck: health check %v exists but parameters have drifted - updating...", name) expectedHC = mergeHealthChecks(hc, expectedHC) if err := g.UpdateHealthCheck(expectedHC); err != nil { - glog.Warningf("Failed to reconcile http health check %v parameters", name) + klog.Warningf("Failed to reconcile http health check %v parameters", name) return nil, err } - glog.V(2).Infof("ensureInternalHealthCheck: corrected health check %v parameters successful", name) + klog.V(2).Infof("ensureInternalHealthCheck: corrected health check %v parameters successful", name) hc, err = g.GetHealthCheck(name) if err != nil { return nil, err @@ -422,7 +422,7 @@ func (g *Cloud) ensureInternalHealthCheck(name string, svcName types.NamespacedN } func (g *Cloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1.Node) (string, error) { - glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): checking group that it contains %v nodes", name, zone, len(nodes)) + klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): checking group that it contains %v nodes", name, zone, len(nodes)) ig, err := g.GetInstanceGroup(name, zone) if err != nil && !isNotFound(err) { return "", err @@ -435,7 +435,7 @@ func (g *Cloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1.Node) gceNodes := sets.NewString() if ig == nil { - glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): creating instance group", name, zone) + klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): creating instance group", name, zone) newIG := &compute.InstanceGroup{Name: name} if err = g.CreateInstanceGroup(newIG, zone); err != nil { return "", err @@ -461,7 +461,7 @@ func (g *Cloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1.Node) addNodes := kubeNodes.Difference(gceNodes).List() if len(removeNodes) != 0 { - 
glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): removing nodes: %v", name, zone, removeNodes) + klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): removing nodes: %v", name, zone, removeNodes) instanceRefs := g.ToInstanceReferences(zone, removeNodes) // Possible we'll receive 404's here if the instance was deleted before getting to this point. if err = g.RemoveInstancesFromInstanceGroup(name, zone, instanceRefs); err != nil && !isNotFound(err) { @@ -470,7 +470,7 @@ func (g *Cloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1.Node) } if len(addNodes) != 0 { - glog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): adding nodes: %v", name, zone, addNodes) + klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): adding nodes: %v", name, zone, addNodes) instanceRefs := g.ToInstanceReferences(zone, addNodes) if err = g.AddInstancesToInstanceGroup(name, zone, instanceRefs); err != nil { return "", err @@ -484,7 +484,7 @@ func (g *Cloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1.Node) // where a K8s node exists. It also ensures that each node belongs to an instance group func (g *Cloud) ensureInternalInstanceGroups(name string, nodes []*v1.Node) ([]string, error) { zonedNodes := splitNodesByZone(nodes) - glog.V(2).Infof("ensureInternalInstanceGroups(%v): %d nodes over %d zones in region %v", name, len(nodes), len(zonedNodes), g.region) + klog.V(2).Infof("ensureInternalInstanceGroups(%v): %d nodes over %d zones in region %v", name, len(nodes), len(zonedNodes), g.region) var igLinks []string for zone, nodes := range zonedNodes { igLink, err := g.ensureInternalInstanceGroup(name, zone, nodes) @@ -504,7 +504,7 @@ func (g *Cloud) ensureInternalInstanceGroupsDeleted(name string) error { return err } - glog.V(2).Infof("ensureInternalInstanceGroupsDeleted(%v): attempting delete instance group in all %d zones", name, len(zones)) + klog.V(2).Infof("ensureInternalInstanceGroupsDeleted(%v): attempting delete instance group in all %d zones", name, len(zones)) for _, z := range zones { if err := g.DeleteInstanceGroup(name, z.Name); err != nil && !isNotFoundOrInUse(err) { return err @@ -514,7 +514,7 @@ func (g *Cloud) ensureInternalInstanceGroupsDeleted(name string) error { } func (g *Cloud) ensureInternalBackendService(name, description string, affinityType v1.ServiceAffinity, scheme cloud.LbScheme, protocol v1.Protocol, igLinks []string, hcLink string) error { - glog.V(2).Infof("ensureInternalBackendService(%v, %v, %v): checking existing backend service with %d groups", name, scheme, protocol, len(igLinks)) + klog.V(2).Infof("ensureInternalBackendService(%v, %v, %v): checking existing backend service with %d groups", name, scheme, protocol, len(igLinks)) bs, err := g.GetRegionBackendService(name, g.region) if err != nil && !isNotFound(err) { return err @@ -533,12 +533,12 @@ func (g *Cloud) ensureInternalBackendService(name, description string, affinityT // Create backend service if none was found if bs == nil { - glog.V(2).Infof("ensureInternalBackendService: creating backend service %v", name) + klog.V(2).Infof("ensureInternalBackendService: creating backend service %v", name) err := g.CreateRegionBackendService(expectedBS, g.region) if err != nil { return err } - glog.V(2).Infof("ensureInternalBackendService: created backend service %v successfully", name) + klog.V(2).Infof("ensureInternalBackendService: created backend service %v successfully", name) return nil } @@ -546,19 +546,19 @@ func (g *Cloud) ensureInternalBackendService(name, description string, affinityT 
return nil } - glog.V(2).Infof("ensureInternalBackendService: updating backend service %v", name) + klog.V(2).Infof("ensureInternalBackendService: updating backend service %v", name) // Set fingerprint for optimistic locking expectedBS.Fingerprint = bs.Fingerprint if err := g.UpdateRegionBackendService(expectedBS, g.region); err != nil { return err } - glog.V(2).Infof("ensureInternalBackendService: updated backend service %v successfully", name) + klog.V(2).Infof("ensureInternalBackendService: updated backend service %v successfully", name) return nil } // ensureInternalBackendServiceGroups updates backend services if their list of backend instance groups is incorrect. func (g *Cloud) ensureInternalBackendServiceGroups(name string, igLinks []string) error { - glog.V(2).Infof("ensureInternalBackendServiceGroups(%v): checking existing backend service's groups", name) + klog.V(2).Infof("ensureInternalBackendServiceGroups(%v): checking existing backend service's groups", name) bs, err := g.GetRegionBackendService(name, g.region) if err != nil { return err @@ -572,11 +572,11 @@ func (g *Cloud) ensureInternalBackendServiceGroups(name string, igLinks []string // Set the backend service's backends to the updated list. bs.Backends = backends - glog.V(2).Infof("ensureInternalBackendServiceGroups: updating backend service %v", name) + klog.V(2).Infof("ensureInternalBackendServiceGroups: updating backend service %v", name) if err := g.UpdateRegionBackendService(bs, g.region); err != nil { return err } - glog.V(2).Infof("ensureInternalBackendServiceGroups: updated backend service %v successfully", name) + klog.V(2).Infof("ensureInternalBackendServiceGroups: updated backend service %v successfully", name) return nil } diff --git a/pkg/cloudprovider/providers/gce/gce_routes.go b/pkg/cloudprovider/providers/gce/gce_routes.go index 703e2dd3cf2b2..cc3cbfc53a045 100644 --- a/pkg/cloudprovider/providers/gce/gce_routes.go +++ b/pkg/cloudprovider/providers/gce/gce_routes.go @@ -22,9 +22,9 @@ import ( "net/http" "path" - "github.com/golang/glog" compute "google.golang.org/api/compute/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" cloudprovider "k8s.io/cloud-provider" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" @@ -83,7 +83,7 @@ func (g *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint st } err = g.c.Routes().Insert(ctx, meta.GlobalKey(cr.Name), cr) if isHTTPErrorCode(err, http.StatusConflict) { - glog.Infof("Route %q already exists.", cr.Name) + klog.Infof("Route %q already exists.", cr.Name) err = nil } return mc.Observe(err) diff --git a/pkg/cloudprovider/providers/gce/gce_tpu.go b/pkg/cloudprovider/providers/gce/gce_tpu.go index fda62851cfb5a..b9fae1da2fd66 100644 --- a/pkg/cloudprovider/providers/gce/gce_tpu.go +++ b/pkg/cloudprovider/providers/gce/gce_tpu.go @@ -23,9 +23,9 @@ import ( "net/http" "time" - "github.com/golang/glog" "google.golang.org/api/googleapi" tpuapi "google.golang.org/api/tpu/v1" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/wait" ) @@ -61,7 +61,7 @@ func (g *Cloud) CreateTPU(ctx context.Context, name, zone string, node *tpuapi.N if err != nil { return nil, err } - glog.V(2).Infof("Creating Cloud TPU %q in zone %q with operation %q", name, zone, op.Name) + klog.V(2).Infof("Creating Cloud TPU %q in zone %q with operation %q", name, zone, op.Name) op, err = g.waitForTPUOp(ctx, op) if err != nil { @@ -94,7 +94,7 @@ func (g *Cloud) DeleteTPU(ctx context.Context, name, zone string) error { if err != nil { return err } - glog.V(2).Infof("Deleting Cloud 
TPU %q in zone %q with operation %q", name, zone, op.Name) + klog.V(2).Infof("Deleting Cloud TPU %q in zone %q with operation %q", name, zone, op.Name) op, err = g.waitForTPUOp(ctx, op) if err != nil { @@ -149,18 +149,18 @@ func (g *Cloud) waitForTPUOp(ctx context.Context, op *tpuapi.Operation) (*tpuapi // Check if context has been cancelled. select { case <-ctx.Done(): - glog.V(3).Infof("Context for operation %q has been cancelled: %s", op.Name, ctx.Err()) + klog.V(3).Infof("Context for operation %q has been cancelled: %s", op.Name, ctx.Err()) return true, ctx.Err() default: } - glog.V(3).Infof("Waiting for operation %q to complete...", op.Name) + klog.V(3).Infof("Waiting for operation %q to complete...", op.Name) start := time.Now() g.operationPollRateLimiter.Accept() duration := time.Now().Sub(start) if duration > 5*time.Second { - glog.V(2).Infof("Getting operation %q throttled for %v", op.Name, duration) + klog.V(2).Infof("Getting operation %q throttled for %v", op.Name, duration) } var err error @@ -169,7 +169,7 @@ func (g *Cloud) waitForTPUOp(ctx context.Context, op *tpuapi.Operation) (*tpuapi return true, err } if op.Done { - glog.V(3).Infof("Operation %q has completed", op.Name) + klog.V(3).Infof("Operation %q has completed", op.Name) return true, nil } return false, nil diff --git a/pkg/cloudprovider/providers/openstack/BUILD b/pkg/cloudprovider/providers/openstack/BUILD index 65997dff18172..6e45765123f27 100644 --- a/pkg/cloudprovider/providers/openstack/BUILD +++ b/pkg/cloudprovider/providers/openstack/BUILD @@ -34,7 +34,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/gophercloud/gophercloud:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions:go_default_library", @@ -62,6 +61,7 @@ go_library( "//vendor/github.com/mitchellh/mapstructure:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/cloudprovider/providers/openstack/metadata.go b/pkg/cloudprovider/providers/openstack/metadata.go index 6ef8eb98d8538..2ba32f59d5989 100644 --- a/pkg/cloudprovider/providers/openstack/metadata.go +++ b/pkg/cloudprovider/providers/openstack/metadata.go @@ -27,7 +27,7 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/utils/exec" ) @@ -121,7 +121,7 @@ func getMetadataFromConfigDrive(metadataVersion string) (*Metadata, error) { } defer os.Remove(mntdir) - glog.V(4).Infof("Attempting to mount configdrive %s on %s", dev, mntdir) + klog.V(4).Infof("Attempting to mount configdrive %s on %s", dev, mntdir) mounter := mount.New("" /* default mount path */) err = mounter.Mount(dev, mntdir, "iso9660", []string{"ro"}) @@ -133,7 +133,7 @@ func getMetadataFromConfigDrive(metadataVersion string) (*Metadata, error) { } defer mounter.Unmount(mntdir) - glog.V(4).Infof("Configdrive mounted on %s", mntdir) + klog.V(4).Infof("Configdrive mounted on %s", mntdir) configDrivePath := getConfigDrivePath(metadataVersion) f, err := os.Open( @@ -149,7 +149,7 @@ func 
getMetadataFromConfigDrive(metadataVersion string) (*Metadata, error) { func getMetadataFromMetadataService(metadataVersion string) (*Metadata, error) { // Try to get JSON from metadata server. metadataURL := getMetadataURL(metadataVersion) - glog.V(4).Infof("Attempting to fetch metadata from %s", metadataURL) + klog.V(4).Infof("Attempting to fetch metadata from %s", metadataURL) resp, err := http.Get(metadataURL) if err != nil { return nil, fmt.Errorf("error fetching %s: %v", metadataURL, err) diff --git a/pkg/cloudprovider/providers/openstack/openstack.go b/pkg/cloudprovider/providers/openstack/openstack.go index fbcee0f42a950..9f13db944e191 100644 --- a/pkg/cloudprovider/providers/openstack/openstack.go +++ b/pkg/cloudprovider/providers/openstack/openstack.go @@ -41,12 +41,12 @@ import ( "github.com/mitchellh/mapstructure" "gopkg.in/gcfg.v1" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" netutil "k8s.io/apimachinery/pkg/util/net" certutil "k8s.io/client-go/util/cert" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" ) @@ -277,7 +277,7 @@ func readInstanceID(searchOrder string) (string, error) { if err == nil { instanceID := string(idBytes) instanceID = strings.TrimSpace(instanceID) - glog.V(3).Infof("Got instance id from %s: %s", instanceIDFile, instanceID) + klog.V(3).Infof("Got instance id from %s: %s", instanceIDFile, instanceID) if instanceID != "" { return instanceID, nil } @@ -584,10 +584,10 @@ func (os *OpenStack) HasClusterID() bool { // LoadBalancer initializes a LbaasV2 object func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) { - glog.V(4).Info("openstack.LoadBalancer() called") + klog.V(4).Info("openstack.LoadBalancer() called") if reflect.DeepEqual(os.lbOpts, LoadBalancerOpts{}) { - glog.V(4).Info("LoadBalancer section is empty/not defined in cloud-config") + klog.V(4).Info("LoadBalancer section is empty/not defined in cloud-config") return nil, false } @@ -610,11 +610,11 @@ func (os *OpenStack) LoadBalancer() (cloudprovider.LoadBalancer, bool) { // Currently kubernetes OpenStack cloud provider just support LBaaS v2. 
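
The support getters in this file (LoadBalancer here, then Zones, Routes, and Instances) share one probe shape: check configuration and API reachability, explain a negative answer at higher verbosity, and log the positive claim at V(1). A condensed sketch with placeholder inputs:

    package sketch

    import "k8s.io/klog"

    // probeSupport condenses the opt-in shape used by the interface getters.
    func probeSupport(configured bool, apiOK func() error) bool {
    	if !configured {
    		klog.V(4).Info("section is empty/not defined in cloud-config")
    		return false
    	}
    	if err := apiOK(); err != nil {
    		klog.Warningf("API unavailable: %v", err)
    		return false
    	}
    	klog.V(1).Info("claiming support")
    	return true
    }
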
lbVersion := os.lbOpts.LBVersion if lbVersion != "" && lbVersion != "v2" { - glog.Warningf("Config error: currently only support LBaaS v2, unrecognised lb-version \"%v\"", lbVersion) + klog.Warningf("Config error: currently only support LBaaS v2, unrecognised lb-version \"%v\"", lbVersion) return nil, false } - glog.V(1).Info("Claiming to support LoadBalancer") + klog.V(1).Info("Claiming to support LoadBalancer") return &LbaasV2{LoadBalancer{network, compute, lb, os.lbOpts}}, true } @@ -635,7 +635,7 @@ func isNotFound(err error) bool { // Zones indicates that we support zones func (os *OpenStack) Zones() (cloudprovider.Zones, bool) { - glog.V(1).Info("Claiming to support Zones") + klog.V(1).Info("Claiming to support Zones") return os, true } @@ -650,7 +650,7 @@ func (os *OpenStack) GetZone(ctx context.Context) (cloudprovider.Zone, error) { FailureDomain: md.AvailabilityZone, Region: os.region, } - glog.V(4).Infof("Current zone is %v", zone) + klog.V(4).Infof("Current zone is %v", zone) return zone, nil } @@ -677,7 +677,7 @@ func (os *OpenStack) GetZoneByProviderID(ctx context.Context, providerID string) FailureDomain: srv.Metadata[availabilityZone], Region: os.region, } - glog.V(4).Infof("The instance %s in zone %v", srv.Name, zone) + klog.V(4).Infof("The instance %s in zone %v", srv.Name, zone) return zone, nil } @@ -702,13 +702,13 @@ func (os *OpenStack) GetZoneByNodeName(ctx context.Context, nodeName types.NodeN FailureDomain: srv.Metadata[availabilityZone], Region: os.region, } - glog.V(4).Infof("The instance %s in zone %v", srv.Name, zone) + klog.V(4).Infof("The instance %s in zone %v", srv.Name, zone) return zone, nil } // Routes initializes routes support func (os *OpenStack) Routes() (cloudprovider.Routes, bool) { - glog.V(4).Info("openstack.Routes() called") + klog.V(4).Info("openstack.Routes() called") network, err := os.NewNetworkV2() if err != nil { @@ -717,12 +717,12 @@ func (os *OpenStack) Routes() (cloudprovider.Routes, bool) { netExts, err := networkExtensions(network) if err != nil { - glog.Warningf("Failed to list neutron extensions: %v", err) + klog.Warningf("Failed to list neutron extensions: %v", err) return nil, false } if !netExts["extraroute"] { - glog.V(3).Info("Neutron extraroute extension not found, required for Routes support") + klog.V(3).Info("Neutron extraroute extension not found, required for Routes support") return nil, false } @@ -733,11 +733,11 @@ func (os *OpenStack) Routes() (cloudprovider.Routes, bool) { r, err := NewRoutes(compute, network, os.routeOpts) if err != nil { - glog.Warningf("Error initialising Routes support: %v", err) + klog.Warningf("Error initialising Routes support: %v", err) return nil, false } - glog.V(1).Info("Claiming to support Routes") + klog.V(1).Info("Claiming to support Routes") return r, true } @@ -755,21 +755,21 @@ func (os *OpenStack) volumeService(forceVersion string) (volumeService, error) { if err != nil { return nil, err } - glog.V(3).Info("Using Blockstorage API V1") + klog.V(3).Info("Using Blockstorage API V1") return &VolumesV1{sClient, os.bsOpts}, nil case "v2": sClient, err := os.NewBlockStorageV2() if err != nil { return nil, err } - glog.V(3).Info("Using Blockstorage API V2") + klog.V(3).Info("Using Blockstorage API V2") return &VolumesV2{sClient, os.bsOpts}, nil case "v3": sClient, err := os.NewBlockStorageV3() if err != nil { return nil, err } - glog.V(3).Info("Using Blockstorage API V3") + klog.V(3).Info("Using Blockstorage API V3") return &VolumesV3{sClient, os.bsOpts}, nil case "auto": // Currently 
kubernetes support Cinder v1 / Cinder v2 / Cinder v3. @@ -777,17 +777,17 @@ func (os *OpenStack) volumeService(forceVersion string) (volumeService, error) { // If kubernetes can't initialize cinder v2 client, try to initialize cinder v1 client. // Return appropriate message when kubernetes can't initialize them. if sClient, err := os.NewBlockStorageV3(); err == nil { - glog.V(3).Info("Using Blockstorage API V3") + klog.V(3).Info("Using Blockstorage API V3") return &VolumesV3{sClient, os.bsOpts}, nil } if sClient, err := os.NewBlockStorageV2(); err == nil { - glog.V(3).Info("Using Blockstorage API V2") + klog.V(3).Info("Using Blockstorage API V2") return &VolumesV2{sClient, os.bsOpts}, nil } if sClient, err := os.NewBlockStorageV1(); err == nil { - glog.V(3).Info("Using Blockstorage API V1") + klog.V(3).Info("Using Blockstorage API V1") return &VolumesV1{sClient, os.bsOpts}, nil } diff --git a/pkg/cloudprovider/providers/openstack/openstack_instances.go b/pkg/cloudprovider/providers/openstack/openstack_instances.go index c85f74a9be94c..c52ce21998e05 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_instances.go +++ b/pkg/cloudprovider/providers/openstack/openstack_instances.go @@ -21,9 +21,9 @@ import ( "fmt" "regexp" - "github.com/golang/glog" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -42,15 +42,15 @@ const ( // Instances returns an implementation of Instances for OpenStack. func (os *OpenStack) Instances() (cloudprovider.Instances, bool) { - glog.V(4).Info("openstack.Instances() called") + klog.V(4).Info("openstack.Instances() called") compute, err := os.NewComputeV2() if err != nil { - glog.Errorf("unable to access compute v2 API : %v", err) + klog.Errorf("unable to access compute v2 API : %v", err) return nil, false } - glog.V(4).Info("Claiming to support Instances") + klog.V(4).Info("Claiming to support Instances") return &Instances{ compute: compute, @@ -75,14 +75,14 @@ func (i *Instances) AddSSHKeyToAllInstances(ctx context.Context, user string, ke // NodeAddresses implements Instances.NodeAddresses func (i *Instances) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) { - glog.V(4).Infof("NodeAddresses(%v) called", name) + klog.V(4).Infof("NodeAddresses(%v) called", name) addrs, err := getAddressesByName(i.compute, name) if err != nil { return nil, err } - glog.V(4).Infof("NodeAddresses(%v) => %v", name, addrs) + klog.V(4).Infof("NodeAddresses(%v) => %v", name, addrs) return addrs, nil } diff --git a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go index d52097682623d..26e6b095a2c18 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go +++ b/pkg/cloudprovider/providers/openstack/openstack_loadbalancer.go @@ -24,7 +24,6 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions" "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external" @@ -38,6 +37,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" neutronports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" "github.com/gophercloud/gophercloud/pagination" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -521,16 +521,16 @@ func nodeAddressForLB(node *v1.Node) 
(string, error) { //getStringFromServiceAnnotation searches a given v1.Service for a specific annotationKey and either returns the annotation's value or a specified defaultSetting func getStringFromServiceAnnotation(service *v1.Service, annotationKey string, defaultSetting string) string { - glog.V(4).Infof("getStringFromServiceAnnotation(%v, %v, %v)", service, annotationKey, defaultSetting) + klog.V(4).Infof("getStringFromServiceAnnotation(%v, %v, %v)", service, annotationKey, defaultSetting) if annotationValue, ok := service.Annotations[annotationKey]; ok { //if there is an annotation for this setting, set the "setting" var to it // annotationValue can be empty, it is working as designed // it makes possible for instance provisioning loadbalancer without floatingip - glog.V(4).Infof("Found a Service Annotation: %v = %v", annotationKey, annotationValue) + klog.V(4).Infof("Found a Service Annotation: %v = %v", annotationKey, annotationValue) return annotationValue } //if there is no annotation, set "settings" var to the value from cloud config - glog.V(4).Infof("Could not find a Service Annotation; falling back on cloud-config setting: %v = %v", annotationKey, defaultSetting) + klog.V(4).Infof("Could not find a Service Annotation; falling back on cloud-config setting: %v = %v", annotationKey, defaultSetting) return defaultSetting } @@ -641,7 +641,7 @@ func getFloatingNetworkIDForLB(client *gophercloud.ServiceClient) (string, error } if err == ErrMultipleResults { - glog.V(4).Infof("find multiple external networks, pick the first one when there are no explicit configuration.") + klog.V(4).Infof("found multiple external networks; picking the first one since there is no explicit configuration.") return floatingNetworkIds[0], nil } return "", err @@ -661,7 +661,7 @@ func getFloatingNetworkIDForLB(client *gophercloud.ServiceClient) (string, error // EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string, apiService *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) { - glog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodes, apiService.Annotations) + klog.V(4).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)", clusterName, apiService.Namespace, apiService.Name, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, nodes, apiService.Annotations) if len(nodes) == 0 { return nil, fmt.Errorf("there are no available nodes for LoadBalancer service %s/%s", apiService.Namespace, apiService.Name) @@ -673,7 +673,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string // The LB needs to be configured with instance addresses on the same subnet, so get SubnetID by one node.
subnetID, err := getSubnetIDForLB(lbaas.compute, *nodes[0]) if err != nil { - glog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) + klog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) return nil, fmt.Errorf("no subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+ "and failed to find subnet-id from OpenStack: %v", apiService.Namespace, apiService.Name, err) } @@ -690,7 +690,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string var err error floatingPool, err = getFloatingNetworkIDForLB(lbaas.network) if err != nil { - glog.Warningf("Failed to find floating-network-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) + klog.Warningf("Failed to find floating-network-id for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) } } @@ -698,11 +698,11 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string internal := getStringFromServiceAnnotation(apiService, ServiceAnnotationLoadBalancerInternal, "false") switch internal { case "true": - glog.V(4).Infof("Ensure an internal loadbalancer service.") + klog.V(4).Infof("Ensure an internal loadbalancer service.") internalAnnotation = true case "false": if len(floatingPool) != 0 { - glog.V(4).Infof("Ensure an external loadbalancer service, using floatingPool: %v", floatingPool) + klog.V(4).Infof("Ensure an external loadbalancer service, using floatingPool: %v", floatingPool) internalAnnotation = false } else { return nil, fmt.Errorf("floating-network-id or loadbalancer.openstack.org/floating-network-id should be specified when ensuring an external loadbalancer service") @@ -746,14 +746,14 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string if err != ErrNotFound { return nil, fmt.Errorf("error getting loadbalancer %s: %v", name, err) } - glog.V(2).Infof("Creating loadbalancer %s", name) + klog.V(2).Infof("Creating loadbalancer %s", name) loadbalancer, err = lbaas.createLoadBalancer(apiService, name, internalAnnotation) if err != nil { // Unknown error, retry later return nil, fmt.Errorf("error creating loadbalancer %s: %v", name, err) } } else { - glog.V(2).Infof("LoadBalancer %s already exists", name) + klog.V(2).Infof("LoadBalancer %s already exists", name) } provisioningStatus, err := waitLoadbalancerActiveProvisioningStatus(lbaas.lb, loadbalancer.ID) @@ -773,7 +773,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string for portIndex, port := range ports { listener := getListenerForPort(oldListeners, port) if listener == nil { - glog.V(4).Infof("Creating listener for port %d", int(port.Port)) + klog.V(4).Infof("Creating listener for port %d", int(port.Port)) listener, err = listeners.Create(lbaas.lb, listeners.CreateOpts{ Name: fmt.Sprintf("listener_%s_%d", name, portIndex), Protocol: listeners.Protocol(port.Protocol), @@ -790,7 +790,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string } } - glog.V(4).Infof("Listener for %s port %d: %s", string(port.Protocol), int(port.Port), listener.ID) + klog.V(4).Infof("Listener for %s port %d: %s", string(port.Protocol), int(port.Port), listener.ID) // After all ports have been processed, remaining listeners are removed as obsolete. // Pop valid listeners. 
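The listener loop above follows the reconcile shape this file uses for listeners, pools, and members alike: find-or-create whatever the Service asks for, pop the survivors out of the set that already exists, and treat whatever is left as obsolete. Below is a self-contained sketch of that pop-and-delete shape (editorial, under the assumption of a toy listener type standing in for the gophercloud one):

package main

import "fmt"

type listener struct{ port int }

// popListener removes and returns the listener for port, if any. The
// full slice expression existing[:i:i] forces append to copy, so the
// returned value is not clobbered by the shift.
func popListener(existing []listener, port int) ([]listener, *listener) {
	for i := range existing {
		if existing[i].port == port {
			found := existing[i]
			return append(existing[:i:i], existing[i+1:]...), &found
		}
	}
	return existing, nil
}

func main() {
	existing := []listener{{80}, {443}, {9090}} // what the cloud currently has
	desired := []int{80, 8443}                  // what the Service now asks for

	var found *listener
	for _, port := range desired {
		existing, found = popListener(existing, port)
		if found == nil {
			fmt.Printf("creating listener for port %d\n", port)
			continue
		}
		fmt.Printf("keeping listener for port %d\n", port)
	}
	// Everything still in `existing` is obsolete, mirroring the
	// "remaining listeners are removed as obsolete" comment above.
	for _, l := range existing {
		fmt.Printf("deleting obsolete listener on port %d\n", l.port)
	}
}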
@@ -801,7 +801,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string return nil, fmt.Errorf("error getting pool for listener %s: %v", listener.ID, err) } if pool == nil { - glog.V(4).Infof("Creating pool for listener %s", listener.ID) + klog.V(4).Infof("Creating pool for listener %s", listener.ID) pool, err = v2pools.Create(lbaas.lb, v2pools.CreateOpts{ Name: fmt.Sprintf("pool_%s_%d", name, portIndex), Protocol: v2pools.Protocol(port.Protocol), @@ -820,7 +820,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string } - glog.V(4).Infof("Pool for listener %s: %s", listener.ID, pool.ID) + klog.V(4).Infof("Pool for listener %s: %s", listener.ID, pool.ID) members, err := getMembersByPoolID(lbaas.lb, pool.ID) if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error getting pool members %s: %v", pool.ID, err) @@ -830,7 +830,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string if err != nil { if err == ErrNotFound { // Node failure, do not create member - glog.Warningf("Failed to create LB pool member for node %s: %v", node.Name, err) + klog.Warningf("Failed to create LB pool member for node %s: %v", node.Name, err) continue } else { return nil, fmt.Errorf("error getting address for node %s: %v", node.Name, err) @@ -838,7 +838,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string } if !memberExists(members, addr, int(port.NodePort)) { - glog.V(4).Infof("Creating member for pool %s", pool.ID) + klog.V(4).Infof("Creating member for pool %s", pool.ID) _, err := v2pools.CreateMember(lbaas.lb, pool.ID, v2pools.CreateMemberOpts{ Name: fmt.Sprintf("member_%s_%d_%s", name, portIndex, node.Name), ProtocolPort: int(port.NodePort), @@ -858,12 +858,12 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string members = popMember(members, addr, int(port.NodePort)) } - glog.V(4).Infof("Ensured pool %s has member for %s at %s", pool.ID, node.Name, addr) + klog.V(4).Infof("Ensured pool %s has member for %s at %s", pool.ID, node.Name, addr) } // Delete obsolete members for this pool for _, member := range members { - glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) + klog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) err := v2pools.DeleteMember(lbaas.lb, pool.ID, member.ID).ExtractErr() if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) @@ -876,7 +876,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string monitorID := pool.MonitorID if monitorID == "" && lbaas.opts.CreateMonitor { - glog.V(4).Infof("Creating monitor for pool %s", pool.ID) + klog.V(4).Infof("Creating monitor for pool %s", pool.ID) monitor, err := v2monitors.Create(lbaas.lb, v2monitors.CreateOpts{ Name: fmt.Sprintf("monitor_%s_%d", name, portIndex), PoolID: pool.ID, @@ -894,17 +894,17 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string } monitorID = monitor.ID } else if lbaas.opts.CreateMonitor == false { - glog.V(4).Infof("Do not create monitor for pool %s when create-monitor is false", pool.ID) + klog.V(4).Infof("Do not create monitor for pool %s when create-monitor is false", pool.ID) } if monitorID != "" { - glog.V(4).Infof("Monitor for pool %s: %s", pool.ID, monitorID) + klog.V(4).Infof("Monitor for pool 
%s: %s", pool.ID, monitorID) } } // All remaining listeners are obsolete, delete for _, listener := range oldListeners { - glog.V(4).Infof("Deleting obsolete listener %s:", listener.ID) + klog.V(4).Infof("Deleting obsolete listener %s:", listener.ID) // get pool for listener pool, err := getPoolByListenerID(lbaas.lb, loadbalancer.ID, listener.ID) if err != nil && err != ErrNotFound { @@ -914,7 +914,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string // get and delete monitor monitorID := pool.MonitorID if monitorID != "" { - glog.V(4).Infof("Deleting obsolete monitor %s for pool %s", monitorID, pool.ID) + klog.V(4).Infof("Deleting obsolete monitor %s for pool %s", monitorID, pool.ID) err = v2monitors.Delete(lbaas.lb, monitorID).ExtractErr() if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error deleting obsolete monitor %s for pool %s: %v", monitorID, pool.ID, err) @@ -931,7 +931,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string } if members != nil { for _, member := range members { - glog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) + klog.V(4).Infof("Deleting obsolete member %s for pool %s address %s", member.ID, pool.ID, member.Address) err := v2pools.DeleteMember(lbaas.lb, pool.ID, member.ID).ExtractErr() if err != nil && !isNotFound(err) { return nil, fmt.Errorf("error deleting obsolete member %s for pool %s address %s: %v", member.ID, pool.ID, member.Address, err) @@ -942,7 +942,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string } } } - glog.V(4).Infof("Deleting obsolete pool %s for listener %s", pool.ID, listener.ID) + klog.V(4).Infof("Deleting obsolete pool %s for listener %s", pool.ID, listener.ID) // delete pool err = v2pools.Delete(lbaas.lb, pool.ID).ExtractErr() if err != nil && !isNotFound(err) { @@ -962,7 +962,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string if err != nil { return nil, fmt.Errorf("failed to loadbalance ACTIVE provisioning status %v: %v", provisioningStatus, err) } - glog.V(2).Infof("Deleted obsolete listener: %s", listener.ID) + klog.V(2).Infof("Deleted obsolete listener: %s", listener.ID) } portID := loadbalancer.VipPortID @@ -971,7 +971,7 @@ func (lbaas *LbaasV2) EnsureLoadBalancer(ctx context.Context, clusterName string return nil, fmt.Errorf("error getting floating ip for port %s: %v", portID, err) } if floatIP == nil && floatingPool != "" && !internalAnnotation { - glog.V(4).Infof("Creating floating ip for loadbalancer %s port %s", loadbalancer.ID, portID) + klog.V(4).Infof("Creating floating ip for loadbalancer %s port %s", loadbalancer.ID, portID) floatIPOpts := floatingips.CreateOpts{ FloatingNetworkID: floatingPool, PortID: portID, @@ -1019,7 +1019,7 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser return fmt.Errorf("failed to find node-security-group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) } } - glog.V(4).Infof("find node-security-group %v for loadbalancer service %s/%s", lbaas.opts.NodeSecurityGroupIDs, apiService.Namespace, apiService.Name) + klog.V(4).Infof("find node-security-group %v for loadbalancer service %s/%s", lbaas.opts.NodeSecurityGroupIDs, apiService.Namespace, apiService.Name) // get service ports ports := apiService.Spec.Ports @@ -1183,7 +1183,7 @@ func (lbaas *LbaasV2) ensureSecurityGroup(clusterName string, apiService *v1.Ser // UpdateLoadBalancer 
updates hosts under the specified load balancer. func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error { loadBalancerName := lbaas.GetLoadBalancerName(ctx, clusterName, service) - glog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodes) + klog.V(4).Infof("UpdateLoadBalancer(%v, %v, %v)", clusterName, loadBalancerName, nodes) lbaas.opts.SubnetID = getStringFromServiceAnnotation(service, ServiceAnnotationLoadBalancerSubnetID, lbaas.opts.SubnetID) if len(lbaas.opts.SubnetID) == 0 && len(nodes) > 0 { @@ -1191,7 +1191,7 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(ctx context.Context, clusterName string // The LB needs to be configured with instance addresses on the same subnet, so get SubnetID by one node. subnetID, err := getSubnetIDForLB(lbaas.compute, *nodes[0]) if err != nil { - glog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err) + klog.Warningf("Failed to find subnet-id for loadbalancer service %s/%s: %v", service.Namespace, service.Name, err) return fmt.Errorf("no subnet-id for service %s/%s : subnet-id not set in cloud provider config, "+ "and failed to find subnet-id from OpenStack: %v", service.Namespace, service.Name, err) } @@ -1332,7 +1332,7 @@ func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *v1.Ser if err != nil { return fmt.Errorf("failed to find node-security-group for loadbalancer service %s/%s: %v", apiService.Namespace, apiService.Name, err) } - glog.V(4).Infof("find node-security-group %v for loadbalancer service %s/%s", lbaas.opts.NodeSecurityGroupIDs, apiService.Namespace, apiService.Name) + klog.V(4).Infof("find node-security-group %v for loadbalancer service %s/%s", lbaas.opts.NodeSecurityGroupIDs, apiService.Namespace, apiService.Name) original := sets.NewString(originalNodeSecurityGroupIDs...) current := sets.NewString(lbaas.opts.NodeSecurityGroupIDs...) @@ -1406,7 +1406,7 @@ func (lbaas *LbaasV2) updateSecurityGroup(clusterName string, apiService *v1.Ser // EnsureLoadBalancerDeleted deletes the specified load balancer func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error { loadBalancerName := lbaas.GetLoadBalancerName(ctx, clusterName, service) - glog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v)", clusterName, loadBalancerName) + klog.V(4).Infof("EnsureLoadBalancerDeleted(%v, %v)", clusterName, loadBalancerName) loadbalancer, err := getLoadbalancerByName(lbaas.lb, loadBalancerName) if err != nil && err != ErrNotFound { @@ -1550,7 +1550,7 @@ func (lbaas *LbaasV2) EnsureSecurityGroupDeleted(clusterName string, service *v1 // Just happen when nodes have not Security Group, or should not happen // UpdateLoadBalancer and EnsureLoadBalancer can set lbaas.opts.NodeSecurityGroupIDs when it is empty // And service controller call UpdateLoadBalancer to set lbaas.opts.NodeSecurityGroupIDs when controller manager service is restarted. 
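// Editorial sketch, not code from this patch: updateSecurityGroup above
// builds `original` and `current` node-security-group ID sets with
// sets.NewString. The hunk does not show the reconciliation itself; the
// usual move with apimachinery's string sets is a pair of set
// differences -- presumably rules are removed from groups that dropped
// out and added to groups that appeared.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	original := sets.NewString("sg-a", "sg-b") // groups recorded previously
	current := sets.NewString("sg-b", "sg-c")  // groups found on nodes now

	removed := original.Difference(current) // clean LB rules out of these
	added := current.Difference(original)   // install LB rules into these

	fmt.Println("remove rules from:", removed.List())
	fmt.Println("add rules to:", added.List())
}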
- glog.Warningf("Can not find node-security-group from all the nodes of this cluster when delete loadbalancer service %s/%s", + klog.Warningf("Can not find node-security-group from all the nodes of this cluster when delete loadbalancer service %s/%s", service.Namespace, service.Name) } else { // Delete the rules in the Node Security Group diff --git a/pkg/cloudprovider/providers/openstack/openstack_routes.go b/pkg/cloudprovider/providers/openstack/openstack_routes.go index f9434e3b30b1f..0cb31a435a748 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_routes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_routes.go @@ -26,9 +26,9 @@ import ( "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" neutronports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" ) var errNoRouterID = errors.New("router-id not set in cloud provider config") @@ -55,7 +55,7 @@ func NewRoutes(compute *gophercloud.ServiceClient, network *gophercloud.ServiceC // ListRoutes lists all managed routes that belong to the specified clusterName func (r *Routes) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { - glog.V(4).Infof("ListRoutes(%v)", clusterName) + klog.V(4).Infof("ListRoutes(%v)", clusterName) nodeNamesByAddr := make(map[string]types.NodeName) err := foreachServer(r.compute, servers.ListOpts{}, func(srv *servers.Server) (bool, error) { @@ -109,12 +109,12 @@ func updateRoutes(network *gophercloud.ServiceClient, router *routers.Router, ne } unwinder := func() { - glog.V(4).Info("Reverting routes change to router ", router.ID) + klog.V(4).Info("Reverting routes change to router ", router.ID) _, err := routers.Update(network, router.ID, routers.UpdateOpts{ Routes: origRoutes, }).Extract() if err != nil { - glog.Warning("Unable to reset routes during error unwind: ", err) + klog.Warning("Unable to reset routes during error unwind: ", err) } } @@ -132,12 +132,12 @@ func updateAllowedAddressPairs(network *gophercloud.ServiceClient, port *neutron } unwinder := func() { - glog.V(4).Info("Reverting allowed-address-pairs change to port ", port.ID) + klog.V(4).Info("Reverting allowed-address-pairs change to port ", port.ID) _, err := neutronports.Update(network, port.ID, neutronports.UpdateOpts{ AllowedAddressPairs: &origPairs, }).Extract() if err != nil { - glog.Warning("Unable to reset allowed-address-pairs during error unwind: ", err) + klog.Warning("Unable to reset allowed-address-pairs during error unwind: ", err) } } @@ -146,7 +146,7 @@ func updateAllowedAddressPairs(network *gophercloud.ServiceClient, port *neutron // CreateRoute creates the described managed route func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error { - glog.V(4).Infof("CreateRoute(%v, %v, %v)", clusterName, nameHint, route) + klog.V(4).Infof("CreateRoute(%v, %v, %v)", clusterName, nameHint, route) onFailure := newCaller() @@ -158,7 +158,7 @@ func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint s return err } - glog.V(4).Infof("Using nexthop %v for node %v", addr, route.TargetNode) + klog.V(4).Infof("Using nexthop %v for node %v", addr, route.TargetNode) router, err := routers.Get(r.network, r.opts.RouterID).Extract() if err != nil { @@ -169,7 +169,7 @@ func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint s for _, 
item := range routes { if item.DestinationCIDR == route.DestinationCIDR && item.NextHop == addr { - glog.V(4).Infof("Skipping existing route: %v", route) + klog.V(4).Infof("Skipping existing route: %v", route) return nil } } @@ -198,7 +198,7 @@ func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint s found := false for _, item := range port.AllowedAddressPairs { if item.IPAddress == route.DestinationCIDR { - glog.V(4).Info("Found existing allowed-address-pair: ", item) + klog.V(4).Info("Found existing allowed-address-pair: ", item) found = true break } @@ -215,14 +215,14 @@ func (r *Routes) CreateRoute(ctx context.Context, clusterName string, nameHint s defer onFailure.call(unwind) } - glog.V(4).Infof("Route created: %v", route) + klog.V(4).Infof("Route created: %v", route) onFailure.disarm() return nil } // DeleteRoute deletes the specified managed route func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error { - glog.V(4).Infof("DeleteRoute(%v, %v)", clusterName, route) + klog.V(4).Infof("DeleteRoute(%v, %v)", clusterName, route) onFailure := newCaller() @@ -255,7 +255,7 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *clo } if index == -1 { - glog.V(4).Infof("Skipping non-existent route: %v", route) + klog.V(4).Infof("Skipping non-existent route: %v", route) return nil } @@ -301,7 +301,7 @@ func (r *Routes) DeleteRoute(ctx context.Context, clusterName string, route *clo defer onFailure.call(unwind) } - glog.V(4).Infof("Route deleted: %v", route) + klog.V(4).Infof("Route deleted: %v", route) onFailure.disarm() return nil } diff --git a/pkg/cloudprovider/providers/openstack/openstack_volumes.go b/pkg/cloudprovider/providers/openstack/openstack_volumes.go index d3e5cf1d3b6b7..68b8f92a81235 100644 --- a/pkg/cloudprovider/providers/openstack/openstack_volumes.go +++ b/pkg/cloudprovider/providers/openstack/openstack_volumes.go @@ -42,7 +42,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" "github.com/prometheus/client_golang/prometheus" - "github.com/golang/glog" + "k8s.io/klog" ) type volumeService interface { @@ -334,19 +334,19 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) { if volume.AttachedServerID != "" { if instanceID == volume.AttachedServerID { - glog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID) + klog.V(4).Infof("Disk %s is already attached to instance %s", volumeID, instanceID) return volume.ID, nil } nodeName, err := os.GetNodeNameByID(volume.AttachedServerID) attachErr := fmt.Sprintf("disk %s path %s is attached to a different instance (%s)", volumeID, volume.AttachedDevice, volume.AttachedServerID) if err != nil { - glog.Error(attachErr) + klog.Error(attachErr) return "", errors.New(attachErr) } // using volume.AttachedDevice may cause problems because cinder does not report device path correctly see issue #33128 devicePath := volume.AttachedDevice danglingErr := volumeutil.NewDanglingError(attachErr, nodeName, devicePath) - glog.V(2).Infof("Found dangling volume %s attached to node %s", volumeID, nodeName) + klog.V(2).Infof("Found dangling volume %s attached to node %s", volumeID, nodeName) return "", danglingErr } @@ -360,7 +360,7 @@ func (os *OpenStack) AttachDisk(instanceID, volumeID string) (string, error) { if err != nil { return "", fmt.Errorf("failed to attach %s volume to %s compute: %v", volumeID, instanceID, err) } - glog.V(2).Infof("Successfully attached 
%s volume to %s compute", volumeID, instanceID) + klog.V(2).Infof("Successfully attached %s volume to %s compute", volumeID, instanceID) return volume.ID, nil } @@ -372,7 +372,7 @@ func (os *OpenStack) DetachDisk(instanceID, volumeID string) error { } if volume.Status == volumeAvailableStatus { // "available" is fine since that means the volume is detached from instance already. - glog.V(2).Infof("volume: %s has been detached from compute: %s ", volume.ID, instanceID) + klog.V(2).Infof("volume: %s has been detached from compute: %s ", volume.ID, instanceID) return nil } @@ -396,7 +396,7 @@ func (os *OpenStack) DetachDisk(instanceID, volumeID string) error { if err != nil { return fmt.Errorf("failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err) } - glog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID) + klog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID) return nil } @@ -468,7 +468,7 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str return "", "", "", os.bsOpts.IgnoreVolumeAZ, fmt.Errorf("failed to create a %d GB volume: %v", size, err) } - glog.Infof("Created volume %v in Availability Zone: %v Region: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ) + klog.Infof("Created volume %v in Availability Zone: %v Region: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ) return volumeID, volumeAZ, os.region, os.bsOpts.IgnoreVolumeAZ, nil } @@ -490,13 +490,13 @@ func (os *OpenStack) GetDevicePathBySerialID(volumeID string) string { for _, f := range files { for _, c := range candidateDeviceNodes { if c == f.Name() { - glog.V(4).Infof("Found disk attached as %q; full devicepath: %s\n", f.Name(), path.Join("/dev/disk/by-id/", f.Name())) + klog.V(4).Infof("Found disk attached as %q; full devicepath: %s\n", f.Name(), path.Join("/dev/disk/by-id/", f.Name())) return path.Join("/dev/disk/by-id/", f.Name()) } } } - glog.V(4).Infof("Failed to find device for the volumeID: %q by serial ID", volumeID) + klog.V(4).Infof("Failed to find device for the volumeID: %q by serial ID", volumeID) return "" } @@ -511,14 +511,14 @@ func (os *OpenStack) getDevicePathFromInstanceMetadata(volumeID string) string { newtonMetadataVersion) if err != nil { - glog.V(4).Infof( + klog.V(4).Infof( "Could not retrieve instance metadata. Error: %v", err) return "" } for _, device := range instanceMetadata.Devices { if device.Type == "disk" && device.Serial == volumeID { - glog.V(4).Infof( + klog.V(4).Infof( "Found disk metadata for volumeID %q. Bus: %q, Address: %q", volumeID, device.Bus, device.Address) @@ -527,7 +527,7 @@ func (os *OpenStack) getDevicePathFromInstanceMetadata(volumeID string) string { device.Bus, device.Address) diskPaths, err := filepath.Glob(diskPattern) if err != nil { - glog.Errorf( + klog.Errorf( "could not retrieve disk path for volumeID: %q. 
Error filepath.Glob(%q): %v", volumeID, diskPattern, err) return "" @@ -537,14 +537,14 @@ func (os *OpenStack) getDevicePathFromInstanceMetadata(volumeID string) string { return diskPaths[0] } - glog.Errorf( + klog.Errorf( "expecting to find one disk path for volumeID %q, found %d: %v", volumeID, len(diskPaths), diskPaths) return "" } } - glog.V(4).Infof( + klog.V(4).Infof( "Could not retrieve device metadata for volumeID: %q", volumeID) return "" } @@ -558,7 +558,7 @@ func (os *OpenStack) GetDevicePath(volumeID string) string { } if devicePath == "" { - glog.Warningf("Failed to find device for the volumeID: %q", volumeID) + klog.Warningf("Failed to find device for the volumeID: %q", volumeID) } return devicePath @@ -610,7 +610,7 @@ func (os *OpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, // DiskIsAttached queries if a volume is attached to a compute instance func (os *OpenStack) DiskIsAttached(instanceID, volumeID string) (bool, error) { if instanceID == "" { - glog.Warningf("calling DiskIsAttached with empty instanceid: %s %s", instanceID, volumeID) + klog.Warningf("calling DiskIsAttached with empty instanceid: %s %s", instanceID, volumeID) } volume, err := os.getVolume(volumeID) if err != nil { @@ -717,7 +717,7 @@ func (os *OpenStack) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVo labels := make(map[string]string) labels[kubeletapis.LabelZoneFailureDomain] = volume.AvailabilityZone labels[kubeletapis.LabelZoneRegion] = os.region - glog.V(4).Infof("The Volume %s has labels %v", pv.Spec.Cinder.VolumeID, labels) + klog.V(4).Infof("The Volume %s has labels %v", pv.Spec.Cinder.VolumeID, labels) return labels, nil } diff --git a/pkg/cloudprovider/providers/photon/BUILD b/pkg/cloudprovider/providers/photon/BUILD index df4311f04ef86..57a17c416f6da 100644 --- a/pkg/cloudprovider/providers/photon/BUILD +++ b/pkg/cloudprovider/providers/photon/BUILD @@ -15,9 +15,9 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/vmware/photon-controller-go-sdk/photon:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/cloudprovider/providers/photon/photon.go b/pkg/cloudprovider/providers/photon/photon.go index bcf5e31344997..24f8d2b8dd8d5 100644 --- a/pkg/cloudprovider/providers/photon/photon.go +++ b/pkg/cloudprovider/providers/photon/photon.go @@ -34,12 +34,12 @@ import ( "os" "strings" - "github.com/golang/glog" "github.com/vmware/photon-controller-go-sdk/photon" "gopkg.in/gcfg.v1" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" ) @@ -135,7 +135,7 @@ func init() { cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { cfg, err := readConfig(config) if err != nil { - glog.Errorf("Photon Cloud Provider: failed to read in cloud provider config file. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: failed to read in cloud provider config file. 
Error[%v]", err) return nil, err } return newPCCloud(cfg) @@ -146,13 +146,13 @@ func init() { func getVMIDbyNodename(pc *PCCloud, nodeName string) (string, error) { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for getVMIDbyNodename, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for getVMIDbyNodename, error: [%v]", err) return "", err } vmList, err := photonClient.Projects.GetVMs(pc.projID, nil) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to GetVMs from project %s with nodeName %s, error: [%v]", pc.projID, nodeName, err) + klog.Errorf("Photon Cloud Provider: Failed to GetVMs from project %s with nodeName %s, error: [%v]", pc.projID, nodeName, err) return "", err } @@ -169,24 +169,24 @@ func getVMIDbyNodename(pc *PCCloud, nodeName string) (string, error) { func getVMIDbyIP(pc *PCCloud, IPAddress string) (string, error) { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for getVMIDbyNodename, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for getVMIDbyNodename, error: [%v]", err) return "", err } vmList, err := photonClient.Projects.GetVMs(pc.projID, nil) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to GetVMs for project %s. error: [%v]", pc.projID, err) + klog.Errorf("Photon Cloud Provider: Failed to GetVMs for project %s. error: [%v]", pc.projID, err) return "", err } for _, vm := range vmList.Items { task, err := photonClient.VMs.GetNetworks(vm.ID) if err != nil { - glog.Warningf("Photon Cloud Provider: GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err) + klog.Warningf("Photon Cloud Provider: GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err) } else { task, err = photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Warningf("Photon Cloud Provider: Wait task for GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err) + klog.Warningf("Photon Cloud Provider: Wait task for GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err) } else { networkConnections := task.ResourceProperties.(map[string]interface{}) networks := networkConnections["networkConnections"].([]interface{}) @@ -221,25 +221,25 @@ func getPhotonClient(pc *PCCloud) (*photon.Client, error) { // work around before metadata is available file, err := os.Open("/etc/kubernetes/pc_login_info") if err != nil { - glog.Errorf("Photon Cloud Provider: Authentication is enabled but found no username/password at /etc/kubernetes/pc_login_info. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: Authentication is enabled but found no username/password at /etc/kubernetes/pc_login_info. 
Error[%v]", err) return nil, err } defer file.Close() scanner := bufio.NewScanner(file) if !scanner.Scan() { - glog.Error("Photon Cloud Provider: Empty username inside /etc/kubernetes/pc_login_info.") + klog.Error("Photon Cloud Provider: Empty username inside /etc/kubernetes/pc_login_info.") return nil, fmt.Errorf("Failed to create authentication enabled client with invalid username") } username := scanner.Text() if !scanner.Scan() { - glog.Error("Photon Cloud Provider: Empty password set inside /etc/kubernetes/pc_login_info.") + klog.Error("Photon Cloud Provider: Empty password set inside /etc/kubernetes/pc_login_info.") return nil, fmt.Errorf("Failed to create authentication enabled client with invalid password") } password := scanner.Text() token_options, err := pc.photonClient.Auth.GetTokensByPassword(username, password) if err != nil { - glog.Error("Photon Cloud Provider: failed to get tokens by password") + klog.Error("Photon Cloud Provider: failed to get tokens by password") return nil, err } @@ -254,10 +254,10 @@ func getPhotonClient(pc *PCCloud) (*photon.Client, error) { status, err := pc.photonClient.Status.Get() if err != nil { - glog.Errorf("Photon Cloud Provider: new client creation failed. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: new client creation failed. Error[%v]", err) return nil, err } - glog.V(2).Infof("Photon Cloud Provider: Status of the new photon controller client: %v", status) + klog.V(2).Infof("Photon Cloud Provider: Status of the new photon controller client: %v", status) return pc.photonClient, nil } @@ -269,7 +269,7 @@ func newPCCloud(cfg PCConfig) (*PCCloud, error) { // Get local hostname hostname, err := os.Hostname() if err != nil { - glog.Errorf("Photon Cloud Provider: get hostname failed. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: get hostname failed. Error[%v]", err) return nil, err } pc := PCCloud{ @@ -307,14 +307,14 @@ func (pc *PCCloud) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName if name == pc.localK8sHostname { ifaces, err := net.Interfaces() if err != nil { - glog.Errorf("Photon Cloud Provider: net.Interfaces() failed for NodeAddresses. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: net.Interfaces() failed for NodeAddresses. Error[%v]", err) return nodeAddrs, err } for _, i := range ifaces { addrs, err := i.Addrs() if err != nil { - glog.Warningf("Photon Cloud Provider: Failed to extract addresses for NodeAddresses. Error[%v]", err) + klog.Warningf("Photon Cloud Provider: Failed to extract addresses for NodeAddresses. Error[%v]", err) } else { for _, addr := range addrs { if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { @@ -348,20 +348,20 @@ func (pc *PCCloud) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName // This is assumed to be done by master only. vmID, err := getInstanceID(pc, name) if err != nil { - glog.Errorf("Photon Cloud Provider: getInstanceID failed for NodeAddresses. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: getInstanceID failed for NodeAddresses. 
Error[%v]", err) return nodeAddrs, err } photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for NodeAddresses, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for NodeAddresses, error: [%v]", err) return nodeAddrs, err } // Retrieve the Photon VM's IP addresses from the Photon Controller endpoint based on the VM ID vmList, err := photonClient.Projects.GetVMs(pc.projID, nil) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to GetVMs for project %s. Error[%v]", pc.projID, err) + klog.Errorf("Photon Cloud Provider: Failed to GetVMs for project %s. Error[%v]", pc.projID, err) return nodeAddrs, err } @@ -369,12 +369,12 @@ func (pc *PCCloud) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName if vm.ID == vmID { task, err := photonClient.VMs.GetNetworks(vm.ID) if err != nil { - glog.Errorf("Photon Cloud Provider: GetNetworks failed for node %s with vm.ID %s. Error[%v]", name, vm.ID, err) + klog.Errorf("Photon Cloud Provider: GetNetworks failed for node %s with vm.ID %s. Error[%v]", name, vm.ID, err) return nodeAddrs, err } else { task, err = photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Errorf("Photon Cloud Provider: Wait task for GetNetworks failed for node %s with vm.ID %s. Error[%v]", name, vm.ID, err) + klog.Errorf("Photon Cloud Provider: Wait task for GetNetworks failed for node %s with vm.ID %s. Error[%v]", name, vm.ID, err) return nodeAddrs, err } else { networkConnections := task.ResourceProperties.(map[string]interface{}) @@ -414,7 +414,7 @@ func (pc *PCCloud) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName } } - glog.Errorf("Failed to find the node %s from Photon Controller endpoint", name) + klog.Errorf("Failed to find the node %s from Photon Controller endpoint", name) return nodeAddrs, fmt.Errorf("Failed to find the node %s from Photon Controller endpoint", name) } @@ -474,7 +474,7 @@ func (pc *PCCloud) InstanceID(ctx context.Context, nodeName k8stypes.NodeName) ( // We assume only master need to get InstanceID of a node other than itself ID, err := getInstanceID(pc, name) if err != nil { - glog.Errorf("Photon Cloud Provider: getInstanceID failed for InstanceID. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: getInstanceID failed for InstanceID. Error[%v]", err) return ID, err } else { return ID, nil @@ -544,7 +544,7 @@ func (pc *PCCloud) HasClusterID() bool { func (pc *PCCloud) AttachDisk(ctx context.Context, pdID string, nodeName k8stypes.NodeName) error { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for AttachDisk, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for AttachDisk, error: [%v]", err) return err } @@ -554,19 +554,19 @@ func (pc *PCCloud) AttachDisk(ctx context.Context, pdID string, nodeName k8stype vmID, err := pc.InstanceID(ctx, nodeName) if err != nil { - glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for AttachDisk. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: pc.InstanceID failed for AttachDisk. Error[%v]", err) return err } task, err := photonClient.VMs.AttachDisk(vmID, operation) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to attach disk with pdID %s. Error[%v]", pdID, err) + klog.Errorf("Photon Cloud Provider: Failed to attach disk with pdID %s. 
Error[%v]", pdID, err) return err } _, err = photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to wait for task to attach disk with pdID %s. Error[%v]", pdID, err) + klog.Errorf("Photon Cloud Provider: Failed to wait for task to attach disk with pdID %s. Error[%v]", pdID, err) return err } @@ -577,7 +577,7 @@ func (pc *PCCloud) AttachDisk(ctx context.Context, pdID string, nodeName k8stype func (pc *PCCloud) DetachDisk(ctx context.Context, pdID string, nodeName k8stypes.NodeName) error { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for DetachDisk, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for DetachDisk, error: [%v]", err) return err } @@ -587,19 +587,19 @@ func (pc *PCCloud) DetachDisk(ctx context.Context, pdID string, nodeName k8stype vmID, err := pc.InstanceID(ctx, nodeName) if err != nil { - glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DetachDisk. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DetachDisk. Error[%v]", err) return err } task, err := photonClient.VMs.DetachDisk(vmID, operation) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to detach disk with pdID %s. Error[%v]", pdID, err) + klog.Errorf("Photon Cloud Provider: Failed to detach disk with pdID %s. Error[%v]", pdID, err) return err } _, err = photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to wait for task to detach disk with pdID %s. Error[%v]", pdID, err) + klog.Errorf("Photon Cloud Provider: Failed to wait for task to detach disk with pdID %s. Error[%v]", pdID, err) return err } @@ -610,23 +610,23 @@ func (pc *PCCloud) DetachDisk(ctx context.Context, pdID string, nodeName k8stype func (pc *PCCloud) DiskIsAttached(ctx context.Context, pdID string, nodeName k8stypes.NodeName) (bool, error) { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for DiskIsAttached, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for DiskIsAttached, error: [%v]", err) return false, err } disk, err := photonClient.Disks.Get(pdID) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to Get disk with pdID %s. Error[%v]", pdID, err) + klog.Errorf("Photon Cloud Provider: Failed to Get disk with pdID %s. Error[%v]", pdID, err) return false, err } vmID, err := pc.InstanceID(ctx, nodeName) if err == cloudprovider.InstanceNotFound { - glog.Infof("Instance %q does not exist, disk %s will be detached automatically.", nodeName, pdID) + klog.Infof("Instance %q does not exist, disk %s will be detached automatically.", nodeName, pdID) return false, nil } if err != nil { - glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DiskIsAttached. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DiskIsAttached. 
Error[%v]", err) return false, err } @@ -644,7 +644,7 @@ func (pc *PCCloud) DisksAreAttached(ctx context.Context, pdIDs []string, nodeNam attached := make(map[string]bool) photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for DisksAreAttached, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for DisksAreAttached, error: [%v]", err) return attached, err } @@ -654,19 +654,19 @@ func (pc *PCCloud) DisksAreAttached(ctx context.Context, pdIDs []string, nodeNam vmID, err := pc.InstanceID(ctx, nodeName) if err == cloudprovider.InstanceNotFound { - glog.Infof("Instance %q does not exist, its disks will be detached automatically.", nodeName) + klog.Infof("Instance %q does not exist, its disks will be detached automatically.", nodeName) // make all the disks as detached. return attached, nil } if err != nil { - glog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DiskIsAttached. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: pc.InstanceID failed for DiskIsAttached. Error[%v]", err) return attached, err } for _, pdID := range pdIDs { disk, err := photonClient.Disks.Get(pdID) if err != nil { - glog.Warningf("Photon Cloud Provider: failed to get VMs for persistent disk %s, err [%v]", pdID, err) + klog.Warningf("Photon Cloud Provider: failed to get VMs for persistent disk %s, err [%v]", pdID, err) } else { for _, vm := range disk.VMs { if vm == vmID { @@ -683,7 +683,7 @@ func (pc *PCCloud) DisksAreAttached(ctx context.Context, pdIDs []string, nodeNam func (pc *PCCloud) CreateDisk(volumeOptions *VolumeOptions) (pdID string, err error) { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for CreateDisk, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for CreateDisk, error: [%v]", err) return "", err } @@ -695,13 +695,13 @@ func (pc *PCCloud) CreateDisk(volumeOptions *VolumeOptions) (pdID string, err er task, err := photonClient.Projects.CreateDisk(pc.projID, &diskSpec) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to CreateDisk. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to CreateDisk. Error[%v]", err) return "", err } waitTask, err := photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to wait for task to CreateDisk. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to wait for task to CreateDisk. Error[%v]", err) return "", err } @@ -712,19 +712,19 @@ func (pc *PCCloud) CreateDisk(volumeOptions *VolumeOptions) (pdID string, err er func (pc *PCCloud) DeleteDisk(pdID string) error { photonClient, err := getPhotonClient(pc) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to get photon client for DeleteDisk, error: [%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to get photon client for DeleteDisk, error: [%v]", err) return err } task, err := photonClient.Disks.Delete(pdID) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to DeleteDisk. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to DeleteDisk. Error[%v]", err) return err } _, err = photonClient.Tasks.Wait(task.ID) if err != nil { - glog.Errorf("Photon Cloud Provider: Failed to wait for task to DeleteDisk. Error[%v]", err) + klog.Errorf("Photon Cloud Provider: Failed to wait for task to DeleteDisk. 
Error[%v]", err) return err } diff --git a/pkg/cloudprovider/providers/vsphere/BUILD b/pkg/cloudprovider/providers/vsphere/BUILD index 3eb30f03eb2ce..6e3034f86d0cc 100644 --- a/pkg/cloudprovider/providers/vsphere/BUILD +++ b/pkg/cloudprovider/providers/vsphere/BUILD @@ -27,12 +27,12 @@ go_library( "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/vmware/govmomi/vapi/rest:go_default_library", "//vendor/github.com/vmware/govmomi/vapi/tags:go_default_library", "//vendor/github.com/vmware/govmomi/vim25:go_default_library", "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/cloudprovider/providers/vsphere/credentialmanager.go b/pkg/cloudprovider/providers/vsphere/credentialmanager.go index 95862a8a5aa2a..a3b651495f61d 100644 --- a/pkg/cloudprovider/providers/vsphere/credentialmanager.go +++ b/pkg/cloudprovider/providers/vsphere/credentialmanager.go @@ -19,10 +19,10 @@ package vsphere import ( "errors" "fmt" - "github.com/golang/glog" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/listers/core/v1" + "k8s.io/klog" "net/http" "strings" "sync" @@ -71,12 +71,12 @@ func (secretCredentialManager *SecretCredentialManager) GetCredential(server str return nil, err } // Handle secrets deletion by finding credentials from cache - glog.Warningf("secret %q not found in namespace %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace) + klog.Warningf("secret %q not found in namespace %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace) } credential, found := secretCredentialManager.Cache.GetCredential(server) if !found { - glog.Errorf("credentials not found for server %q", server) + klog.Errorf("credentials not found for server %q", server) return nil, ErrCredentialsNotFound } return &credential, nil @@ -88,13 +88,13 @@ func (secretCredentialManager *SecretCredentialManager) updateCredentialsMap() e } secret, err := secretCredentialManager.SecretLister.Secrets(secretCredentialManager.SecretNamespace).Get(secretCredentialManager.SecretName) if err != nil { - glog.Errorf("Cannot get secret %s in namespace %s. error: %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace, err) + klog.Errorf("Cannot get secret %s in namespace %s. error: %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace, err) return err } cacheSecret := secretCredentialManager.Cache.GetSecret() if cacheSecret != nil && cacheSecret.GetResourceVersion() == secret.GetResourceVersion() { - glog.V(4).Infof("VCP SecretCredentialManager: Secret %q will not be updated in cache. Since, secrets have same resource version %q", secretCredentialManager.SecretName, cacheSecret.GetResourceVersion()) + klog.V(4).Infof("VCP SecretCredentialManager: Secret %q will not be updated in cache. 
Since, secrets have same resource version %q", secretCredentialManager.SecretName, cacheSecret.GetResourceVersion()) return nil } secretCredentialManager.Cache.UpdateSecret(secret) @@ -150,13 +150,13 @@ func parseConfig(data map[string][]byte, config map[string]*Credential) error { } config[vcServer].User = string(credentialValue) } else { - glog.Errorf("Unknown secret key %s", credentialKey) + klog.Errorf("Unknown secret key %s", credentialKey) return ErrUnknownSecretKey } } for vcServer, credential := range config { if credential.User == "" || credential.Password == "" { - glog.Errorf("Username/Password is missing for server %s", vcServer) + klog.Errorf("Username/Password is missing for server %s", vcServer) return ErrCredentialMissing } } diff --git a/pkg/cloudprovider/providers/vsphere/nodemanager.go b/pkg/cloudprovider/providers/vsphere/nodemanager.go index 8d62eebb15bc4..92f4c55d5e0ac 100644 --- a/pkg/cloudprovider/providers/vsphere/nodemanager.go +++ b/pkg/cloudprovider/providers/vsphere/nodemanager.go @@ -22,9 +22,9 @@ import ( "strings" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" ) @@ -81,11 +81,11 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { queueChannel = make(chan *VmSearch, QUEUE_SIZE) nodeUUID, err := GetNodeUUID(node) if err != nil { - glog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err) + klog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err) return err } - glog.V(4).Infof("Discovering node %s with uuid %s", node.ObjectMeta.Name, nodeUUID) + klog.V(4).Infof("Discovering node %s with uuid %s", node.ObjectMeta.Name, nodeUUID) vmFound := false globalErr = nil @@ -124,7 +124,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { err := nm.vcConnect(ctx, vsi) if err != nil { - glog.V(4).Info("Discovering node error vc:", err) + klog.V(4).Info("Discovering node error vc:", err) setGlobalErr(err) continue } @@ -132,7 +132,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { if vsi.cfg.Datacenters == "" { datacenterObjs, err = vclib.GetAllDatacenter(ctx, vsi.conn) if err != nil { - glog.V(4).Info("Discovering node error dc:", err) + klog.V(4).Info("Discovering node error dc:", err) setGlobalErr(err) continue } @@ -145,7 +145,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { } datacenterObj, err := vclib.GetDatacenter(ctx, vsi.conn, dc) if err != nil { - glog.V(4).Info("Discovering node error dc:", err) + klog.V(4).Info("Discovering node error dc:", err) setGlobalErr(err) continue } @@ -159,7 +159,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { break } - glog.V(4).Infof("Finding node %s in vc=%s and datacenter=%s", node.Name, vc, datacenterObj.Name()) + klog.V(4).Infof("Finding node %s in vc=%s and datacenter=%s", node.Name, vc, datacenterObj.Name()) queueChannel <- &VmSearch{ vc: vc, datacenter: datacenterObj, @@ -176,18 +176,18 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { defer cancel() vm, err := res.datacenter.GetVMByUUID(ctx, nodeUUID) if err != nil { - glog.V(4).Infof("Error while looking for vm=%+v in vc=%s and datacenter=%s: %v", + klog.V(4).Infof("Error while looking for vm=%+v in vc=%s and datacenter=%s: %v", vm, res.vc, res.datacenter.Name(), err) if err != vclib.ErrNoVMFound { setGlobalErr(err) } else { - glog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s", + 
klog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s", node.Name, res.vc, res.datacenter.Name()) } continue } if vm != nil { - glog.V(4).Infof("Found node %s as vm=%+v in vc=%s and datacenter=%s", + klog.V(4).Infof("Found node %s as vm=%+v in vc=%s and datacenter=%s", node.Name, vm, res.vc, res.datacenter.Name()) nodeInfo := &NodeInfo{dataCenter: res.datacenter, vm: vm, vcServer: res.vc, vmUUID: nodeUUID} @@ -210,7 +210,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { return *globalErr } - glog.V(4).Infof("Discovery Node: %q vm not found", node.Name) + klog.V(4).Infof("Discovery Node: %q vm not found", node.Name) return vclib.ErrNoVMFound } @@ -276,19 +276,19 @@ func (nm *NodeManager) GetNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error) var err error if nodeInfo == nil { // Rediscover node if no NodeInfo found. - glog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", convertToString(nodeName)) + klog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", convertToString(nodeName)) err = nm.RediscoverNode(nodeName) if err != nil { - glog.Errorf("Error %q node info for node %q not found", err, convertToString(nodeName)) + klog.Errorf("Error %q node info for node %q not found", err, convertToString(nodeName)) return NodeInfo{}, err } nodeInfo = getNodeInfo(nodeName) } else { // Renew the found NodeInfo to avoid stale vSphere connection. - glog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, convertToString(nodeName)) + klog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, convertToString(nodeName)) nodeInfo, err = nm.renewNodeInfo(nodeInfo, true) if err != nil { - glog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, convertToString(nodeName)) + klog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, convertToString(nodeName)) return NodeInfo{}, err } nm.addNodeInfo(convertToString(nodeName), nodeInfo) @@ -309,7 +309,7 @@ func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) { if err != nil { return nil, err } - glog.V(4).Infof("Updated NodeInfo %v for node %q.", nodeInfo, nodeName) + klog.V(4).Infof("Updated NodeInfo %v for node %q.", nodeInfo, nodeName) nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm, nodeInfo.vmUUID}) } return nodeDetails, nil @@ -324,7 +324,7 @@ func (nm *NodeManager) addNodeInfo(nodeName string, nodeInfo *NodeInfo) { func (nm *NodeManager) GetVSphereInstance(nodeName k8stypes.NodeName) (VSphereInstance, error) { nodeInfo, err := nm.GetNodeInfo(nodeName) if err != nil { - glog.V(4).Infof("node info for node %q not found", convertToString(nodeName)) + klog.V(4).Infof("node info for node %q not found", convertToString(nodeName)) return VSphereInstance{}, err } vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer] @@ -379,16 +379,16 @@ func (nm *NodeManager) vcConnect(ctx context.Context, vsphereInstance *VSphereIn credentialManager := nm.CredentialManager() if !vclib.IsInvalidCredentialsError(err) || credentialManager == nil { - glog.Errorf("Cannot connect to vCenter with err: %v", err) + klog.Errorf("Cannot connect to vCenter with err: %v", err) return err } - glog.V(4).Infof("Invalid credentials. Cannot connect to server %q. Fetching credentials from secrets.", vsphereInstance.conn.Hostname) + klog.V(4).Infof("Invalid credentials. Cannot connect to server %q. 
Fetching credentials from secrets.", vsphereInstance.conn.Hostname) // Get latest credentials from SecretCredentialManager credentials, err := credentialManager.GetCredential(vsphereInstance.conn.Hostname) if err != nil { - glog.Errorf("Failed to get credentials from Secret Credential Manager with err: %v", err) + klog.Errorf("Failed to get credentials from Secret Credential Manager with err: %v", err) return err } vsphereInstance.conn.UpdateCredentials(credentials.User, credentials.Password) @@ -412,19 +412,19 @@ func (nm *NodeManager) GetNodeInfoWithNodeObject(node *v1.Node) (NodeInfo, error var err error if nodeInfo == nil { // Rediscover node if no NodeInfo found. - glog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", nodeName) + klog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", nodeName) err = nm.DiscoverNode(node) if err != nil { - glog.Errorf("Error %q node info for node %q not found", err, nodeName) + klog.Errorf("Error %q node info for node %q not found", err, nodeName) return NodeInfo{}, err } nodeInfo = getNodeInfo(nodeName) } else { // Renew the found NodeInfo to avoid stale vSphere connection. - glog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, nodeName) + klog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, nodeName) nodeInfo, err = nm.renewNodeInfo(nodeInfo, true) if err != nil { - glog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, nodeName) + klog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, nodeName) return NodeInfo{}, err } nm.addNodeInfo(nodeName, nodeInfo) diff --git a/pkg/cloudprovider/providers/vsphere/vclib/BUILD b/pkg/cloudprovider/providers/vsphere/vclib/BUILD index 934054d1ec026..a81f8385a16fc 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/BUILD +++ b/pkg/cloudprovider/providers/vsphere/vclib/BUILD @@ -25,7 +25,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib", deps = [ "//pkg/version:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/vmware/govmomi/find:go_default_library", "//vendor/github.com/vmware/govmomi/object:go_default_library", @@ -38,6 +37,7 @@ go_library( "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library", "//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library", "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/cloudprovider/providers/vsphere/vclib/connection.go b/pkg/cloudprovider/providers/vsphere/vclib/connection.go index be4c5e3f5384c..28f2228d65675 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/connection.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/connection.go @@ -25,11 +25,11 @@ import ( neturl "net/url" "sync" - "github.com/golang/glog" "github.com/vmware/govmomi/session" "github.com/vmware/govmomi/sts" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/soap" + "k8s.io/klog" "k8s.io/kubernetes/pkg/version" ) @@ -62,7 +62,7 @@ func (connection *VSphereConnection) Connect(ctx context.Context) error { if connection.Client == nil { connection.Client, err = connection.NewClient(ctx) if err != nil { - glog.Errorf("Failed to create govmomi client. err: %+v", err) + klog.Errorf("Failed to create govmomi client. 
err: %+v", err) return err } return nil @@ -70,17 +70,17 @@ func (connection *VSphereConnection) Connect(ctx context.Context) error { m := session.NewManager(connection.Client) userSession, err := m.UserSession(ctx) if err != nil { - glog.Errorf("Error while obtaining user session. err: %+v", err) + klog.Errorf("Error while obtaining user session. err: %+v", err) return err } if userSession != nil { return nil } - glog.Warningf("Creating new client session since the existing session is not valid or not authenticated") + klog.Warningf("Creating new client session since the existing session is not valid or not authenticated") connection.Client, err = connection.NewClient(ctx) if err != nil { - glog.Errorf("Failed to create govmomi client. err: %+v", err) + klog.Errorf("Failed to create govmomi client. err: %+v", err) return err } return nil @@ -98,21 +98,21 @@ func (connection *VSphereConnection) login(ctx context.Context, client *vim25.Cl // decide to use LoginByToken if the username value is PEM encoded. b, _ := pem.Decode([]byte(connection.Username)) if b == nil { - glog.V(3).Infof("SessionManager.Login with username '%s'", connection.Username) + klog.V(3).Infof("SessionManager.Login with username '%s'", connection.Username) return m.Login(ctx, neturl.UserPassword(connection.Username, connection.Password)) } - glog.V(3).Infof("SessionManager.LoginByToken with certificate '%s'", connection.Username) + klog.V(3).Infof("SessionManager.LoginByToken with certificate '%s'", connection.Username) cert, err := tls.X509KeyPair([]byte(connection.Username), []byte(connection.Password)) if err != nil { - glog.Errorf("Failed to load X509 key pair. err: %+v", err) + klog.Errorf("Failed to load X509 key pair. err: %+v", err) return err } tokens, err := sts.NewClient(ctx, client) if err != nil { - glog.Errorf("Failed to create STS client. err: %+v", err) + klog.Errorf("Failed to create STS client. err: %+v", err) return err } @@ -122,7 +122,7 @@ func (connection *VSphereConnection) login(ctx context.Context, client *vim25.Cl signer, err := tokens.Issue(ctx, req) if err != nil { - glog.Errorf("Failed to issue SAML token. err: %+v", err) + klog.Errorf("Failed to issue SAML token. err: %+v", err) return err } @@ -144,15 +144,15 @@ func (connection *VSphereConnection) Logout(ctx context.Context) { hasActiveSession, err := m.SessionIsActive(ctx) if err != nil { - glog.Errorf("Logout failed: %s", err) + klog.Errorf("Logout failed: %s", err) return } if !hasActiveSession { - glog.Errorf("No active session, cannot logout") + klog.Errorf("No active session, cannot logout") return } if err := m.Logout(ctx); err != nil { - glog.Errorf("Logout failed: %s", err) + klog.Errorf("Logout failed: %s", err) } } @@ -160,7 +160,7 @@ func (connection *VSphereConnection) Logout(ctx context.Context) { func (connection *VSphereConnection) NewClient(ctx context.Context) (*vim25.Client, error) { url, err := soap.ParseURL(net.JoinHostPort(connection.Hostname, connection.Port)) if err != nil { - glog.Errorf("Failed to parse URL: %s. err: %+v", url, err) + klog.Errorf("Failed to parse URL: %s. err: %+v", url, err) return nil, err } @@ -177,7 +177,7 @@ func (connection *VSphereConnection) NewClient(ctx context.Context) (*vim25.Clie client, err := vim25.NewClient(ctx, sc) if err != nil { - glog.Errorf("Failed to create new client. err: %+v", err) + klog.Errorf("Failed to create new client. 
err: %+v", err) return nil, err } @@ -188,10 +188,10 @@ func (connection *VSphereConnection) NewClient(ctx context.Context) (*vim25.Clie if err != nil { return nil, err } - if glog.V(3) { + if klog.V(3) { s, err := session.NewManager(client).UserSession(ctx) if err == nil { - glog.Infof("New session ID for '%s' = %s", s.UserName, s.Key) + klog.Infof("New session ID for '%s' = %s", s.UserName, s.Key) } } diff --git a/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go b/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go index 9a4eddd074d9f..778e0f6829005 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/datacenter.go @@ -23,12 +23,12 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" ) // Datacenter extends the govmomi Datacenter object @@ -42,7 +42,7 @@ func GetDatacenter(ctx context.Context, connection *VSphereConnection, datacente finder := find.NewFinder(connection.Client, false) datacenter, err := finder.Datacenter(ctx, datacenterPath) if err != nil { - glog.Errorf("Failed to find the datacenter: %s. err: %+v", datacenterPath, err) + klog.Errorf("Failed to find the datacenter: %s. err: %+v", datacenterPath, err) return nil, err } dc := Datacenter{datacenter} @@ -55,7 +55,7 @@ func GetAllDatacenter(ctx context.Context, connection *VSphereConnection) ([]*Da finder := find.NewFinder(connection.Client, false) datacenters, err := finder.DatacenterList(ctx, "*") if err != nil { - glog.Errorf("Failed to find the datacenter. err: %+v", err) + klog.Errorf("Failed to find the datacenter. err: %+v", err) return nil, err } for _, datacenter := range datacenters { @@ -71,11 +71,11 @@ func (dc *Datacenter) GetVMByUUID(ctx context.Context, vmUUID string) (*VirtualM vmUUID = strings.ToLower(strings.TrimSpace(vmUUID)) svm, err := s.FindByUuid(ctx, dc.Datacenter, vmUUID, true, nil) if err != nil { - glog.Errorf("Failed to find VM by UUID. VM UUID: %s, err: %+v", vmUUID, err) + klog.Errorf("Failed to find VM by UUID. VM UUID: %s, err: %+v", vmUUID, err) return nil, err } if svm == nil { - glog.Errorf("Unable to find VM by UUID. VM UUID: %s", vmUUID) + klog.Errorf("Unable to find VM by UUID. VM UUID: %s", vmUUID) return nil, ErrNoVMFound } virtualMachine := VirtualMachine{object.NewVirtualMachine(dc.Client(), svm.Reference()), dc} @@ -89,11 +89,11 @@ func (dc *Datacenter) GetHostByVMUUID(ctx context.Context, vmUUID string) (*type pc := property.DefaultCollector(virtualMachine.Client()) err = pc.RetrieveOne(ctx, virtualMachine.Reference(), []string{"summary.runtime.host"}, &vmMo) if err != nil { - glog.Errorf("Failed to retrive VM runtime host, err: %v", err) + klog.Errorf("Failed to retrive VM runtime host, err: %v", err) return nil, err } host := vmMo.Summary.Runtime.Host - glog.Infof("%s host is %s", virtualMachine.Reference(), host) + klog.Infof("%s host is %s", virtualMachine.Reference(), host) return host, nil } @@ -103,7 +103,7 @@ func (dc *Datacenter) GetVMByPath(ctx context.Context, vmPath string) (*VirtualM finder := getFinder(dc) vm, err := finder.VirtualMachine(ctx, vmPath) if err != nil { - glog.Errorf("Failed to find VM by Path. VM Path: %s, err: %+v", vmPath, err) + klog.Errorf("Failed to find VM by Path. 
VM Path: %s, err: %+v", vmPath, err) return nil, err } virtualMachine := VirtualMachine{vm, dc} @@ -116,7 +116,7 @@ func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*Datasto finder := getFinder(dc) datastores, err := finder.DatastoreList(ctx, "*") if err != nil { - glog.Errorf("Failed to get all the datastores. err: %+v", err) + klog.Errorf("Failed to get all the datastores. err: %+v", err) return nil, err } var dsList []types.ManagedObjectReference @@ -129,7 +129,7 @@ func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*Datasto properties := []string{DatastoreInfoProperty} err = pc.Retrieve(ctx, dsList, properties, &dsMoList) if err != nil { - glog.Errorf("Failed to get Datastore managed objects from datastore objects."+ + klog.Errorf("Failed to get Datastore managed objects from datastore objects."+ " dsObjList: %+v, properties: %+v, err: %v", dsList, properties, err) return nil, err } @@ -141,7 +141,7 @@ func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*Datasto dc}, dsMo.Info.GetDatastoreInfo()} } - glog.V(9).Infof("dsURLInfoMap : %+v", dsURLInfoMap) + klog.V(9).Infof("dsURLInfoMap : %+v", dsURLInfoMap) return dsURLInfoMap, nil } @@ -150,7 +150,7 @@ func (dc *Datacenter) GetDatastoreByPath(ctx context.Context, vmDiskPath string) datastorePathObj := new(object.DatastorePath) isSuccess := datastorePathObj.FromString(vmDiskPath) if !isSuccess { - glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) + klog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) return nil, errors.New("Failed to parse vmDiskPath") } @@ -162,7 +162,7 @@ func (dc *Datacenter) GetDatastoreByName(ctx context.Context, name string) (*Dat finder := getFinder(dc) ds, err := finder.Datastore(ctx, name) if err != nil { - glog.Errorf("Failed while searching for datastore: %s. err: %+v", name, err) + klog.Errorf("Failed while searching for datastore: %s. err: %+v", name, err) return nil, err } datastore := Datastore{ds, dc} @@ -176,7 +176,7 @@ func (dc *Datacenter) GetResourcePool(ctx context.Context, resourcePoolPath stri var err error resourcePool, err = finder.ResourcePoolOrDefault(ctx, resourcePoolPath) if err != nil { - glog.Errorf("Failed to get the ResourcePool for path '%s'. err: %+v", resourcePoolPath, err) + klog.Errorf("Failed to get the ResourcePool for path '%s'. err: %+v", resourcePoolPath, err) return nil, err } return resourcePool, nil @@ -188,7 +188,7 @@ func (dc *Datacenter) GetFolderByPath(ctx context.Context, folderPath string) (* finder := getFinder(dc) vmFolder, err := finder.Folder(ctx, folderPath) if err != nil { - glog.Errorf("Failed to get the folder reference for %s. err: %+v", folderPath, err) + klog.Errorf("Failed to get the folder reference for %s. err: %+v", folderPath, err) return nil, err } folder := Folder{vmFolder, dc} @@ -200,7 +200,7 @@ func (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachi var vmMoList []mo.VirtualMachine var vmRefs []types.ManagedObjectReference if len(vmObjList) < 1 { - glog.Errorf("VirtualMachine Object list is empty") + klog.Errorf("VirtualMachine Object list is empty") return nil, fmt.Errorf("VirtualMachine Object list is empty") } @@ -210,7 +210,7 @@ func (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachi pc := property.DefaultCollector(dc.Client()) err := pc.Retrieve(ctx, vmRefs, properties, &vmMoList) if err != nil { - glog.Errorf("Failed to get VM managed objects from VM objects. 
vmObjList: %+v, properties: %+v, err: %v", vmObjList, properties, err) + klog.Errorf("Failed to get VM managed objects from VM objects. vmObjList: %+v, properties: %+v, err: %v", vmObjList, properties, err) return nil, err } return vmMoList, nil @@ -226,7 +226,7 @@ func (dc *Datacenter) GetVirtualDiskPage83Data(ctx context.Context, diskPath str diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc.Datacenter) if err != nil { - glog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err) + klog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err) return "", err } diskUUID = formatVirtualDiskUUID(diskUUID) @@ -238,7 +238,7 @@ func (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datas var dsMoList []mo.Datastore var dsRefs []types.ManagedObjectReference if len(dsObjList) < 1 { - glog.Errorf("Datastore Object list is empty") + klog.Errorf("Datastore Object list is empty") return nil, fmt.Errorf("Datastore Object list is empty") } @@ -248,7 +248,7 @@ func (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datas pc := property.DefaultCollector(dc.Client()) err := pc.Retrieve(ctx, dsRefs, properties, &dsMoList) if err != nil { - glog.Errorf("Failed to get Datastore managed objects from datastore objects. dsObjList: %+v, properties: %+v, err: %v", dsObjList, properties, err) + klog.Errorf("Failed to get Datastore managed objects from datastore objects. dsObjList: %+v, properties: %+v, err: %v", dsObjList, properties, err) return nil, err } return dsMoList, nil @@ -266,27 +266,27 @@ func (dc *Datacenter) CheckDisksAttached(ctx context.Context, nodeVolumes map[st vm, err := dc.GetVMByPath(ctx, nodeName) if err != nil { if IsNotFound(err) { - glog.Warningf("Node %q does not exist, vSphere CP will assume disks %v are not attached to it.", nodeName, volPaths) + klog.Warningf("Node %q does not exist, vSphere CP will assume disks %v are not attached to it.", nodeName, volPaths) } continue } vmList = append(vmList, vm) } if len(vmList) == 0 { - glog.V(2).Infof("vSphere CP will assume no disks are attached to any node.") + klog.V(2).Infof("vSphere CP will assume no disks are attached to any node.") return attached, nil } vmMoList, err := dc.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name"}) if err != nil { // When there is an error fetching instance information // it is safer to return nil and let volume information not be touched. - glog.Errorf("Failed to get VM Managed object for nodes: %+v. err: +%v", vmList, err) + klog.Errorf("Failed to get VM Managed object for nodes: %+v. 
err: +%v", vmList, err) return nil, err } for _, vmMo := range vmMoList { if vmMo.Config == nil { - glog.Errorf("Config is not available for VM: %q", vmMo.Name) + klog.Errorf("Config is not available for VM: %q", vmMo.Name) continue } for nodeName, volPaths := range nodeVolumes { diff --git a/pkg/cloudprovider/providers/vsphere/vclib/datastore.go b/pkg/cloudprovider/providers/vsphere/vclib/datastore.go index 8d21789f9ce92..a57685bc76cb1 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/datastore.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/datastore.go @@ -20,12 +20,12 @@ import ( "context" "fmt" - "github.com/golang/glog" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" ) // Datastore extends the govmomi Datastore object @@ -59,7 +59,7 @@ func (ds *Datastore) CreateDirectory(ctx context.Context, directoryPath string, } return err } - glog.V(LogLevel).Infof("Created dir with path as %+q", directoryPath) + klog.V(LogLevel).Infof("Created dir with path as %+q", directoryPath) return nil } @@ -69,7 +69,7 @@ func (ds *Datastore) GetType(ctx context.Context) (string, error) { pc := property.DefaultCollector(ds.Client()) err := pc.RetrieveOne(ctx, ds.Datastore.Reference(), []string{"summary"}, &dsMo) if err != nil { - glog.Errorf("Failed to retrieve datastore summary property. err: %v", err) + klog.Errorf("Failed to retrieve datastore summary property. err: %v", err) return "", err } return dsMo.Summary.Type, nil @@ -80,7 +80,7 @@ func (ds *Datastore) GetType(ctx context.Context) (string, error) { func (ds *Datastore) IsCompatibleWithStoragePolicy(ctx context.Context, storagePolicyID string) (bool, string, error) { pbmClient, err := NewPbmClient(ctx, ds.Client()) if err != nil { - glog.Errorf("Failed to get new PbmClient Object. err: %v", err) + klog.Errorf("Failed to get new PbmClient Object. 
err: %v", err) return false, "", err } return pbmClient.IsDatastoreCompatible(ctx, storagePolicyID, ds) diff --git a/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD b/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD index 377202f4cd6b2..4273c972f2c46 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD +++ b/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/BUILD @@ -15,9 +15,9 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers", deps = [ "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/vmware/govmomi/object:go_default_library", "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go b/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go index a643241bdb6f5..9a1c4715cf25e 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vdm.go @@ -20,9 +20,9 @@ import ( "context" "time" - "github.com/golang/glog" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" ) @@ -55,13 +55,13 @@ func (diskManager virtualDiskManager) Create(ctx context.Context, datastore *vcl task, err := vdm.CreateVirtualDisk(ctx, diskManager.diskPath, datastore.Datacenter.Datacenter, vmDiskSpec) if err != nil { vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err) - glog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err) + klog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err) return "", err } taskInfo, err := task.WaitForResult(ctx, nil) vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err) if err != nil { - glog.Errorf("Failed to complete virtual disk creation: %s. err: %+v", diskManager.diskPath, err) + klog.Errorf("Failed to complete virtual disk creation: %s. err: %+v", diskManager.diskPath, err) return "", err } canonicalDiskPath = taskInfo.Result.(string) @@ -77,14 +77,14 @@ func (diskManager virtualDiskManager) Delete(ctx context.Context, datacenter *vc // Delete virtual disk task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter.Datacenter) if err != nil { - glog.Errorf("Failed to delete virtual disk. err: %v", err) + klog.Errorf("Failed to delete virtual disk. err: %v", err) vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err) return err } err = task.Wait(ctx) vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err) if err != nil { - glog.Errorf("Failed to delete virtual disk. err: %v", err) + klog.Errorf("Failed to delete virtual disk. 
err: %v", err) return err } return nil diff --git a/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go b/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go index fe905cc79f726..6d7f7f5fe9054 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/virtualdisk.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" ) @@ -65,7 +65,7 @@ func (virtualDisk *VirtualDisk) Create(ctx context.Context, datastore *vclib.Dat virtualDisk.VolumeOptions.DiskFormat = vclib.ThinDiskType } if !virtualDisk.VolumeOptions.VerifyVolumeOptions() { - glog.Error("VolumeOptions verification failed. volumeOptions: ", virtualDisk.VolumeOptions) + klog.Error("VolumeOptions verification failed. volumeOptions: ", virtualDisk.VolumeOptions) return "", vclib.ErrInvalidVolumeOptions } if virtualDisk.VolumeOptions.StoragePolicyID != "" && virtualDisk.VolumeOptions.StoragePolicyName != "" { diff --git a/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go b/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go index 12fba7f70245d..637ac514bfd65 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers/vmdm.go @@ -22,9 +22,9 @@ import ( "hash/fnv" "strings" - "github.com/golang/glog" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" ) @@ -43,26 +43,26 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto } pbmClient, err := vclib.NewPbmClient(ctx, datastore.Client()) if err != nil { - glog.Errorf("Error occurred while creating new pbmClient, err: %+v", err) + klog.Errorf("Error occurred while creating new pbmClient, err: %+v", err) return "", err } if vmdisk.volumeOptions.StoragePolicyID == "" && vmdisk.volumeOptions.StoragePolicyName != "" { vmdisk.volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, vmdisk.volumeOptions.StoragePolicyName) if err != nil { - glog.Errorf("Error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err) + klog.Errorf("Error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err) return "", err } } if vmdisk.volumeOptions.StoragePolicyID != "" { compatible, faultMessage, err := datastore.IsCompatibleWithStoragePolicy(ctx, vmdisk.volumeOptions.StoragePolicyID) if err != nil { - glog.Errorf("Error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err) + klog.Errorf("Error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err) return "", err } if !compatible { - glog.Errorf("Datastore: %s is not compatible with Policy: %s", datastore.Name(), vmdisk.volumeOptions.StoragePolicyName) + klog.Errorf("Datastore: %s is not compatible with Policy: %s", datastore.Name(), vmdisk.volumeOptions.StoragePolicyName) return "", fmt.Errorf("User specified datastore is not compatible with the storagePolicy: %q. 
Failed with faults: %+q", vmdisk.volumeOptions.StoragePolicyName, faultMessage) } } @@ -79,7 +79,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto return "", err } if dsType != vclib.VSANDatastoreType { - glog.Errorf("The specified datastore: %q is not a VSAN datastore", datastore.Name()) + klog.Errorf("The specified datastore: %q is not a VSAN datastore", datastore.Name()) return "", fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+ " The policy parameters will work only with VSAN Datastore."+ " So, please specify a valid VSAN datastore in Storage class definition.", datastore.Name()) @@ -90,7 +90,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto ObjectData: vmdisk.volumeOptions.VSANStorageProfileData, } } else { - glog.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set") + klog.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set") return "", fmt.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set") } var dummyVM *vclib.VirtualMachine @@ -102,10 +102,10 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto dummyVM, err = datastore.Datacenter.GetVMByPath(ctx, vmdisk.vmOptions.VMFolder.InventoryPath+"/"+dummyVMFullName) if err != nil { // Create a dummy VM - glog.V(1).Infof("Creating Dummy VM: %q", dummyVMFullName) + klog.V(1).Infof("Creating Dummy VM: %q", dummyVMFullName) dummyVM, err = vmdisk.createDummyVM(ctx, datastore.Datacenter, dummyVMFullName) if err != nil { - glog.Errorf("Failed to create Dummy VM. err: %v", err) + klog.Errorf("Failed to create Dummy VM. err: %v", err) return "", err } } @@ -114,7 +114,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto virtualMachineConfigSpec := types.VirtualMachineConfigSpec{} disk, _, err := dummyVM.CreateDiskSpec(ctx, vmdisk.diskPath, datastore, vmdisk.volumeOptions) if err != nil { - glog.Errorf("Failed to create Disk Spec. err: %v", err) + klog.Errorf("Failed to create Disk Spec. err: %v", err) return "", err } deviceConfigSpec := &types.VirtualDeviceConfigSpec{ @@ -128,7 +128,7 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto fileAlreadyExist := false task, err := dummyVM.Reconfigure(ctx, virtualMachineConfigSpec) if err != nil { - glog.Errorf("Failed to reconfig. err: %v", err) + klog.Errorf("Failed to reconfig. err: %v", err) return "", err } err = task.Wait(ctx) @@ -136,9 +136,9 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto fileAlreadyExist = isAlreadyExists(vmdisk.diskPath, err) if fileAlreadyExist { //Skip error and continue to detach the disk as the disk was already created on the datastore. 
- glog.V(vclib.LogLevel).Infof("File: %v already exists", vmdisk.diskPath) + klog.V(vclib.LogLevel).Infof("File: %v already exists", vmdisk.diskPath) } else { - glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err) + klog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err) return "", err } } @@ -147,16 +147,16 @@ func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datasto if err != nil { if vclib.DiskNotFoundErrMsg == err.Error() && fileAlreadyExist { // Skip error if disk was already detached from the dummy VM but still present on the datastore. - glog.V(vclib.LogLevel).Infof("File: %v is already detached", vmdisk.diskPath) + klog.V(vclib.LogLevel).Infof("File: %v is already detached", vmdisk.diskPath) } else { - glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err) + klog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err) return "", err } } // Delete the dummy VM err = dummyVM.DeleteVM(ctx) if err != nil { - glog.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err) + klog.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err) } return vmdisk.diskPath, nil } @@ -195,13 +195,13 @@ func (vmdisk vmDiskManager) createDummyVM(ctx context.Context, datacenter *vclib task, err := vmdisk.vmOptions.VMFolder.CreateVM(ctx, virtualMachineConfigSpec, vmdisk.vmOptions.VMResourcePool, nil) if err != nil { - glog.Errorf("Failed to create VM. err: %+v", err) + klog.Errorf("Failed to create VM. err: %+v", err) return nil, err } dummyVMTaskInfo, err := task.WaitForResult(ctx, nil) if err != nil { - glog.Errorf("Error occurred while waiting for create VM task result. err: %+v", err) + klog.Errorf("Error occurred while waiting for create VM task result. 
err: %+v", err) return nil, err } @@ -214,11 +214,11 @@ func (vmdisk vmDiskManager) createDummyVM(ctx context.Context, datacenter *vclib func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datacenter) error { vmList, err := folder.GetVirtualMachines(ctx) if err != nil { - glog.V(4).Infof("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err) + klog.V(4).Infof("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err) return err } if vmList == nil || len(vmList) == 0 { - glog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath) + klog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath) return fmt.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath) } var dummyVMList []*vclib.VirtualMachine @@ -226,7 +226,7 @@ func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datace for _, vm := range vmList { vmName, err := vm.ObjectName(ctx) if err != nil { - glog.V(4).Infof("Unable to get name from VM with err: %+v", err) + klog.V(4).Infof("Unable to get name from VM with err: %+v", err) continue } if strings.HasPrefix(vmName, vclib.DummyVMPrefixName) { @@ -237,7 +237,7 @@ func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder, dc *vclib.Datace for _, vm := range dummyVMList { err = vm.DeleteVM(ctx) if err != nil { - glog.V(4).Infof("Unable to delete dummy VM with err: %+v", err) + klog.V(4).Infof("Unable to delete dummy VM with err: %+v", err) continue } } diff --git a/pkg/cloudprovider/providers/vsphere/vclib/folder.go b/pkg/cloudprovider/providers/vsphere/vclib/folder.go index 4e66de88dbb14..1e4f8e4e88891 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/folder.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/folder.go @@ -19,8 +19,8 @@ package vclib import ( "context" - "github.com/golang/glog" "github.com/vmware/govmomi/object" + "k8s.io/klog" ) // Folder extends the govmomi Folder object @@ -33,7 +33,7 @@ type Folder struct { func (folder *Folder) GetVirtualMachines(ctx context.Context) ([]*VirtualMachine, error) { vmFolders, err := folder.Children(ctx) if err != nil { - glog.Errorf("Failed to get children from Folder: %s. err: %+v", folder.InventoryPath, err) + klog.Errorf("Failed to get children from Folder: %s. err: %+v", folder.InventoryPath, err) return nil, err } var vmObjList []*VirtualMachine diff --git a/pkg/cloudprovider/providers/vsphere/vclib/pbm.go b/pkg/cloudprovider/providers/vsphere/vclib/pbm.go index 0a494f7d7313d..8070f20042398 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/pbm.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/pbm.go @@ -20,8 +20,8 @@ import ( "context" "fmt" - "github.com/golang/glog" "github.com/vmware/govmomi/pbm" + "k8s.io/klog" pbmtypes "github.com/vmware/govmomi/pbm/types" "github.com/vmware/govmomi/vim25" @@ -36,7 +36,7 @@ type PbmClient struct { func NewPbmClient(ctx context.Context, client *vim25.Client) (*PbmClient, error) { pbmClient, err := pbm.NewClient(ctx, client) if err != nil { - glog.Errorf("Failed to create new Pbm Client. err: %+v", err) + klog.Errorf("Failed to create new Pbm Client. 
err: %+v", err) return nil, err } return &PbmClient{pbmClient}, nil @@ -60,7 +60,7 @@ func (pbmClient *PbmClient) IsDatastoreCompatible(ctx context.Context, storagePo } compatibilityResult, err := pbmClient.CheckRequirements(ctx, hubs, nil, req) if err != nil { - glog.Errorf("Error occurred for CheckRequirements call. err %+v", err) + klog.Errorf("Error occurred for CheckRequirements call. err %+v", err) return false, "", err } if compatibilityResult != nil && len(compatibilityResult) > 0 { @@ -70,7 +70,7 @@ func (pbmClient *PbmClient) IsDatastoreCompatible(ctx context.Context, storagePo } dsName, err := datastore.ObjectName(ctx) if err != nil { - glog.Errorf("Failed to get datastore ObjectName") + klog.Errorf("Failed to get datastore ObjectName") return false, "", err } if compatibilityResult[0].Error[0].LocalizedMessage == "" { @@ -92,7 +92,7 @@ func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, dc *Dat ) compatibilityResult, err := pbmClient.GetPlacementCompatibilityResult(ctx, storagePolicyID, datastores) if err != nil { - glog.Errorf("Error occurred while retrieving placement compatibility result for datastores: %+v with storagePolicyID: %s. err: %+v", datastores, storagePolicyID, err) + klog.Errorf("Error occurred while retrieving placement compatibility result for datastores: %+v with storagePolicyID: %s. err: %+v", datastores, storagePolicyID, err) return nil, "", err } compatibleHubs := compatibilityResult.CompatibleDatastores() @@ -114,7 +114,7 @@ func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, dc *Dat } // Return an error if there are no compatible datastores. if len(compatibleHubs) < 1 { - glog.Errorf("No compatible datastores found that satisfy the storage policy requirements: %s", storagePolicyID) + klog.Errorf("No compatible datastores found that satisfy the storage policy requirements: %s", storagePolicyID) return nil, localizedMessagesForNotCompatibleDatastores, fmt.Errorf("No compatible datastores found that satisfy the storage policy requirements") } return compatibleDatastoreList, localizedMessagesForNotCompatibleDatastores, nil @@ -138,7 +138,7 @@ func (pbmClient *PbmClient) GetPlacementCompatibilityResult(ctx context.Context, } res, err := pbmClient.CheckRequirements(ctx, hubs, nil, req) if err != nil { - glog.Errorf("Error occurred for CheckRequirements call. err: %+v", err) + klog.Errorf("Error occurred for CheckRequirements call. err: %+v", err) return nil, err } return res, nil @@ -162,7 +162,7 @@ func getDsMorNameMap(ctx context.Context, datastores []*DatastoreInfo) map[strin if err == nil { dsMorNameMap[ds.Reference().Value] = dsObjectName } else { - glog.Errorf("Error occurred while getting datastore object name. err: %+v", err) + klog.Errorf("Error occurred while getting datastore object name. 
err: %+v", err) } } return dsMorNameMap diff --git a/pkg/cloudprovider/providers/vsphere/vclib/utils.go b/pkg/cloudprovider/providers/vsphere/vclib/utils.go index 36ea8d6c6ef1a..161e0ebfe726e 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/utils.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/utils.go @@ -22,12 +22,12 @@ import ( "regexp" "strings" - "github.com/golang/glog" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" ) // IsNotFound return true if err is NotFoundError or DefaultNotFoundError @@ -140,7 +140,7 @@ func GetPathFromVMDiskPath(vmDiskPath string) string { datastorePathObj := new(object.DatastorePath) isSuccess := datastorePathObj.FromString(vmDiskPath) if !isSuccess { - glog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) + klog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) return "" } return datastorePathObj.Path @@ -151,7 +151,7 @@ func GetDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath datastorePathObj := new(object.DatastorePath) isSuccess := datastorePathObj.FromString(vmDiskPath) if !isSuccess { - glog.Errorf("Failed to parse volPath: %s", vmDiskPath) + klog.Errorf("Failed to parse volPath: %s", vmDiskPath) return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath) } return datastorePathObj, nil diff --git a/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go b/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go index 01654b3d1ef7f..f6e28cb1036cb 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/virtualmachine.go @@ -22,12 +22,12 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" ) // VirtualMachine extends the govmomi VirtualMachine object @@ -52,7 +52,7 @@ func (vm *VirtualMachine) IsDiskAttached(ctx context.Context, diskPath string) ( func (vm *VirtualMachine) DeleteVM(ctx context.Context) error { destroyTask, err := vm.Destroy(ctx) if err != nil { - glog.Errorf("Failed to delete the VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to delete the VM: %q. err: %+v", vm.InventoryPath, err) return err } return destroyTask.Wait(ctx) @@ -69,7 +69,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath) attached, err := vm.IsDiskAttached(ctx, vmDiskPath) if err != nil { - glog.Errorf("Error occurred while checking if disk is attached on VM: %q. vmDiskPath: %q, err: %+v", vm.InventoryPath, vmDiskPath, err) + klog.Errorf("Error occurred while checking if disk is attached on VM: %q. vmDiskPath: %q, err: %+v", vm.InventoryPath, vmDiskPath, err) return "", err } // If disk is already attached, return the disk UUID @@ -81,31 +81,31 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol if volumeOptions.StoragePolicyName != "" { pbmClient, err := NewPbmClient(ctx, vm.Client()) if err != nil { - glog.Errorf("Error occurred while creating new pbmClient. err: %+v", err) + klog.Errorf("Error occurred while creating new pbmClient. 
err: %+v", err) return "", err } volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, volumeOptions.StoragePolicyName) if err != nil { - glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", volumeOptions.StoragePolicyName, err) + klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", volumeOptions.StoragePolicyName, err) return "", err } } dsObj, err := vm.Datacenter.GetDatastoreByPath(ctx, vmDiskPathCopy) if err != nil { - glog.Errorf("Failed to get datastore from vmDiskPath: %q. err: %+v", vmDiskPath, err) + klog.Errorf("Failed to get datastore from vmDiskPath: %q. err: %+v", vmDiskPath, err) return "", err } // If disk is not attached, create a disk spec for disk to be attached to the VM. disk, newSCSIController, err := vm.CreateDiskSpec(ctx, vmDiskPath, dsObj, volumeOptions) if err != nil { - glog.Errorf("Error occurred while creating disk spec. err: %+v", err) + klog.Errorf("Error occurred while creating disk spec. err: %+v", err) return "", err } vmDevices, err := vm.Device(ctx) if err != nil { - glog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err) return "", err } virtualMachineConfigSpec := types.VirtualMachineConfigSpec{} @@ -125,7 +125,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol task, err := vm.Reconfigure(ctx, virtualMachineConfigSpec) if err != nil { RecordvSphereMetric(APIAttachVolume, requestTime, err) - glog.Errorf("Failed to attach the disk with storagePolicy: %q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err) + klog.Errorf("Failed to attach the disk with storagePolicy: %q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err) if newSCSIController != nil { vm.deleteController(ctx, newSCSIController, vmDevices) } @@ -134,7 +134,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol err = task.Wait(ctx) RecordvSphereMetric(APIAttachVolume, requestTime, err) if err != nil { - glog.Errorf("Failed to attach the disk with storagePolicy: %+q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err) + klog.Errorf("Failed to attach the disk with storagePolicy: %+q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err) if newSCSIController != nil { vm.deleteController(ctx, newSCSIController, vmDevices) } @@ -144,7 +144,7 @@ func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, vol // Once disk is attached, get the disk UUID. diskUUID, err := vm.Datacenter.GetVirtualDiskPage83Data(ctx, vmDiskPath) if err != nil { - glog.Errorf("Error occurred while getting Disk Info from VM: %q. err: %v", vm.InventoryPath, err) + klog.Errorf("Error occurred while getting Disk Info from VM: %q. 
err: %v", vm.InventoryPath, err) vm.DetachDisk(ctx, vmDiskPath) if newSCSIController != nil { vm.deleteController(ctx, newSCSIController, vmDevices) @@ -159,11 +159,11 @@ func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) err vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath) device, err := vm.getVirtualDeviceByPath(ctx, vmDiskPath) if err != nil { - glog.Errorf("Disk ID not found for VM: %q with diskPath: %q", vm.InventoryPath, vmDiskPath) + klog.Errorf("Disk ID not found for VM: %q with diskPath: %q", vm.InventoryPath, vmDiskPath) return err } if device == nil { - glog.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath) + klog.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath) return fmt.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath) } // Detach disk from VM @@ -171,7 +171,7 @@ func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) err err = vm.RemoveDevice(ctx, true, device) RecordvSphereMetric(APIDetachVolume, requestTime, err) if err != nil { - glog.Errorf("Error occurred while removing disk device for VM: %q. err: %v", vm.InventoryPath, err) + klog.Errorf("Error occurred while removing disk device for VM: %q. err: %v", vm.InventoryPath, err) return err } return nil @@ -181,7 +181,7 @@ func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) err func (vm *VirtualMachine) GetResourcePool(ctx context.Context) (*object.ResourcePool, error) { vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"resourcePool"}) if err != nil { - glog.Errorf("Failed to get resource pool from VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to get resource pool from VM: %q. err: %+v", vm.InventoryPath, err) return nil, err } return object.NewResourcePool(vm.Client(), vmMoList[0].ResourcePool.Reference()), nil @@ -192,7 +192,7 @@ func (vm *VirtualMachine) GetResourcePool(ctx context.Context) (*object.Resource func (vm *VirtualMachine) IsActive(ctx context.Context) (bool, error) { vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"summary"}) if err != nil { - glog.Errorf("Failed to get VM Managed object with property summary. err: +%v", err) + klog.Errorf("Failed to get VM Managed object with property summary. err: +%v", err) return false, err } if vmMoList[0].Summary.Runtime.PowerState == ActivePowerState { @@ -206,14 +206,14 @@ func (vm *VirtualMachine) IsActive(ctx context.Context) (bool, error) { func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*DatastoreInfo, error) { host, err := vm.HostSystem(ctx) if err != nil { - glog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err) return nil, err } var hostSystemMo mo.HostSystem s := object.NewSearchIndex(vm.Client()) err = s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo) if err != nil { - glog.Errorf("Failed to retrieve datastores for host: %+v. err: %+v", host, err) + klog.Errorf("Failed to retrieve datastores for host: %+v. 
err: %+v", host, err) return nil, err } var dsRefList []types.ManagedObjectReference @@ -226,11 +226,11 @@ func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*Da properties := []string{DatastoreInfoProperty} err = pc.Retrieve(ctx, dsRefList, properties, &dsMoList) if err != nil { - glog.Errorf("Failed to get Datastore managed objects from datastore objects."+ + klog.Errorf("Failed to get Datastore managed objects from datastore objects."+ " dsObjList: %+v, properties: %+v, err: %v", dsRefList, properties, err) return nil, err } - glog.V(9).Infof("Result dsMoList: %+v", dsMoList) + klog.V(9).Infof("Result dsMoList: %+v", dsMoList) var dsObjList []*DatastoreInfo for _, dsMo := range dsMoList { dsObjList = append(dsObjList, @@ -247,7 +247,7 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d var newSCSIController types.BaseVirtualDevice vmDevices, err := vm.Device(ctx) if err != nil { - glog.Errorf("Failed to retrieve VM devices. err: %+v", err) + klog.Errorf("Failed to retrieve VM devices. err: %+v", err) return nil, nil, err } // find SCSI controller of particular type from VM devices @@ -256,20 +256,20 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d if scsiController == nil { newSCSIController, err = vm.createAndAttachSCSIController(ctx, volumeOptions.SCSIControllerType) if err != nil { - glog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.InventoryPath, err) return nil, nil, err } // Get VM device list vmDevices, err := vm.Device(ctx) if err != nil { - glog.Errorf("Failed to retrieve VM devices. err: %v", err) + klog.Errorf("Failed to retrieve VM devices. err: %v", err) return nil, nil, err } // verify scsi controller in virtual machine scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, volumeOptions.SCSIControllerType) scsiController = getAvailableSCSIController(scsiControllersOfRequiredType) if scsiController == nil { - glog.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType) + klog.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType) // attempt clean up of scsi controller vm.deleteController(ctx, newSCSIController, vmDevices) return nil, nil, fmt.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType) @@ -278,7 +278,7 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d disk := vmDevices.CreateDisk(scsiController, dsObj.Reference(), diskPath) unitNumber, err := getNextUnitNumber(vmDevices, scsiController) if err != nil { - glog.Errorf("Cannot attach disk to VM, unitNumber limit reached - %+v.", err) + klog.Errorf("Cannot attach disk to VM, unitNumber limit reached - %+v.", err) return nil, nil, err } *disk.UnitNumber = unitNumber @@ -307,7 +307,7 @@ func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, d func (vm *VirtualMachine) GetVirtualDiskPath(ctx context.Context) (string, error) { vmDevices, err := vm.Device(ctx) if err != nil { - glog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to get the devices for VM: %q. 
err: %+v", vm.InventoryPath, err) return "", err } // filter vm devices to retrieve device for the given vmdk file identified by disk path @@ -327,18 +327,18 @@ func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, dis // Get VM device list vmDevices, err := vm.Device(ctx) if err != nil { - glog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err) return nil, err } allSCSIControllers := getSCSIControllers(vmDevices) if len(allSCSIControllers) >= SCSIControllerLimit { // we reached the maximum number of controllers we can attach - glog.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit) + klog.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit) return nil, fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit) } newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType) if err != nil { - glog.Errorf("Failed to create new SCSI controller on VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to create new SCSI controller on VM: %q. err: %+v", vm.InventoryPath, err) return nil, err } configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController() @@ -349,7 +349,7 @@ func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, dis // add the scsi controller to virtual machine err = vm.AddDevice(context.TODO(), newSCSIController) if err != nil { - glog.V(LogLevel).Infof("Cannot add SCSI controller to VM: %q. err: %+v", vm.InventoryPath, err) + klog.V(LogLevel).Infof("Cannot add SCSI controller to VM: %q. err: %+v", vm.InventoryPath, err) // attempt clean up of scsi controller vm.deleteController(ctx, newSCSIController, vmDevices) return nil, err @@ -361,7 +361,7 @@ func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, dis func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath string) (types.BaseVirtualDevice, error) { vmDevices, err := vm.Device(ctx) if err != nil { - glog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err) return nil, err } @@ -371,7 +371,7 @@ func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath s virtualDevice := device.GetVirtualDevice() if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { if matchVirtualDiskAndVolPath(backing.FileName, diskPath) { - glog.V(LogLevel).Infof("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath) + klog.V(LogLevel).Infof("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath) return device, nil } } @@ -396,7 +396,7 @@ func (vm *VirtualMachine) deleteController(ctx context.Context, controllerDevice device := controllerDeviceList[len(controllerDeviceList)-1] err := vm.RemoveDevice(ctx, true, device) if err != nil { - glog.Errorf("Error occurred while removing device on VM: %q. err: %+v", vm.InventoryPath, err) + klog.Errorf("Error occurred while removing device on VM: %q. 
err: %+v", vm.InventoryPath, err) return err } return nil diff --git a/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go b/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go index eceba70496cc5..989ed4468158c 100644 --- a/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go +++ b/pkg/cloudprovider/providers/vsphere/vclib/volumeoptions.go @@ -19,7 +19,7 @@ package vclib import ( "strings" - "github.com/golang/glog" + "k8s.io/klog" ) // VolumeOptions specifies various options for a volume. @@ -59,7 +59,7 @@ func DiskformatValidOptions() string { // CheckDiskFormatSupported checks if the diskFormat is valid func CheckDiskFormatSupported(diskFormat string) bool { if DiskFormatValidType[diskFormat] == "" { - glog.Errorf("Not a valid Disk Format. Valid options are %+q", DiskformatValidOptions()) + klog.Errorf("Not a valid Disk Format. Valid options are %+q", DiskformatValidOptions()) return false } return true @@ -82,7 +82,7 @@ func CheckControllerSupported(ctrlType string) bool { return true } } - glog.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions()) + klog.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions()) return false } diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 7e22407ed5b63..26180e6aff929 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -33,7 +33,6 @@ import ( "gopkg.in/gcfg.v1" - "github.com/golang/glog" "github.com/vmware/govmomi/vapi/rest" "github.com/vmware/govmomi/vapi/tags" "github.com/vmware/govmomi/vim25/mo" @@ -42,6 +41,7 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/tools/cache" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers" @@ -264,13 +264,13 @@ func (vs *VSphere) SetInformers(informerFactory informers.SharedInformerFactory) // Only on controller node it is required to register listeners. // Register callbacks for node updates - glog.V(4).Infof("Setting up node informers for vSphere Cloud Provider") + klog.V(4).Infof("Setting up node informers for vSphere Cloud Provider") nodeInformer := informerFactory.Core().V1().Nodes().Informer() nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: vs.NodeAdded, DeleteFunc: vs.NodeDeleted, }) - glog.V(4).Infof("Node informers in vSphere cloud provider initialized") + klog.V(4).Infof("Node informers in vSphere cloud provider initialized") } @@ -280,12 +280,12 @@ func newWorkerNode() (*VSphere, error) { vs := VSphere{} vs.hostName, err = os.Hostname() if err != nil { - glog.Errorf("Failed to get hostname. err: %+v", err) + klog.Errorf("Failed to get hostname. err: %+v", err) return nil, err } vs.vmUUID, err = GetVMUUID() if err != nil { - glog.Errorf("Failed to get uuid. err: %+v", err) + klog.Errorf("Failed to get uuid. err: %+v", err) return nil, err } return &vs, nil @@ -296,18 +296,18 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance isSecretInfoProvided := true if cfg.Global.SecretName == "" || cfg.Global.SecretNamespace == "" { - glog.Warningf("SecretName and/or SecretNamespace is not provided. " + + klog.Warningf("SecretName and/or SecretNamespace is not provided. 
" + "VCP will use username and password from config file") isSecretInfoProvided = false } if isSecretInfoProvided { if cfg.Global.User != "" { - glog.Warning("Global.User and Secret info provided. VCP will use secret to get credentials") + klog.Warning("Global.User and Secret info provided. VCP will use secret to get credentials") cfg.Global.User = "" } if cfg.Global.Password != "" { - glog.Warning("Global.Password and Secret info provided. VCP will use secret to get credentials") + klog.Warning("Global.Password and Secret info provided. VCP will use secret to get credentials") cfg.Global.Password = "" } } @@ -315,28 +315,28 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance // Check if the vsphere.conf is in old format. In this // format the cfg.VirtualCenter will be nil or empty. if cfg.VirtualCenter == nil || len(cfg.VirtualCenter) == 0 { - glog.V(4).Infof("Config is not per virtual center and is in old format.") + klog.V(4).Infof("Config is not per virtual center and is in old format.") if !isSecretInfoProvided { if cfg.Global.User == "" { - glog.Error("Global.User is empty!") + klog.Error("Global.User is empty!") return nil, ErrUsernameMissing } if cfg.Global.Password == "" { - glog.Error("Global.Password is empty!") + klog.Error("Global.Password is empty!") return nil, ErrPasswordMissing } } if cfg.Global.WorkingDir == "" { - glog.Error("Global.WorkingDir is empty!") + klog.Error("Global.WorkingDir is empty!") return nil, errors.New("Global.WorkingDir is empty!") } if cfg.Global.VCenterIP == "" { - glog.Error("Global.VCenterIP is empty!") + klog.Error("Global.VCenterIP is empty!") return nil, errors.New("Global.VCenterIP is empty!") } if cfg.Global.Datacenter == "" { - glog.Error("Global.Datacenter is empty!") + klog.Error("Global.Datacenter is empty!") return nil, errors.New("Global.Datacenter is empty!") } cfg.Workspace.VCenterIP = cfg.Global.VCenterIP @@ -375,14 +375,14 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance if cfg.Workspace.VCenterIP == "" || cfg.Workspace.Folder == "" || cfg.Workspace.Datacenter == "" { msg := fmt.Sprintf("All fields in workspace are mandatory."+ " vsphere.conf does not have the workspace specified correctly. cfg.Workspace: %+v", cfg.Workspace) - glog.Error(msg) + klog.Error(msg) return nil, errors.New(msg) } for vcServer, vcConfig := range cfg.VirtualCenter { - glog.V(4).Infof("Initializing vc server %s", vcServer) + klog.V(4).Infof("Initializing vc server %s", vcServer) if vcServer == "" { - glog.Error("vsphere.conf does not have the VirtualCenter IP address specified") + klog.Error("vsphere.conf does not have the VirtualCenter IP address specified") return nil, errors.New("vsphere.conf does not have the VirtualCenter IP address specified") } @@ -390,24 +390,24 @@ func populateVsphereInstanceMap(cfg *VSphereConfig) (map[string]*VSphereInstance if vcConfig.User == "" { vcConfig.User = cfg.Global.User if vcConfig.User == "" { - glog.Errorf("vcConfig.User is empty for vc %s!", vcServer) + klog.Errorf("vcConfig.User is empty for vc %s!", vcServer) return nil, ErrUsernameMissing } } if vcConfig.Password == "" { vcConfig.Password = cfg.Global.Password if vcConfig.Password == "" { - glog.Errorf("vcConfig.Password is empty for vc %s!", vcServer) + klog.Errorf("vcConfig.Password is empty for vc %s!", vcServer) return nil, ErrPasswordMissing } } } else { if vcConfig.User != "" { - glog.Warningf("vcConfig.User for server %s and Secret info provided. 
VCP will use secret to get credentials", vcServer) + klog.Warningf("vcConfig.User for server %s and Secret info provided. VCP will use secret to get credentials", vcServer) vcConfig.User = "" } if vcConfig.Password != "" { - glog.Warningf("vcConfig.Password for server %s and Secret info provided. VCP will use secret to get credentials", vcServer) + klog.Warningf("vcConfig.Password for server %s and Secret info provided. VCP will use secret to get credentials", vcServer) vcConfig.Password = "" } } @@ -461,7 +461,7 @@ func newControllerNode(cfg VSphereConfig) (*VSphere, error) { } vs.hostName, err = os.Hostname() if err != nil { - glog.Errorf("Failed to get hostname. err: %+v", err) + klog.Errorf("Failed to get hostname. err: %+v", err) return nil, err } if cfg.Global.VMUUID != "" { @@ -469,7 +469,7 @@ func newControllerNode(cfg VSphereConfig) (*VSphere, error) { } else { vs.vmUUID, err = getVMUUID() if err != nil { - glog.Errorf("Failed to get uuid. err: %+v", err) + klog.Errorf("Failed to get uuid. err: %+v", err) return nil, err } } @@ -487,7 +487,7 @@ func buildVSphereFromConfig(cfg VSphereConfig) (*VSphere, error) { if cfg.Disk.SCSIControllerType == "" { cfg.Disk.SCSIControllerType = vclib.PVSCSIControllerType } else if !vclib.CheckControllerSupported(cfg.Disk.SCSIControllerType) { - glog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType) + klog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType) return nil, errors.New("Controller type not supported. Please configure 'lsilogic-sas' OR 'pvscsi'") } if cfg.Global.WorkingDir != "" { @@ -532,13 +532,13 @@ func getLocalIP() ([]v1.NodeAddress, error) { addrs := []v1.NodeAddress{} ifaces, err := net.Interfaces() if err != nil { - glog.Errorf("net.Interfaces() failed for NodeAddresses - %v", err) + klog.Errorf("net.Interfaces() failed for NodeAddresses - %v", err) return nil, err } for _, i := range ifaces { localAddrs, err := i.Addrs() if err != nil { - glog.Warningf("Failed to extract addresses for NodeAddresses - %v", err) + klog.Warningf("Failed to extract addresses for NodeAddresses - %v", err) } else { for _, addr := range localAddrs { if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { @@ -558,7 +558,7 @@ func getLocalIP() ([]v1.NodeAddress, error) { }, ) } - glog.V(4).Infof("Find local IP address %v and set type to %v", ipnet.IP.String(), addressType) + klog.V(4).Infof("Find local IP address %v and set type to %v", ipnet.IP.String(), addressType) } } } @@ -570,7 +570,7 @@ func getLocalIP() ([]v1.NodeAddress, error) { func (vs *VSphere) getVSphereInstance(nodeName k8stypes.NodeName) (*VSphereInstance, error) { vsphereIns, err := vs.nodeManager.GetVSphereInstance(nodeName) if err != nil { - glog.Errorf("Cannot find node %q in cache. Node not found!!!", nodeName) + klog.Errorf("Cannot find node %q in cache. Node not found!!!", nodeName) return nil, err } return &vsphereIns, nil @@ -579,13 +579,13 @@ func (vs *VSphere) getVSphereInstance(nodeName k8stypes.NodeName) (*VSphereInsta func (vs *VSphere) getVSphereInstanceForServer(vcServer string, ctx context.Context) (*VSphereInstance, error) { vsphereIns, ok := vs.vsphereInstanceMap[vcServer] if !ok { - glog.Errorf("cannot find vcServer %q in cache. VC not found!!!", vcServer) + klog.Errorf("cannot find vcServer %q in cache. 
VC not found!!!", vcServer) return nil, errors.New(fmt.Sprintf("Cannot find node %q in vsphere configuration map", vcServer)) } // Ensure client is logged in and session is valid err := vs.nodeManager.vcConnect(ctx, vsphereIns) if err != nil { - glog.Errorf("failed connecting to vcServer %q with error %+v", vcServer, err) + klog.Errorf("failed connecting to vcServer %q with error %+v", vcServer, err) return nil, err } @@ -635,12 +635,12 @@ func (vs *VSphere) NodeAddresses(ctx context.Context, nodeName k8stypes.NodeName vm, err := vs.getVMFromNodeName(ctx, nodeName) if err != nil { - glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) + klog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) return nil, err } vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*vclib.VirtualMachine{vm}, []string{"guest.net"}) if err != nil { - glog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. err: +%v", convertToString(nodeName), err) + klog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. err: +%v", convertToString(nodeName), err) return nil, err } // retrieve VM's ip(s) @@ -694,7 +694,7 @@ func convertToK8sType(vmName string) k8stypes.NodeName { func (vs *VSphere) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { nodeName, err := vs.GetNodeNameFromProviderID(providerID) if err != nil { - glog.Errorf("Error while getting nodename for providerID %s", providerID) + klog.Errorf("Error while getting nodename for providerID %s", providerID) return false, err } _, err = vs.InstanceID(ctx, convertToK8sType(nodeName)) @@ -709,7 +709,7 @@ func (vs *VSphere) InstanceExistsByProviderID(ctx context.Context, providerID st func (vs *VSphere) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { nodeName, err := vs.GetNodeNameFromProviderID(providerID) if err != nil { - glog.Errorf("Error while getting nodename for providerID %s", providerID) + klog.Errorf("Error while getting nodename for providerID %s", providerID) return false, err } @@ -723,12 +723,12 @@ func (vs *VSphere) InstanceShutdownByProviderID(ctx context.Context, providerID } vm, err := vs.getVMFromNodeName(ctx, convertToK8sType(nodeName)) if err != nil { - glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeName, err) + klog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeName, err) return false, err } isActive, err := vm.IsActive(ctx) if err != nil { - glog.Errorf("Failed to check whether node %q is active. err: %+v.", nodeName, err) + klog.Errorf("Failed to check whether node %q is active. err: %+v.", nodeName, err) return false, err } return !isActive, nil @@ -761,18 +761,18 @@ func (vs *VSphere) InstanceID(ctx context.Context, nodeName k8stypes.NodeName) ( } vm, err := vs.getVMFromNodeName(ctx, nodeName) if err != nil { - glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) + klog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) return "", err } isActive, err := vm.IsActive(ctx) if err != nil { - glog.Errorf("Failed to check whether node %q is active. err: %+v.", convertToString(nodeName), err) + klog.Errorf("Failed to check whether node %q is active. 
err: %+v.", convertToString(nodeName), err) return "", err } if isActive { return vs.vmUUID, nil } - glog.Warningf("The VM: %s is not in %s state", convertToString(nodeName), vclib.ActivePowerState) + klog.Warningf("The VM: %s is not in %s state", convertToString(nodeName), vclib.ActivePowerState) return "", cloudprovider.InstanceNotFound } @@ -781,7 +781,7 @@ func (vs *VSphere) InstanceID(ctx context.Context, nodeName k8stypes.NodeName) ( if vclib.IsManagedObjectNotFoundError(err) { err = vs.nodeManager.RediscoverNode(nodeName) if err == nil { - glog.V(4).Infof("InstanceID: Found node %q", convertToString(nodeName)) + klog.V(4).Infof("InstanceID: Found node %q", convertToString(nodeName)) instanceID, err = instanceIDInternal() } else if err == vclib.ErrNoVMFound { return "", cloudprovider.InstanceNotFound @@ -820,7 +820,7 @@ func (vs *VSphere) LoadBalancer() (cloudprovider.LoadBalancer, bool) { // Zones returns an implementation of Zones for vSphere. func (vs *VSphere) Zones() (cloudprovider.Zones, bool) { if vs.cfg == nil { - glog.V(1).Info("The vSphere cloud provider does not support zones") + klog.V(1).Info("The vSphere cloud provider does not support zones") return nil, false } return vs, true @@ -852,13 +852,13 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN vm, err := vs.getVMFromNodeName(ctx, nodeName) if err != nil { - glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) + klog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) return "", err } diskUUID, err = vm.AttachDisk(ctx, vmDiskPath, &vclib.VolumeOptions{SCSIControllerType: vclib.PVSCSIControllerType, StoragePolicyName: storagePolicyName}) if err != nil { - glog.Errorf("Failed to attach disk: %s for node: %s. err: +%v", vmDiskPath, convertToString(nodeName), err) + klog.Errorf("Failed to attach disk: %s for node: %s. err: +%v", vmDiskPath, convertToString(nodeName), err) return "", err } return diskUUID, nil @@ -869,13 +869,13 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN if vclib.IsManagedObjectNotFoundError(err) { err = vs.nodeManager.RediscoverNode(nodeName) if err == nil { - glog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName)) + klog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName)) diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName) - glog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", diskUUID, err) + klog.V(4).Infof("AttachDisk: Retry: diskUUID %s, err +%v", diskUUID, err) } } } - glog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. Err: %s", convertToString(nodeName), vmDiskPath, diskUUID, err) + klog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. Err: %s", convertToString(nodeName), vmDiskPath, diskUUID, err) vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err) return diskUUID, err } @@ -893,7 +893,7 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error if err != nil { // If node doesn't exist, disk is already detached from node. 
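The InstanceID and AttachDisk hunks above share a retry idiom: when vSphere reports a stale VM reference (vclib.IsManagedObjectNotFoundError), the node is rediscovered through the node manager and the internal operation is run once more. A minimal standalone sketch of that idiom follows; errStale, op, and rediscover are hypothetical stand-ins for the vclib error check, the *Internal closures, and nodeManager.RediscoverNode, while the k8s.io/klog calls are the real API.

package main

import (
	"errors"
	"flag"
	"fmt"

	"k8s.io/klog"
)

// errStale stands in for vclib's ManagedObjectNotFound condition.
var errStale = errors.New("ManagedObjectNotFound")

// retryAfterRediscovery runs op once and, if the failure indicates a stale
// node reference, rediscovers the node and retries exactly once.
func retryAfterRediscovery(nodeName string, op func() error, rediscover func(string) error) error {
	if err := op(); err != errStale {
		return err // nil, or a failure that rediscovery cannot fix
	}
	if err := rediscover(nodeName); err != nil {
		klog.Errorf("failed to rediscover node %q: %v", nodeName, err)
		return err
	}
	klog.V(4).Infof("rediscovered node %q, retrying operation", nodeName)
	return op()
}

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	attempts := 0
	op := func() error {
		attempts++
		if attempts == 1 {
			return errStale // first attempt sees a stale VM reference
		}
		return nil
	}
	err := retryAfterRediscovery("node-1", op, func(string) error { return nil })
	fmt.Println("attempts:", attempts, "final error:", err)
}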
if err == vclib.ErrNoVMFound { - glog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath) + klog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath) return nil } return err @@ -907,16 +907,16 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error if err != nil { // If node doesn't exist, disk is already detached from node. if err == vclib.ErrNoVMFound { - glog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath) + klog.Infof("Node %q does not exist, disk %s is already detached from node.", convertToString(nodeName), volPath) return nil } - glog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) + klog.Errorf("Failed to get VM object for node: %q. err: +%v", convertToString(nodeName), err) return err } err = vm.DetachDisk(ctx, volPath) if err != nil { - glog.Errorf("Failed to detach disk: %s for node: %s. err: +%v", volPath, convertToString(nodeName), err) + klog.Errorf("Failed to detach disk: %s for node: %s. err: +%v", volPath, convertToString(nodeName), err) return err } return nil @@ -960,22 +960,22 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b vm, err := vs.getVMFromNodeName(ctx, nodeName) if err != nil { if err == vclib.ErrNoVMFound { - glog.Warningf("Node %q does not exist, vsphere CP will assume disk %v is not attached to it.", nodeName, volPath) + klog.Warningf("Node %q does not exist, vsphere CP will assume disk %v is not attached to it.", nodeName, volPath) // make the disk as detached and return false without error. return false, nil } - glog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err) + klog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err) return false, err } volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath) attached, err := vm.IsDiskAttached(ctx, volPath) if err != nil { - glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q", + klog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q", volPath, vSphereInstance) } - glog.V(4).Infof("DiskIsAttached result: %v and error: %q, for volume: %q", attached, err, volPath) + klog.V(4).Infof("DiskIsAttached result: %v and error: %q, for volume: %q", attached, err, volPath) return attached, err } requestTime := time.Now() @@ -1024,7 +1024,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) for nodeName := range nodeVolumes { nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName) if err != nil { - glog.Errorf("Failed to get node info: %+v. err: %+v", nodeInfo.vm, err) + klog.Errorf("Failed to get node info: %+v. err: %+v", nodeInfo.vm, err) return nodesToRetry, err } VC_DC := nodeInfo.vcServer + nodeInfo.dataCenter.String() @@ -1042,7 +1042,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) globalErrMutex.Lock() globalErr = err globalErrMutex.Unlock() - glog.Errorf("Failed to check disk attached for nodes: %+v. err: %+v", nodes, err) + klog.Errorf("Failed to check disk attached for nodes: %+v. 
err: %+v", nodes, err) } } nodesToRetryMutex.Lock() @@ -1065,7 +1065,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) return nodesToRetry, nil } - glog.V(4).Infof("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes) + klog.V(4).Infof("Starting DisksAreAttached API for vSphere with nodeVolumes: %+v", nodeVolumes) // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1078,7 +1078,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) // Convert VolPaths into canonical form so that it can be compared with the VM device path. vmVolumes, err := vs.convertVolPathsToDevicePaths(ctx, nodeVolumes) if err != nil { - glog.Errorf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err) + klog.Errorf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err) return nil, err } attached := make(map[string]map[string]bool) @@ -1094,10 +1094,10 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) err = vs.nodeManager.RediscoverNode(nodeName) if err != nil { if err == vclib.ErrNoVMFound { - glog.V(4).Infof("node %s not found. err: %+v", nodeName, err) + klog.V(4).Infof("node %s not found. err: %+v", nodeName, err) continue } - glog.Errorf("Failed to rediscover node %s. err: %+v", nodeName, err) + klog.Errorf("Failed to rediscover node %s. err: %+v", nodeName, err) return nil, err } remainingNodesVolumes[nodeName] = nodeVolumes[nodeName] @@ -1107,7 +1107,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) if len(remainingNodesVolumes) != 0 { nodesToRetry, err = disksAreAttach(ctx, remainingNodesVolumes, attached, true) if err != nil || len(nodesToRetry) != 0 { - glog.Errorf("Failed to retry disksAreAttach for nodes %+v. err: %+v", remainingNodesVolumes, err) + klog.Errorf("Failed to retry disksAreAttach for nodes %+v. err: %+v", remainingNodesVolumes, err) return nil, err } } @@ -1116,7 +1116,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) disksAttached[convertToK8sType(nodeName)] = volPaths } } - glog.V(4).Infof("DisksAreAttach successfully executed. result: %+v", attached) + klog.V(4).Infof("DisksAreAttach successfully executed. result: %+v", attached) return disksAttached, nil } requestTime := time.Now() @@ -1130,7 +1130,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) // return value will be [DatastoreCluster/sharedVmfs-0] kubevols/.vmdk // else return value will be [sharedVmfs-0] kubevols/.vmdk func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) { - glog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions) + klog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions) createVolumeInternal := func(volumeOptions *vclib.VolumeOptions) (canonicalVolumePath string, err error) { var datastore string // If datastore not specified, then use default datastore @@ -1160,21 +1160,21 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo // This routine will get executed for every 5 minutes and gets initiated only once in its entire lifetime. 
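The vSphere provider logs above use several klog verbosity levels (V(1) for lifecycle messages, V(4) for diagnostics, V(9) for very chatty dumps); whether a given V(n) call emits anything is decided at runtime by klog's -v flag. A self-contained sketch, assuming nothing beyond the k8s.io/klog package itself:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)             // registers -v, -logtostderr, ... on flag.CommandLine
	flag.Set("logtostderr", "true") // write to stderr instead of temp files
	flag.Set("v", "4")              // same effect as passing -v=4
	flag.Parse()
	defer klog.Flush() // flush buffered log I/O before exiting

	klog.Infof("always emitted")
	klog.V(1).Infof("emitted: 1 <= 4")
	klog.V(4).Infof("emitted: 4 <= 4")
	klog.V(9).Infof("suppressed: 9 > 4")
}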
cleanUpRoutineInitLock.Lock() if !cleanUpRoutineInitialized { - glog.V(1).Infof("Starting a clean up routine to remove stale dummy VM's") + klog.V(1).Infof("Starting a clean up routine to remove stale dummy VM's") go vs.cleanUpDummyVMs(DummyVMPrefixName) cleanUpRoutineInitialized = true } cleanUpRoutineInitLock.Unlock() vmOptions, err = vs.setVMOptions(ctx, dc, vs.cfg.Workspace.ResourcePoolPath) if err != nil { - glog.Errorf("Failed to set VM options requires to create a vsphere volume. err: %+v", err) + klog.Errorf("Failed to set VM options requires to create a vsphere volume. err: %+v", err) return "", err } } if volumeOptions.StoragePolicyName != "" && volumeOptions.Datastore == "" { datastore, err = getPbmCompatibleDatastore(ctx, dc, volumeOptions.StoragePolicyName, vs.nodeManager) if err != nil { - glog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err) + klog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err) return "", err } } else { @@ -1182,7 +1182,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo // if the given datastore is a shared datastore across all node VMs. sharedDsList, err := getSharedDatastoresInK8SCluster(ctx, dc, vs.nodeManager) if err != nil { - glog.Errorf("Failed to get shared datastore: %+v", err) + klog.Errorf("Failed to get shared datastore: %+v", err) return "", err } found := false @@ -1205,7 +1205,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/" err = ds.CreateDirectory(ctx, kubeVolsPath, false) if err != nil && err != vclib.ErrFileAlreadyExist { - glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err) + klog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err) return "", err } volumePath := kubeVolsPath + volumeOptions.Name + ".vmdk" @@ -1216,13 +1216,13 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo } volumePath, err = disk.Create(ctx, ds) if err != nil { - glog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err) + klog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err) return "", err } // Get the canonical path for the volume path. canonicalVolumePath, err = getcanonicalVolumePath(ctx, dc, volumePath) if err != nil { - glog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. err: %+v", volumePath, volumeOptions, datastore, err) + klog.Errorf("Failed to get canonical vsphere volume path for volume: %s with volumeOptions: %+v on datastore: %s. err: %+v", volumePath, volumeOptions, datastore, err) return "", err } if filepath.Base(datastore) != datastore { @@ -1234,13 +1234,13 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo requestTime := time.Now() canonicalVolumePath, err = createVolumeInternal(volumeOptions) vclib.RecordCreateVolumeMetric(volumeOptions, requestTime, err) - glog.V(4).Infof("The canonical volume path for the newly created vSphere volume is %q", canonicalVolumePath) + klog.V(4).Infof("The canonical volume path for the newly created vSphere volume is %q", canonicalVolumePath) return canonicalVolumePath, err } // DeleteVolume deletes a volume given volume name. 
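CreateVolume above keeps its vCenter work in an inner createVolumeInternal closure so that a single call site can both return the result and record request latency (vclib.RecordCreateVolumeMetric). A compact sketch of that wrap-and-measure pattern; recordMetric and the fake volume path are hypothetical placeholders, not the vclib API.

package main

import (
	"fmt"
	"time"
)

// recordMetric is a stand-in for vclib.RecordCreateVolumeMetric: it receives
// the operation start time and the final error alongside the result labels.
func recordMetric(op string, start time.Time, err error) {
	fmt.Printf("%s took %v (err: %v)\n", op, time.Since(start), err)
}

func createVolume(name string) (string, error) {
	createVolumeInternal := func(name string) (string, error) {
		// ... talk to vCenter, create the disk, canonicalize the path ...
		return "[datastore1] kubevols/" + name + ".vmdk", nil
	}
	requestTime := time.Now()
	path, err := createVolumeInternal(name)
	recordMetric("CreateVolume", requestTime, err)
	return path, err
}

func main() {
	fmt.Println(createVolume("pvc-1234"))
}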
func (vs *VSphere) DeleteVolume(vmDiskPath string) error { - glog.V(1).Infof("Starting to delete vSphere volume with vmDiskPath: %s", vmDiskPath) + klog.V(1).Infof("Starting to delete vSphere volume with vmDiskPath: %s", vmDiskPath) deleteVolumeInternal := func(vmDiskPath string) error { // Create context ctx, cancel := context.WithCancel(context.Background()) @@ -1260,7 +1260,7 @@ func (vs *VSphere) DeleteVolume(vmDiskPath string) error { } err = disk.Delete(ctx, dc) if err != nil { - glog.Errorf("Failed to delete vsphere volume with vmDiskPath: %s. err: %+v", vmDiskPath, err) + klog.Errorf("Failed to delete vsphere volume with vmDiskPath: %s. err: %+v", vmDiskPath, err) } return err } @@ -1279,11 +1279,11 @@ func (vs *VSphere) HasClusterID() bool { func (vs *VSphere) NodeAdded(obj interface{}) { node, ok := obj.(*v1.Node) if node == nil || !ok { - glog.Warningf("NodeAdded: unrecognized object %+v", obj) + klog.Warningf("NodeAdded: unrecognized object %+v", obj) return } - glog.V(4).Infof("Node added: %+v", node) + klog.V(4).Infof("Node added: %+v", node) vs.nodeManager.RegisterNode(node) } @@ -1291,11 +1291,11 @@ func (vs *VSphere) NodeAdded(obj interface{}) { func (vs *VSphere) NodeDeleted(obj interface{}) { node, ok := obj.(*v1.Node) if node == nil || !ok { - glog.Warningf("NodeDeleted: unrecognized object %+v", obj) + klog.Warningf("NodeDeleted: unrecognized object %+v", obj) return } - glog.V(4).Infof("Node deleted: %+v", node) + klog.V(4).Infof("Node deleted: %+v", node) vs.nodeManager.UnRegisterNode(node) } @@ -1320,23 +1320,23 @@ func withTagsClient(ctx context.Context, connection *vclib.VSphereConnection, f func (vs *VSphere) GetZone(ctx context.Context) (cloudprovider.Zone, error) { nodeName, err := vs.CurrentNodeName(ctx, vs.hostName) if err != nil { - glog.Errorf("Cannot get node name.") + klog.Errorf("Cannot get node name.") return cloudprovider.Zone{}, err } zone := cloudprovider.Zone{} vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx) if err != nil { - glog.Errorf("Cannot connent to vsphere. Get zone for node %s error", nodeName) + klog.Errorf("Cannot connent to vsphere. Get zone for node %s error", nodeName) return cloudprovider.Zone{}, err } dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter) if err != nil { - glog.Errorf("Cannot connent to datacenter. Get zone for node %s error", nodeName) + klog.Errorf("Cannot connent to datacenter. Get zone for node %s error", nodeName) return cloudprovider.Zone{}, err } vmHost, err := dc.GetHostByVMUUID(ctx, vs.vmUUID) if err != nil { - glog.Errorf("Cannot find VM runtime host. Get zone for node %s error", nodeName) + klog.Errorf("Cannot find VM runtime host. Get zone for node %s error", nodeName) return cloudprovider.Zone{}, err } @@ -1354,23 +1354,23 @@ func (vs *VSphere) GetZone(ctx context.Context) (cloudprovider.Zone, error) { obj := objects[len(objects)-1-i] tags, err := client.ListAttachedTags(ctx, obj) if err != nil { - glog.Errorf("Cannot list attached tags. Get zone for node %s: %s", nodeName, err) + klog.Errorf("Cannot list attached tags. 
Get zone for node %s: %s", nodeName, err) return err } for _, value := range tags { tag, err := client.GetTag(ctx, value) if err != nil { - glog.Errorf("Get tag %s: %s", value, err) + klog.Errorf("Get tag %s: %s", value, err) return err } category, err := client.GetCategory(ctx, tag.CategoryID) if err != nil { - glog.Errorf("Get category %s error", value) + klog.Errorf("Get category %s error", value) return err } found := func() { - glog.Errorf("Found %q tag (%s) for %s attached to %s", category.Name, tag.Name, vs.vmUUID, obj.Reference()) + klog.Errorf("Found %q tag (%s) for %s attached to %s", category.Name, tag.Name, vs.vmUUID, obj.Reference()) } switch { case category.Name == vs.cfg.Labels.Zone: @@ -1401,7 +1401,7 @@ func (vs *VSphere) GetZone(ctx context.Context) (cloudprovider.Zone, error) { return nil }) if err != nil { - glog.Errorf("Get zone for node %s: %s", nodeName, err) + klog.Errorf("Get zone for node %s: %s", nodeName, err) return cloudprovider.Zone{}, err } return zone, nil diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/pkg/cloudprovider/providers/vsphere/vsphere_util.go index 8ef73978a29fa..04f241a89d398 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere_util.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere_util.go @@ -27,9 +27,9 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/mo" + "k8s.io/klog" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" @@ -87,27 +87,27 @@ func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nod // Check if the node VM is not found which indicates that the node info in the node manager is stale. // If so, rediscover the node and retry. if vclib.IsManagedObjectNotFoundError(err) { - glog.V(4).Infof("error %q ManagedObjectNotFound for node %q. Rediscovering...", err, nodeVmDetail.NodeName) + klog.V(4).Infof("error %q ManagedObjectNotFound for node %q. 
Rediscovering...", err, nodeVmDetail.NodeName) err = nodeManager.RediscoverNode(convertToK8sType(nodeVmDetail.NodeName)) if err == nil { - glog.V(4).Infof("Discovered node %s successfully", nodeVmDetail.NodeName) + klog.V(4).Infof("Discovered node %s successfully", nodeVmDetail.NodeName) nodeInfo, err := nodeManager.GetNodeInfo(convertToK8sType(nodeVmDetail.NodeName)) if err != nil { - glog.V(4).Infof("error %q getting node info for node %+v", err, nodeVmDetail) + klog.V(4).Infof("error %q getting node info for node %+v", err, nodeVmDetail) return nil, err } accessibleDatastores, err = nodeInfo.vm.GetAllAccessibleDatastores(ctx) if err != nil { - glog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail) + klog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail) return nil, err } } else { - glog.V(4).Infof("error %q rediscovering node %+v", err, nodeVmDetail) + klog.V(4).Infof("error %q rediscovering node %+v", err, nodeVmDetail) return nil, err } } else { - glog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail) + klog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail) return nil, err } } @@ -118,22 +118,22 @@ func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nod func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) { nodeVmDetails, err := nodeManager.GetNodeDetails() if err != nil { - glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err) + klog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err) return nil, err } if len(nodeVmDetails) == 0 { msg := fmt.Sprintf("Kubernetes node nodeVmDetail details is empty. nodeVmDetails : %+v", nodeVmDetails) - glog.Error(msg) + klog.Error(msg) return nil, fmt.Errorf(msg) } var sharedDatastores []*vclib.DatastoreInfo for _, nodeVmDetail := range nodeVmDetails { - glog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName) + klog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName) accessibleDatastores, err := getAccessibleDatastores(ctx, &nodeVmDetail, nodeManager) if err != nil { if err == vclib.ErrNoVMFound { - glog.V(9).Infof("Got NoVMFound error for node %s", nodeVmDetail.NodeName) + klog.V(9).Infof("Got NoVMFound error for node %s", nodeVmDetail.NodeName) continue } return nil, err @@ -148,19 +148,19 @@ func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, } } } - glog.V(9).Infof("sharedDatastores : %+v", sharedDatastores) + klog.V(9).Infof("sharedDatastores : %+v", sharedDatastores) sharedDatastores, err = getDatastoresForEndpointVC(ctx, dc, sharedDatastores) if err != nil { - glog.Errorf("Failed to get shared datastores from endpoint VC. err: %+v", err) + klog.Errorf("Failed to get shared datastores from endpoint VC. 
err: %+v", err) return nil, err } - glog.V(9).Infof("sharedDatastores at endpoint VC: %+v", sharedDatastores) + klog.V(9).Infof("sharedDatastores at endpoint VC: %+v", sharedDatastores) return sharedDatastores, nil } func intersect(list1 []*vclib.DatastoreInfo, list2 []*vclib.DatastoreInfo) []*vclib.DatastoreInfo { - glog.V(9).Infof("list1: %+v", list1) - glog.V(9).Infof("list2: %+v", list2) + klog.V(9).Infof("list1: %+v", list1) + klog.V(9).Infof("list2: %+v", list2) var sharedDs []*vclib.DatastoreInfo for _, val1 := range list1 { // Check if val1 is found in list2 @@ -202,10 +202,10 @@ func getDatastoresForEndpointVC(ctx context.Context, dc *vclib.Datacenter, share if ok { datastores = append(datastores, dsInfo) } else { - glog.V(4).Infof("Warning: Shared datastore with URL %s does not exist in endpoint VC", sharedDsInfo.Info.Url) + klog.V(4).Infof("Warning: Shared datastore with URL %s does not exist in endpoint VC", sharedDsInfo.Info.Url) } } - glog.V(9).Infof("Datastore from endpoint VC: %+v", datastores) + klog.V(9).Infof("Datastore from endpoint VC: %+v", datastores) return datastores, nil } @@ -216,32 +216,32 @@ func getPbmCompatibleDatastore(ctx context.Context, dc *vclib.Datacenter, storag } storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName) if err != nil { - glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err) + klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err) return "", err } sharedDs, err := getSharedDatastoresInK8SCluster(ctx, dc, nodeManager) if err != nil { - glog.Errorf("Failed to get shared datastores. err: %+v", err) + klog.Errorf("Failed to get shared datastores. err: %+v", err) return "", err } if len(sharedDs) == 0 { msg := "No shared datastores found in the endpoint virtual center" - glog.Errorf(msg) + klog.Errorf(msg) return "", errors.New(msg) } compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, dc, storagePolicyID, sharedDs) if err != nil { - glog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v", + klog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v", sharedDs, storagePolicyID, err) return "", err } - glog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores) + klog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores) datastore, err := getMostFreeDatastoreName(ctx, dc.Client(), compatibleDatastores) if err != nil { - glog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err) + klog.Errorf("Failed to get most free datastore from compatible datastores: %+v. 
err: %+v", compatibleDatastores, err) return "", err } - glog.V(4).Infof("Most free datastore : %+s", datastore) + klog.V(4).Infof("Most free datastore : %+s", datastore) return datastore, err } @@ -251,7 +251,7 @@ func (vs *VSphere) setVMOptions(ctx context.Context, dc *vclib.Datacenter, resou if err != nil { return nil, err } - glog.V(9).Infof("Resource pool path %s, resourcePool %+v", resourcePoolPath, resourcePool) + klog.V(9).Infof("Resource pool path %s, resourcePool %+v", resourcePoolPath, resourcePool) folder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder) if err != nil { return nil, err @@ -270,18 +270,18 @@ func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) { time.Sleep(CleanUpDummyVMRoutineInterval * time.Minute) vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx) if err != nil { - glog.V(4).Infof("Failed to get VSphere instance with err: %+v. Retrying again...", err) + klog.V(4).Infof("Failed to get VSphere instance with err: %+v. Retrying again...", err) continue } dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter) if err != nil { - glog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Workspace.Datacenter, err) + klog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Workspace.Datacenter, err) continue } // Get the folder reference for global working directory where the dummy VM needs to be created. vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder) if err != nil { - glog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err) + klog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err) continue } // A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests. @@ -290,7 +290,7 @@ func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) { defer cleanUpDummyVMLock.Unlock() err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc) if err != nil { - glog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err) + klog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err) } } cleanUpDummyVMs() @@ -360,7 +360,7 @@ func convertVolPathToDevicePath(ctx context.Context, dc *vclib.Datacenter, volPa // Get the canonical volume path for volPath. canonicalVolumePath, err := getcanonicalVolumePath(ctx, dc, volPath) if err != nil { - glog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err) + klog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err) return "", err } // Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map @@ -387,7 +387,7 @@ func (vs *VSphere) convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes for i, volPath := range volPaths { deviceVolPath, err := convertVolPathToDevicePath(ctx, nodeInfo.dataCenter, volPath) if err != nil { - glog.Errorf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err) + klog.Errorf("Failed to convert vsphere volume path %s to device path for volume %s. 
err: %+v", volPath, deviceVolPath, err) return nil, err } volPaths[i] = deviceVolPath @@ -423,7 +423,7 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN vmMoList, err := nodeInfo.dataCenter.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name", "config.uuid"}) if err != nil { if vclib.IsManagedObjectNotFoundError(err) && !retry { - glog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodes, vmList) + klog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodes, vmList) // Property Collector Query failed // VerifyVolumePaths per VM for _, nodeName := range nodes { @@ -434,13 +434,13 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN devices, err := nodeInfo.vm.VirtualMachine.Device(ctx) if err != nil { if vclib.IsManagedObjectNotFoundError(err) { - glog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm) + klog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm) nodesToRetry = append(nodesToRetry, nodeName) continue } return nodesToRetry, err } - glog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm) + klog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm) vclib.VerifyVolumePathsForVMDevices(devices, nodeVolumes[nodeName], convertToString(nodeName), attached) } } @@ -450,14 +450,14 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN vmMoMap := make(map[string]mo.VirtualMachine) for _, vmMo := range vmMoList { if vmMo.Config == nil { - glog.Errorf("Config is not available for VM: %q", vmMo.Name) + klog.Errorf("Config is not available for VM: %q", vmMo.Name) continue } - glog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, strings.ToLower(vmMo.Config.Uuid)) + klog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, strings.ToLower(vmMo.Config.Uuid)) vmMoMap[strings.ToLower(vmMo.Config.Uuid)] = vmMo } - glog.V(9).Infof("vmMoMap: +%v", vmMoMap) + klog.V(9).Infof("vmMoMap: +%v", vmMoMap) for _, nodeName := range nodes { node, err := vs.nodeManager.GetNode(nodeName) @@ -466,11 +466,11 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN } nodeUUID, err := GetNodeUUID(&node) if err != nil { - glog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err) + klog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err) return nodesToRetry, err } nodeUUID = strings.ToLower(nodeUUID) - glog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap) + klog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap) vclib.VerifyVolumePathsForVM(vmMoMap[nodeUUID], nodeVolumes[nodeName], convertToString(nodeName), attached) } return nodesToRetry, nil @@ -517,7 +517,7 @@ func (vs *VSphere) GetNodeNameFromProviderID(providerID string) (string, error) var nodeName string nodes, err := vs.nodeManager.GetNodeDetails() if err != nil { - glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err) + klog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. 
error : %+v", err) return "", err } for _, node := range nodes { @@ -564,12 +564,12 @@ func GetUUIDFromProviderID(providerID string) string { func IsUUIDSupportedNode(node *v1.Node) (bool, error) { newVersion, err := version.ParseSemantic("v1.9.4") if err != nil { - glog.Errorf("Failed to determine whether node %+v is old with error %v", node, err) + klog.Errorf("Failed to determine whether node %+v is old with error %v", node, err) return false, err } nodeVersion, err := version.ParseSemantic(node.Status.NodeInfo.KubeletVersion) if err != nil { - glog.Errorf("Failed to determine whether node %+v is old with error %v", node, err) + klog.Errorf("Failed to determine whether node %+v is old with error %v", node, err) return false, err } if nodeVersion.LessThan(newVersion) { @@ -581,7 +581,7 @@ func IsUUIDSupportedNode(node *v1.Node) (bool, error) { func GetNodeUUID(node *v1.Node) (string, error) { oldNode, err := IsUUIDSupportedNode(node) if err != nil { - glog.Errorf("Failed to get node UUID for node %+v with error %v", node, err) + klog.Errorf("Failed to get node UUID for node %+v with error %v", node, err) return "", err } if oldNode { diff --git a/pkg/controller/.import-restrictions b/pkg/controller/.import-restrictions index 6dd2ce4cc4c6f..31aa4d0b50440 100644 --- a/pkg/controller/.import-restrictions +++ b/pkg/controller/.import-restrictions @@ -104,7 +104,6 @@ "github.com/cloudflare/cfssl/signer/local", "github.com/davecgh/go-spew/spew", "github.com/evanphx/json-patch", - "github.com/golang/glog", "github.com/golang/groupcache/lru", "github.com/prometheus/client_golang/prometheus", "github.com/robfig/cron", diff --git a/pkg/controller/BUILD b/pkg/controller/BUILD index 5853bc011a2df..b4915ac53656a 100644 --- a/pkg/controller/BUILD +++ b/pkg/controller/BUILD @@ -88,8 +88,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/watch:go_default_library", "//staging/src/k8s.io/client-go/util/integer:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/bootstrap/BUILD b/pkg/controller/bootstrap/BUILD index b308efb9989e6..922cbde6c1328 100644 --- a/pkg/controller/bootstrap/BUILD +++ b/pkg/controller/bootstrap/BUILD @@ -59,8 +59,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/gopkg.in/square/go-jose.v2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/bootstrap/bootstrapsigner.go b/pkg/controller/bootstrap/bootstrapsigner.go index f816311e861a5..60952976eb52e 100644 --- a/pkg/controller/bootstrap/bootstrapsigner.go +++ b/pkg/controller/bootstrap/bootstrapsigner.go @@ -20,7 +20,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "fmt" "k8s.io/api/core/v1" @@ -162,10 +162,10 @@ func (e *BootstrapSigner) Run(stopCh <-chan struct{}) { return } - glog.V(5).Infof("Starting workers") + klog.V(5).Infof("Starting workers") go wait.Until(e.serviceConfigMapQueue, 0, stopCh) <-stopCh - glog.V(1).Infof("Shutting down") + klog.V(1).Infof("Shutting down") } func (e *BootstrapSigner) pokeConfigMapSync() { @@ -198,7 +198,7 @@ func (e *BootstrapSigner) signConfigMap() { // First 
capture the config we are signing content, ok := newCM.Data[bootstrapapi.KubeConfigKey] if !ok { - glog.V(3).Infof("No %s key in %s/%s ConfigMap", bootstrapapi.KubeConfigKey, origCM.Namespace, origCM.Name) + klog.V(3).Infof("No %s key in %s/%s ConfigMap", bootstrapapi.KubeConfigKey, origCM.Namespace, origCM.Name) return } @@ -244,7 +244,7 @@ func (e *BootstrapSigner) signConfigMap() { func (e *BootstrapSigner) updateConfigMap(cm *v1.ConfigMap) { _, err := e.client.CoreV1().ConfigMaps(cm.Namespace).Update(cm) if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) { - glog.V(3).Infof("Error updating ConfigMap: %v", err) + klog.V(3).Infof("Error updating ConfigMap: %v", err) } } @@ -295,7 +295,7 @@ func (e *BootstrapSigner) getTokens() map[string]string { if _, ok := ret[tokenID]; ok { // This should never happen as we ensure a consistent secret name. // But leave this in here just in case. - glog.V(1).Infof("Duplicate bootstrap tokens found for id %s, ignoring on in %s/%s", tokenID, secret.Namespace, secret.Name) + klog.V(1).Infof("Duplicate bootstrap tokens found for id %s, ignoring on in %s/%s", tokenID, secret.Namespace, secret.Name) continue } diff --git a/pkg/controller/bootstrap/tokencleaner.go b/pkg/controller/bootstrap/tokencleaner.go index 841a61e0d1d6f..adaee9b58d534 100644 --- a/pkg/controller/bootstrap/tokencleaner.go +++ b/pkg/controller/bootstrap/tokencleaner.go @@ -20,7 +20,6 @@ import ( "fmt" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,6 +31,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" bootstrapapi "k8s.io/cluster-bootstrap/token/api" + "k8s.io/klog" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" @@ -113,8 +113,8 @@ func (tc *TokenCleaner) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer tc.queue.ShutDown() - glog.Infof("Starting token cleaner controller") - defer glog.Infof("Shutting down token cleaner controller") + klog.Infof("Starting token cleaner controller") + defer klog.Infof("Shutting down token cleaner controller") if !controller.WaitForCacheSync("token_cleaner", stopCh, tc.secretSynced) { return @@ -161,7 +161,7 @@ func (tc *TokenCleaner) processNextWorkItem() bool { func (tc *TokenCleaner) syncFunc(key string) error { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Since(startTime)) }() namespace, name, err := cache.SplitMetaNamespaceKey(key) @@ -171,7 +171,7 @@ func (tc *TokenCleaner) syncFunc(key string) error { ret, err := tc.secretLister.Secrets(namespace).Get(name) if apierrors.IsNotFound(err) { - glog.V(3).Infof("secret has been deleted: %v", key) + klog.V(3).Infof("secret has been deleted: %v", key) return nil } @@ -188,7 +188,7 @@ func (tc *TokenCleaner) syncFunc(key string) error { func (tc *TokenCleaner) evalSecret(o interface{}) { secret := o.(*v1.Secret) if isSecretExpired(secret) { - glog.V(3).Infof("Deleting expired secret %s/%s", secret.Namespace, secret.Name) + klog.V(3).Infof("Deleting expired secret %s/%s", secret.Namespace, secret.Name) var options *metav1.DeleteOptions if len(secret.UID) > 0 { options = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}} @@ -197,7 +197,7 @@ func (tc *TokenCleaner) evalSecret(o interface{}) { // NotFound isn't a 
real error (it's already been deleted) // Conflict isn't a real error (the UID precondition failed) if err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) { - glog.V(3).Infof("Error deleting Secret: %v", err) + klog.V(3).Infof("Error deleting Secret: %v", err) } } } diff --git a/pkg/controller/bootstrap/util.go b/pkg/controller/bootstrap/util.go index 985f005484a05..44d024af41ac5 100644 --- a/pkg/controller/bootstrap/util.go +++ b/pkg/controller/bootstrap/util.go @@ -20,7 +20,7 @@ import ( "regexp" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" bootstrapapi "k8s.io/cluster-bootstrap/token/api" @@ -52,24 +52,24 @@ func parseSecretName(name string) (secretID string, ok bool) { func validateSecretForSigning(secret *v1.Secret) (tokenID, tokenSecret string, ok bool) { nameTokenID, ok := parseSecretName(secret.Name) if !ok { - glog.V(3).Infof("Invalid secret name: %s. Must be of form %s.", secret.Name, bootstrapapi.BootstrapTokenSecretPrefix) + klog.V(3).Infof("Invalid secret name: %s. Must be of form %s.", secret.Name, bootstrapapi.BootstrapTokenSecretPrefix) return "", "", false } tokenID = getSecretString(secret, bootstrapapi.BootstrapTokenIDKey) if len(tokenID) == 0 { - glog.V(3).Infof("No %s key in %s/%s Secret", bootstrapapi.BootstrapTokenIDKey, secret.Namespace, secret.Name) + klog.V(3).Infof("No %s key in %s/%s Secret", bootstrapapi.BootstrapTokenIDKey, secret.Namespace, secret.Name) return "", "", false } if nameTokenID != tokenID { - glog.V(3).Infof("Token ID (%s) doesn't match secret name: %s", tokenID, nameTokenID) + klog.V(3).Infof("Token ID (%s) doesn't match secret name: %s", tokenID, nameTokenID) return "", "", false } tokenSecret = getSecretString(secret, bootstrapapi.BootstrapTokenSecretKey) if len(tokenSecret) == 0 { - glog.V(3).Infof("No %s key in %s/%s Secret", bootstrapapi.BootstrapTokenSecretKey, secret.Namespace, secret.Name) + klog.V(3).Infof("No %s key in %s/%s Secret", bootstrapapi.BootstrapTokenSecretKey, secret.Namespace, secret.Name) return "", "", false } @@ -95,12 +95,12 @@ func isSecretExpired(secret *v1.Secret) bool { if len(expiration) > 0 { expTime, err2 := time.Parse(time.RFC3339, expiration) if err2 != nil { - glog.V(3).Infof("Unparseable expiration time (%s) in %s/%s Secret: %v. Treating as expired.", + klog.V(3).Infof("Unparseable expiration time (%s) in %s/%s Secret: %v. 
Treating as expired.", expiration, secret.Namespace, secret.Name, err2) return true } if time.Now().After(expTime) { - glog.V(3).Infof("Expired bootstrap token in %s/%s Secret: %v", + klog.V(3).Infof("Expired bootstrap token in %s/%s Secret: %v", secret.Namespace, secret.Name, expiration) return true } diff --git a/pkg/controller/certificates/BUILD b/pkg/controller/certificates/BUILD index 4502d2f4efd91..26af700588601 100644 --- a/pkg/controller/certificates/BUILD +++ b/pkg/controller/certificates/BUILD @@ -25,8 +25,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/time/rate:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/certificates/certificate_controller.go b/pkg/controller/certificates/certificate_controller.go index 088faf50e2c2d..2ec79936ef2aa 100644 --- a/pkg/controller/certificates/certificate_controller.go +++ b/pkg/controller/certificates/certificate_controller.go @@ -22,8 +22,8 @@ import ( "fmt" "time" - "github.com/golang/glog" "golang.org/x/time/rate" + "k8s.io/klog" certificates "k8s.io/api/certificates/v1beta1" "k8s.io/apimachinery/pkg/api/errors" @@ -57,7 +57,7 @@ func NewCertificateController( ) *CertificateController { // Send events to the apiserver eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) cc := &CertificateController{ @@ -74,12 +74,12 @@ func NewCertificateController( csrInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { csr := obj.(*certificates.CertificateSigningRequest) - glog.V(4).Infof("Adding certificate request %s", csr.Name) + klog.V(4).Infof("Adding certificate request %s", csr.Name) cc.enqueueCertificateRequest(obj) }, UpdateFunc: func(old, new interface{}) { oldCSR := old.(*certificates.CertificateSigningRequest) - glog.V(4).Infof("Updating certificate request %s", oldCSR.Name) + klog.V(4).Infof("Updating certificate request %s", oldCSR.Name) cc.enqueueCertificateRequest(new) }, DeleteFunc: func(obj interface{}) { @@ -87,16 +87,16 @@ func NewCertificateController( if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.V(2).Infof("Couldn't get object from tombstone %#v", obj) + klog.V(2).Infof("Couldn't get object from tombstone %#v", obj) return } csr, ok = tombstone.Obj.(*certificates.CertificateSigningRequest) if !ok { - glog.V(2).Infof("Tombstone contained object that is not a CSR: %#v", obj) + klog.V(2).Infof("Tombstone contained object that is not a CSR: %#v", obj) return } } - glog.V(4).Infof("Deleting certificate request %s", csr.Name) + klog.V(4).Infof("Deleting certificate request %s", csr.Name) cc.enqueueCertificateRequest(obj) }, }) @@ -110,8 +110,8 @@ func (cc *CertificateController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer cc.queue.ShutDown() - glog.Infof("Starting certificate controller") - defer glog.Infof("Shutting down certificate controller") + klog.Infof("Starting certificate controller") + defer klog.Infof("Shutting down certificate controller") if !controller.WaitForCacheSync("certificate", stopCh, cc.csrsSynced) { return @@ -143,7 +143,7 @@ 
func (cc *CertificateController) processNextWorkItem() bool { if _, ignorable := err.(ignorableError); !ignorable { utilruntime.HandleError(fmt.Errorf("Sync %v failed with : %v", cKey, err)) } else { - glog.V(4).Infof("Sync %v failed with : %v", cKey, err) + klog.V(4).Infof("Sync %v failed with : %v", cKey, err) } return true } @@ -169,11 +169,11 @@ func (cc *CertificateController) enqueueCertificateRequest(obj interface{}) { func (cc *CertificateController) syncFunc(key string) error { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Since(startTime)) }() csr, err := cc.csrLister.Get(key) if errors.IsNotFound(err) { - glog.V(3).Infof("csr has been deleted: %v", key) + klog.V(3).Infof("csr has been deleted: %v", key) return nil } if err != nil { diff --git a/pkg/controller/certificates/cleaner/BUILD b/pkg/controller/certificates/cleaner/BUILD index 64cc7730f8653..6f195c6f6f5d3 100644 --- a/pkg/controller/certificates/cleaner/BUILD +++ b/pkg/controller/certificates/cleaner/BUILD @@ -14,7 +14,7 @@ go_library( "//staging/src/k8s.io/client-go/informers/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/listers/certificates/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/certificates/cleaner/cleaner.go b/pkg/controller/certificates/cleaner/cleaner.go index bfe43fa028ba9..ebdac2956d596 100644 --- a/pkg/controller/certificates/cleaner/cleaner.go +++ b/pkg/controller/certificates/cleaner/cleaner.go @@ -26,7 +26,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" capi "k8s.io/api/certificates/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -78,8 +78,8 @@ func NewCSRCleanerController( func (ccc *CSRCleanerController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - glog.Infof("Starting CSR cleaner controller") - defer glog.Infof("Shutting down CSR cleaner controller") + klog.Infof("Starting CSR cleaner controller") + defer klog.Infof("Shutting down CSR cleaner controller") for i := 0; i < workers; i++ { go wait.Until(ccc.worker, pollingInterval, stopCh) @@ -92,12 +92,12 @@ func (ccc *CSRCleanerController) Run(workers int, stopCh <-chan struct{}) { func (ccc *CSRCleanerController) worker() { csrs, err := ccc.csrLister.List(labels.Everything()) if err != nil { - glog.Errorf("Unable to list CSRs: %v", err) + klog.Errorf("Unable to list CSRs: %v", err) return } for _, csr := range csrs { if err := ccc.handle(csr); err != nil { - glog.Errorf("Error while attempting to clean CSR %q: %v", csr.Name, err) + klog.Errorf("Error while attempting to clean CSR %q: %v", csr.Name, err) } } } @@ -124,7 +124,7 @@ func isIssuedExpired(csr *capi.CertificateSigningRequest) (bool, error) { } for _, c := range csr.Status.Conditions { if c.Type == capi.CertificateApproved && isIssued(csr) && isExpired { - glog.Infof("Cleaning CSR %q as the associated certificate is expired.", csr.Name) + klog.Infof("Cleaning CSR %q as the associated certificate is expired.", csr.Name) return true, nil } } @@ -138,7 +138,7 @@ func isPendingPastDeadline(csr *capi.CertificateSigningRequest) bool { // If there are no Conditions on the status, the CSR will appear via // `kubectl` as `Pending`. 
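Each cleaner predicate here reduces to the same age test: a condition or creation timestamp plus a per-state deadline (pendingExpiration, deniedExpiration, approvedExpiration) compared against the current time. A minimal sketch of that test, assuming plain time.Time in place of the metav1.Time fields on real CSR objects and an illustrative 24h deadline value:

package main

import (
	"fmt"
	"time"
)

// isOlderThan reports whether t lies more than d in the past. Zero times are
// treated as not old, so unset timestamps never trigger cleanup.
func isOlderThan(t time.Time, d time.Duration) bool {
	return !t.IsZero() && time.Now().After(t.Add(d))
}

func main() {
	pendingExpiration := 24 * time.Hour // deadline value assumed for illustration

	created := time.Now().Add(-25 * time.Hour)
	fmt.Println(isOlderThan(created, pendingExpiration))      // true: pending too long
	fmt.Println(isOlderThan(time.Time{}, pendingExpiration)) // false: unset timestamp
}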
if len(csr.Status.Conditions) == 0 && isOlderThan(csr.CreationTimestamp, pendingExpiration) { - glog.Infof("Cleaning CSR %q as it is more than %v old and unhandled.", csr.Name, pendingExpiration) + klog.Infof("Cleaning CSR %q as it is more than %v old and unhandled.", csr.Name, pendingExpiration) return true } return false @@ -150,7 +150,7 @@ func isPendingPastDeadline(csr *capi.CertificateSigningRequest) bool { func isDeniedPastDeadline(csr *capi.CertificateSigningRequest) bool { for _, c := range csr.Status.Conditions { if c.Type == capi.CertificateDenied && isOlderThan(c.LastUpdateTime, deniedExpiration) { - glog.Infof("Cleaning CSR %q as it is more than %v old and denied.", csr.Name, deniedExpiration) + klog.Infof("Cleaning CSR %q as it is more than %v old and denied.", csr.Name, deniedExpiration) return true } } @@ -163,7 +163,7 @@ func isDeniedPastDeadline(csr *capi.CertificateSigningRequest) bool { func isIssuedPastDeadline(csr *capi.CertificateSigningRequest) bool { for _, c := range csr.Status.Conditions { if c.Type == capi.CertificateApproved && isIssued(csr) && isOlderThan(c.LastUpdateTime, approvedExpiration) { - glog.Infof("Cleaning CSR %q as it is more than %v old and approved.", csr.Name, approvedExpiration) + klog.Infof("Cleaning CSR %q as it is more than %v old and approved.", csr.Name, approvedExpiration) return true } } diff --git a/pkg/controller/certificates/rootcacertpublisher/BUILD b/pkg/controller/certificates/rootcacertpublisher/BUILD index 35f0ed8a93229..27e8395ea8d27 100644 --- a/pkg/controller/certificates/rootcacertpublisher/BUILD +++ b/pkg/controller/certificates/rootcacertpublisher/BUILD @@ -18,7 +18,7 @@ go_library( "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/certificates/rootcacertpublisher/publisher.go b/pkg/controller/certificates/rootcacertpublisher/publisher.go index 79a50357c24a9..f512bb1866748 100644 --- a/pkg/controller/certificates/rootcacertpublisher/publisher.go +++ b/pkg/controller/certificates/rootcacertpublisher/publisher.go @@ -21,7 +21,6 @@ import ( "reflect" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,6 +31,7 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -102,8 +102,8 @@ func (c *Publisher) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() - glog.Infof("Starting root CA certificate configmap publisher") - defer glog.Infof("Shutting down root CA certificate configmap publisher") + klog.Infof("Starting root CA certificate configmap publisher") + defer klog.Infof("Shutting down root CA certificate configmap publisher") if !controller.WaitForCacheSync("crt configmap", stopCh, c.cmListerSynced, c.nsListerSynced) { return @@ -190,7 +190,7 @@ func (c *Publisher) processNextWorkItem() bool { func (c *Publisher) syncNamespace(key string) error { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing namespace %q (%v)", key, 
time.Since(startTime)) }() ns, err := c.nsLister.Get(key) diff --git a/pkg/controller/client_builder.go b/pkg/controller/client_builder.go index e8477445922cd..caac5649d2f77 100644 --- a/pkg/controller/client_builder.go +++ b/pkg/controller/client_builder.go @@ -38,7 +38,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/serviceaccount" - "github.com/golang/glog" + "k8s.io/klog" ) // ControllerClientBuilder allows you to get clients and configs for controllers @@ -65,7 +65,7 @@ func (b SimpleControllerClientBuilder) Config(name string) (*restclient.Config, func (b SimpleControllerClientBuilder) ConfigOrDie(name string) *restclient.Config { clientConfig, err := b.Config(name) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return clientConfig } @@ -81,7 +81,7 @@ func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface, func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interface { client, err := b.Client(name) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return client } @@ -146,15 +146,15 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro } validConfig, valid, err := b.getAuthenticatedConfig(sa, string(secret.Data[v1.ServiceAccountTokenKey])) if err != nil { - glog.Warningf("error validating API token for %s/%s in secret %s: %v", sa.Name, sa.Namespace, secret.Name, err) + klog.Warningf("error validating API token for %s/%s in secret %s: %v", sa.Name, sa.Namespace, secret.Name, err) // continue watching for good tokens return false, nil } if !valid { - glog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Name, sa.Namespace) + klog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Name, sa.Namespace) // try to delete the secret containing the invalid token if err := b.CoreClient.Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { - glog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Name, sa.Namespace, err) + klog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Name, sa.Namespace, err) } // continue watching for good tokens return false, nil @@ -208,14 +208,14 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, tokenReview := &v1authenticationapi.TokenReview{Spec: v1authenticationapi.TokenReviewSpec{Token: token}} if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(tokenReview); err == nil { if !tokenResult.Status.Authenticated { - glog.Warningf("Token for %s/%s did not authenticate correctly", sa.Name, sa.Namespace) + klog.Warningf("Token for %s/%s did not authenticate correctly", sa.Name, sa.Namespace) return nil, false, nil } if tokenResult.Status.User.Username != username { - glog.Warningf("Token for %s/%s authenticated as unexpected username: %s", sa.Name, sa.Namespace, tokenResult.Status.User.Username) + klog.Warningf("Token for %s/%s authenticated as unexpected username: %s", sa.Name, sa.Namespace, tokenResult.Status.User.Username) return nil, false, nil } - glog.V(4).Infof("Verified credential for %s/%s", sa.Name, sa.Namespace) + klog.V(4).Infof("Verified credential for %s/%s", sa.Name, sa.Namespace) return clientConfig, true, nil } @@ -229,7 +229,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, } err = client.Get().AbsPath("/apis").Do().Error() if 
apierrors.IsUnauthorized(err) { - glog.Warningf("Token for %s/%s did not authenticate correctly: %v", sa.Name, sa.Namespace, err) + klog.Warningf("Token for %s/%s did not authenticate correctly: %v", sa.Name, sa.Namespace, err) return nil, false, nil } @@ -239,7 +239,7 @@ func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, func (b SAControllerClientBuilder) ConfigOrDie(name string) *restclient.Config { clientConfig, err := b.Config(name) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return clientConfig } @@ -255,7 +255,7 @@ func (b SAControllerClientBuilder) Client(name string) (clientset.Interface, err func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface { client, err := b.Client(name) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return client } diff --git a/pkg/controller/cloud/BUILD b/pkg/controller/cloud/BUILD index 58de6103e74a7..8999338aa2402 100644 --- a/pkg/controller/cloud/BUILD +++ b/pkg/controller/cloud/BUILD @@ -43,7 +43,7 @@ go_library( "//staging/src/k8s.io/client-go/util/retry:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -74,8 +74,8 @@ go_test( "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/cloud/node_controller.go b/pkg/controller/cloud/node_controller.go index 0ffd3ede899b6..bfce4e59d4b7b 100644 --- a/pkg/controller/cloud/node_controller.go +++ b/pkg/controller/cloud/node_controller.go @@ -22,7 +22,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -84,12 +84,12 @@ func NewCloudNodeController( eventBroadcaster := record.NewBroadcaster() recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}) - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) if kubeClient != nil { - glog.V(0).Infof("Sending events to api server.") + klog.V(0).Infof("Sending events to api server.") eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) } else { - glog.V(0).Infof("No api server defined - no events will be sent to API server.") + klog.V(0).Infof("No api server defined - no events will be sent to API server.") } cnc := &CloudNodeController{ @@ -137,7 +137,7 @@ func (cnc *CloudNodeController) UpdateNodeStatus() { nodes, err := cnc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"}) if err != nil { - glog.Errorf("Error monitoring node status: %v", err) + klog.Errorf("Error monitoring node status: %v", err) return } @@ -151,27 +151,27 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud // Do not process nodes that are still tainted cloudTaint := getCloudTaint(node.Spec.Taints) if cloudTaint != nil { - glog.V(5).Infof("This node %s is still tainted. Will not process.", node.Name) + klog.V(5).Infof("This node %s is still tainted. 
Will not process.", node.Name) return } // Node that isn't present according to the cloud provider shouldn't have its address updated exists, err := ensureNodeExistsByProviderID(instances, node) if err != nil { // Continue to update node address when not sure the node is not exists - glog.Errorf("%v", err) + klog.Errorf("%v", err) } else if !exists { - glog.V(4).Infof("The node %s is no longer present according to the cloud provider, do not process.", node.Name) + klog.V(4).Infof("The node %s is no longer present according to the cloud provider, do not process.", node.Name) return } nodeAddresses, err := getNodeAddressesByProviderIDOrName(instances, node) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) return } if len(nodeAddresses) == 0 { - glog.V(5).Infof("Skipping node address update for node %q since cloud provider did not return any", node.Name) + klog.V(5).Infof("Skipping node address update for node %q since cloud provider did not return any", node.Name) return } @@ -195,7 +195,7 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud // it can be found in the cloud as well (consistent with the behaviour in kubelet) if nodeIP, ok := ensureNodeProvidedIPExists(node, nodeAddresses); ok { if nodeIP == nil { - glog.Errorf("Specified Node IP not found in cloudprovider") + klog.Errorf("Specified Node IP not found in cloudprovider") return } } @@ -206,7 +206,7 @@ func (cnc *CloudNodeController) updateNodeAddress(node *v1.Node, instances cloud } _, _, err = nodeutil.PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode) if err != nil { - glog.Errorf("Error patching node with cloud ip addresses = [%v]", err) + klog.Errorf("Error patching node with cloud ip addresses = [%v]", err) } } @@ -221,7 +221,7 @@ func (cnc *CloudNodeController) MonitorNode() { nodes, err := cnc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"}) if err != nil { - glog.Errorf("Error monitoring node status: %v", err) + klog.Errorf("Error monitoring node status: %v", err) return } @@ -238,13 +238,13 @@ func (cnc *CloudNodeController) MonitorNode() { name := node.Name node, err = cnc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { - glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name) + klog.Errorf("Failed while getting a Node to retry updating NodeStatus. 
Probably Node %s was deleted.", name) break } time.Sleep(retrySleepTime) } if currentReadyCondition == nil { - glog.Errorf("Update status of Node %v from CloudNodeController exceeds retry count or the Node was deleted.", node.Name) + klog.Errorf("Update status of Node %v from CloudNodeController exceeds retry count or the Node was deleted.", node.Name) continue } // If the known node status says that Node is NotReady, then check if the node has been removed @@ -256,14 +256,14 @@ func (cnc *CloudNodeController) MonitorNode() { // does not delete node from kubernetes cluster when instance it is shutdown see issue #46442 shutdown, err := nodectrlutil.ShutdownInCloudProvider(context.TODO(), cnc.cloud, node) if err != nil { - glog.Errorf("Error checking if node %s is shutdown: %v", node.Name, err) + klog.Errorf("Error checking if node %s is shutdown: %v", node.Name, err) } if shutdown && err == nil { // if node is shutdown add shutdown taint err = controller.AddOrUpdateTaintOnNode(cnc.kubeClient, node.Name, controller.ShutdownTaint) if err != nil { - glog.Errorf("Error patching node taints: %v", err) + klog.Errorf("Error patching node taints: %v", err) } // Continue checking the remaining nodes since the current one is shutdown. continue @@ -273,7 +273,7 @@ func (cnc *CloudNodeController) MonitorNode() { // doesn't, delete the node immediately. exists, err := ensureNodeExistsByProviderID(instances, node) if err != nil { - glog.Errorf("Error checking if node %s exists: %v", node.Name, err) + klog.Errorf("Error checking if node %s exists: %v", node.Name, err) continue } @@ -282,7 +282,7 @@ func (cnc *CloudNodeController) MonitorNode() { continue } - glog.V(2).Infof("Deleting node since it is no longer present in cloud provider: %s", node.Name) + klog.V(2).Infof("Deleting node since it is no longer present in cloud provider: %s", node.Name) ref := &v1.ObjectReference{ Kind: "Node", @@ -290,14 +290,14 @@ func (cnc *CloudNodeController) MonitorNode() { UID: types.UID(node.UID), Namespace: "", } - glog.V(2).Infof("Recording %s event message for node %s", "DeletingNode", node.Name) + klog.V(2).Infof("Recording %s event message for node %s", "DeletingNode", node.Name) cnc.recorder.Eventf(ref, v1.EventTypeNormal, fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name), "Node %s event: %s", node.Name, "DeletingNode") go func(nodeName string) { defer utilruntime.HandleCrash() if err := cnc.kubeClient.CoreV1().Nodes().Delete(nodeName, nil); err != nil { - glog.Errorf("unable to delete node %q: %v", nodeName, err) + klog.Errorf("unable to delete node %q: %v", nodeName, err) } }(node.Name) @@ -305,7 +305,7 @@ func (cnc *CloudNodeController) MonitorNode() { // if taint exist remove taint err = controller.RemoveTaintOffNode(cnc.kubeClient, node.Name, node, controller.ShutdownTaint) if err != nil { - glog.Errorf("Error patching node taints: %v", err) + klog.Errorf("Error patching node taints: %v", err) } } } @@ -326,7 +326,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) { cloudTaint := getCloudTaint(node.Spec.Taints) if cloudTaint == nil { - glog.V(2).Infof("This node %s is registered without the cloud taint. Will not process.", node.Name) + klog.V(2).Infof("This node %s is registered without the cloud taint. 
Will not process.", node.Name) return } @@ -365,7 +365,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) { // we should attempt to set providerID on curNode, but // we can continue if we fail since we will attempt to set // node addresses given the node name in getNodeAddressesByProviderIDOrName - glog.Errorf("failed to set node provider id: %v", err) + klog.Errorf("failed to set node provider id: %v", err) } } @@ -385,7 +385,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) { if instanceType, err := getInstanceTypeByProviderIDOrName(instances, curNode); err != nil { return err } else if instanceType != "" { - glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType) + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType) curNode.ObjectMeta.Labels[kubeletapis.LabelInstanceType] = instanceType } @@ -395,11 +395,11 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) { return fmt.Errorf("failed to get zone from cloud provider: %v", err) } if zone.FailureDomain != "" { - glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain) + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain) curNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = zone.FailureDomain } if zone.Region != "" { - glog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region) + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region) curNode.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] = zone.Region } } @@ -420,7 +420,7 @@ func (cnc *CloudNodeController) AddCloudNode(obj interface{}) { return } - glog.Infof("Successfully initialized node %s with cloud provider", node.Name) + klog.Infof("Successfully initialized node %s with cloud provider", node.Name) } func getCloudTaint(taints []v1.Taint) *v1.Taint { @@ -458,7 +458,7 @@ func ensureNodeExistsByProviderID(instances cloudprovider.Instances, node *v1.No } if providerID == "" { - glog.Warningf("Cannot find valid providerID for node name %q, assuming non existence", node.Name) + klog.Warningf("Cannot find valid providerID for node name %q, assuming non existence", node.Name) return false, nil } } diff --git a/pkg/controller/cloud/node_controller_test.go b/pkg/controller/cloud/node_controller_test.go index 5564b60b8f565..855594dcc8ea1 100644 --- a/pkg/controller/cloud/node_controller_test.go +++ b/pkg/controller/cloud/node_controller_test.go @@ -37,8 +37,8 @@ import ( kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - "github.com/golang/glog" "github.com/stretchr/testify/assert" + "k8s.io/klog" ) func TestEnsureNodeExistsByProviderID(t *testing.T) { @@ -250,7 +250,7 @@ func TestNodeShutdown(t *testing.T) { recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}), nodeStatusUpdateFrequency: 1 * time.Second, } - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) cloudNodeController.Run(wait.NeverStop) @@ -349,7 +349,7 @@ func TestNodeDeleted(t *testing.T) { recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}), nodeStatusUpdateFrequency: 1 * time.Second, } - eventBroadcaster.StartLogging(glog.Infof) + 
eventBroadcaster.StartLogging(klog.Infof) cloudNodeController.Run(wait.NeverStop) @@ -429,7 +429,7 @@ func TestNodeInitialized(t *testing.T) { recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}), nodeStatusUpdateFrequency: 1 * time.Second, } - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) cloudNodeController.AddCloudNode(fnh.Existing[0]) @@ -494,7 +494,7 @@ func TestNodeIgnored(t *testing.T) { nodeMonitorPeriod: 5 * time.Second, recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}), } - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) cloudNodeController.AddCloudNode(fnh.Existing[0]) assert.Equal(t, 0, len(fnh.UpdatedNodes), "Node was wrongly updated") @@ -568,7 +568,7 @@ func TestGCECondition(t *testing.T) { nodeMonitorPeriod: 1 * time.Second, recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}), } - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) cloudNodeController.AddCloudNode(fnh.Existing[0]) @@ -658,7 +658,7 @@ func TestZoneInitialized(t *testing.T) { nodeMonitorPeriod: 5 * time.Second, recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}), } - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) cloudNodeController.AddCloudNode(fnh.Existing[0]) @@ -749,7 +749,7 @@ func TestNodeAddresses(t *testing.T) { nodeStatusUpdateFrequency: 1 * time.Second, recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}), } - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) cloudNodeController.AddCloudNode(fnh.Existing[0]) @@ -864,7 +864,7 @@ func TestNodeProvidedIPAddresses(t *testing.T) { nodeStatusUpdateFrequency: 1 * time.Second, recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}), } - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) cloudNodeController.AddCloudNode(fnh.Existing[0]) @@ -1156,7 +1156,7 @@ func TestNodeProviderID(t *testing.T) { nodeStatusUpdateFrequency: 1 * time.Second, recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}), } - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) cloudNodeController.AddCloudNode(fnh.Existing[0]) @@ -1240,7 +1240,7 @@ func TestNodeProviderIDAlreadySet(t *testing.T) { nodeStatusUpdateFrequency: 1 * time.Second, recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}), } - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) cloudNodeController.AddCloudNode(fnh.Existing[0]) diff --git a/pkg/controller/cloud/pvlcontroller.go b/pkg/controller/cloud/pvlcontroller.go index 3459e74fcf436..5cce125a6aef9 100644 --- a/pkg/controller/cloud/pvlcontroller.go +++ b/pkg/controller/cloud/pvlcontroller.go @@ -22,7 +22,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" @@ -109,8 +109,8 @@ func (pvlc *PersistentVolumeLabelController) Run(threadiness int, stopCh <-chan defer utilruntime.HandleCrash() defer pvlc.queue.ShutDown() - glog.Infof("Starting PersistentVolumeLabelController") - defer glog.Infof("Shutting down 
PersistentVolumeLabelController") + klog.Infof("Starting PersistentVolumeLabelController") + defer klog.Infof("Shutting down PersistentVolumeLabelController") go pvlc.pvlController.Run(stopCh) @@ -197,7 +197,7 @@ func (pvlc *PersistentVolumeLabelController) addLabelsAndAffinityToVolume(vol *v } volumeLabels = labels } else { - glog.V(4).Info("cloud provider does not support PVLabeler") + klog.V(4).Info("cloud provider does not support PVLabeler") } return pvlc.updateVolume(vol, volumeLabels) } @@ -244,7 +244,7 @@ func (pvlc *PersistentVolumeLabelController) createPatch(vol *v1.PersistentVolum } // Populate NodeAffinity with requirements if there are no conflicting keys found if v1helper.NodeSelectorRequirementKeysExistInNodeSelectorTerms(requirements, newVolume.Spec.NodeAffinity.Required.NodeSelectorTerms) { - glog.V(4).Infof("NodeSelectorRequirements for cloud labels %v conflict with existing NodeAffinity %v. Skipping addition of NodeSelectorRequirements for cloud labels.", + klog.V(4).Infof("NodeSelectorRequirements for cloud labels %v conflict with existing NodeAffinity %v. Skipping addition of NodeSelectorRequirements for cloud labels.", requirements, newVolume.Spec.NodeAffinity) } else { for _, req := range requirements { @@ -255,7 +255,7 @@ func (pvlc *PersistentVolumeLabelController) createPatch(vol *v1.PersistentVolum } } newVolume.Initializers = removeInitializer(newVolume.Initializers, initializerName) - glog.V(4).Infof("removed initializer on PersistentVolume %s", newVolume.Name) + klog.V(4).Infof("removed initializer on PersistentVolume %s", newVolume.Name) oldData, err := json.Marshal(vol) if err != nil { @@ -276,7 +276,7 @@ func (pvlc *PersistentVolumeLabelController) createPatch(vol *v1.PersistentVolum func (pvlc *PersistentVolumeLabelController) updateVolume(vol *v1.PersistentVolume, volLabels map[string]string) error { volName := vol.Name - glog.V(4).Infof("updating PersistentVolume %s", volName) + klog.V(4).Infof("updating PersistentVolume %s", volName) patchBytes, err := pvlc.createPatch(vol, volLabels) if err != nil { return err @@ -286,7 +286,7 @@ func (pvlc *PersistentVolumeLabelController) updateVolume(vol *v1.PersistentVolu if err != nil { return fmt.Errorf("failed to update PersistentVolume %s: %v", volName, err) } - glog.V(4).Infof("updated PersistentVolume %s", volName) + klog.V(4).Infof("updated PersistentVolume %s", volName) return nil } diff --git a/pkg/controller/clusterroleaggregation/BUILD b/pkg/controller/clusterroleaggregation/BUILD index b2f74930f1e7b..2fef3c98f41ed 100644 --- a/pkg/controller/clusterroleaggregation/BUILD +++ b/pkg/controller/clusterroleaggregation/BUILD @@ -19,7 +19,7 @@ go_library( "//staging/src/k8s.io/client-go/listers/rbac/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go b/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go index 05879e0e681d9..b5c9f873dbc55 100644 --- a/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go +++ b/pkg/controller/clusterroleaggregation/clusterroleaggregation_controller.go @@ -21,7 +21,7 @@ import ( "sort" "time" - "github.com/golang/glog" + "k8s.io/klog" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/equality" @@ -145,8 +145,8 @@ func (c 
*ClusterRoleAggregationController) Run(workers int, stopCh <-chan struct defer utilruntime.HandleCrash() defer c.queue.ShutDown() - glog.Infof("Starting ClusterRoleAggregator") - defer glog.Infof("Shutting down ClusterRoleAggregator") + klog.Infof("Starting ClusterRoleAggregator") + defer klog.Infof("Shutting down ClusterRoleAggregator") if !controller.WaitForCacheSync("ClusterRoleAggregator", stopCh, c.clusterRolesSynced) { return diff --git a/pkg/controller/controller_ref_manager.go b/pkg/controller/controller_ref_manager.go index 6cf2ac189464d..f63afaca6f359 100644 --- a/pkg/controller/controller_ref_manager.go +++ b/pkg/controller/controller_ref_manager.go @@ -20,7 +20,6 @@ import ( "fmt" "sync" - "github.com/golang/glog" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -28,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" ) type BaseControllerRefManager struct { @@ -223,7 +223,7 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error { // ReleasePod sends a patch to free the pod from the control of the controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error { - glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s", + klog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s", pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), pod.UID) err := m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(deleteOwnerRefPatch)) @@ -345,7 +345,7 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) er // ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.ReplicaSet) error { - glog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s", + klog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s", replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID) err := m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, []byte(deleteOwnerRefPatch)) @@ -480,7 +480,7 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history // ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller. // It returns the error if the patching fails. 404 and 422 errors are ignored. 
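The hunks above are representative of the whole migration: because klog began as a fork of glog, every call site keeps its name and signature, and only the import path (grouped with the other k8s.io imports) and the flag wiring change. A minimal sketch of a migrated binary, assuming nothing beyond stock klog; the messages below are invented for illustration:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Unlike glog, klog does not register its flags in init(); each binary
	// wires them up explicitly. nil means "register on flag.CommandLine".
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	klog.Infof("starting up")       // identical call shape to glog.Infof
	klog.V(4).Infof("debug detail") // emitted only when run with -v=4 or higher
}
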
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error { - glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s", + klog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s", history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID) err := m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(deleteOwnerRefPatch)) diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index 04ef26e0f4b49..6ccc32aed0f8b 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -51,7 +51,7 @@ import ( hashutil "k8s.io/kubernetes/pkg/util/hash" taintutils "k8s.io/kubernetes/pkg/util/taints" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -170,7 +170,7 @@ func (r *ControllerExpectations) GetExpectations(controllerKey string) (*Control func (r *ControllerExpectations) DeleteExpectations(controllerKey string) { if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists { if err := r.Delete(exp); err != nil { - glog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err) + klog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err) } } } @@ -181,24 +181,24 @@ func (r *ControllerExpectations) DeleteExpectations(controllerKey string) { func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) bool { if exp, exists, err := r.GetExpectations(controllerKey); exists { if exp.Fulfilled() { - glog.V(4).Infof("Controller expectations fulfilled %#v", exp) + klog.V(4).Infof("Controller expectations fulfilled %#v", exp) return true } else if exp.isExpired() { - glog.V(4).Infof("Controller expectations expired %#v", exp) + klog.V(4).Infof("Controller expectations expired %#v", exp) return true } else { - glog.V(4).Infof("Controller still waiting on expectations %#v", exp) + klog.V(4).Infof("Controller still waiting on expectations %#v", exp) return false } } else if err != nil { - glog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err) + klog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err) } else { // When a new controller is created, it doesn't have expectations. // When it doesn't see expected watch events for > TTL, the expectations expire. // - In this case it wakes up, creates/deletes controllees, and sets expectations again. // When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire. // - In this case it continues without setting expectations till it needs to create/delete controllees. - glog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey) + klog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey) } // Trigger a sync if we either encountered and error (which shouldn't happen since we're // getting from local store) or this controller hasn't established expectations. @@ -215,7 +215,7 @@ func (exp *ControlleeExpectations) isExpired() bool { // SetExpectations registers new expectations for the given controller. Forgets existing expectations. 
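The expectations logging above traces a small piece of bookkeeping: the controller records how many creates and deletes it expects, observed watch events lower the counters, and a sync may proceed once both reach zero or a TTL lapses. A toy sketch of that idea with hypothetical names (the real ControlleeExpectations additionally lives in a cache.Store and uses a clock abstraction):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type expectations struct {
	add, del  int64 // outstanding creates and deletes
	timestamp time.Time
}

func (e *expectations) fulfilled() bool {
	return atomic.LoadInt64(&e.add) <= 0 && atomic.LoadInt64(&e.del) <= 0
}

func (e *expectations) expired(ttl time.Duration) bool {
	return time.Since(e.timestamp) > ttl
}

func main() {
	exp := &expectations{add: 2, timestamp: time.Now()}
	atomic.AddInt64(&exp.add, -1) // a pod-created watch event arrived
	atomic.AddInt64(&exp.add, -1) // and another
	fmt.Println("safe to sync:", exp.fulfilled() || exp.expired(5*time.Minute))
}
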
func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error { exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: clock.RealClock{}.Now()} - glog.V(4).Infof("Setting expectations %#v", exp) + klog.V(4).Infof("Setting expectations %#v", exp) return r.Add(exp) } @@ -232,7 +232,7 @@ func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, de if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { exp.Add(int64(-add), int64(-del)) // The expectations might've been modified since the update on the previous line. - glog.V(4).Infof("Lowered expectations %#v", exp) + klog.V(4).Infof("Lowered expectations %#v", exp) } } @@ -241,7 +241,7 @@ func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, de if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { exp.Add(int64(add), int64(del)) // The expectations might've been modified since the update on the previous line. - glog.V(4).Infof("Raised expectations %#v", exp) + klog.V(4).Infof("Raised expectations %#v", exp) } } @@ -340,13 +340,13 @@ func (u *UIDTrackingControllerExpectations) ExpectDeletions(rcKey string, delete defer u.uidStoreLock.Unlock() if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 { - glog.Errorf("Clobbering existing delete keys: %+v", existing) + klog.Errorf("Clobbering existing delete keys: %+v", existing) } expectedUIDs := sets.NewString() for _, k := range deletedKeys { expectedUIDs.Insert(k) } - glog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys) + klog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys) if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil { return err } @@ -360,7 +360,7 @@ func (u *UIDTrackingControllerExpectations) DeletionObserved(rcKey, deleteKey st uids := u.GetUIDs(rcKey) if uids != nil && uids.Has(deleteKey) { - glog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey) + klog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey) u.ControllerExpectationsInterface.DeletionObserved(rcKey) uids.Delete(deleteKey) } @@ -375,7 +375,7 @@ func (u *UIDTrackingControllerExpectations) DeleteExpectations(rcKey string) { u.ControllerExpectationsInterface.DeleteExpectations(rcKey) if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists { if err := u.uidStore.Delete(uidExp); err != nil { - glog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err) + klog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err) } } } @@ -581,10 +581,10 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodT } else { accessor, err := meta.Accessor(object) if err != nil { - glog.Errorf("parentObject does not have ObjectMeta, %v", err) + klog.Errorf("parentObject does not have ObjectMeta, %v", err) return nil } - glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name) + klog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name) r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name) } return nil @@ -595,7 +595,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime if err != nil { return fmt.Errorf("object does not have ObjectMeta, %v", err) } - glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, 
podID) + klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) if err := r.KubeClient.CoreV1().Pods(namespace).Delete(podID, nil); err != nil && !apierrors.IsNotFound(err) { r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err) return fmt.Errorf("unable to delete pods: %v", err) @@ -806,7 +806,7 @@ func FilterActivePods(pods []*v1.Pod) []*v1.Pod { if IsPodActive(p) { result = append(result, p) } else { - glog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v", + klog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v", p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp) } } @@ -1024,14 +1024,14 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n // indicating that the controller identified by controllerName is waiting for syncs, followed by // either a successful or failed sync. func WaitForCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) bool { - glog.Infof("Waiting for caches to sync for %s controller", controllerName) + klog.Infof("Waiting for caches to sync for %s controller", controllerName) if !cache.WaitForCacheSync(stopCh, cacheSyncs...) { utilruntime.HandleError(fmt.Errorf("Unable to sync caches for %s controller", controllerName)) return false } - glog.Infof("Caches are synced for %s controller", controllerName) + klog.Infof("Caches are synced for %s controller", controllerName) return true } diff --git a/pkg/controller/cronjob/BUILD b/pkg/controller/cronjob/BUILD index 82e659f74b4a8..f4b22f5d15fc6 100644 --- a/pkg/controller/cronjob/BUILD +++ b/pkg/controller/cronjob/BUILD @@ -34,8 +34,8 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/tools/reference:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/robfig/cron:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/cronjob/cronjob_controller.go b/pkg/controller/cronjob/cronjob_controller.go index 183645c966234..e7c0498bf4faa 100644 --- a/pkg/controller/cronjob/cronjob_controller.go +++ b/pkg/controller/cronjob/cronjob_controller.go @@ -33,7 +33,7 @@ import ( "sort" "time" - "github.com/golang/glog" + "k8s.io/klog" batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" @@ -68,7 +68,7 @@ type CronJobController struct { func NewCronJobController(kubeClient clientset.Interface) (*CronJobController, error) { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { @@ -91,11 +91,11 @@ func NewCronJobController(kubeClient clientset.Interface) (*CronJobController, e // Run the main goroutine responsible for watching and syncing jobs. func (jm *CronJobController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - glog.Infof("Starting CronJob Manager") + klog.Infof("Starting CronJob Manager") // Check things every 10 second. 
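NewCronJobController repeats the broadcaster wiring seen in the node and daemon-set controllers: StartLogging accepts any printf-shaped function, so klog.Infof drops in exactly where glog.Infof was. A self-contained sketch under that assumption; "demo-controller" and the pod are placeholders, and the real constructors also call StartRecordingToSink to ship the same events to the API server:

package main

import (
	"flag"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()

	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(klog.Infof) // mirror every event into the klog output

	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "demo-controller"})
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "default"}}
	recorder.Eventf(pod, v1.EventTypeNormal, "Demo", "event for pod %s", pod.Name)

	time.Sleep(time.Second) // the broadcaster delivers asynchronously; let it drain
	klog.Flush()
}
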
go wait.Until(jm.syncAll, 10*time.Second, stopCh) <-stopCh - glog.Infof("Shutting down CronJob Manager") + klog.Infof("Shutting down CronJob Manager") } // syncAll lists all the CronJobs and Jobs and reconciles them. @@ -110,7 +110,7 @@ func (jm *CronJobController) syncAll() { return } js := jl.Items - glog.V(4).Infof("Found %d jobs", len(js)) + klog.V(4).Infof("Found %d jobs", len(js)) sjl, err := jm.kubeClient.BatchV1beta1().CronJobs(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { @@ -118,10 +118,10 @@ func (jm *CronJobController) syncAll() { return } sjs := sjl.Items - glog.V(4).Infof("Found %d cronjobs", len(sjs)) + klog.V(4).Infof("Found %d cronjobs", len(sjs)) jobsBySj := groupJobsByParent(js) - glog.V(4).Infof("Found %d groups", len(jobsBySj)) + klog.V(4).Infof("Found %d groups", len(jobsBySj)) for _, sj := range sjs { syncOne(&sj, jobsBySj[sj.UID], time.Now(), jm.jobControl, jm.sjControl, jm.podControl, jm.recorder) @@ -170,7 +170,7 @@ func cleanupFinishedJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobContr // Update the CronJob, in case jobs were removed from the list. if _, err := sjc.UpdateStatus(sj); err != nil { nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name) - glog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err) + klog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err) } } @@ -183,11 +183,11 @@ func removeOldestJobs(sj *batchv1beta1.CronJob, js []batchv1.Job, jc jobControlI } nameForLog := fmt.Sprintf("%s/%s", sj.Namespace, sj.Name) - glog.V(4).Infof("Cleaning up %d/%d jobs from %s", numToDelete, len(js), nameForLog) + klog.V(4).Infof("Cleaning up %d/%d jobs from %s", numToDelete, len(js), nameForLog) sort.Sort(byJobStartTime(js)) for i := 0; i < numToDelete; i++ { - glog.V(4).Infof("Removing job %s from %s", js[i].Name, nameForLog) + klog.V(4).Infof("Removing job %s from %s", js[i].Name, nameForLog) deleteJob(sj, &js[i], jc, pc, recorder, "history limit reached") } } @@ -234,7 +234,7 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo updatedSJ, err := sjc.UpdateStatus(sj) if err != nil { - glog.Errorf("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err) + klog.Errorf("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err) return } *sj = *updatedSJ @@ -246,23 +246,23 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo } if sj.Spec.Suspend != nil && *sj.Spec.Suspend { - glog.V(4).Infof("Not starting job for %s because it is suspended", nameForLog) + klog.V(4).Infof("Not starting job for %s because it is suspended", nameForLog) return } times, err := getRecentUnmetScheduleTimes(*sj, now) if err != nil { recorder.Eventf(sj, v1.EventTypeWarning, "FailedNeedsStart", "Cannot determine if job needs to be started: %v", err) - glog.Errorf("Cannot determine if %s needs to be started: %v", nameForLog, err) + klog.Errorf("Cannot determine if %s needs to be started: %v", nameForLog, err) return } // TODO: handle multiple unmet start times, from oldest to newest, updating status as needed. 
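The unmet-start-time computation that syncOne relies on walks the cron schedule forward from the last scheduled time and collects every tick that is already in the past. A sketch of that arithmetic using the vendored robfig/cron parser; the function name is hypothetical, and the 100-entry cutoff mirrors the guard the controller applies so a long outage cannot queue an unbounded backlog of starts:

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron"
)

func unmetTimes(spec string, last, now time.Time) ([]time.Time, error) {
	sched, err := cron.ParseStandard(spec)
	if err != nil {
		return nil, err
	}
	var times []time.Time
	// Collect every schedule tick after `last` that has already passed.
	for t := sched.Next(last); !t.After(now); t = sched.Next(t) {
		times = append(times, t)
		if len(times) > 100 {
			return nil, fmt.Errorf("too many missed start times")
		}
	}
	return times, nil
}

func main() {
	now := time.Now()
	ts, err := unmetTimes("*/10 * * * *", now.Add(-35*time.Minute), now)
	fmt.Println(ts, err)
}
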
if len(times) == 0 { - glog.V(4).Infof("No unmet start times for %s", nameForLog) + klog.V(4).Infof("No unmet start times for %s", nameForLog) return } if len(times) > 1 { - glog.V(4).Infof("Multiple unmet start times for %s so only starting last one", nameForLog) + klog.V(4).Infof("Multiple unmet start times for %s so only starting last one", nameForLog) } scheduledTime := times[len(times)-1] @@ -271,7 +271,7 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo tooLate = scheduledTime.Add(time.Second * time.Duration(*sj.Spec.StartingDeadlineSeconds)).Before(now) } if tooLate { - glog.V(4).Infof("Missed starting window for %s", nameForLog) + klog.V(4).Infof("Missed starting window for %s", nameForLog) recorder.Eventf(sj, v1.EventTypeWarning, "MissSchedule", "Missed scheduled time to start a job: %s", scheduledTime.Format(time.RFC1123Z)) // TODO: Since we don't set LastScheduleTime when not scheduling, we are going to keep noticing // the miss every cycle. In order to avoid sending multiple events, and to avoid processing @@ -292,14 +292,14 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo // TODO: for Forbid, we could use the same name for every execution, as a lock. // With replace, we could use a name that is deterministic per execution time. // But that would mean that you could not inspect prior successes or failures of Forbid jobs. - glog.V(4).Infof("Not starting job for %s because of prior execution still running and concurrency policy is Forbid", nameForLog) + klog.V(4).Infof("Not starting job for %s because of prior execution still running and concurrency policy is Forbid", nameForLog) return } if sj.Spec.ConcurrencyPolicy == batchv1beta1.ReplaceConcurrent { for _, j := range sj.Status.Active { // TODO: this should be replaced with server side job deletion // currently this mimics JobReaper from pkg/kubectl/stop.go - glog.V(4).Infof("Deleting job %s of %s that was still running at next scheduled start time", j.Name, nameForLog) + klog.V(4).Infof("Deleting job %s of %s that was still running at next scheduled start time", j.Name, nameForLog) job, err := jc.GetJob(j.Namespace, j.Name) if err != nil { @@ -314,7 +314,7 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo jobReq, err := getJobFromTemplate(sj, scheduledTime) if err != nil { - glog.Errorf("Unable to make Job from template in %s: %v", nameForLog, err) + klog.Errorf("Unable to make Job from template in %s: %v", nameForLog, err) return } jobResp, err := jc.CreateJob(sj.Namespace, jobReq) @@ -322,7 +322,7 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo recorder.Eventf(sj, v1.EventTypeWarning, "FailedCreate", "Error creating job: %v", err) return } - glog.V(4).Infof("Created Job %s for %s", jobResp.Name, nameForLog) + klog.V(4).Infof("Created Job %s for %s", jobResp.Name, nameForLog) recorder.Eventf(sj, v1.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name) // ------------------------------------------------------------------ // @@ -338,13 +338,13 @@ func syncOne(sj *batchv1beta1.CronJob, js []batchv1.Job, now time.Time, jc jobCo // Add the just-started job to the status list. 
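getRef, called in the hunk that follows, appears to be a thin wrapper over client-go's reference helper; this sketch shows the underlying call used to build the ObjectReference that syncOne appends to sj.Status.Active (the Job name and namespace are placeholders):

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	ref "k8s.io/client-go/tools/reference"
)

func main() {
	job := &batchv1.Job{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
	// The scheme resolves the object's group/version/kind for the reference.
	r, err := ref.GetReference(scheme.Scheme, job)
	if err != nil {
		fmt.Println("could not build reference:", err)
		return
	}
	fmt.Printf("%s/%s (%s)\n", r.Namespace, r.Name, r.Kind)
}
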
ref, err := getRef(jobResp) if err != nil { - glog.V(2).Infof("Unable to make object reference for job for %s", nameForLog) + klog.V(2).Infof("Unable to make object reference for job for %s", nameForLog) } else { sj.Status.Active = append(sj.Status.Active, *ref) } sj.Status.LastScheduleTime = &metav1.Time{Time: scheduledTime} if _, err := sjc.UpdateStatus(sj); err != nil { - glog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err) + klog.Infof("Unable to update status for %s (rv = %s): %v", nameForLog, sj.ResourceVersion, err) } return @@ -378,7 +378,7 @@ func deleteJob(sj *batchv1beta1.CronJob, job *batchv1.Job, jc jobControlInterfac } errList := []error{} for _, pod := range podList.Items { - glog.V(2).Infof("CronJob controller is deleting Pod %v/%v", pod.Namespace, pod.Name) + klog.V(2).Infof("CronJob controller is deleting Pod %v/%v", pod.Namespace, pod.Name) if err := pc.DeletePod(pod.Namespace, pod.Name); err != nil { // ignores the error when the pod isn't found if !errors.IsNotFound(err) { @@ -393,7 +393,7 @@ func deleteJob(sj *batchv1beta1.CronJob, job *batchv1.Job, jc jobControlInterfac // ... the job itself... if err := jc.DeleteJob(job.Namespace, job.Name); err != nil { recorder.Eventf(sj, v1.EventTypeWarning, "FailedDelete", "Deleted job: %v", err) - glog.Errorf("Error deleting job %s from %s: %v", job.Name, nameForLog, err) + klog.Errorf("Error deleting job %s from %s: %v", job.Name, nameForLog, err) return false } // ... and its reference from active list diff --git a/pkg/controller/cronjob/utils.go b/pkg/controller/cronjob/utils.go index 4ad7ae9994ff8..19fb91c4baa68 100644 --- a/pkg/controller/cronjob/utils.go +++ b/pkg/controller/cronjob/utils.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - "github.com/golang/glog" "github.com/robfig/cron" + "k8s.io/klog" batchv1 "k8s.io/api/batch/v1" batchv1beta1 "k8s.io/api/batch/v1beta1" @@ -64,7 +64,7 @@ func getParentUIDFromJob(j batchv1.Job) (types.UID, bool) { } if controllerRef.Kind != "CronJob" { - glog.V(4).Infof("Job with non-CronJob parent, name %s namespace %s", j.Name, j.Namespace) + klog.V(4).Infof("Job with non-CronJob parent, name %s namespace %s", j.Name, j.Namespace) return types.UID(""), false } @@ -78,7 +78,7 @@ func groupJobsByParent(js []batchv1.Job) map[types.UID][]batchv1.Job { for _, job := range js { parentUID, found := getParentUIDFromJob(job) if !found { - glog.V(4).Infof("Unable to get parent uid from job %s in namespace %s", job.Name, job.Namespace) + klog.V(4).Infof("Unable to get parent uid from job %s in namespace %s", job.Name, job.Namespace) continue } jobsBySj[parentUID] = append(jobsBySj[parentUID], job) diff --git a/pkg/controller/daemon/BUILD b/pkg/controller/daemon/BUILD index a2f2ca131bb0f..1d8a3541ddb02 100644 --- a/pkg/controller/daemon/BUILD +++ b/pkg/controller/daemon/BUILD @@ -53,7 +53,7 @@ go_library( "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//staging/src/k8s.io/client-go/util/integer:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index 7f5015e105306..1b896118f7741 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -23,7 +23,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" apps "k8s.io/api/apps/v1" 
"k8s.io/api/core/v1" @@ -149,7 +149,7 @@ func NewDaemonSetsController( failedPodsBackoff *flowcontrol.Backoff, ) (*DaemonSetsController, error) { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { @@ -176,13 +176,13 @@ func NewDaemonSetsController( daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { ds := obj.(*apps.DaemonSet) - glog.V(4).Infof("Adding daemon set %s", ds.Name) + klog.V(4).Infof("Adding daemon set %s", ds.Name) dsc.enqueueDaemonSet(ds) }, UpdateFunc: func(old, cur interface{}) { oldDS := old.(*apps.DaemonSet) curDS := cur.(*apps.DaemonSet) - glog.V(4).Infof("Updating daemon set %s", oldDS.Name) + klog.V(4).Infof("Updating daemon set %s", oldDS.Name) dsc.enqueueDaemonSet(curDS) }, DeleteFunc: dsc.deleteDaemonset, @@ -257,7 +257,7 @@ func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) { return } } - glog.V(4).Infof("Deleting daemon set %s", ds.Name) + klog.V(4).Infof("Deleting daemon set %s", ds.Name) dsc.enqueueDaemonSet(ds) } @@ -266,8 +266,8 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer dsc.queue.ShutDown() - glog.Infof("Starting daemon sets controller") - defer glog.Infof("Shutting down daemon sets controller") + klog.Infof("Starting daemon sets controller") + defer klog.Infof("Shutting down daemon sets controller") if !controller.WaitForCacheSync("daemon sets", stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.historyStoreSynced, dsc.dsStoreSynced) { return @@ -363,7 +363,7 @@ func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.Controlle if len(daemonSets) > 1 { // ControllerRef will ensure we don't do anything crazy, but more than one // item in this list nevertheless constitutes user error. - glog.V(4).Infof("User error! more than one DaemonSets is selecting ControllerRevision %s/%s with labels: %#v", + klog.V(4).Infof("User error! 
more than one DaemonSets is selecting ControllerRevision %s/%s with labels: %#v", history.Namespace, history.Name, history.Labels) } return daemonSets @@ -386,7 +386,7 @@ func (dsc *DaemonSetsController) addHistory(obj interface{}) { if ds == nil { return } - glog.V(4).Infof("ControllerRevision %s added.", history.Name) + klog.V(4).Infof("ControllerRevision %s added.", history.Name) return } @@ -396,7 +396,7 @@ func (dsc *DaemonSetsController) addHistory(obj interface{}) { if len(daemonSets) == 0 { return } - glog.V(4).Infof("Orphan ControllerRevision %s added.", history.Name) + klog.V(4).Infof("Orphan ControllerRevision %s added.", history.Name) for _, ds := range daemonSets { dsc.enqueueDaemonSet(ds) } @@ -429,7 +429,7 @@ func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) { if ds == nil { return } - glog.V(4).Infof("ControllerRevision %s updated.", curHistory.Name) + klog.V(4).Infof("ControllerRevision %s updated.", curHistory.Name) dsc.enqueueDaemonSet(ds) return } @@ -442,7 +442,7 @@ func (dsc *DaemonSetsController) updateHistory(old, cur interface{}) { if len(daemonSets) == 0 { return } - glog.V(4).Infof("Orphan ControllerRevision %s updated.", curHistory.Name) + klog.V(4).Infof("Orphan ControllerRevision %s updated.", curHistory.Name) for _, ds := range daemonSets { dsc.enqueueDaemonSet(ds) } @@ -481,7 +481,7 @@ func (dsc *DaemonSetsController) deleteHistory(obj interface{}) { if ds == nil { return } - glog.V(4).Infof("ControllerRevision %s deleted.", history.Name) + klog.V(4).Infof("ControllerRevision %s deleted.", history.Name) dsc.enqueueDaemonSet(ds) } @@ -505,7 +505,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) { if err != nil { return } - glog.V(4).Infof("Pod %s added.", pod.Name) + klog.V(4).Infof("Pod %s added.", pod.Name) dsc.expectations.CreationObserved(dsKey) dsc.enqueueDaemonSet(ds) return @@ -519,7 +519,7 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) { if len(dss) == 0 { return } - glog.V(4).Infof("Orphan Pod %s added.", pod.Name) + klog.V(4).Infof("Orphan Pod %s added.", pod.Name) for _, ds := range dss { dsc.enqueueDaemonSet(ds) } @@ -553,7 +553,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) { if ds == nil { return } - glog.V(4).Infof("Pod %s updated.", curPod.Name) + klog.V(4).Infof("Pod %s updated.", curPod.Name) dsc.enqueueDaemonSet(ds) changedToReady := !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) // See https://github.com/kubernetes/kubernetes/pull/38076 for more details @@ -571,7 +571,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) { if len(dss) == 0 { return } - glog.V(4).Infof("Orphan Pod %s updated.", curPod.Name) + klog.V(4).Infof("Orphan Pod %s updated.", curPod.Name) labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels) if labelChanged || controllerRefChanged { for _, ds := range dss { @@ -602,10 +602,10 @@ func (dsc *DaemonSetsController) requeueSuspendedDaemonPods(node string) { dss := dsc.listSuspendedDaemonPods(node) for _, dsKey := range dss { if ns, name, err := cache.SplitMetaNamespaceKey(dsKey); err != nil { - glog.Errorf("Failed to get DaemonSet's namespace and name from %s: %v", dsKey, err) + klog.Errorf("Failed to get DaemonSet's namespace and name from %s: %v", dsKey, err) continue } else if ds, err := dsc.dsLister.DaemonSets(ns).Get(name); err != nil { - glog.Errorf("Failed to get DaemonSet %s/%s: %v", ns, name, err) + klog.Errorf("Failed to get DaemonSet %s/%s: %v", ns, name, err) continue } else { 
dsc.enqueueDaemonSetRateLimited(ds) @@ -682,7 +682,7 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) { if err != nil { return } - glog.V(4).Infof("Pod %s deleted.", pod.Name) + klog.V(4).Infof("Pod %s deleted.", pod.Name) dsc.expectations.DeletionObserved(dsKey) dsc.enqueueDaemonSet(ds) } @@ -691,7 +691,7 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) { // TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too). dsList, err := dsc.dsLister.List(labels.Everything()) if err != nil { - glog.V(4).Infof("Error enqueueing daemon sets: %v", err) + klog.V(4).Infof("Error enqueueing daemon sets: %v", err) return } node := obj.(*v1.Node) @@ -753,7 +753,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) { dsList, err := dsc.dsLister.List(labels.Everything()) if err != nil { - glog.V(4).Infof("Error listing daemon sets: %v", err) + klog.V(4).Infof("Error listing daemon sets: %v", err) return } // TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too). @@ -820,7 +820,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *apps.DaemonSet) (map[s for _, pod := range claimedPods { nodeName, err := util.GetTargetNodeName(pod) if err != nil { - glog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v", + klog.Warningf("Failed to get target node name of Pod %v/%v in DaemonSet %v/%v", pod.Namespace, pod.Name, ds.Namespace, ds.Name) continue } @@ -899,7 +899,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode( inBackoff := dsc.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, now) if inBackoff { delay := dsc.failedPodsBackoff.Get(backoffKey) - glog.V(4).Infof("Deleting failed pod %s/%s on node %s has been limited by backoff - %v remaining", + klog.V(4).Infof("Deleting failed pod %s/%s on node %s has been limited by backoff - %v remaining", pod.Namespace, pod.Name, node.Name, delay) dsc.enqueueDaemonSetAfter(ds, delay) continue @@ -908,7 +908,7 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode( dsc.failedPodsBackoff.Next(backoffKey, now) msg := fmt.Sprintf("Found failed daemon pod %s/%s on node %s, will try to kill it", pod.Namespace, pod.Name, node.Name) - glog.V(2).Infof(msg) + klog.V(2).Infof(msg) // Emit an event so that it's discoverable to users. dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedDaemonPodReason, msg) podsToDelete = append(podsToDelete, pod.Name) @@ -1003,7 +1003,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod // error channel to communicate back failures. make the buffer big enough to avoid any blocking errCh := make(chan error, createDiff+deleteDiff) - glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff) + klog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff) createWait := sync.WaitGroup{} // If the returned error is not nil we have a parse error. // The controller handles this via the hash. 
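The "Slow-start failure" message above comes from the batched-creation strategy: pods are created in doubling batches (1, 2, 4, ...) so that a DaemonSet that cannot create pods at all fails after a handful of API calls instead of createDiff of them. A standalone sketch of that pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

// slowStartBatch calls fn count times in doubling batches, stopping at the
// first batch that reports an error; it returns how many calls succeeded.
func slowStartBatch(count, initialBatchSize int, fn func() error) (int, error) {
	remaining := count
	successes := 0
	for batchSize := min(remaining, initialBatchSize); batchSize > 0; batchSize = min(2*batchSize, remaining) {
		errCh := make(chan error, batchSize)
		var wg sync.WaitGroup
		wg.Add(batchSize)
		for i := 0; i < batchSize; i++ {
			go func() {
				defer wg.Done()
				if err := fn(); err != nil {
					errCh <- err
				}
			}()
		}
		wg.Wait()
		successes += batchSize - len(errCh)
		if len(errCh) > 0 {
			return successes, <-errCh // skip the remaining batches, as the controller does
		}
		remaining -= batchSize
	}
	return successes, nil
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	n, err := slowStartBatch(10, 1, func() error { return nil })
	fmt.Println(n, err)
}
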
@@ -1057,7 +1057,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod return } if err != nil { - glog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name) + klog.V(2).Infof("Failed creation, decrementing expectations for set %q/%q", ds.Namespace, ds.Name) dsc.expectations.CreationObserved(dsKey) errCh <- err utilruntime.HandleError(err) @@ -1068,7 +1068,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod // any skipped pods that we never attempted to start shouldn't be expected. skippedPods := createDiff - batchSize if errorCount < len(errCh) && skippedPods > 0 { - glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for set %q/%q", skippedPods, ds.Namespace, ds.Name) + klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for set %q/%q", skippedPods, ds.Namespace, ds.Name) for i := 0; i < skippedPods; i++ { dsc.expectations.CreationObserved(dsKey) } @@ -1078,14 +1078,14 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod } } - glog.V(4).Infof("Pods to delete for daemon set %s: %+v, deleting %d", ds.Name, podsToDelete, deleteDiff) + klog.V(4).Infof("Pods to delete for daemon set %s: %+v, deleting %d", ds.Name, podsToDelete, deleteDiff) deleteWait := sync.WaitGroup{} deleteWait.Add(deleteDiff) for i := 0; i < deleteDiff; i++ { go func(ix int) { defer deleteWait.Done() if err := dsc.podControl.DeletePod(ds.Namespace, podsToDelete[ix], ds); err != nil { - glog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name) + klog.V(2).Infof("Failed deletion, decrementing expectations for set %q/%q", ds.Namespace, ds.Name) dsc.expectations.DeletionObserved(dsKey) errCh <- err utilruntime.HandleError(err) @@ -1145,7 +1145,7 @@ func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps. } func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash string, updateObservedGen bool) error { - glog.V(4).Infof("Updating daemon set status") + klog.V(4).Infof("Updating daemon set status") nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds) if err != nil { return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err) @@ -1208,7 +1208,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash func (dsc *DaemonSetsController) syncDaemonSet(key string) error { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime)) }() namespace, name, err := cache.SplitMetaNamespaceKey(key) @@ -1217,7 +1217,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error { } ds, err := dsc.dsLister.DaemonSets(namespace).Get(name) if errors.IsNotFound(err) { - glog.V(3).Infof("daemon set has been deleted %v", key) + klog.V(3).Infof("daemon set has been deleted %v", key) dsc.expectations.DeleteExpectations(key) return nil } @@ -1340,7 +1340,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps. 
reasons, nodeInfo, err := dsc.simulate(newPod, node, ds) if err != nil { - glog.Warningf("DaemonSet Predicates failed on node %s for ds '%s/%s' due to unexpected error: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err) + klog.Warningf("DaemonSet Predicates failed on node %s for ds '%s/%s' due to unexpected error: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err) return false, false, false, err } @@ -1349,7 +1349,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps. // into one result, e.g. selectedNode. var insufficientResourceErr error for _, r := range reasons { - glog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason()) + klog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason()) switch reason := r.(type) { case *predicates.InsufficientResourceError: insufficientResourceErr = reason @@ -1392,10 +1392,10 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps. case predicates.ErrPodAffinityNotMatch, predicates.ErrServiceAffinityViolated: - glog.Warningf("unexpected predicate failure reason: %s", reason.GetReason()) + klog.Warningf("unexpected predicate failure reason: %s", reason.GetReason()) return false, false, false, fmt.Errorf("unexpected reason: DaemonSet Predicates should not return reason %s", reason.GetReason()) default: - glog.V(4).Infof("unknown predicate failure reason: %s", reason.GetReason()) + klog.V(4).Infof("unknown predicate failure reason: %s", reason.GetReason()) wantToRun, shouldSchedule, shouldContinueRunning = false, false, false emitEvent = true } diff --git a/pkg/controller/daemon/update.go b/pkg/controller/daemon/update.go index 74e42cc59f64b..ac13b32fb27db 100644 --- a/pkg/controller/daemon/update.go +++ b/pkg/controller/daemon/update.go @@ -22,7 +22,7 @@ import ( "reflect" "sort" - "github.com/golang/glog" + "k8s.io/klog" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" @@ -55,23 +55,23 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, hash string) // for oldPods delete all not running pods var oldPodsToDelete []string - glog.V(4).Infof("Marking all unavailable old pods for deletion") + klog.V(4).Infof("Marking all unavailable old pods for deletion") for _, pod := range oldUnavailablePods { // Skip terminating pods. 
We won't delete them again if pod.DeletionTimestamp != nil { continue } - glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name) + klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name) oldPodsToDelete = append(oldPodsToDelete, pod.Name) } - glog.V(4).Infof("Marking old pods for deletion") + klog.V(4).Infof("Marking old pods for deletion") for _, pod := range oldAvailablePods { if numUnavailable >= maxUnavailable { - glog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable) + klog.V(4).Infof("Number of unavailable DaemonSet pods: %d, is equal to or exceeds allowed maximum: %d", numUnavailable, maxUnavailable) break } - glog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name) + klog.V(4).Infof("Marking pod %s/%s for deletion", ds.Name, pod.Name) oldPodsToDelete = append(oldPodsToDelete, pod.Name) numUnavailable++ } @@ -364,7 +364,7 @@ func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (* if updateErr != nil { return nil, updateErr } - glog.V(2).Infof("Found a hash collision for DaemonSet %q - bumping collisionCount to %d to resolve it", ds.Name, *currDS.Status.CollisionCount) + klog.V(2).Infof("Found a hash collision for DaemonSet %q - bumping collisionCount to %d to resolve it", ds.Name, *currDS.Status.CollisionCount) return nil, outerErr } return history, err @@ -393,7 +393,7 @@ func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *apps.DaemonSet, nodeToD } func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) { - glog.V(4).Infof("Getting unavailable numbers") + klog.V(4).Infof("Getting unavailable numbers") // TODO: get nodeList once in syncDaemonSet and pass it to other functions nodeList, err := dsc.nodeLister.List(labels.Everything()) if err != nil { @@ -432,7 +432,7 @@ func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeT if err != nil { return -1, -1, fmt.Errorf("Invalid value for MaxUnavailable: %v", err) } - glog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable) + klog.V(4).Infof(" DaemonSet %s/%s, maxUnavailable: %d, numUnavailable: %d", ds.Namespace, ds.Name, maxUnavailable, numUnavailable) return maxUnavailable, numUnavailable, nil } diff --git a/pkg/controller/deployment/BUILD b/pkg/controller/deployment/BUILD index 1567169a168df..7f1964a4f7e14 100644 --- a/pkg/controller/deployment/BUILD +++ b/pkg/controller/deployment/BUILD @@ -42,7 +42,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/integer:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go index 003dfb1ff23cf..702e49d89c8a5 100644 --- a/pkg/controller/deployment/deployment_controller.go +++ b/pkg/controller/deployment/deployment_controller.go @@ -25,7 +25,7 @@ import ( "reflect" "time" - "github.com/golang/glog" + "k8s.io/klog" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" @@ -99,7 +99,7 @@ type DeploymentController struct { // NewDeploymentController creates a new DeploymentController. 
diff --git a/pkg/controller/deployment/BUILD b/pkg/controller/deployment/BUILD
index 1567169a168df..7f1964a4f7e14 100644
--- a/pkg/controller/deployment/BUILD
+++ b/pkg/controller/deployment/BUILD
@@ -42,7 +42,7 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/client-go/util/integer:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go
index 003dfb1ff23cf..702e49d89c8a5 100644
--- a/pkg/controller/deployment/deployment_controller.go
+++ b/pkg/controller/deployment/deployment_controller.go
@@ -25,7 +25,7 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
@@ -99,7 +99,7 @@ type DeploymentController struct {
 
 // NewDeploymentController creates a new DeploymentController.
 func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
 	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
 
 	if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -149,8 +149,8 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer dc.queue.ShutDown()
 
-	glog.Infof("Starting deployment controller")
-	defer glog.Infof("Shutting down deployment controller")
+	klog.Infof("Starting deployment controller")
+	defer klog.Infof("Shutting down deployment controller")
 
 	if !controller.WaitForCacheSync("deployment", stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
 		return
@@ -165,14 +165,14 @@ func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
 
 func (dc *DeploymentController) addDeployment(obj interface{}) {
 	d := obj.(*apps.Deployment)
-	glog.V(4).Infof("Adding deployment %s", d.Name)
+	klog.V(4).Infof("Adding deployment %s", d.Name)
 	dc.enqueueDeployment(d)
 }
 
 func (dc *DeploymentController) updateDeployment(old, cur interface{}) {
 	oldD := old.(*apps.Deployment)
 	curD := cur.(*apps.Deployment)
-	glog.V(4).Infof("Updating deployment %s", oldD.Name)
+	klog.V(4).Infof("Updating deployment %s", oldD.Name)
 	dc.enqueueDeployment(curD)
 }
 
@@ -190,7 +190,7 @@ func (dc *DeploymentController) deleteDeployment(obj interface{}) {
 			return
 		}
 	}
-	glog.V(4).Infof("Deleting deployment %s", d.Name)
+	klog.V(4).Infof("Deleting deployment %s", d.Name)
 	dc.enqueueDeployment(d)
 }
 
@@ -211,7 +211,7 @@ func (dc *DeploymentController) addReplicaSet(obj interface{}) {
 		if d == nil {
 			return
 		}
-		glog.V(4).Infof("ReplicaSet %s added.", rs.Name)
+		klog.V(4).Infof("ReplicaSet %s added.", rs.Name)
 		dc.enqueueDeployment(d)
 		return
 	}
@@ -222,7 +222,7 @@ func (dc *DeploymentController) addReplicaSet(obj interface{}) {
 	if len(ds) == 0 {
 		return
 	}
-	glog.V(4).Infof("Orphan ReplicaSet %s added.", rs.Name)
+	klog.V(4).Infof("Orphan ReplicaSet %s added.", rs.Name)
 	for _, d := range ds {
 		dc.enqueueDeployment(d)
 	}
@@ -242,7 +242,7 @@ func (dc *DeploymentController) getDeploymentsForReplicaSet(rs *apps.ReplicaSet)
 	if len(deployments) > 1 {
 		// ControllerRef will ensure we don't do anything crazy, but more than one
 		// item in this list nevertheless constitutes user error.
-		glog.V(4).Infof("user error! more than one deployment is selecting replica set %s/%s with labels: %#v, returning %s/%s",
+		klog.V(4).Infof("user error! more than one deployment is selecting replica set %s/%s with labels: %#v, returning %s/%s",
 			rs.Namespace, rs.Name, rs.Labels, deployments[0].Namespace, deployments[0].Name)
 	}
 	return deployments
@@ -277,7 +277,7 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
 		if d == nil {
 			return
 		}
-		glog.V(4).Infof("ReplicaSet %s updated.", curRS.Name)
+		klog.V(4).Infof("ReplicaSet %s updated.", curRS.Name)
 		dc.enqueueDeployment(d)
 		return
 	}
@@ -290,7 +290,7 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
 	if len(ds) == 0 {
 		return
 	}
-	glog.V(4).Infof("Orphan ReplicaSet %s updated.", curRS.Name)
+	klog.V(4).Infof("Orphan ReplicaSet %s updated.", curRS.Name)
 	for _, d := range ds {
 		dc.enqueueDeployment(d)
 	}
@@ -329,7 +329,7 @@ func (dc *DeploymentController) deleteReplicaSet(obj interface{}) {
 	if d == nil {
 		return
 	}
-	glog.V(4).Infof("ReplicaSet %s deleted.", rs.Name)
+	klog.V(4).Infof("ReplicaSet %s deleted.", rs.Name)
 	dc.enqueueDeployment(d)
 }
 
@@ -353,7 +353,7 @@ func (dc *DeploymentController) deletePod(obj interface{}) {
 			return
 		}
 	}
-	glog.V(4).Infof("Pod %s deleted.", pod.Name)
+	klog.V(4).Infof("Pod %s deleted.", pod.Name)
 	if d := dc.getDeploymentForPod(pod); d != nil && d.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType {
 		// Sync if this Deployment now has no more Pods.
 		rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.AppsV1()))
@@ -421,7 +421,7 @@ func (dc *DeploymentController) getDeploymentForPod(pod *v1.Pod) *apps.Deploymen
 	}
 	rs, err = dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
 	if err != nil || rs.UID != controllerRef.UID {
-		glog.V(4).Infof("Cannot get replicaset %q for pod %q: %v", controllerRef.Name, pod.Name, err)
+		klog.V(4).Infof("Cannot get replicaset %q for pod %q: %v", controllerRef.Name, pod.Name, err)
 		return nil
 	}
 
@@ -481,13 +481,13 @@ func (dc *DeploymentController) handleErr(err error, key interface{}) {
 	}
 
 	if dc.queue.NumRequeues(key) < maxRetries {
-		glog.V(2).Infof("Error syncing deployment %v: %v", key, err)
+		klog.V(2).Infof("Error syncing deployment %v: %v", key, err)
 		dc.queue.AddRateLimited(key)
 		return
 	}
 
 	utilruntime.HandleError(err)
-	glog.V(2).Infof("Dropping deployment %q out of the queue: %v", key, err)
+	klog.V(2).Infof("Dropping deployment %q out of the queue: %v", key, err)
 	dc.queue.Forget(key)
 }
 
@@ -559,9 +559,9 @@ func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsLis
 
 // This function is not meant to be invoked concurrently with the same key.
 func (dc *DeploymentController) syncDeployment(key string) error {
 	startTime := time.Now()
-	glog.V(4).Infof("Started syncing deployment %q (%v)", key, startTime)
+	klog.V(4).Infof("Started syncing deployment %q (%v)", key, startTime)
 	defer func() {
-		glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Since(startTime))
+		klog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -570,7 +570,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
 	}
 	deployment, err := dc.dLister.Deployments(namespace).Get(name)
 	if errors.IsNotFound(err) {
-		glog.V(2).Infof("Deployment %v has been deleted", key)
+		klog.V(2).Infof("Deployment %v has been deleted", key)
 		return nil
 	}
 	if err != nil {
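The NewDeploymentController hunk above also swaps the logger handed to the event broadcaster. StartLogging accepts any func(format string, args ...interface{}), and klog.Infof has exactly the shape glog.Infof had, so no adapter is needed. A small sketch under that assumption (the component name and event are illustrative):

package main

import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
)

func main() {
	broadcaster := record.NewBroadcaster()
	// StartLogging takes a printf-style function; klog.Infof satisfies
	// the signature just as glog.Infof did, making this a one-token swap.
	broadcaster.StartLogging(klog.Infof)

	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
	recorder.Event(&v1.Pod{}, v1.EventTypeNormal, "Example", "event mirrored into klog")
	klog.Flush()
}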
diff --git a/pkg/controller/deployment/progress.go b/pkg/controller/deployment/progress.go
index 601359c79c4e2..e340a5be89d83 100644
--- a/pkg/controller/deployment/progress.go
+++ b/pkg/controller/deployment/progress.go
@@ -21,7 +21,7 @@ import (
 	"reflect"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
@@ -186,11 +186,11 @@ func (dc *DeploymentController) requeueStuckDeployment(d *apps.Deployment, newSt
 	// Make it ratelimited so we stay on the safe side, eventually the Deployment should
 	// transition either to a Complete or to a TimedOut condition.
 	if after < time.Second {
-		glog.V(4).Infof("Queueing up deployment %q for a progress check now", d.Name)
+		klog.V(4).Infof("Queueing up deployment %q for a progress check now", d.Name)
 		dc.enqueueRateLimited(d)
 		return time.Duration(0)
 	}
-	glog.V(4).Infof("Queueing up deployment %q for a progress check after %ds", d.Name, int(after.Seconds()))
+	klog.V(4).Infof("Queueing up deployment %q for a progress check after %ds", d.Name, int(after.Seconds()))
 	// Add a second to avoid milliseconds skew in AddAfter.
 	// See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info.
 	dc.enqueueAfter(d, after+time.Second)
diff --git a/pkg/controller/deployment/rollback.go b/pkg/controller/deployment/rollback.go
index 74396efdc3b16..6593630af41fc 100644
--- a/pkg/controller/deployment/rollback.go
+++ b/pkg/controller/deployment/rollback.go
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"strconv"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
@@ -49,11 +49,11 @@ func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.Repl
 	for _, rs := range allRSs {
 		v, err := deploymentutil.Revision(rs)
 		if err != nil {
-			glog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
+			klog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
 			continue
 		}
 		if v == rollbackTo.Revision {
-			glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
+			klog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
 			// rollback by copying podTemplate.Spec from the replica set
 			// revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call
 			// no-op if the spec matches current deployment's podTemplate.Spec
@@ -75,7 +75,7 @@ func (dc *DeploymentController) rollback(d *apps.Deployment, rsList []*apps.Repl
 func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.ReplicaSet) (bool, error) {
 	performedRollback := false
 	if !deploymentutil.EqualIgnoreHash(&d.Spec.Template, &rs.Spec.Template) {
-		glog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
+		klog.V(4).Infof("Rolling back deployment %q to template spec %+v", d.Name, rs.Spec.Template.Spec)
 		deploymentutil.SetFromReplicaSetTemplate(d, rs.Spec.Template)
 		// set RS (the old RS we're rolling back to) annotations back to the deployment;
 		// otherwise, the deployment's current annotations (should be the same as current new RS) will be copied to the RS after the rollback.
@@ -91,7 +91,7 @@ func (dc *DeploymentController) rollbackToTemplate(d *apps.Deployment, rs *apps.
 		deploymentutil.SetDeploymentAnnotationsTo(d, rs)
 		performedRollback = true
 	} else {
-		glog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %q, skipping rollback...", d.Name)
+		klog.V(4).Infof("Rolling back to a revision that contains the same template as current deployment %q, skipping rollback...", d.Name)
 		eventMsg := fmt.Sprintf("The rollback revision contains the same template as current deployment %q", d.Name)
 		dc.emitRollbackWarningEvent(d, deploymentutil.RollbackTemplateUnchanged, eventMsg)
 	}
@@ -111,7 +111,7 @@ func (dc *DeploymentController) emitRollbackNormalEvent(d *apps.Deployment, mess
 // It is assumed that the caller will have updated the deployment template appropriately (in case
 // we want to rollback).
 func (dc *DeploymentController) updateDeploymentAndClearRollbackTo(d *apps.Deployment) error {
-	glog.V(4).Infof("Cleans up rollbackTo of deployment %q", d.Name)
+	klog.V(4).Infof("Cleaning up rollbackTo of deployment %q", d.Name)
 	setRollbackTo(d, nil)
 	_, err := dc.client.AppsV1().Deployments(d.Namespace).Update(d)
 	return err
diff --git a/pkg/controller/deployment/rolling.go b/pkg/controller/deployment/rolling.go
index 9dc77331b6133..6950c3e67983f 100644
--- a/pkg/controller/deployment/rolling.go
+++ b/pkg/controller/deployment/rolling.go
@@ -20,9 +20,9 @@ import (
 	"fmt"
 	"sort"
 
-	"github.com/golang/glog"
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/client-go/util/integer"
+	"k8s.io/klog"
 
 	"k8s.io/kubernetes/pkg/controller"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 )
@@ -91,7 +91,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
 	}
 
 	allPodsCount := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
-	glog.V(4).Infof("New replica set %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRS.Status.AvailableReplicas)
+	klog.V(4).Infof("New replica set %s/%s has %d available pods.", newRS.Namespace, newRS.Name, newRS.Status.AvailableReplicas)
 	maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
 
 	// Check if we can scale down. We can scale down in the following 2 cases:
@@ -137,7 +137,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
 	if err != nil {
 		return false, nil
 	}
-	glog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount)
+	klog.V(4).Infof("Cleaned up unhealthy replicas from old RSes by %d", cleanupCount)
 
 	// Scale down old replica sets, need to check maxUnavailable to ensure we can scale down
 	allRSs = append(oldRSs, newRS)
@@ -145,7 +145,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*apps.ReplicaSe
 	if err != nil {
 		return false, nil
 	}
-	glog.V(4).Infof("Scaled down old RSes of deployment %s by %d", deployment.Name, scaledDownCount)
+	klog.V(4).Infof("Scaled down old RSes of deployment %s by %d", deployment.Name, scaledDownCount)
 
 	totalScaledDown := cleanupCount + scaledDownCount
 	return totalScaledDown > 0, nil
@@ -166,7 +166,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*apps.ReplicaS
 			// cannot scale down this replica set.
 			continue
 		}
-		glog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
+		klog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
 		if *(targetRS.Spec.Replicas) == targetRS.Status.AvailableReplicas {
 			// no unhealthy replicas found, no scaling required.
 			continue
@@ -200,7 +200,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
 		// Cannot scale down.
 		return 0, nil
 	}
-	glog.V(4).Infof("Found %d available pods in deployment %s, scaling down old RSes", availablePodCount, deployment.Name)
+	klog.V(4).Infof("Found %d available pods in deployment %s, scaling down old RSes", availablePodCount, deployment.Name)
 
 	sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go
index 7f2ed0d601627..b59aec417db9c 100644
--- a/pkg/controller/deployment/sync.go
+++ b/pkg/controller/deployment/sync.go
@@ -22,11 +22,11 @@ import (
 	"sort"
 	"strconv"
 
-	"github.com/golang/glog"
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog"
 
 	"k8s.io/kubernetes/pkg/controller"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	labelsutil "k8s.io/kubernetes/pkg/util/labels"
@@ -248,7 +248,7 @@ func (dc *DeploymentController) getNewReplicaSet(d *apps.Deployment, rsList, old
 		// error.
 		_, dErr := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(d)
 		if dErr == nil {
-			glog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
+			klog.V(2).Infof("Found a hash collision for deployment %q - bumping collisionCount (%d->%d) to resolve it", d.Name, preCollisionCount, *d.Status.CollisionCount)
 		}
 		return nil, err
 	case err != nil:
@@ -440,7 +440,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
 	}
 
 	sort.Sort(controller.ReplicaSetsByCreationTimestamp(cleanableRSes))
-	glog.V(4).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name)
+	klog.V(4).Infof("Looking to cleanup old replica sets for deployment %q", deployment.Name)
 
 	for i := int32(0); i < diff; i++ {
 		rs := cleanableRSes[i]
@@ -448,7 +448,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*apps.ReplicaSet, dep
 		if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration || rs.DeletionTimestamp != nil {
 			continue
 		}
-		glog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
+		klog.V(4).Infof("Trying to cleanup replica set %q for deployment %q", rs.Name, deployment.Name)
 		if err := dc.client.AppsV1().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
 			// Return error instead of aggregating and continuing DELETEs on the theory
 			// that we may be overloading the api server.
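handleErr and requeueStuckDeployment above show the requeue idiom these controllers share: retry a key with per-key exponential backoff until a retry budget is exhausted, then drop it. A self-contained sketch of that pattern (the retry budget and key are illustrative):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"
)

const maxRetries = 15 // illustrative budget

// handleErr mirrors the controller pattern: forget on success, rate-limited
// requeue while under budget, drop and log once the budget is spent.
func handleErr(queue workqueue.RateLimitingInterface, err error, key interface{}) {
	if err == nil {
		queue.Forget(key) // success resets this key's backoff
		return
	}
	if queue.NumRequeues(key) < maxRetries {
		klog.V(2).Infof("Error syncing %v, retrying: %v", key, err)
		queue.AddRateLimited(key) // exponential backoff per key
		return
	}
	klog.V(2).Infof("Dropping %v out of the queue: %v", key, err)
	queue.Forget(key)
}

func main() {
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	handleErr(q, fmt.Errorf("transient failure"), "default/nginx")
}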
diff --git a/pkg/controller/deployment/util/BUILD b/pkg/controller/deployment/util/BUILD
index faba8b5e1a605..2b563e00ae27a 100644
--- a/pkg/controller/deployment/util/BUILD
+++ b/pkg/controller/deployment/util/BUILD
@@ -24,7 +24,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
         "//staging/src/k8s.io/client-go/util/integer:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go
index acdb472746de5..00e5c4c36e617 100644
--- a/pkg/controller/deployment/util/deployment_util.go
+++ b/pkg/controller/deployment/util/deployment_util.go
@@ -24,7 +24,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
@@ -186,7 +186,7 @@ func MaxRevision(allRSs []*apps.ReplicaSet) int64 {
 	for _, rs := range allRSs {
 		if v, err := Revision(rs); err != nil {
 			// Skip the replica sets when we fail to parse their revision information
-			glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
+			klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
 		} else if v > max {
 			max = v
 		}
@@ -200,7 +200,7 @@ func LastRevision(allRSs []*apps.ReplicaSet) int64 {
 	for _, rs := range allRSs {
 		if v, err := Revision(rs); err != nil {
 			// Skip the replica sets when we fail to parse their revision information
-			glog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
+			klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
 		} else if v >= max {
 			secMax = max
 			max = v
@@ -241,7 +241,7 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
 	oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64)
 	if err != nil {
 		if oldRevision != "" {
-			glog.Warningf("Updating replica set revision OldRevision not int %s", err)
+			klog.Warningf("Updating replica set revision OldRevision not int %s", err)
 			return false
 		}
 		// If the RS annotation is empty then initialise it to 0
@@ -249,13 +249,13 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
 	}
 	newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64)
 	if err != nil {
-		glog.Warningf("Updating replica set revision NewRevision not int %s", err)
+		klog.Warningf("Updating replica set revision NewRevision not int %s", err)
 		return false
 	}
 	if oldRevisionInt < newRevisionInt {
 		newRS.Annotations[RevisionAnnotation] = newRevision
 		annotationChanged = true
-		glog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
+		klog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
 	}
 	// If a revision annotation already existed and this replica set was updated with a new revision
 	// then that means we are rolling back to this replica set. We need to preserve the old revisions
@@ -376,7 +376,7 @@ func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, boo
 	}
 	intValue, err := strconv.Atoi(annotationValue)
 	if err != nil {
-		glog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name)
+		klog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name)
 		return int32(0), false
 	}
 	return int32(intValue), true
@@ -787,7 +787,7 @@ func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentS
 	delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second
 	timedOut := from.Add(delta).Before(now)
 
-	glog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now)
+	klog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now)
 	return timedOut
 }
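SetNewReplicaSetAnnotations above treats a malformed revision annotation as a warning, not a hard error. A minimal sketch of that parse-and-warn pattern (the helper and its name are illustrative, though the annotation key matches the one the deployment controller uses):

package main

import (
	"strconv"

	"k8s.io/klog"
)

// RevisionAnnotation mirrors the key the deployment controller stamps on
// its replica sets.
const RevisionAnnotation = "deployment.kubernetes.io/revision"

// parseRevision returns (0, false) when the annotation is missing or
// malformed, logging a warning the way the hunks above do.
func parseRevision(annotations map[string]string) (int64, bool) {
	raw, ok := annotations[RevisionAnnotation]
	if !ok {
		return 0, false
	}
	v, err := strconv.ParseInt(raw, 10, 64)
	if err != nil {
		klog.Warningf("revision annotation %q is not an int: %v", raw, err)
		return 0, false
	}
	return v, true
}

func main() {
	if v, ok := parseRevision(map[string]string{RevisionAnnotation: "3"}); ok {
		klog.Infof("revision: %d", v)
	}
	klog.Flush()
}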
diff --git a/pkg/controller/disruption/BUILD b/pkg/controller/disruption/BUILD
index 37491c7227af7..440966beca991 100644
--- a/pkg/controller/disruption/BUILD
+++ b/pkg/controller/disruption/BUILD
@@ -38,7 +38,7 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go
index 12592d2b6274d..8c45cc060114b 100644
--- a/pkg/controller/disruption/disruption.go
+++ b/pkg/controller/disruption/disruption.go
@@ -49,7 +49,7 @@ import (
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/controller"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 const statusUpdateRetries = 2
@@ -285,18 +285,18 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer dc.queue.ShutDown()
 
-	glog.Infof("Starting disruption controller")
-	defer glog.Infof("Shutting down disruption controller")
+	klog.Infof("Starting disruption controller")
+	defer klog.Infof("Shutting down disruption controller")
 
 	if !controller.WaitForCacheSync("disruption", stopCh, dc.podListerSynced, dc.pdbListerSynced, dc.rcListerSynced, dc.rsListerSynced, dc.dListerSynced, dc.ssListerSynced) {
 		return
 	}
 
 	if dc.kubeClient != nil {
-		glog.Infof("Sending events to api server.")
+		klog.Infof("Sending events to api server.")
 		dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.kubeClient.CoreV1().Events("")})
 	} else {
-		glog.Infof("No api server defined - no events will be sent to API server.")
+		klog.Infof("No api server defined - no events will be sent to API server.")
 	}
 	go wait.Until(dc.worker, time.Second, stopCh)
 	go wait.Until(dc.recheckWorker, time.Second, stopCh)
@@ -306,44 +306,44 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
 
 func (dc *DisruptionController) addDb(obj interface{}) {
 	pdb := obj.(*policy.PodDisruptionBudget)
-	glog.V(4).Infof("add DB %q", pdb.Name)
+	klog.V(4).Infof("add DB %q", pdb.Name)
 	dc.enqueuePdb(pdb)
 }
 
 func (dc *DisruptionController) updateDb(old, cur interface{}) {
 	// TODO(mml) ignore updates where 'old' is equivalent to 'cur'.
 	pdb := cur.(*policy.PodDisruptionBudget)
-	glog.V(4).Infof("update DB %q", pdb.Name)
+	klog.V(4).Infof("update DB %q", pdb.Name)
 	dc.enqueuePdb(pdb)
 }
 
 func (dc *DisruptionController) removeDb(obj interface{}) {
 	pdb := obj.(*policy.PodDisruptionBudget)
-	glog.V(4).Infof("remove DB %q", pdb.Name)
+	klog.V(4).Infof("remove DB %q", pdb.Name)
 	dc.enqueuePdb(pdb)
 }
 
 func (dc *DisruptionController) addPod(obj interface{}) {
 	pod := obj.(*v1.Pod)
-	glog.V(4).Infof("addPod called on pod %q", pod.Name)
+	klog.V(4).Infof("addPod called on pod %q", pod.Name)
 	pdb := dc.getPdbForPod(pod)
 	if pdb == nil {
-		glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
+		klog.V(4).Infof("No matching pdb for pod %q", pod.Name)
 		return
 	}
-	glog.V(4).Infof("addPod %q -> PDB %q", pod.Name, pdb.Name)
+	klog.V(4).Infof("addPod %q -> PDB %q", pod.Name, pdb.Name)
 	dc.enqueuePdb(pdb)
 }
 
 func (dc *DisruptionController) updatePod(old, cur interface{}) {
 	pod := cur.(*v1.Pod)
-	glog.V(4).Infof("updatePod called on pod %q", pod.Name)
+	klog.V(4).Infof("updatePod called on pod %q", pod.Name)
 	pdb := dc.getPdbForPod(pod)
 	if pdb == nil {
-		glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
+		klog.V(4).Infof("No matching pdb for pod %q", pod.Name)
 		return
 	}
-	glog.V(4).Infof("updatePod %q -> PDB %q", pod.Name, pdb.Name)
+	klog.V(4).Infof("updatePod %q -> PDB %q", pod.Name, pdb.Name)
 	dc.enqueuePdb(pdb)
 }
 
@@ -357,29 +357,29 @@ func (dc *DisruptionController) deletePod(obj interface{}) {
 	if !ok {
 		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 		if !ok {
-			glog.Errorf("Couldn't get object from tombstone %+v", obj)
+			klog.Errorf("Couldn't get object from tombstone %+v", obj)
 			return
 		}
 		pod, ok = tombstone.Obj.(*v1.Pod)
 		if !ok {
-			glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
+			klog.Errorf("Tombstone contained object that is not a pod %+v", obj)
 			return
 		}
 	}
-	glog.V(4).Infof("deletePod called on pod %q", pod.Name)
+	klog.V(4).Infof("deletePod called on pod %q", pod.Name)
 	pdb := dc.getPdbForPod(pod)
 	if pdb == nil {
-		glog.V(4).Infof("No matching pdb for pod %q", pod.Name)
+		klog.V(4).Infof("No matching pdb for pod %q", pod.Name)
 		return
 	}
-	glog.V(4).Infof("deletePod %q -> PDB %q", pod.Name, pdb.Name)
+	klog.V(4).Infof("deletePod %q -> PDB %q", pod.Name, pdb.Name)
 	dc.enqueuePdb(pdb)
 }
 
 func (dc *DisruptionController) enqueuePdb(pdb *policy.PodDisruptionBudget) {
 	key, err := controller.KeyFunc(pdb)
 	if err != nil {
-		glog.Errorf("Couldn't get key for PodDisruptionBudget object %+v: %v", pdb, err)
+		klog.Errorf("Couldn't get key for PodDisruptionBudget object %+v: %v", pdb, err)
 		return
 	}
 	dc.queue.Add(key)
@@ -388,7 +388,7 @@ func (dc *DisruptionController) enqueuePdb(pdb *policy.PodDisruptionBudget) {
 func (dc *DisruptionController) enqueuePdbForRecheck(pdb *policy.PodDisruptionBudget, delay time.Duration) {
 	key, err := controller.KeyFunc(pdb)
 	if err != nil {
-		glog.Errorf("Couldn't get key for PodDisruptionBudget object %+v: %v", pdb, err)
+		klog.Errorf("Couldn't get key for PodDisruptionBudget object %+v: %v", pdb, err)
 		return
 	}
 	dc.recheckQueue.AddAfter(key, delay)
@@ -400,13 +400,13 @@ func (dc *DisruptionController) getPdbForPod(pod *v1.Pod) *policy.PodDisruptionB
 	// caller.
 	pdbs, err := dc.pdbLister.GetPodPodDisruptionBudgets(pod)
 	if err != nil {
-		glog.V(4).Infof("No PodDisruptionBudgets found for pod %v, PodDisruptionBudget controller will avoid syncing.", pod.Name)
+		klog.V(4).Infof("No PodDisruptionBudgets found for pod %v, PodDisruptionBudget controller will avoid syncing.", pod.Name)
 		return nil
 	}
 
 	if len(pdbs) > 1 {
 		msg := fmt.Sprintf("Pod %q/%q matches multiple PodDisruptionBudgets. Chose %q arbitrarily.", pod.Namespace, pod.Name, pdbs[0].Name)
-		glog.Warning(msg)
+		klog.Warning(msg)
 		dc.recorder.Event(pod, v1.EventTypeWarning, "MultiplePodDisruptionBudgets", msg)
 	}
 	return pdbs[0]
@@ -471,7 +471,7 @@ func (dc *DisruptionController) processNextRecheckWorkItem() bool {
 func (dc *DisruptionController) sync(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Since(startTime))
+		klog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -480,7 +480,7 @@ func (dc *DisruptionController) sync(key string) error {
 	}
 	pdb, err := dc.pdbLister.PodDisruptionBudgets(namespace).Get(name)
 	if errors.IsNotFound(err) {
-		glog.V(4).Infof("PodDisruptionBudget %q has been deleted", key)
+		klog.V(4).Infof("PodDisruptionBudget %q has been deleted", key)
 		return nil
 	}
 	if err != nil {
@@ -488,7 +488,7 @@ func (dc *DisruptionController) sync(key string) error {
 	}
 
 	if err := dc.trySync(pdb); err != nil {
-		glog.Errorf("Failed to sync pdb %s/%s: %v", pdb.Namespace, pdb.Name, err)
+		klog.Errorf("Failed to sync pdb %s/%s: %v", pdb.Namespace, pdb.Name, err)
 		return dc.failSafe(pdb)
 	}
 
@@ -656,7 +656,7 @@ func (dc *DisruptionController) buildDisruptedPodMap(pods []*v1.Pod, pdb *policy
 		}
 		expectedDeletion := disruptionTime.Time.Add(DeletionTimeout)
 		if expectedDeletion.Before(currentTime) {
-			glog.V(1).Infof("Pod %s/%s was expected to be deleted at %s but it wasn't, updating pdb %s/%s",
+			klog.V(1).Infof("Pod %s/%s was expected to be deleted at %s but it wasn't, updating pdb %s/%s",
 				pod.Namespace, pod.Name, disruptionTime.String(), pdb.Namespace, pdb.Name)
 			dc.recorder.Eventf(pod, v1.EventTypeWarning, "NotDeleted", "Pod was expected by PDB %s/%s to be deleted but it wasn't",
 				pdb.Namespace, pdb.Namespace)
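The disruption controller's deletePod above is careful to unwrap cache.DeletedFinalStateUnknown tombstones, which informers deliver when a deletion was observed late and the object's final state is unknown. The same unwrapping in a standalone sketch:

package main

import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog"
)

// podFromDeleteEvent unwraps the object handed to a delete handler: it may
// be the Pod itself or a DeletedFinalStateUnknown tombstone wrapping it.
func podFromDeleteEvent(obj interface{}) *v1.Pod {
	if pod, ok := obj.(*v1.Pod); ok {
		return pod
	}
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		klog.Errorf("couldn't get object from tombstone %+v", obj)
		return nil
	}
	pod, ok := tombstone.Obj.(*v1.Pod)
	if !ok {
		klog.Errorf("tombstone contained object that is not a pod %+v", obj)
		return nil
	}
	return pod
}

func main() {
	pod := podFromDeleteEvent(cache.DeletedFinalStateUnknown{Key: "ns/name", Obj: &v1.Pod{}})
	klog.Infof("recovered pod from tombstone: %v", pod != nil)
	klog.Flush()
}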
diff --git a/pkg/controller/endpoint/BUILD b/pkg/controller/endpoint/BUILD
index 400da5a208659..0ff21c914a809 100644
--- a/pkg/controller/endpoint/BUILD
+++ b/pkg/controller/endpoint/BUILD
@@ -33,7 +33,7 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go
index 61c0f9205478d..403bd9c0eeebe 100644
--- a/pkg/controller/endpoint/endpoints_controller.go
+++ b/pkg/controller/endpoint/endpoints_controller.go
@@ -42,7 +42,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/metrics"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 const (
@@ -146,8 +146,8 @@ func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer e.queue.ShutDown()
 
-	glog.Infof("Starting endpoint controller")
-	defer glog.Infof("Shutting down endpoint controller")
+	klog.Infof("Starting endpoint controller")
+	defer klog.Infof("Shutting down endpoint controller")
 
 	if !controller.WaitForCacheSync("endpoint", stopCh, e.podsSynced, e.servicesSynced, e.endpointsSynced) {
 		return
@@ -324,7 +324,7 @@ func (e *EndpointController) deletePod(obj interface{}) {
 		utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a Pod: %#v", obj))
 		return
 	}
-	glog.V(4).Infof("Enqueuing services of deleted pod %s/%s having final state unrecorded", pod.Namespace, pod.Name)
+	klog.V(4).Infof("Enqueuing services of deleted pod %s/%s having final state unrecorded", pod.Namespace, pod.Name)
 	e.addPod(pod)
 }
 
@@ -368,12 +368,12 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
 	}
 
 	if e.queue.NumRequeues(key) < maxRetries {
-		glog.V(2).Infof("Error syncing endpoints for service %q, retrying. Error: %v", key, err)
+		klog.V(2).Infof("Error syncing endpoints for service %q, retrying. Error: %v", key, err)
 		e.queue.AddRateLimited(key)
 		return
 	}
 
-	glog.Warningf("Dropping service %q out of the queue: %v", key, err)
+	klog.Warningf("Dropping service %q out of the queue: %v", key, err)
 	e.queue.Forget(key)
 	utilruntime.HandleError(err)
 }
@@ -381,7 +381,7 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
 func (e *EndpointController) syncService(key string) error {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
+		klog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -408,7 +408,7 @@ func (e *EndpointController) syncService(key string) error {
 		return nil
 	}
 
-	glog.V(5).Infof("About to update endpoints for service %q", key)
+	klog.V(5).Infof("About to update endpoints for service %q", key)
 	pods, err := e.podLister.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelectorPreValidated())
 	if err != nil {
 		// Since we're getting stuff from a local cache, it is
@@ -433,11 +433,11 @@ func (e *EndpointController) syncService(key string) error {
 
 	for _, pod := range pods {
 		if len(pod.Status.PodIP) == 0 {
-			glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
+			klog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
 			continue
 		}
 		if !tolerateUnreadyEndpoints && pod.DeletionTimestamp != nil {
-			glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
+			klog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
 			continue
 		}
 
@@ -462,7 +462,7 @@ func (e *EndpointController) syncService(key string) error {
 			portProto := servicePort.Protocol
 			portNum, err := podutil.FindPort(pod, servicePort)
 			if err != nil {
-				glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
+				klog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
 				continue
 			}
 
@@ -496,7 +496,7 @@ func (e *EndpointController) syncService(key string) error {
 	if !createEndpoints &&
 		apiequality.Semantic.DeepEqual(currentEndpoints.Subsets, subsets) &&
 		apiequality.Semantic.DeepEqual(currentEndpoints.Labels, service.Labels) {
-		glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
+		klog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
 		return nil
 	}
 	newEndpoints := currentEndpoints.DeepCopy()
@@ -506,7 +506,7 @@ func (e *EndpointController) syncService(key string) error {
 		newEndpoints.Annotations = make(map[string]string)
 	}
 
-	glog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
+	klog.V(4).Infof("Update endpoints for %v/%v, ready: %d not ready: %d", service.Namespace, service.Name, totalReadyEps, totalNotReadyEps)
 	if createEndpoints {
 		// No previous endpoints, create them
 		_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(newEndpoints)
@@ -520,7 +520,7 @@ func (e *EndpointController) syncService(key string) error {
 			// 1. namespace is terminating, endpoint creation is not allowed by default.
 			// 2. policy is misconfigured, in which case no service would function anywhere.
 			// Given the frequency of 1, we log at a lower level.
-			glog.V(5).Infof("Forbidden from creating endpoints: %v", err)
+			klog.V(5).Infof("Forbidden from creating endpoints: %v", err)
 		}
 		return err
 	}
@@ -572,7 +572,7 @@ func addEndpointSubset(subsets []v1.EndpointSubset, pod *v1.Pod, epa v1.Endpoint
 		})
 		readyEps++
 	} else if shouldPodBeInEndpoints(pod) {
-		glog.V(5).Infof("Pod is out of service: %s/%s", pod.Namespace, pod.Name)
+		klog.V(5).Infof("Pod is out of service: %s/%s", pod.Namespace, pod.Name)
 		subsets = append(subsets, v1.EndpointSubset{
 			NotReadyAddresses: []v1.EndpointAddress{epa},
 			Ports:             ports,
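syncService, like syncDeployment and the PDB sync above, brackets each pass with a start time and a deferred klog.V(4) duration log, so the timing is reported however the sync returns. The idiom in isolation:

package main

import (
	"time"

	"k8s.io/klog"
)

// syncWithTiming mirrors the timing idiom in the sync functions above:
// capture a start time, then log the elapsed duration on the way out.
func syncWithTiming(key string, sync func(string) error) error {
	startTime := time.Now()
	defer func() {
		klog.V(4).Infof("Finished syncing %q (%v)", key, time.Since(startTime))
	}()
	return sync(key)
}

func main() {
	_ = syncWithTiming("default/my-service", func(string) error {
		time.Sleep(10 * time.Millisecond) // stand-in for real work
		return nil
	})
	klog.Flush()
}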
diff --git a/pkg/controller/garbagecollector/BUILD b/pkg/controller/garbagecollector/BUILD
index 00fcc6433ce19..c1c7f93abb022 100644
--- a/pkg/controller/garbagecollector/BUILD
+++ b/pkg/controller/garbagecollector/BUILD
@@ -43,12 +43,12 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/util/retry:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/golang/groupcache/lru:go_default_library",
         "//vendor/gonum.org/v1/gonum/graph:go_default_library",
         "//vendor/gonum.org/v1/gonum/graph/encoding:go_default_library",
         "//vendor/gonum.org/v1/gonum/graph/encoding/dot:go_default_library",
         "//vendor/gonum.org/v1/gonum/graph/simple:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/garbagecollector/garbagecollector.go b/pkg/controller/garbagecollector/garbagecollector.go
index 5f1fff3effce2..e090a84fb57a2 100644
--- a/pkg/controller/garbagecollector/garbagecollector.go
+++ b/pkg/controller/garbagecollector/garbagecollector.go
@@ -22,7 +22,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -130,8 +130,8 @@ func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
 	defer gc.attemptToOrphan.ShutDown()
 	defer gc.dependencyGraphBuilder.graphChanges.ShutDown()
 
-	glog.Infof("Starting garbage collector controller")
-	defer glog.Infof("Shutting down garbage collector controller")
+	klog.Infof("Starting garbage collector controller")
+	defer klog.Infof("Shutting down garbage collector controller")
 
 	go gc.dependencyGraphBuilder.Run(stopCh)
 
@@ -139,7 +139,7 @@ func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
 		return
 	}
 
-	glog.Infof("Garbage collector: all resource monitors have synced. Proceeding to collect garbage")
+	klog.Infof("Garbage collector: all resource monitors have synced. Proceeding to collect garbage")
 
 	// gc workers
 	for i := 0; i < workers; i++ {
@@ -172,13 +172,13 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterf
 
 		// This can occur if there is an internal error in GetDeletableResources.
 		if len(newResources) == 0 {
-			glog.V(2).Infof("no resources reported by discovery, skipping garbage collector sync")
+			klog.V(2).Infof("no resources reported by discovery, skipping garbage collector sync")
 			return
 		}
 
 		// Decide whether discovery has reported a change.
 		if reflect.DeepEqual(oldResources, newResources) {
-			glog.V(5).Infof("no resource updates from discovery, skipping garbage collector sync")
+			klog.V(5).Infof("no resource updates from discovery, skipping garbage collector sync")
 			return
 		}
 
@@ -196,18 +196,18 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterf
 			if attempt > 1 {
 				newResources = GetDeletableResources(discoveryClient)
 				if len(newResources) == 0 {
-					glog.V(2).Infof("no resources reported by discovery (attempt %d)", attempt)
+					klog.V(2).Infof("no resources reported by discovery (attempt %d)", attempt)
 					return false, nil
 				}
 			}
 
-			glog.V(2).Infof("syncing garbage collector with updated resources from discovery (attempt %d): %s", attempt, printDiff(oldResources, newResources))
+			klog.V(2).Infof("syncing garbage collector with updated resources from discovery (attempt %d): %s", attempt, printDiff(oldResources, newResources))
 
 			// Resetting the REST mapper will also invalidate the underlying discovery
 			// client. This is a leaky abstraction and assumes behavior about the REST
 			// mapper, but we'll deal with it for now.
 			gc.restMapper.Reset()
-			glog.V(4).Infof("reset restmapper")
+			klog.V(4).Infof("reset restmapper")
 
 			// Perform the monitor resync and wait for controllers to report cache sync.
 			//
@@ -222,7 +222,7 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterf
 				utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors (attempt %d): %v", attempt, err))
 				return false, nil
 			}
-			glog.V(4).Infof("resynced monitors")
+			klog.V(4).Infof("resynced monitors")
 
 			// wait for caches to fill for a while (our sync period) before attempting to rediscover resources and retry syncing.
 			// this protects us from deadlocks where available resources changed and one of our informer caches will never fill.
@@ -242,7 +242,7 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterf
 		// have succeeded to ensure we'll retry on subsequent syncs if an error
 		// occurred.
 		oldResources = newResources
-		glog.V(2).Infof("synced garbage collector")
+		klog.V(2).Infof("synced garbage collector")
 	}, period, stopCh)
 }
 
@@ -308,7 +308,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
 			// have a way to distinguish this from a valid type we will recognize
 			// after the next discovery sync.
 			// For now, record the error and retry.
-			glog.V(5).Infof("error syncing item %s: %v", n, err)
+			klog.V(5).Infof("error syncing item %s: %v", n, err)
 		} else {
 			utilruntime.HandleError(fmt.Errorf("error syncing item %s: %v", n, err))
 		}
@@ -318,7 +318,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
 		// requeue if item hasn't been observed via an informer event yet.
 		// otherwise a virtual node for an item added AND removed during watch reestablishment can get stuck in the graph and never removed.
 		// see https://issue.k8s.io/56121
-		glog.V(5).Infof("item %s hasn't been observed via informer yet", n.identity)
+		klog.V(5).Infof("item %s hasn't been observed via informer yet", n.identity)
 		gc.attemptToDelete.AddRateLimited(item)
 	}
 	return true
@@ -330,7 +330,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
 func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *node) (
 	dangling bool, owner *unstructured.Unstructured, err error) {
 	if gc.absentOwnerCache.Has(reference.UID) {
-		glog.V(5).Infof("according to the absentOwnerCache, object %s's owner %s/%s, %s does not exist", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
+		klog.V(5).Infof("according to the absentOwnerCache, object %s's owner %s/%s, %s does not exist", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
 		return true, nil, nil
 	}
 	// TODO: we need to verify the reference resource is supported by the
@@ -351,14 +351,14 @@ func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *no
 	switch {
 	case errors.IsNotFound(err):
 		gc.absentOwnerCache.Add(reference.UID)
-		glog.V(5).Infof("object %s's owner %s/%s, %s is not found", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
+		klog.V(5).Infof("object %s's owner %s/%s, %s is not found", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
 		return true, nil, nil
 	case err != nil:
 		return false, nil, err
 	}
 
 	if owner.GetUID() != reference.UID {
-		glog.V(5).Infof("object %s's owner %s/%s, %s is not found, UID mismatch", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
+		klog.V(5).Infof("object %s's owner %s/%s, %s is not found, UID mismatch", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
 		gc.absentOwnerCache.Add(reference.UID)
 		return true, nil, nil
 	}
@@ -405,10 +405,10 @@ func ownerRefsToUIDs(refs []metav1.OwnerReference) []types.UID {
 }
 
 func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
-	glog.V(2).Infof("processing item %s", item.identity)
+	klog.V(2).Infof("processing item %s", item.identity)
 	// "being deleted" is a one-way trip to the final deletion. We'll just wait for the final deletion, and then process the object's dependents.
 	if item.isBeingDeleted() && !item.isDeletingDependents() {
-		glog.V(5).Infof("processing item %s returned at once, because its DeletionTimestamp is non-nil", item.identity)
+		klog.V(5).Infof("processing item %s returned at once, because its DeletionTimestamp is non-nil", item.identity)
 		return nil
 	}
 	// TODO: It's only necessary to talk to the API server if this is a
@@ -420,7 +420,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
 		// the GraphBuilder can add "virtual" node for an owner that doesn't
 		// exist yet, so we need to enqueue a virtual Delete event to remove
 		// the virtual node from GraphBuilder.uidToNode.
-		glog.V(5).Infof("item %v not found, generating a virtual delete event", item.identity)
+		klog.V(5).Infof("item %v not found, generating a virtual delete event", item.identity)
 		gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity)
 		// since we're manually inserting a delete event to remove this node,
 		// we don't need to keep tracking it as a virtual node and requeueing in attemptToDelete
@@ -431,7 +431,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
 	}
 
 	if latest.GetUID() != item.identity.UID {
-		glog.V(5).Infof("UID doesn't match, item %v not found, generating a virtual delete event", item.identity)
+		klog.V(5).Infof("UID doesn't match, item %v not found, generating a virtual delete event", item.identity)
 		gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity)
 		// since we're manually inserting a delete event to remove this node,
 		// we don't need to keep tracking it as a virtual node and requeueing in attemptToDelete
@@ -448,7 +448,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
 	// compute if we should delete the item
 	ownerReferences := latest.GetOwnerReferences()
 	if len(ownerReferences) == 0 {
-		glog.V(2).Infof("object %s doesn't have an owner, continue on next item", item.identity)
+		klog.V(2).Infof("object %s doesn't have an owner, continue on next item", item.identity)
 		return nil
 	}
 
@@ -456,15 +456,15 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
 	if err != nil {
 		return err
 	}
-	glog.V(5).Infof("classify references of %s.\nsolid: %#v\ndangling: %#v\nwaitingForDependentsDeletion: %#v\n", item.identity, solid, dangling, waitingForDependentsDeletion)
+	klog.V(5).Infof("classify references of %s.\nsolid: %#v\ndangling: %#v\nwaitingForDependentsDeletion: %#v\n", item.identity, solid, dangling, waitingForDependentsDeletion)
 
 	switch {
 	case len(solid) != 0:
-		glog.V(2).Infof("object %#v has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
+		klog.V(2).Infof("object %#v has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
 		if len(dangling) == 0 && len(waitingForDependentsDeletion) == 0 {
 			return nil
 		}
-		glog.V(2).Infof("remove dangling references %#v and waiting references %#v for object %s", dangling, waitingForDependentsDeletion, item.identity)
+		klog.V(2).Infof("remove dangling references %#v and waiting references %#v for object %s", dangling, waitingForDependentsDeletion, item.identity)
 		// waitingForDependentsDeletion needs to be deleted from the
 		// ownerReferences, otherwise the referenced objects will be stuck with
 		// the FinalizerDeletingDependents and never get deleted.
@@ -483,7 +483,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
 				// problem.
 				// there are multiple workers running attemptToDeleteItem in
 				// parallel; the cycle detection can fail in a race condition.
-				glog.V(2).Infof("processing object %s, some of its owners and its dependent [%s] have FinalizerDeletingDependents, to prevent potential cycle, its ownerReferences are going to be modified to be non-blocking, then the object is going to be deleted with Foreground", item.identity, dep.identity)
+				klog.V(2).Infof("processing object %s, some of its owners and its dependent [%s] have FinalizerDeletingDependents, to prevent potential cycle, its ownerReferences are going to be modified to be non-blocking, then the object is going to be deleted with Foreground", item.identity, dep.identity)
 				patch, err := item.unblockOwnerReferencesStrategicMergePatch()
 				if err != nil {
 					return err
@@ -494,7 +494,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
 				break
 			}
 		}
-		glog.V(2).Infof("at least one owner of object %s has FinalizerDeletingDependents, and the object itself has dependents, so it is going to be deleted in Foreground", item.identity)
+		klog.V(2).Infof("at least one owner of object %s has FinalizerDeletingDependents, and the object itself has dependents, so it is going to be deleted in Foreground", item.identity)
 		// the deletion event will be observed by the graphBuilder, so the item
 		// will be processed again in processDeletingDependentsItem. If it
 		// doesn't have dependents, the function will remove the
@@ -518,7 +518,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
 			// otherwise, default to background.
 			policy = metav1.DeletePropagationBackground
 		}
-		glog.V(2).Infof("delete object %s with propagation policy %s", item.identity, policy)
+		klog.V(2).Infof("delete object %s with propagation policy %s", item.identity, policy)
 		return gc.deleteObject(item.identity, &policy)
 	}
 }
@@ -527,12 +527,12 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
 func (gc *GarbageCollector) processDeletingDependentsItem(item *node) error {
 	blockingDependents := item.blockingDependents()
 	if len(blockingDependents) == 0 {
-		glog.V(2).Infof("remove DeleteDependents finalizer for item %s", item.identity)
+		klog.V(2).Infof("remove DeleteDependents finalizer for item %s", item.identity)
 		return gc.removeFinalizer(item, metav1.FinalizerDeleteDependents)
 	}
 	for _, dep := range blockingDependents {
 		if !dep.isDeletingDependents() {
-			glog.V(2).Infof("adding %s to attemptToDelete, because its owner %s is deletingDependents", dep.identity, item.identity)
+			klog.V(2).Infof("adding %s to attemptToDelete, because its owner %s is deletingDependents", dep.identity, item.identity)
 			gc.attemptToDelete.Add(dep)
 		}
 	}
@@ -570,7 +570,7 @@ func (gc *GarbageCollector) orphanDependents(owner objectReference, dependents [
 	if len(errorsSlice) != 0 {
 		return fmt.Errorf("failed to orphan dependents of owner %s, got errors: %s", owner, utilerrors.NewAggregate(errorsSlice).Error())
 	}
-	glog.V(5).Infof("successfully updated all dependents of owner %s", owner)
+	klog.V(5).Infof("successfully updated all dependents of owner %s", owner)
 	return nil
 }
 
@@ -644,9 +644,9 @@ func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) m
 	preferredResources, err := discoveryClient.ServerPreferredResources()
 	if err != nil {
 		if discovery.IsGroupDiscoveryFailedError(err) {
-			glog.Warningf("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
+			klog.Warningf("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
 		} else {
-			glog.Warningf("failed to discover preferred resources: %v", err)
+			klog.Warningf("failed to discover preferred resources: %v", err)
 		}
 	}
 	if preferredResources == nil {
@@ -660,7 +660,7 @@ func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) m
 	for _, rl := range deletableResources {
 		gv, err := schema.ParseGroupVersion(rl.GroupVersion)
 		if err != nil {
-			glog.Warningf("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err)
+			klog.Warningf("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err)
 			continue
 		}
 		for i := range rl.APIResources {
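GetDeletableResources above tolerates partial discovery failures: a failed API group comes back wrapped in ErrGroupDiscoveryFailed while the resource lists that did resolve remain usable. A sketch of the same handling against a live cluster (the kubeconfig wiring is illustrative, assumes a reachable API server, and is not taken from this patch):

package main

import (
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		klog.Fatalf("building config: %v", err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		klog.Fatalf("building discovery client: %v", err)
	}

	resources, err := dc.ServerPreferredResources()
	if err != nil {
		// Partial failures surface as ErrGroupDiscoveryFailed; the groups
		// that did resolve are still present in resources.
		if discovery.IsGroupDiscoveryFailedError(err) {
			klog.Warningf("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
		} else {
			klog.Warningf("failed to discover preferred resources: %v", err)
		}
	}
	klog.Infof("discovered %d resource lists", len(resources))
	klog.Flush()
}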
func (gb *GraphBuilder) Run(stopCh <-chan struct{}) { - glog.Infof("GraphBuilder running") - defer glog.Infof("GraphBuilder stopping") + klog.Infof("GraphBuilder running") + defer klog.Infof("GraphBuilder stopping") // Set up the stop channel. gb.monitorLock.Lock() @@ -333,7 +333,7 @@ func (gb *GraphBuilder) Run(stopCh <-chan struct{}) { // reset monitors so that the graph builder can be safely re-run/synced. gb.monitors = nil - glog.Infof("stopped %d of %d monitors", stopped, len(monitors)) + klog.Infof("stopped %d of %d monitors", stopped, len(monitors)) } var ignoredResources = map[schema.GroupResource]struct{}{ @@ -377,7 +377,7 @@ func (gb *GraphBuilder) addDependentToOwners(n *node, owners []metav1.OwnerRefer dependents: make(map[*node]struct{}), virtual: true, } - glog.V(5).Infof("add virtual node.identity: %s\n\n", ownerNode.identity) + klog.V(5).Infof("add virtual node.identity: %s\n\n", ownerNode.identity) gb.uidToNode.Write(ownerNode) } ownerNode.addDependent(n) @@ -515,7 +515,7 @@ func (gb *GraphBuilder) addUnblockedOwnersToDeleteQueue(removed []metav1.OwnerRe if ref.BlockOwnerDeletion != nil && *ref.BlockOwnerDeletion { node, found := gb.uidToNode.Read(ref.UID) if !found { - glog.V(5).Infof("cannot find %s in uidToNode", ref.UID) + klog.V(5).Infof("cannot find %s in uidToNode", ref.UID) continue } gb.attemptToDelete.Add(node) @@ -527,7 +527,7 @@ func (gb *GraphBuilder) addUnblockedOwnersToDeleteQueue(removed []metav1.OwnerRe if wasBlocked && isUnblocked { node, found := gb.uidToNode.Read(c.newRef.UID) if !found { - glog.V(5).Infof("cannot find %s in uidToNode", c.newRef.UID) + klog.V(5).Infof("cannot find %s in uidToNode", c.newRef.UID) continue } gb.attemptToDelete.Add(node) @@ -537,12 +537,12 @@ func (gb *GraphBuilder) addUnblockedOwnersToDeleteQueue(removed []metav1.OwnerRe func (gb *GraphBuilder) processTransitions(oldObj interface{}, newAccessor metav1.Object, n *node) { if startsWaitingForDependentsOrphaned(oldObj, newAccessor) { - glog.V(5).Infof("add %s to the attemptToOrphan", n.identity) + klog.V(5).Infof("add %s to the attemptToOrphan", n.identity) gb.attemptToOrphan.Add(n) return } if startsWaitingForDependentsDeleted(oldObj, newAccessor) { - glog.V(2).Infof("add %s to the attemptToDelete, because it's waiting for its dependents to be deleted", n.identity) + klog.V(2).Infof("add %s to the attemptToDelete, because it's waiting for its dependents to be deleted", n.identity) // if the n is added as a "virtual" node, its deletingDependents field is not properly set, so always set it here. 
n.markDeletingDependents() for dep := range n.dependents { @@ -575,7 +575,7 @@ func (gb *GraphBuilder) processGraphChanges() bool { utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err)) return true } - glog.V(5).Infof("GraphBuilder process object: %s/%s, namespace %s, name %s, uid %s, event type %v", event.gvk.GroupVersion().String(), event.gvk.Kind, accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType) + klog.V(5).Infof("GraphBuilder process object: %s/%s, namespace %s, name %s, uid %s, event type %v", event.gvk.GroupVersion().String(), event.gvk.Kind, accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType) // Check if the node already exists existingNode, found := gb.uidToNode.Read(accessor.GetUID()) if found { @@ -627,7 +627,7 @@ func (gb *GraphBuilder) processGraphChanges() bool { gb.processTransitions(event.oldObj, accessor, existingNode) case event.eventType == deleteEvent: if !found { - glog.V(5).Infof("%v doesn't exist in the graph, this shouldn't happen", accessor.GetUID()) + klog.V(5).Infof("%v doesn't exist in the graph, this shouldn't happen", accessor.GetUID()) return true } // removeNode updates the graph diff --git a/pkg/controller/garbagecollector/operations.go b/pkg/controller/garbagecollector/operations.go index ec9a096bffeda..a6cf9dd514082 100644 --- a/pkg/controller/garbagecollector/operations.go +++ b/pkg/controller/garbagecollector/operations.go @@ -19,7 +19,7 @@ package garbagecollector import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -110,7 +110,7 @@ func (gc *GarbageCollector) removeFinalizer(owner *node, targetFinalizer string) newFinalizers = append(newFinalizers, f) } if !found { - glog.V(5).Infof("the %s finalizer is already removed from object %s", targetFinalizer, owner.identity) + klog.V(5).Infof("the %s finalizer is already removed from object %s", targetFinalizer, owner.identity) return nil } // remove the owner from dependent's OwnerReferences diff --git a/pkg/controller/job/BUILD b/pkg/controller/job/BUILD index 293b4b8bdd2c5..cb12dbe575b00 100644 --- a/pkg/controller/job/BUILD +++ b/pkg/controller/job/BUILD @@ -35,7 +35,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/integer:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/job/job_controller.go b/pkg/controller/job/job_controller.go index df97da888a6dd..ebdcd7e2640eb 100644 --- a/pkg/controller/job/job_controller.go +++ b/pkg/controller/job/job_controller.go @@ -45,7 +45,7 @@ import ( "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" - "github.com/golang/glog" + "k8s.io/klog" ) const statusUpdateRetries = 3 @@ -91,7 +91,7 @@ type JobController struct { func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *JobController { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { @@ -140,8 +140,8 @@ func (jm *JobController) Run(workers int, stopCh 
<-chan struct{}) { defer utilruntime.HandleCrash() defer jm.queue.ShutDown() - glog.Infof("Starting job controller") - defer glog.Infof("Shutting down job controller") + klog.Infof("Starting job controller") + defer klog.Infof("Shutting down job controller") if !controller.WaitForCacheSync("job", stopCh, jm.podStoreSynced, jm.jobStoreSynced) { return @@ -343,7 +343,7 @@ func (jm *JobController) updateJob(old, cur interface{}) { total := time.Duration(*curADS) * time.Second // AddAfter will handle total < passed jm.queue.AddAfter(key, total-passed) - glog.V(4).Infof("job ActiveDeadlineSeconds updated, will rsync after %d seconds", total-passed) + klog.V(4).Infof("job ActiveDeadlineSeconds updated, will resync after %d seconds", total-passed) } } } @@ -436,7 +436,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) { func (jm *JobController) syncJob(key string) (bool, error) { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime)) }() ns, name, err := cache.SplitMetaNamespaceKey(key) @@ -449,7 +449,7 @@ func (jm *JobController) syncJob(key string) (bool, error) { sharedJob, err := jm.jobLister.Jobs(ns).Get(name) if err != nil { if errors.IsNotFound(err) { - glog.V(4).Infof("Job has been deleted: %v", key) + klog.V(4).Infof("Job has been deleted: %v", key) jm.expectations.DeleteExpectations(key) return true, nil } @@ -485,7 +485,7 @@ func (jm *JobController) syncJob(key string) (bool, error) { job.Status.StartTime = &now // enqueue a sync to check if job past ActiveDeadlineSeconds if job.Spec.ActiveDeadlineSeconds != nil { - glog.V(4).Infof("Job %s have ActiveDeadlineSeconds will sync after %d seconds", + klog.V(4).Infof("Job %s has ActiveDeadlineSeconds, will sync after %d seconds", key, *job.Spec.ActiveDeadlineSeconds) jm.queue.AddAfter(key, time.Duration(*job.Spec.ActiveDeadlineSeconds)*time.Second) } @@ -614,7 +614,7 @@ func (jm *JobController) deleteJobPods(job *batch.Job, pods []*v1.Pod, errCh cha defer wait.Done() if err := jm.podControl.DeletePod(job.Namespace, pods[ix].Name, job); err != nil { defer utilruntime.HandleError(err) - glog.V(2).Infof("Failed to delete %v, job %q/%q deadline exceeded", pods[ix].Name, job.Namespace, job.Name) + klog.V(2).Infof("Failed to delete %v, job %q/%q deadline exceeded", pods[ix].Name, job.Namespace, job.Name) errCh <- err } }(i) @@ -697,7 +697,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b diff := active - parallelism errCh = make(chan error, diff) jm.expectations.ExpectDeletions(jobKey, int(diff)) - glog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff) + klog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff) // Sort the pods in the order such that not-ready < ready, unscheduled // < scheduled, and pending < running. This ensures that we delete pods // in the earlier stages whenever possible.
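The swap in these hunks is mechanical because klog kept glog's package-level API: Infof/Errorf/Warningf and the leveled V(n).Infof calls have identical signatures, which is also why klog.Infof can be handed straight to the event broadcaster's StartLogging. Below is a minimal, hypothetical sketch (not part of the patch) of that compatibility; the startLogging helper merely stands in for record.EventBroadcaster.StartLogging:

package main

import (
	"flag"

	"k8s.io/klog"
)

// startLogging mimics the shape of record.EventBroadcaster.StartLogging:
// it accepts any printf-style function, so klog.Infof drops in where
// glog.Infof used to be passed.
func startLogging(logf func(format string, args ...interface{})) {
	logf("Sending events to %s.", "api server")
}

func main() {
	// Unlike glog, klog registers its flags (-v, -logtostderr, ...) only
	// when InitFlags is called explicitly; nil means the global flag set.
	klog.InitFlags(nil)
	flag.Set("v", "4")
	flag.Parse()

	startLogging(klog.Infof)

	// Leveled logging behaves the same after the migration.
	klog.V(4).Infof("Finished syncing job %q (%v)", "default/demo", "10ms")
	klog.Flush()
}

Because the two packages are independent, a vendored tree can even be migrated file by file: mixed glog and klog imports still compile, they just log through two separate stacks until the conversion is complete.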
@@ -712,7 +712,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name, job); err != nil { defer utilruntime.HandleError(err) // Decrement the expected number of deletes because the informer won't observe this deletion - glog.V(2).Infof("Failed to delete %v, decrementing expectations for job %q/%q", activePods[ix].Name, job.Namespace, job.Name) + klog.V(2).Infof("Failed to delete %v, decrementing expectations for job %q/%q", activePods[ix].Name, job.Namespace, job.Name) jm.expectations.DeletionObserved(jobKey) activeLock.Lock() active++ @@ -749,7 +749,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b } jm.expectations.ExpectCreations(jobKey, int(diff)) errCh = make(chan error, diff) - glog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, wantActive, diff) + klog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, wantActive, diff) active += diff wait := sync.WaitGroup{} @@ -782,7 +782,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b if err != nil { defer utilruntime.HandleError(err) // Decrement the expected number of creates because the informer won't observe this pod - glog.V(2).Infof("Failed creation, decrementing expectations for job %q/%q", job.Namespace, job.Name) + klog.V(2).Infof("Failed creation, decrementing expectations for job %q/%q", job.Namespace, job.Name) jm.expectations.CreationObserved(jobKey) activeLock.Lock() active-- @@ -795,7 +795,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b // any skipped pods that we never attempted to start shouldn't be expected. skippedPods := diff - batchSize if errorCount < len(errCh) && skippedPods > 0 { - glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for job %q/%q", skippedPods, job.Namespace, job.Name) + klog.V(2).Infof("Slow-start failure. 
Skipping creation of %d pods, decrementing expectations for job %q/%q", skippedPods, job.Namespace, job.Name) active -= skippedPods for i := int32(0); i < skippedPods; i++ { // Decrement the expected number of creates because the informer won't observe this pod diff --git a/pkg/controller/namespace/BUILD b/pkg/controller/namespace/BUILD index 4da5a46751eb7..8a08de38ab326 100644 --- a/pkg/controller/namespace/BUILD +++ b/pkg/controller/namespace/BUILD @@ -27,7 +27,7 @@ go_library( "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/namespace/deletion/BUILD b/pkg/controller/namespace/deletion/BUILD index 61da1501e3b42..0c058ee50dfd2 100644 --- a/pkg/controller/namespace/deletion/BUILD +++ b/pkg/controller/namespace/deletion/BUILD @@ -22,7 +22,7 @@ go_library( "//staging/src/k8s.io/client-go/discovery:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/namespace/deletion/namespaced_resources_deleter.go b/pkg/controller/namespace/deletion/namespaced_resources_deleter.go index 62e09392f4a52..7e28a4e996381 100644 --- a/pkg/controller/namespace/deletion/namespaced_resources_deleter.go +++ b/pkg/controller/namespace/deletion/namespaced_resources_deleter.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -110,7 +110,7 @@ func (d *namespacedResourcesDeleter) Delete(nsName string) error { return nil } - glog.V(5).Infof("namespace controller - syncNamespace - namespace: %s, finalizerToken: %s", namespace.Name, d.finalizerToken) + klog.V(5).Infof("namespace controller - syncNamespace - namespace: %s, finalizerToken: %s", namespace.Name, d.finalizerToken) // ensure that the status is up to date on the namespace // if we get a not found error, we assume the namespace is truly gone @@ -169,13 +169,13 @@ func (d *namespacedResourcesDeleter) initOpCache() { utilruntime.HandleError(fmt.Errorf("unable to get all supported resources from server: %v", err)) } if len(resources) == 0 { - glog.Fatalf("Unable to get any supported resources from server: %v", err) + klog.Fatalf("Unable to get any supported resources from server: %v", err) } deletableGroupVersionResources := []schema.GroupVersionResource{} for _, rl := range resources { gv, err := schema.ParseGroupVersion(rl.GroupVersion) if err != nil { - glog.Errorf("Failed to parse GroupVersion %q, skipping: %v", rl.GroupVersion, err) + klog.Errorf("Failed to parse GroupVersion %q, skipping: %v", rl.GroupVersion, err) continue } @@ -184,7 +184,7 @@ func (d *namespacedResourcesDeleter) initOpCache() { verbs := sets.NewString([]string(r.Verbs)...) 
if !verbs.Has("delete") { - glog.V(6).Infof("Skipping resource %v because it cannot be deleted.", gvr) + klog.V(6).Infof("Skipping resource %v because it cannot be deleted.", gvr) } for _, op := range []operation{operationList, operationDeleteCollection} { @@ -329,11 +329,11 @@ func (d *namespacedResourcesDeleter) finalizeNamespace(namespace *v1.Namespace) // it returns true if the operation was supported on the server. // it returns an error if the operation was supported on the server but was unable to complete. func (d *namespacedResourcesDeleter) deleteCollection(gvr schema.GroupVersionResource, namespace string) (bool, error) { - glog.V(5).Infof("namespace controller - deleteCollection - namespace: %s, gvr: %v", namespace, gvr) + klog.V(5).Infof("namespace controller - deleteCollection - namespace: %s, gvr: %v", namespace, gvr) key := operationKey{operation: operationDeleteCollection, gvr: gvr} if !d.opCache.isSupported(key) { - glog.V(5).Infof("namespace controller - deleteCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr) + klog.V(5).Infof("namespace controller - deleteCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr) return false, nil } @@ -355,12 +355,12 @@ func (d *namespacedResourcesDeleter) deleteCollection(gvr schema.GroupVersionRes // when working with this resource type, we will get a literal not found error rather than expected method not supported // remember next time that this resource does not support delete collection... if errors.IsMethodNotSupported(err) || errors.IsNotFound(err) { - glog.V(5).Infof("namespace controller - deleteCollection not supported - namespace: %s, gvr: %v", namespace, gvr) + klog.V(5).Infof("namespace controller - deleteCollection not supported - namespace: %s, gvr: %v", namespace, gvr) d.opCache.setNotSupported(key) return false, nil } - glog.V(5).Infof("namespace controller - deleteCollection unexpected error - namespace: %s, gvr: %v, error: %v", namespace, gvr, err) + klog.V(5).Infof("namespace controller - deleteCollection unexpected error - namespace: %s, gvr: %v, error: %v", namespace, gvr, err) return true, err } @@ -370,11 +370,11 @@ func (d *namespacedResourcesDeleter) deleteCollection(gvr schema.GroupVersionRes // a boolean if the operation is supported // an error if the operation is supported but could not be completed. func (d *namespacedResourcesDeleter) listCollection(gvr schema.GroupVersionResource, namespace string) (*unstructured.UnstructuredList, bool, error) { - glog.V(5).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr) + klog.V(5).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr) key := operationKey{operation: operationList, gvr: gvr} if !d.opCache.isSupported(key) { - glog.V(5).Infof("namespace controller - listCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr) + klog.V(5).Infof("namespace controller - listCollection ignored since not supported - namespace: %s, gvr: %v", namespace, gvr) return nil, false, nil } @@ -390,7 +390,7 @@ func (d *namespacedResourcesDeleter) listCollection(gvr schema.GroupVersionResou // when working with this resource type, we will get a literal not found error rather than expected method not supported // remember next time that this resource does not support delete collection... 
if errors.IsMethodNotSupported(err) || errors.IsNotFound(err) { - glog.V(5).Infof("namespace controller - listCollection not supported - namespace: %s, gvr: %v", namespace, gvr) + klog.V(5).Infof("namespace controller - listCollection not supported - namespace: %s, gvr: %v", namespace, gvr) d.opCache.setNotSupported(key) return nil, false, nil } @@ -400,7 +400,7 @@ func (d *namespacedResourcesDeleter) listCollection(gvr schema.GroupVersionResou // deleteEachItem is a helper function that will list the collection of resources and delete each item 1 by 1. func (d *namespacedResourcesDeleter) deleteEachItem(gvr schema.GroupVersionResource, namespace string) error { - glog.V(5).Infof("namespace controller - deleteEachItem - namespace: %s, gvr: %v", namespace, gvr) + klog.V(5).Infof("namespace controller - deleteEachItem - namespace: %s, gvr: %v", namespace, gvr) unstructuredList, listSupported, err := d.listCollection(gvr, namespace) if err != nil { @@ -425,15 +425,15 @@ func (d *namespacedResourcesDeleter) deleteEachItem(gvr schema.GroupVersionResou func (d *namespacedResourcesDeleter) deleteAllContentForGroupVersionResource( gvr schema.GroupVersionResource, namespace string, namespaceDeletedAt metav1.Time) (int64, error) { - glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - namespace: %s, gvr: %v", namespace, gvr) + klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - namespace: %s, gvr: %v", namespace, gvr) // estimate how long it will take for the resource to be deleted (needed for objects that support graceful delete) estimate, err := d.estimateGracefulTermination(gvr, namespace, namespaceDeletedAt) if err != nil { - glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - unable to estimate - namespace: %s, gvr: %v, err: %v", namespace, gvr, err) + klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - unable to estimate - namespace: %s, gvr: %v, err: %v", namespace, gvr, err) return estimate, err } - glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - estimate - namespace: %s, gvr: %v, estimate: %v", namespace, gvr, estimate) + klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - estimate - namespace: %s, gvr: %v, estimate: %v", namespace, gvr, estimate) // first try to delete the entire collection deleteCollectionSupported, err := d.deleteCollection(gvr, namespace) @@ -451,21 +451,21 @@ func (d *namespacedResourcesDeleter) deleteAllContentForGroupVersionResource( // verify there are no more remaining items // it is not an error condition for there to be remaining items if local estimate is non-zero - glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - checking for no more items in namespace: %s, gvr: %v", namespace, gvr) + klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - checking for no more items in namespace: %s, gvr: %v", namespace, gvr) unstructuredList, listSupported, err := d.listCollection(gvr, namespace) if err != nil { - glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - error verifying no items in namespace: %s, gvr: %v, err: %v", namespace, gvr, err) + klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - error verifying no items in namespace: %s, gvr: %v, err: %v", namespace, gvr, err) return estimate, err } if !listSupported { return estimate, nil } - glog.V(5).Infof("namespace 
controller - deleteAllContentForGroupVersionResource - items remaining - namespace: %s, gvr: %v, items: %v", namespace, gvr, len(unstructuredList.Items)) + klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining - namespace: %s, gvr: %v, items: %v", namespace, gvr, len(unstructuredList.Items)) if len(unstructuredList.Items) != 0 && estimate == int64(0) { // if any item has a finalizer, we treat that as a normal condition, and use a default estimation to allow for GC to complete. for _, item := range unstructuredList.Items { if len(item.GetFinalizers()) > 0 { - glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining with finalizers - namespace: %s, gvr: %v, finalizers: %v", namespace, gvr, item.GetFinalizers()) + klog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining with finalizers - namespace: %s, gvr: %v, finalizers: %v", namespace, gvr, item.GetFinalizers()) return finalizerEstimateSeconds, nil } } @@ -480,7 +480,7 @@ func (d *namespacedResourcesDeleter) deleteAllContentForGroupVersionResource( // If estimate > 0, not all resources are guaranteed to be gone. func (d *namespacedResourcesDeleter) deleteAllContent(namespace string, namespaceDeletedAt metav1.Time) (int64, error) { estimate := int64(0) - glog.V(4).Infof("namespace controller - deleteAllContent - namespace: %s", namespace) + klog.V(4).Infof("namespace controller - deleteAllContent - namespace: %s", namespace) resources, err := d.discoverResourcesFn() if err != nil { return estimate, err @@ -506,14 +506,14 @@ func (d *namespacedResourcesDeleter) deleteAllContent(namespace string, namespac if len(errs) > 0 { return estimate, utilerrors.NewAggregate(errs) } - glog.V(4).Infof("namespace controller - deleteAllContent - namespace: %s, estimate: %v", namespace, estimate) + klog.V(4).Infof("namespace controller - deleteAllContent - namespace: %s, estimate: %v", namespace, estimate) return estimate, nil } // estimateGracefulTermination will estimate the graceful termination required for the specific entity in the namespace func (d *namespacedResourcesDeleter) estimateGracefulTermination(gvr schema.GroupVersionResource, ns string, namespaceDeletedAt metav1.Time) (int64, error) { groupResource := gvr.GroupResource() - glog.V(5).Infof("namespace controller - estimateGracefulTermination - group %s, resource: %s", groupResource.Group, groupResource.Resource) + klog.V(5).Infof("namespace controller - estimateGracefulTermination - group %s, resource: %s", groupResource.Group, groupResource.Resource) estimate := int64(0) var err error switch groupResource { @@ -534,7 +534,7 @@ func (d *namespacedResourcesDeleter) estimateGracefulTermination(gvr schema.Grou // estimateGracefulTerminationForPods determines the graceful termination period for pods in the namespace func (d *namespacedResourcesDeleter) estimateGracefulTerminationForPods(ns string) (int64, error) { - glog.V(5).Infof("namespace controller - estimateGracefulTerminationForPods - namespace %s", ns) + klog.V(5).Infof("namespace controller - estimateGracefulTerminationForPods - namespace %s", ns) estimate := int64(0) podsGetter := d.podsGetter if podsGetter == nil || reflect.ValueOf(podsGetter).IsNil() { diff --git a/pkg/controller/namespace/namespace_controller.go b/pkg/controller/namespace/namespace_controller.go index 396f1a3ead4c9..53d4225066dd6 100644 --- a/pkg/controller/namespace/namespace_controller.go +++
b/pkg/controller/namespace/namespace_controller.go @@ -35,7 +35,7 @@ import ( "k8s.io/kubernetes/pkg/controller/namespace/deletion" "k8s.io/kubernetes/pkg/util/metrics" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -140,7 +140,7 @@ func (nm *NamespaceController) worker() { if estimate, ok := err.(*deletion.ResourcesRemainingError); ok { t := estimate.Estimate/2 + 1 - glog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", key, t) + klog.V(4).Infof("Content remaining in namespace %s, waiting %d seconds", key, t) nm.queue.AddAfter(key, time.Duration(t)*time.Second) } else { // rather than wait for a full resync, re-add the namespace to the queue to be processed @@ -163,12 +163,12 @@ func (nm *NamespaceController) worker() { func (nm *NamespaceController) syncNamespaceFromKey(key string) (err error) { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime)) }() namespace, err := nm.lister.Get(key) if errors.IsNotFound(err) { - glog.Infof("Namespace has been deleted %v", key) + klog.Infof("Namespace has been deleted %v", key) return nil } if err != nil { @@ -183,14 +183,14 @@ func (nm *NamespaceController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer nm.queue.ShutDown() - glog.Infof("Starting namespace controller") - defer glog.Infof("Shutting down namespace controller") + klog.Infof("Starting namespace controller") + defer klog.Infof("Shutting down namespace controller") if !controller.WaitForCacheSync("namespace", stopCh, nm.listerSynced) { return } - glog.V(5).Info("Starting workers of namespace controller") + klog.V(5).Info("Starting workers of namespace controller") for i := 0; i < workers; i++ { go wait.Until(nm.worker, time.Second, stopCh) } diff --git a/pkg/controller/nodeipam/BUILD b/pkg/controller/nodeipam/BUILD index 7314b3960f39f..e8c6a4396389c 100644 --- a/pkg/controller/nodeipam/BUILD +++ b/pkg/controller/nodeipam/BUILD @@ -44,7 +44,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/nodeipam/ipam/BUILD b/pkg/controller/nodeipam/ipam/BUILD index bbf02e01fe79f..e95fd1f7c9127 100644 --- a/pkg/controller/nodeipam/ipam/BUILD +++ b/pkg/controller/nodeipam/ipam/BUILD @@ -69,7 +69,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/scheme:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/nodeipam/ipam/adapter.go b/pkg/controller/nodeipam/ipam/adapter.go index c72e2e5a22c35..96402cf78125a 100644 --- a/pkg/controller/nodeipam/ipam/adapter.go +++ b/pkg/controller/nodeipam/ipam/adapter.go @@ -21,7 +21,7 @@ import ( "encoding/json" "net" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -48,9 +48,9 @@ func newAdapter(k8s clientset.Interface, cloud *gce.Cloud) *adapter { } broadcaster := record.NewBroadcaster() - broadcaster.StartLogging(glog.Infof) + broadcaster.StartLogging(klog.Infof) 
ret.recorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudCIDRAllocator"}) - glog.V(0).Infof("Sending events to api server.") + klog.V(0).Infof("Sending events to api server.") broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{ Interface: k8s.CoreV1().Events(""), }) @@ -70,7 +70,7 @@ func (a *adapter) Alias(ctx context.Context, nodeName string) (*net.IPNet, error case 1: break default: - glog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", nodeName, cidrs) + klog.Warningf("Node %q has more than one alias assigned (%v), defaulting to the first", nodeName, cidrs) } _, cidrRange, err := net.ParseCIDR(cidrs[0]) diff --git a/pkg/controller/nodeipam/ipam/cidr_allocator.go b/pkg/controller/nodeipam/ipam/cidr_allocator.go index 19f8bd0fde40f..d667d11eec06b 100644 --- a/pkg/controller/nodeipam/ipam/cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cidr_allocator.go @@ -21,7 +21,7 @@ import ( "net" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -121,7 +121,7 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) { LabelSelector: labels.Everything().String(), }) if err != nil { - glog.Errorf("Failed to list all nodes: %v", err) + klog.Errorf("Failed to list all nodes: %v", err) return false, nil } return true, nil diff --git a/pkg/controller/nodeipam/ipam/cidrset/BUILD b/pkg/controller/nodeipam/ipam/cidrset/BUILD index 805174fd3c260..89cbf0c2ee722 100644 --- a/pkg/controller/nodeipam/ipam/cidrset/BUILD +++ b/pkg/controller/nodeipam/ipam/cidrset/BUILD @@ -10,7 +10,7 @@ go_test( name = "go_default_test", srcs = ["cidr_set_test.go"], embed = [":go_default_library"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) go_library( diff --git a/pkg/controller/nodeipam/ipam/cidrset/cidr_set_test.go b/pkg/controller/nodeipam/ipam/cidrset/cidr_set_test.go index 826eeba9bd8e7..c9e1651274a61 100644 --- a/pkg/controller/nodeipam/ipam/cidrset/cidr_set_test.go +++ b/pkg/controller/nodeipam/ipam/cidrset/cidr_set_test.go @@ -22,7 +22,7 @@ import ( "reflect" "testing" - "github.com/golang/glog" + "k8s.io/klog" ) func TestCIDRSetFullyAllocated(t *testing.T) { @@ -478,17 +478,17 @@ func TestGetBitforCIDR(t *testing.T) { got, err := cs.getIndexForCIDR(subnetCIDR) if err == nil && tc.expectErr { - glog.Errorf("expected error but got null for %v", tc.description) + klog.Errorf("expected error but got null for %v", tc.description) continue } if err != nil && !tc.expectErr { - glog.Errorf("unexpected error: %v for %v", err, tc.description) + klog.Errorf("unexpected error: %v for %v", err, tc.description) continue } if got != tc.expectedBit { - glog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description) + klog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description) } } } diff --git a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go index 12bf16f5a289d..8c39a9f469e99 100644 --- a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go @@ -23,7 +23,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -84,13 +84,13 @@ var _ CIDRAllocator = (*cloudCIDRAllocator)(nil) // NewCloudCIDRAllocator creates a new cloud CIDR allocator. 
func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer) (CIDRAllocator, error) { if client == nil { - glog.Fatalf("kubeClient is nil when starting NodeController") + klog.Fatalf("kubeClient is nil when starting NodeController") } eventBroadcaster := record.NewBroadcaster() recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"}) - eventBroadcaster.StartLogging(glog.Infof) - glog.V(0).Infof("Sending events to api server.") + eventBroadcaster.StartLogging(klog.Infof) + klog.V(0).Infof("Sending events to api server.") eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) gceCloud, ok := cloud.(*gce.Cloud) @@ -127,15 +127,15 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter DeleteFunc: nodeutil.CreateDeleteNodeHandler(ca.ReleaseCIDR), }) - glog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName()) + klog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName()) return ca, nil } func (ca *cloudCIDRAllocator) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - glog.Infof("Starting cloud CIDR allocator") - defer glog.Infof("Shutting down cloud CIDR allocator") + klog.Infof("Starting cloud CIDR allocator") + defer klog.Infof("Shutting down cloud CIDR allocator") if !controller.WaitForCacheSync("cidrallocator", stopCh, ca.nodesSynced) { return @@ -153,22 +153,22 @@ func (ca *cloudCIDRAllocator) worker(stopChan <-chan struct{}) { select { case workItem, ok := <-ca.nodeUpdateChannel: if !ok { - glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed") + klog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed") return } if err := ca.updateCIDRAllocation(workItem); err == nil { - glog.V(3).Infof("Updated CIDR for %q", workItem) + klog.V(3).Infof("Updated CIDR for %q", workItem) } else { - glog.Errorf("Error updating CIDR for %q: %v", workItem, err) + klog.Errorf("Error updating CIDR for %q: %v", workItem, err) if canRetry, timeout := ca.retryParams(workItem); canRetry { - glog.V(2).Infof("Retrying update for %q after %v", workItem, timeout) + klog.V(2).Infof("Retrying update for %q after %v", workItem, timeout) time.AfterFunc(timeout, func() { // Requeue the failed node for update again. 
ca.nodeUpdateChannel <- workItem }) continue } - glog.Errorf("Exceeded retry count for %q, dropping from queue", workItem) + klog.Errorf("Exceeded retry count for %q, dropping from queue", workItem) } ca.removeNodeFromProcessing(workItem) case <-stopChan: @@ -193,7 +193,7 @@ func (ca *cloudCIDRAllocator) retryParams(nodeName string) (bool, time.Duration) entry, ok := ca.nodesInProcessing[nodeName] if !ok { - glog.Errorf("Cannot get retryParams for %q as entry does not exist", nodeName) + klog.Errorf("Cannot get retryParams for %q as entry does not exist", nodeName) return false, 0 } @@ -231,11 +231,11 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { return nil } if !ca.insertNodeToProcessing(node.Name) { - glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name) + klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name) return nil } - glog.V(4).Infof("Putting node %s into the work queue", node.Name) + klog.V(4).Infof("Putting node %s into the work queue", node.Name) ca.nodeUpdateChannel <- node.Name return nil } @@ -247,7 +247,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error { if errors.IsNotFound(err) { return nil // node no longer available, skip processing } - glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err) + klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", nodeName, err) return err } @@ -267,11 +267,11 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error { podCIDR := cidr.String() if node.Spec.PodCIDR == podCIDR { - glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR) + klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR) // We don't return here, in order to set the NetworkUnavailable condition later below. } else { if node.Spec.PodCIDR != "" { - glog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR) + klog.Errorf("PodCIDR being reassigned! Node %v spec has %v, but cloud provider has assigned %v", node.Name, node.Spec.PodCIDR, podCIDR) // We fall through and set the CIDR despite this error. This // implements the same logic as implemented in the // rangeAllocator. 
@@ -280,14 +280,14 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error { } for i := 0; i < cidrUpdateRetries; i++ { if err = utilnode.PatchNodeCIDR(ca.client, types.NodeName(node.Name), podCIDR); err == nil { - glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR) + klog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR) break } } } if err != nil { nodeutil.RecordNodeStatusChange(ca.recorder, node, "CIDRAssignmentFailed") - glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err) + klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err) return err } @@ -299,13 +299,13 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error { LastTransitionTime: metav1.Now(), }) if err != nil { - glog.Errorf("Error setting route status for node %v: %v", node.Name, err) + klog.Errorf("Error setting route status for node %v: %v", node.Name, err) } return err } func (ca *cloudCIDRAllocator) ReleaseCIDR(node *v1.Node) error { - glog.V(2).Infof("Node %v PodCIDR (%v) will be released by external cloud provider (not managed by controller)", + klog.V(2).Infof("Node %v PodCIDR (%v) will be released by external cloud provider (not managed by controller)", node.Name, node.Spec.PodCIDR) return nil } diff --git a/pkg/controller/nodeipam/ipam/controller.go b/pkg/controller/nodeipam/ipam/controller.go index 18d1c2f368f37..1c8253d671f15 100644 --- a/pkg/controller/nodeipam/ipam/controller.go +++ b/pkg/controller/nodeipam/ipam/controller.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" informers "k8s.io/client-go/informers/core/v1" @@ -99,7 +99,7 @@ func NewController( // registers the informers for node changes. This will start synchronization // of the node and cloud CIDR range allocations. 
func (c *Controller) Start(nodeInformer informers.NodeInformer) error { - glog.V(0).Infof("Starting IPAM controller (config=%+v)", c.config) + klog.V(0).Infof("Starting IPAM controller (config=%+v)", c.config) nodes, err := listNodes(c.adapter.k8s) if err != nil { @@ -110,9 +110,9 @@ func (c *Controller) Start(nodeInformer informers.NodeInformer) error { _, cidrRange, err := net.ParseCIDR(node.Spec.PodCIDR) if err == nil { c.set.Occupy(cidrRange) - glog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR) + klog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR) } else { - glog.Errorf("Node %q has an invalid CIDR (%q): %v", node.Name, node.Spec.PodCIDR, err) + klog.Errorf("Node %q has an invalid CIDR (%q): %v", node.Name, node.Spec.PodCIDR, err) } } @@ -180,7 +180,7 @@ func (c *Controller) onAdd(node *v1.Node) error { c.syncers[node.Name] = syncer go syncer.Loop(nil) } else { - glog.Warningf("Add for node %q that already exists", node.Name) + klog.Warningf("Add for node %q that already exists", node.Name) } syncer.Update(node) @@ -194,7 +194,7 @@ func (c *Controller) onUpdate(_, node *v1.Node) error { if sync, ok := c.syncers[node.Name]; ok { sync.Update(node) } else { - glog.Errorf("Received update for non-existent node %q", node.Name) + klog.Errorf("Received update for non-existent node %q", node.Name) return fmt.Errorf("unknown node %q", node.Name) } @@ -209,7 +209,7 @@ func (c *Controller) onDelete(node *v1.Node) error { syncer.Delete(node) delete(c.syncers, node.Name) } else { - glog.Warningf("Node %q was already deleted", node.Name) + klog.Warningf("Node %q was already deleted", node.Name) } return nil diff --git a/pkg/controller/nodeipam/ipam/range_allocator.go b/pkg/controller/nodeipam/ipam/range_allocator.go index de3e27a5537fc..9da52e63aa711 100644 --- a/pkg/controller/nodeipam/ipam/range_allocator.go +++ b/pkg/controller/nodeipam/ipam/range_allocator.go @@ -21,7 +21,7 @@ import ( "net" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -69,13 +69,13 @@ type rangeAllocator struct { // can initialize its CIDR map. NodeList is only nil in testing. func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) { if client == nil { - glog.Fatalf("kubeClient is nil when starting NodeController") + klog.Fatalf("kubeClient is nil when starting NodeController") } eventBroadcaster := record.NewBroadcaster() recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"}) - eventBroadcaster.StartLogging(glog.Infof) - glog.V(0).Infof("Sending events to api server.") + eventBroadcaster.StartLogging(klog.Infof) + klog.V(0).Infof("Sending events to api server.") eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) set, err := cidrset.NewCIDRSet(clusterCIDR, subNetMaskSize) @@ -96,16 +96,16 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No if serviceCIDR != nil { ra.filterOutServiceRange(serviceCIDR) } else { - glog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.") + klog.V(0).Info("No Service CIDR provided. 
Skipping filtering out service addresses.") } if nodeList != nil { for _, node := range nodeList.Items { if node.Spec.PodCIDR == "" { - glog.Infof("Node %v has no CIDR, ignoring", node.Name) + klog.Infof("Node %v has no CIDR, ignoring", node.Name) continue } else { - glog.Infof("Node %v has CIDR %s, occupying it in CIDR map", + klog.Infof("Node %v has CIDR %s, occupying it in CIDR map", node.Name, node.Spec.PodCIDR) } if err := ra.occupyCIDR(&node); err != nil { @@ -154,8 +154,8 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No func (r *rangeAllocator) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - glog.Infof("Starting range CIDR allocator") - defer glog.Infof("Shutting down range CIDR allocator") + klog.Infof("Starting range CIDR allocator") + defer klog.Infof("Shutting down range CIDR allocator") if !controller.WaitForCacheSync("cidrallocator", stopCh, r.nodesSynced) { return @@ -173,7 +173,7 @@ func (r *rangeAllocator) worker(stopChan <-chan struct{}) { select { case workItem, ok := <-r.nodeCIDRUpdateChannel: if !ok { - glog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed") + klog.Warning("Channel nodeCIDRUpdateChannel was unexpectedly closed") return } if err := r.updateCIDRAllocation(workItem); err != nil { @@ -225,7 +225,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { return nil } if !r.insertNodeToProcessing(node.Name) { - glog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name) + klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name) return nil } if node.Spec.PodCIDR != "" { @@ -238,7 +238,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { return fmt.Errorf("failed to allocate cidr: %v", err) } - glog.V(4).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR) + klog.V(4).Infof("Putting node %s with CIDR %s into the work queue", node.Name, podCIDR) r.nodeCIDRUpdateChannel <- nodeAndCIDR{ nodeName: node.Name, cidr: podCIDR, @@ -255,7 +255,7 @@ func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error { return fmt.Errorf("Failed to parse CIDR %s on Node %v: %v", node.Spec.PodCIDR, node.Name, err) } - glog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR) + klog.V(4).Infof("release CIDR %s", node.Spec.PodCIDR) if err = r.cidrs.Release(podCIDR); err != nil { return fmt.Errorf("Error when releasing CIDR %v: %v", node.Spec.PodCIDR, err) } @@ -275,7 +275,7 @@ func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) { } if err := r.cidrs.Occupy(serviceCIDR); err != nil { - glog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err) + klog.Errorf("Error filtering out service cidr %v: %v", serviceCIDR, err) } } @@ -289,37 +289,37 @@ func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error { node, err = r.nodeLister.Get(data.nodeName) if err != nil { - glog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err) + klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDR: %v", data.nodeName, err) return err } if node.Spec.PodCIDR == podCIDR { - glog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR) + klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, podCIDR) return nil } if node.Spec.PodCIDR != "" { - glog.Errorf("Node %v already has a CIDR allocated %v. 
Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR) + klog.Errorf("Node %v already has a CIDR allocated %v. Releasing the new one %v.", node.Name, node.Spec.PodCIDR, podCIDR) if err := r.cidrs.Release(data.cidr); err != nil { - glog.Errorf("Error when releasing CIDR %v", podCIDR) + klog.Errorf("Error when releasing CIDR %v", podCIDR) } return nil } // If we reached here, it means that the node has no CIDR currently assigned. So we set it. for i := 0; i < cidrUpdateRetries; i++ { if err = utilnode.PatchNodeCIDR(r.client, types.NodeName(node.Name), podCIDR); err == nil { - glog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR) + klog.Infof("Set node %v PodCIDR to %v", node.Name, podCIDR) return nil } } - glog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err) + klog.Errorf("Failed to update node %v PodCIDR to %v after multiple attempts: %v", node.Name, podCIDR, err) nodeutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed") // We accept the fact that we may leak CIDRs here. This is safer than releasing // them in case when we don't know if request went through. // NodeController restart will return all falsely allocated CIDRs to the pool. if !apierrors.IsServerTimeout(err) { - glog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err) + klog.Errorf("CIDR assignment for node %v failed: %v. Releasing allocated CIDR", node.Name, err) if releaseErr := r.cidrs.Release(data.cidr); releaseErr != nil { - glog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr) + klog.Errorf("Error releasing allocated CIDR for node %v: %v", node.Name, releaseErr) } } return err diff --git a/pkg/controller/nodeipam/ipam/sync/BUILD b/pkg/controller/nodeipam/ipam/sync/BUILD index 53e7e31344c92..0323850791fe4 100644 --- a/pkg/controller/nodeipam/ipam/sync/BUILD +++ b/pkg/controller/nodeipam/ipam/sync/BUILD @@ -8,7 +8,7 @@ go_library( deps = [ "//pkg/controller/nodeipam/ipam/cidrset:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -21,7 +21,7 @@ go_test( "//pkg/controller/nodeipam/ipam/test:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/nodeipam/ipam/sync/sync.go b/pkg/controller/nodeipam/ipam/sync/sync.go index 41806497ca867..ee95392b8ff95 100644 --- a/pkg/controller/nodeipam/ipam/sync/sync.go +++ b/pkg/controller/nodeipam/ipam/sync/sync.go @@ -22,7 +22,7 @@ import ( "net" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset" @@ -120,7 +120,7 @@ func New(c controller, cloudAlias cloudAlias, kubeAPI kubeAPI, mode NodeSyncMode // Loop runs the sync loop for a given node. done is an optional channel that // is closed when the Loop() returns. 
func (sync *NodeSync) Loop(done chan struct{}) { - glog.V(2).Infof("Starting sync loop for node %q", sync.nodeName) + klog.V(2).Infof("Starting sync loop for node %q", sync.nodeName) defer func() { if done != nil { @@ -130,13 +130,13 @@ func (sync *NodeSync) Loop(done chan struct{}) { timeout := sync.c.ResyncTimeout() delayTimer := time.NewTimer(timeout) - glog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout) + klog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout) for { select { case op, more := <-sync.opChan: if !more { - glog.V(2).Infof("Stopping sync loop") + klog.V(2).Infof("Stopping sync loop") return } sync.c.ReportResult(op.run(sync)) @@ -144,13 +144,13 @@ func (sync *NodeSync) Loop(done chan struct{}) { <-delayTimer.C } case <-delayTimer.C: - glog.V(4).Infof("Running resync for node %q", sync.nodeName) + klog.V(4).Infof("Running resync for node %q", sync.nodeName) sync.c.ReportResult((&updateOp{}).run(sync)) } timeout := sync.c.ResyncTimeout() delayTimer.Reset(timeout) - glog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout) + klog.V(4).Infof("Resync node %q in %v", sync.nodeName, timeout) } } @@ -190,15 +190,15 @@ func (op *updateOp) String() string { } func (op *updateOp) run(sync *NodeSync) error { - glog.V(3).Infof("Running updateOp %+v", op) + klog.V(3).Infof("Running updateOp %+v", op) ctx := context.Background() if op.node == nil { - glog.V(3).Infof("Getting node spec for %q", sync.nodeName) + klog.V(3).Infof("Getting node spec for %q", sync.nodeName) node, err := sync.kubeAPI.Node(ctx, sync.nodeName) if err != nil { - glog.Errorf("Error getting node %q spec: %v", sync.nodeName, err) + klog.Errorf("Error getting node %q spec: %v", sync.nodeName, err) return err } op.node = node @@ -206,7 +206,7 @@ func (op *updateOp) run(sync *NodeSync) error { aliasRange, err := sync.cloudAlias.Alias(ctx, sync.nodeName) if err != nil { - glog.Errorf("Error getting cloud alias for node %q: %v", sync.nodeName, err) + klog.Errorf("Error getting cloud alias for node %q: %v", sync.nodeName, err) return err } @@ -228,14 +228,14 @@ func (op *updateOp) run(sync *NodeSync) error { // match. func (op *updateOp) validateRange(ctx context.Context, sync *NodeSync, node *v1.Node, aliasRange *net.IPNet) error { if node.Spec.PodCIDR != aliasRange.String() { - glog.Errorf("Inconsistency detected between node PodCIDR and node alias (%v != %v)", + klog.Errorf("Inconsistency detected between node PodCIDR and node alias (%v != %v)", node.Spec.PodCIDR, aliasRange) sync.kubeAPI.EmitNodeWarningEvent(node.Name, MismatchEvent, "Node.Spec.PodCIDR != cloud alias (%v != %v)", node.Spec.PodCIDR, aliasRange) // User intervention is required in this case, as this is most likely due // to the user mucking around with their VM aliases on the side. 
} else { - glog.V(4).Infof("Node %q CIDR range %v is matches cloud assignment", node.Name, node.Spec.PodCIDR) + klog.V(4).Infof("Node %q CIDR range %v matches cloud assignment", node.Name, node.Spec.PodCIDR) return nil } @@ -249,26 +249,26 @@ func (op *updateOp) updateNodeFromAlias(ctx context.Context, sync *NodeSync, nod return fmt.Errorf("cannot sync from cloud in mode %q", sync.mode) } - glog.V(2).Infof("Updating node spec with alias range, node.PodCIDR = %v", aliasRange) + klog.V(2).Infof("Updating node spec with alias range, node.PodCIDR = %v", aliasRange) if err := sync.set.Occupy(aliasRange); err != nil { - glog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName) + klog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName) return err } if err := sync.kubeAPI.UpdateNodePodCIDR(ctx, node, aliasRange); err != nil { - glog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, aliasRange, err) + klog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, aliasRange, err) return err } - glog.V(2).Infof("Node %q PodCIDR set to %v", node.Name, aliasRange) + klog.V(2).Infof("Node %q PodCIDR set to %v", node.Name, aliasRange) if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil { - glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err) + klog.Errorf("Could not update node NetworkUnavailable status to false: %v", err) return err } - glog.V(2).Infof("Updated node %q PodCIDR from cloud alias %v", node.Name, aliasRange) + klog.V(2).Infof("Updated node %q PodCIDR from cloud alias %v", node.Name, aliasRange) return nil } @@ -283,27 +283,27 @@ func (op *updateOp) updateAliasFromNode(ctx context.Context, sync *NodeSync, nod _, aliasRange, err := net.ParseCIDR(node.Spec.PodCIDR) if err != nil { - glog.Errorf("Could not parse PodCIDR (%q) for node %q: %v", + klog.Errorf("Could not parse PodCIDR (%q) for node %q: %v", node.Spec.PodCIDR, node.Name, err) return err } if err := sync.set.Occupy(aliasRange); err != nil { - glog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName) + klog.Errorf("Error occupying range %v for node %v", aliasRange, sync.nodeName) return err } if err := sync.cloudAlias.AddAlias(ctx, node.Name, aliasRange); err != nil { - glog.Errorf("Could not add alias %v for node %q: %v", aliasRange, node.Name, err) + klog.Errorf("Could not add alias %v for node %q: %v", aliasRange, node.Name, err) return err } if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil { - glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err) + klog.Errorf("Could not update node NetworkUnavailable status to false: %v", err) return err } - glog.V(2).Infof("Updated node %q cloud alias with node spec, node.PodCIDR = %v", + klog.V(2).Infof("Updated node %q cloud alias with node spec, node.PodCIDR = %v", node.Name, node.Spec.PodCIDR) return nil @@ -326,21 +326,21 @@ func (op *updateOp) allocateRange(ctx context.Context, sync *NodeSync, node *v1. // is no durable record of the range. The missing space will be // recovered on the next restart of the controller.
if err := sync.cloudAlias.AddAlias(ctx, node.Name, cidrRange); err != nil { - glog.Errorf("Could not add alias %v for node %q: %v", cidrRange, node.Name, err) + klog.Errorf("Could not add alias %v for node %q: %v", cidrRange, node.Name, err) return err } if err := sync.kubeAPI.UpdateNodePodCIDR(ctx, node, cidrRange); err != nil { - glog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, cidrRange, err) + klog.Errorf("Could not update node %q PodCIDR to %v: %v", node.Name, cidrRange, err) return err } if err := sync.kubeAPI.UpdateNodeNetworkUnavailable(node.Name, false); err != nil { - glog.Errorf("Could not update node NetworkUnavailable status to false: %v", err) + klog.Errorf("Could not update node NetworkUnavailable status to false: %v", err) return err } - glog.V(2).Infof("Allocated PodCIDR %v for node %q", cidrRange, node.Name) + klog.V(2).Infof("Allocated PodCIDR %v for node %q", cidrRange, node.Name) return nil } @@ -358,15 +358,15 @@ func (op *deleteOp) String() string { } func (op *deleteOp) run(sync *NodeSync) error { - glog.V(3).Infof("Running deleteOp %+v", op) + klog.V(3).Infof("Running deleteOp %+v", op) if op.node.Spec.PodCIDR == "" { - glog.V(2).Infof("Node %q was deleted, node had no PodCIDR range assigned", op.node.Name) + klog.V(2).Infof("Node %q was deleted, node had no PodCIDR range assigned", op.node.Name) return nil } _, cidrRange, err := net.ParseCIDR(op.node.Spec.PodCIDR) if err != nil { - glog.Errorf("Deleted node %q has an invalid podCIDR %q: %v", + klog.Errorf("Deleted node %q has an invalid podCIDR %q: %v", op.node.Name, op.node.Spec.PodCIDR, err) sync.kubeAPI.EmitNodeWarningEvent(op.node.Name, InvalidPodCIDR, "Node %q has an invalid PodCIDR: %q", op.node.Name, op.node.Spec.PodCIDR) @@ -374,7 +374,7 @@ func (op *deleteOp) run(sync *NodeSync) error { } sync.set.Release(cidrRange) - glog.V(2).Infof("Node %q was deleted, releasing CIDR range %v", + klog.V(2).Infof("Node %q was deleted, releasing CIDR range %v", op.node.Name, op.node.Spec.PodCIDR) return nil diff --git a/pkg/controller/nodeipam/ipam/sync/sync_test.go b/pkg/controller/nodeipam/ipam/sync/sync_test.go index 4a47280d94b08..8c80b2c6453bd 100644 --- a/pkg/controller/nodeipam/ipam/sync/sync_test.go +++ b/pkg/controller/nodeipam/ipam/sync/sync_test.go @@ -24,8 +24,8 @@ import ( "testing" "time" - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test" @@ -88,7 +88,7 @@ func (f *fakeAPIs) EmitNodeWarningEvent(nodeName, reason, fmtStr string, args .. 
} func (f *fakeAPIs) ReportResult(err error) { - glog.V(2).Infof("ReportResult %v", err) + klog.V(2).Infof("ReportResult %v", err) f.results = append(f.results, err) if f.reportChan != nil { f.reportChan <- struct{}{} @@ -104,7 +104,7 @@ func (f *fakeAPIs) ResyncTimeout() time.Duration { func (f *fakeAPIs) dumpTrace() { for i, x := range f.calls { - glog.Infof("trace %v: %v", i, x) + klog.Infof("trace %v: %v", i, x) } } diff --git a/pkg/controller/nodeipam/node_ipam_controller.go b/pkg/controller/nodeipam/node_ipam_controller.go index 4c3f55575d49c..8f87cf72b1e4f 100644 --- a/pkg/controller/nodeipam/node_ipam_controller.go +++ b/pkg/controller/nodeipam/node_ipam_controller.go @@ -20,7 +20,7 @@ import ( "net" "time" - "github.com/golang/glog" + "k8s.io/klog" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -90,13 +90,13 @@ func NewNodeIpamController( allocatorType ipam.CIDRAllocatorType) (*Controller, error) { if kubeClient == nil { - glog.Fatalf("kubeClient is nil when starting Controller") + klog.Fatalf("kubeClient is nil when starting Controller") } eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) - glog.Infof("Sending events to api server.") + klog.Infof("Sending events to api server.") eventBroadcaster.StartRecordingToSink( &v1core.EventSinkImpl{ Interface: kubeClient.CoreV1().Events(""), @@ -107,13 +107,13 @@ func NewNodeIpamController( } if clusterCIDR == nil { - glog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set") + klog.Fatal("Controller: Must specify --cluster-cidr if --allocate-node-cidrs is set") } mask := clusterCIDR.Mask if allocatorType != ipam.CloudAllocatorType { // Cloud CIDR allocator does not rely on clusterCIDR or nodeCIDRMaskSize for allocation. 
if maskSize, _ := mask.Size(); maskSize > nodeCIDRMaskSize { - glog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size") + klog.Fatal("Controller: Invalid --cluster-cidr, mask size of cluster CIDR must be less than --node-cidr-mask-size") } } @@ -141,10 +141,10 @@ func NewNodeIpamController( } ipamc, err := ipam.NewController(cfg, kubeClient, cloud, clusterCIDR, serviceCIDR, nodeCIDRMaskSize) if err != nil { - glog.Fatalf("Error creating ipam controller: %v", err) + klog.Fatalf("Error creating ipam controller: %v", err) } if err := ipamc.Start(nodeInformer); err != nil { - glog.Fatalf("Error trying to Init(): %v", err) + klog.Fatalf("Error trying to Init(): %v", err) } } else { var err error @@ -165,8 +165,8 @@ func NewNodeIpamController( func (nc *Controller) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - glog.Infof("Starting ipam controller") - defer glog.Infof("Shutting down ipam controller") + klog.Infof("Starting ipam controller") + defer klog.Infof("Shutting down ipam controller") if !controller.WaitForCacheSync("node", stopCh, nc.nodeInformerSynced) { return diff --git a/pkg/controller/nodelifecycle/BUILD b/pkg/controller/nodelifecycle/BUILD index 186919868a130..74eb860fa2ae6 100644 --- a/pkg/controller/nodelifecycle/BUILD +++ b/pkg/controller/nodelifecycle/BUILD @@ -43,8 +43,8 @@ go_library( "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go index 431296a50d504..f474b55df9e52 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go @@ -29,7 +29,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" coordv1beta1 "k8s.io/api/coordination/v1beta1" "k8s.io/api/core/v1" @@ -262,14 +262,14 @@ func NewNodeLifecycleController( taintNodeByCondition bool) (*Controller, error) { if kubeClient == nil { - glog.Fatalf("kubeClient is nil when starting Controller") + klog.Fatalf("kubeClient is nil when starting Controller") } eventBroadcaster := record.NewBroadcaster() recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "node-controller"}) - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) - glog.Infof("Sending events to api server.") + klog.Infof("Sending events to api server.") eventBroadcaster.StartRecordingToSink( &v1core.EventSinkImpl{ Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""), @@ -309,7 +309,7 @@ func NewNodeLifecycleController( nodeUpdateQueue: workqueue.New(), } if useTaintBasedEvictions { - glog.Infof("Controller is using taint based evictions.") + klog.Infof("Controller is using taint based evictions.") } nc.enterPartialDisruptionFunc = nc.ReducedQPSFunc @@ -336,12 +336,12 @@ func NewNodeLifecycleController( if !isPod { deletedState, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Received unexpected object: %v", obj) + klog.Errorf("Received unexpected object: %v", obj) return } pod, ok = deletedState.Obj.(*v1.Pod) if !ok { - 
glog.Errorf("DeletedFinalStateUnknown contained non-Pod object: %v", deletedState.Obj) + klog.Errorf("DeletedFinalStateUnknown contained non-Pod object: %v", deletedState.Obj) return } } @@ -375,7 +375,7 @@ func NewNodeLifecycleController( } if nc.taintNodeByCondition { - glog.Infof("Controller will taint node by condition.") + klog.Infof("Controller will taint node by condition.") nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: nodeutil.CreateAddNodeHandler(func(node *v1.Node) error { nc.nodeUpdateQueue.Add(node.Name) @@ -420,8 +420,8 @@ func NewNodeLifecycleController( func (nc *Controller) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - glog.Infof("Starting node controller") - defer glog.Infof("Shutting down node controller") + klog.Infof("Starting node controller") + defer klog.Infof("Shutting down node controller") if !controller.WaitForCacheSync("taint", stopCh, nc.leaseInformerSynced, nc.nodeInformerSynced, nc.podInformerSynced, nc.daemonSetInformerSynced) { return @@ -459,7 +459,7 @@ func (nc *Controller) Run(stopCh <-chan struct{}) { // Incorporate the results of node health signal pushed from kubelet to master. go wait.Until(func() { if err := nc.monitorNodeHealth(); err != nil { - glog.Errorf("Error monitoring node health: %v", err) + klog.Errorf("Error monitoring node health: %v", err) } }, nc.nodeMonitorPeriod, stopCh) @@ -495,7 +495,7 @@ func (nc *Controller) doFixDeprecatedTaintKeyPass(node *v1.Node) error { return nil } - glog.Warningf("Detected deprecated taint keys: %v on node: %v, will substitute them with %v", + klog.Warningf("Detected deprecated taint keys: %v on node: %v, will substitute them with %v", taintsToDel, node.GetName(), taintsToAdd) if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, taintsToAdd, taintsToDel, node) { @@ -516,7 +516,7 @@ func (nc *Controller) doNoScheduleTaintingPassWorker() { if err := nc.doNoScheduleTaintingPass(nodeName); err != nil { // TODO (k82cn): Add nodeName back to the queue. - glog.Errorf("Failed to taint NoSchedule on node <%s>, requeue it: %v", nodeName, err) + klog.Errorf("Failed to taint NoSchedule on node <%s>, requeue it: %v", nodeName, err) } nc.nodeUpdateQueue.Done(nodeName) } @@ -585,10 +585,10 @@ func (nc *Controller) doNoExecuteTaintingPass() { nc.zoneNoExecuteTainter[k].Try(func(value scheduler.TimedValue) (bool, time.Duration) { node, err := nc.nodeLister.Get(value.Value) if apierrors.IsNotFound(err) { - glog.Warningf("Node %v no longer present in nodeLister!", value.Value) + klog.Warningf("Node %v no longer present in nodeLister!", value.Value) return true, 0 } else if err != nil { - glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err) + klog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err) // retry in 50 millisecond return false, 50 * time.Millisecond } else { @@ -607,7 +607,7 @@ func (nc *Controller) doNoExecuteTaintingPass() { oppositeTaint = *NotReadyTaintTemplate } else { // It seems that the Node is ready again, so there's no need to taint it. - glog.V(4).Infof("Node %v was in a taint queue, but it's ready now. Ignoring taint request.", value.Value) + klog.V(4).Infof("Node %v was in a taint queue, but it's ready now. 
Ignoring taint request.", value.Value) return true, 0 } @@ -624,9 +624,9 @@ func (nc *Controller) doEvictionPass() { nc.zonePodEvictor[k].Try(func(value scheduler.TimedValue) (bool, time.Duration) { node, err := nc.nodeLister.Get(value.Value) if apierrors.IsNotFound(err) { - glog.Warningf("Node %v no longer present in nodeLister!", value.Value) + klog.Warningf("Node %v no longer present in nodeLister!", value.Value) } else if err != nil { - glog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err) + klog.Warningf("Failed to get Node %v from the nodeLister: %v", value.Value, err) } else { zone := utilnode.GetZoneKey(node) evictionsNumber.WithLabelValues(zone).Inc() @@ -638,7 +638,7 @@ func (nc *Controller) doEvictionPass() { return false, 0 } if remaining { - glog.Infof("Pods awaiting deletion due to Controller eviction") + klog.Infof("Pods awaiting deletion due to Controller eviction") } return true, 0 }) @@ -662,7 +662,7 @@ func (nc *Controller) monitorNodeHealth() error { } for i := range added { - glog.V(1).Infof("Controller observed a new Node: %#v", added[i].Name) + klog.V(1).Infof("Controller observed a new Node: %#v", added[i].Name) nodeutil.RecordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in Controller", added[i].Name)) nc.knownNodeSet[added[i].Name] = added[i] nc.addPodEvictorForNewZone(added[i]) @@ -674,7 +674,7 @@ func (nc *Controller) monitorNodeHealth() error { } for i := range deleted { - glog.V(1).Infof("Controller observed a Node deletion: %v", deleted[i].Name) + klog.V(1).Infof("Controller observed a Node deletion: %v", deleted[i].Name) nodeutil.RecordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), v1.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from Controller", deleted[i].Name)) delete(nc.knownNodeSet, deleted[i].Name) } @@ -693,12 +693,12 @@ func (nc *Controller) monitorNodeHealth() error { name := node.Name node, err = nc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{}) if err != nil { - glog.Errorf("Failed while getting a Node to retry updating node health. Probably Node %s was deleted.", name) + klog.Errorf("Failed while getting a Node to retry updating node health. Probably Node %s was deleted.", name) return false, err } return false, nil }); err != nil { - glog.Errorf("Update health of Node '%v' from Controller error: %v. "+ + klog.Errorf("Update health of Node '%v' from Controller error: %v. "+ "Skipping - no pods will be evicted.", node.Name, err) continue } @@ -717,10 +717,10 @@ func (nc *Controller) monitorNodeHealth() error { if taintutils.TaintExists(node.Spec.Taints, UnreachableTaintTemplate) { taintToAdd := *NotReadyTaintTemplate if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{UnreachableTaintTemplate}, node) { - glog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.") + klog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.") } } else if nc.markNodeForTainting(node) { - glog.V(2).Infof("Node %v is NotReady as of %v. Adding it to the Taint queue.", + klog.V(2).Infof("Node %v is NotReady as of %v. 
Adding it to the Taint queue.", node.Name, decisionTimestamp, ) @@ -728,7 +728,7 @@ func (nc *Controller) monitorNodeHealth() error { } else { if decisionTimestamp.After(nc.nodeHealthMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) { if nc.evictPods(node) { - glog.V(2).Infof("Node is NotReady. Adding Pods on Node %s to eviction queue: %v is later than %v + %v", + klog.V(2).Infof("Node is NotReady. Adding Pods on Node %s to eviction queue: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeHealthMap[node.Name].readyTransitionTimestamp, @@ -744,10 +744,10 @@ func (nc *Controller) monitorNodeHealth() error { if taintutils.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) { taintToAdd := *UnreachableTaintTemplate if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) { - glog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.") + klog.Errorf("Failed to instantly swap NotReadyTaint to UnreachableTaint. Will try again in the next cycle.") } } else if nc.markNodeForTainting(node) { - glog.V(2).Infof("Node %v is unresponsive as of %v. Adding it to the Taint queue.", + klog.V(2).Infof("Node %v is unresponsive as of %v. Adding it to the Taint queue.", node.Name, decisionTimestamp, ) @@ -755,7 +755,7 @@ func (nc *Controller) monitorNodeHealth() error { } else { if decisionTimestamp.After(nc.nodeHealthMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout)) { if nc.evictPods(node) { - glog.V(2).Infof("Node is unresponsive. Adding Pods on Node %s to eviction queues: %v is later than %v + %v", + klog.V(2).Infof("Node is unresponsive. Adding Pods on Node %s to eviction queues: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeHealthMap[node.Name].readyTransitionTimestamp, @@ -769,20 +769,20 @@ func (nc *Controller) monitorNodeHealth() error { if nc.useTaintBasedEvictions { removed, err := nc.markNodeAsReachable(node) if err != nil { - glog.Errorf("Failed to remove taints from node %v. Will retry in next iteration.", node.Name) + klog.Errorf("Failed to remove taints from node %v. Will retry in next iteration.", node.Name) } if removed { - glog.V(2).Infof("Node %s is healthy again, removing all taints", node.Name) + klog.V(2).Infof("Node %s is healthy again, removing all taints", node.Name) } } else { if nc.cancelPodEviction(node) { - glog.V(2).Infof("Node %s is ready again, cancelled pod eviction", node.Name) + klog.V(2).Infof("Node %s is ready again, cancelled pod eviction", node.Name) } } // Removing the shutdown taint is always needed, whether or not taint-based evictions are in use. err := nc.markNodeAsNotShutdown(node) if err != nil { - glog.Errorf("Failed to remove taints from node %v. Will retry in next iteration.", node.Name) + klog.Errorf("Failed to remove taints from node %v. Will retry in next iteration.", node.Name) } } @@ -800,23 +800,23 @@ func (nc *Controller) monitorNodeHealth() error { // Check whether the node was shut down in the cloud provider; if so, do not delete it. 
Instead add taint shutdown, err := nc.nodeShutdownInCloudProvider(context.TODO(), node) if err != nil { - glog.Errorf("Error determining if node %v shutdown in cloud: %v", node.Name, err) + klog.Errorf("Error determining if node %v shutdown in cloud: %v", node.Name, err) } // node shutdown if shutdown && err == nil { err = controller.AddOrUpdateTaintOnNode(nc.kubeClient, node.Name, controller.ShutdownTaint) if err != nil { - glog.Errorf("Error patching node taints: %v", err) + klog.Errorf("Error patching node taints: %v", err) } continue } exists, err := nc.nodeExistsInCloudProvider(types.NodeName(node.Name)) if err != nil { - glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err) + klog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err) continue } if !exists { - glog.V(2).Infof("Deleting node (no longer present in cloud provider): %s", node.Name) + klog.V(2).Infof("Deleting node (no longer present in cloud provider): %s", node.Name) nodeutil.RecordNodeEvent(nc.recorder, node.Name, string(node.UID), v1.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name)) go func(nodeName string) { defer utilruntime.HandleCrash() @@ -824,7 +824,7 @@ func (nc *Controller) monitorNodeHealth() error { // is gone. Delete it without worrying about grace // periods. if err := nodeutil.ForcefullyDeleteNode(nc.kubeClient, nodeName); err != nil { - glog.Errorf("Unable to forcefully delete node %q: %v", nodeName, err) + klog.Errorf("Unable to forcefully delete node %q: %v", nodeName, err) } }(node.Name) } @@ -892,21 +892,21 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node } _, observedCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady) if !found { - glog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name) + klog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name) savedNodeHealth = &nodeHealthData{ status: &node.Status, probeTimestamp: nc.now(), readyTransitionTimestamp: nc.now(), } } else if savedCondition == nil && observedCondition != nil { - glog.V(1).Infof("Creating timestamp entry for newly observed Node %s", node.Name) + klog.V(1).Infof("Creating timestamp entry for newly observed Node %s", node.Name) savedNodeHealth = &nodeHealthData{ status: &node.Status, probeTimestamp: nc.now(), readyTransitionTimestamp: nc.now(), } } else if savedCondition != nil && observedCondition == nil { - glog.Errorf("ReadyCondition was removed from Status of Node %s", node.Name) + klog.Errorf("ReadyCondition was removed from Status of Node %s", node.Name) // TODO: figure out what to do in this case. For now we do the same thing as above. savedNodeHealth = &nodeHealthData{ status: &node.Status, @@ -918,15 +918,15 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node // If ReadyCondition changed since the last time we checked, we update the transition timestamp to "now", // otherwise we leave it as it is. 
if savedCondition.LastTransitionTime != observedCondition.LastTransitionTime { - glog.V(3).Infof("ReadyCondition for Node %s transitioned from %v to %v", node.Name, savedCondition, observedCondition) + klog.V(3).Infof("ReadyCondition for Node %s transitioned from %v to %v", node.Name, savedCondition, observedCondition) transitionTime = nc.now() } else { transitionTime = savedNodeHealth.readyTransitionTimestamp } - if glog.V(5) { - glog.V(5).Infof("Node %s ReadyCondition updated. Updating timestamp: %+v vs %+v.", node.Name, savedNodeHealth.status, node.Status) + if klog.V(5) { + klog.V(5).Infof("Node %s ReadyCondition updated. Updating timestamp: %+v vs %+v.", node.Name, savedNodeHealth.status, node.Status) } else { - glog.V(3).Infof("Node %s ReadyCondition updated. Updating timestamp.", node.Name) + klog.V(3).Infof("Node %s ReadyCondition updated. Updating timestamp.", node.Name) } savedNodeHealth = &nodeHealthData{ status: &node.Status, @@ -952,7 +952,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node // NodeReady condition or lease was last set longer ago than gracePeriod, so // update it to Unknown (regardless of its current value) in the master. if currentReadyCondition == nil { - glog.V(2).Infof("node %v is never updated by kubelet", node.Name) + klog.V(2).Infof("node %v is never updated by kubelet", node.Name) node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{ Type: v1.NodeReady, Status: v1.ConditionUnknown, @@ -962,7 +962,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node LastTransitionTime: nc.now(), }) } else { - glog.V(4).Infof("node %v hasn't been updated for %+v. Last ready condition is: %+v", + klog.V(4).Infof("node %v hasn't been updated for %+v. Last ready condition is: %+v", node.Name, nc.now().Time.Sub(savedNodeHealth.probeTimestamp.Time), observedReadyCondition) if observedReadyCondition.Status != v1.ConditionUnknown { currentReadyCondition.Status = v1.ConditionUnknown @@ -988,7 +988,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node for _, nodeConditionType := range remainingNodeConditionTypes { _, currentCondition := v1node.GetNodeCondition(&node.Status, nodeConditionType) if currentCondition == nil { - glog.V(2).Infof("Condition %v of node %v was never updated by kubelet", nodeConditionType, node.Name) + klog.V(2).Infof("Condition %v of node %v was never updated by kubelet", nodeConditionType, node.Name) node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{ Type: nodeConditionType, Status: v1.ConditionUnknown, @@ -998,7 +998,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node LastTransitionTime: nowTimestamp, }) } else { - glog.V(4).Infof("node %v hasn't been updated for %+v. Last %v is: %+v", + klog.V(4).Infof("node %v hasn't been updated for %+v. 
Last %v is: %+v", node.Name, nc.now().Time.Sub(savedNodeHealth.probeTimestamp.Time), nodeConditionType, currentCondition) if currentCondition.Status != v1.ConditionUnknown { currentCondition.Status = v1.ConditionUnknown @@ -1012,7 +1012,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node _, currentCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady) if !apiequality.Semantic.DeepEqual(currentCondition, &observedReadyCondition) { if _, err = nc.kubeClient.CoreV1().Nodes().UpdateStatus(node); err != nil { - glog.Errorf("Error updating node %s: %v", node.Name, err) + klog.Errorf("Error updating node %s: %v", node.Name, err) return gracePeriod, observedReadyCondition, currentReadyCondition, err } nc.nodeHealthMap[node.Name] = &nodeHealthData{ @@ -1041,7 +1041,7 @@ func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.Nod } newZoneStates[k] = newState if _, had := nc.zoneStates[k]; !had { - glog.Errorf("Setting initial state for unseen zone: %v", k) + klog.Errorf("Setting initial state for unseen zone: %v", k) nc.zoneStates[k] = stateInitial } } @@ -1069,12 +1069,12 @@ func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.Nod if !allAreFullyDisrupted || !allWasFullyDisrupted { // We're switching to full disruption mode if allAreFullyDisrupted { - glog.V(0).Info("Controller detected that all Nodes are not-Ready. Entering master disruption mode.") + klog.V(0).Info("Controller detected that all Nodes are not-Ready. Entering master disruption mode.") for i := range nodes { if nc.useTaintBasedEvictions { _, err := nc.markNodeAsReachable(nodes[i]) if err != nil { - glog.Errorf("Failed to remove taints from Node %v", nodes[i].Name) + klog.Errorf("Failed to remove taints from Node %v", nodes[i].Name) } } else { nc.cancelPodEviction(nodes[i]) @@ -1096,7 +1096,7 @@ func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.Nod } // We're exiting full disruption mode if allWasFullyDisrupted { - glog.V(0).Info("Controller detected that some Nodes are Ready. Exiting master disruption mode.") + klog.V(0).Info("Controller detected that some Nodes are Ready. Exiting master disruption mode.") // When exiting disruption mode update probe timestamps on all Nodes. now := nc.now() for i := range nodes { @@ -1119,7 +1119,7 @@ func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.Nod if v == newState { continue } - glog.V(0).Infof("Controller detected that zone %v is now in state %v.", k, newState) + klog.V(0).Infof("Controller detected that zone %v is now in state %v.", k, newState) nc.setLimiterInZone(k, len(zoneToNodeConditions[k]), newState) nc.zoneStates[k] = newState } @@ -1219,7 +1219,7 @@ func (nc *Controller) addPodEvictorForNewZone(node *v1.Node) { flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, scheduler.EvictionRateLimiterBurst)) } // Init the metric for the new zone. 
- glog.Infof("Initializing eviction metric for zone: %v", zone) + klog.Infof("Initializing eviction metric for zone: %v", zone) evictionsNumber.WithLabelValues(zone).Add(0) } } @@ -1232,7 +1232,7 @@ func (nc *Controller) cancelPodEviction(node *v1.Node) bool { defer nc.evictorLock.Unlock() wasDeleting := nc.zonePodEvictor[zone].Remove(node.Name) if wasDeleting { - glog.V(2).Infof("Cancelling pod Eviction on Node: %v", node.Name) + klog.V(2).Infof("Cancelling pod Eviction on Node: %v", node.Name) return true } return false @@ -1257,12 +1257,12 @@ func (nc *Controller) markNodeAsReachable(node *v1.Node) (bool, error) { defer nc.evictorLock.Unlock() err := controller.RemoveTaintOffNode(nc.kubeClient, node.Name, node, UnreachableTaintTemplate) if err != nil { - glog.Errorf("Failed to remove taint from node %v: %v", node.Name, err) + klog.Errorf("Failed to remove taint from node %v: %v", node.Name, err) return false, err } err = controller.RemoveTaintOffNode(nc.kubeClient, node.Name, node, NotReadyTaintTemplate) if err != nil { - glog.Errorf("Failed to remove taint from node %v: %v", node.Name, err) + klog.Errorf("Failed to remove taint from node %v: %v", node.Name, err) return false, err } return nc.zoneNoExecuteTainter[utilnode.GetZoneKey(node)].Remove(node.Name), nil @@ -1273,7 +1273,7 @@ func (nc *Controller) markNodeAsNotShutdown(node *v1.Node) error { defer nc.evictorLock.Unlock() err := controller.RemoveTaintOffNode(nc.kubeClient, node.Name, node, controller.ShutdownTaint) if err != nil { - glog.Errorf("Failed to remove taint from node %v: %v", node.Name, err) + klog.Errorf("Failed to remove taint from node %v: %v", node.Name, err) return err } return nil diff --git a/pkg/controller/nodelifecycle/scheduler/BUILD b/pkg/controller/nodelifecycle/scheduler/BUILD index d8f2f98790a4a..ad28abb8e0a4d 100644 --- a/pkg/controller/nodelifecycle/scheduler/BUILD +++ b/pkg/controller/nodelifecycle/scheduler/BUILD @@ -26,7 +26,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/nodelifecycle/scheduler/rate_limited_queue.go b/pkg/controller/nodelifecycle/scheduler/rate_limited_queue.go index 5d562fb71fcd8..03a1fcb889f58 100644 --- a/pkg/controller/nodelifecycle/scheduler/rate_limited_queue.go +++ b/pkg/controller/nodelifecycle/scheduler/rate_limited_queue.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/flowcontrol" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -236,7 +236,7 @@ func (q *RateLimitedTimedQueue) Try(fn ActionFunc) { for ok { // rate limit the queue checking if !q.limiter.TryAccept() { - glog.V(10).Infof("Try rate limited for value: %v", val) + klog.V(10).Infof("Try rate limited for value: %v", val) // Try again later break } diff --git a/pkg/controller/nodelifecycle/scheduler/taint_manager.go b/pkg/controller/nodelifecycle/scheduler/taint_manager.go index 90e43757c4d9b..fbf683077f10e 100644 --- a/pkg/controller/nodelifecycle/scheduler/taint_manager.go +++ b/pkg/controller/nodelifecycle/scheduler/taint_manager.go @@ -38,7 +38,7 @@ import ( "k8s.io/kubernetes/pkg/apis/core/helper" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -99,7 +99,7 @@ func deletePodHandler(c 
clientset.Interface, emitEventFunc func(types.Namespaced return func(args *WorkArgs) error { ns := args.NamespacedName.Namespace name := args.NamespacedName.Name - glog.V(0).Infof("NoExecuteTaintManager is deleting Pod: %v", args.NamespacedName.String()) + klog.V(0).Infof("NoExecuteTaintManager is deleting Pod: %v", args.NamespacedName.String()) if emitEventFunc != nil { emitEventFunc(args.NamespacedName) } @@ -170,12 +170,12 @@ func getMinTolerationTime(tolerations []v1.Toleration) time.Duration { func NewNoExecuteTaintManager(c clientset.Interface, getPod GetPodFunc, getNode GetNodeFunc) *NoExecuteTaintManager { eventBroadcaster := record.NewBroadcaster() recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "taint-controller"}) - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) if c != nil { - glog.V(0).Infof("Sending events to api server.") + klog.V(0).Infof("Sending events to api server.") eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.CoreV1().Events("")}) } else { - glog.Fatalf("kubeClient is nil when starting NodeController") + klog.Fatalf("kubeClient is nil when starting NodeController") } tm := &NoExecuteTaintManager{ @@ -195,7 +195,7 @@ func NewNoExecuteTaintManager(c clientset.Interface, getPod GetPodFunc, getNode // Run starts NoExecuteTaintManager which will run in loop until `stopCh` is closed. func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) { - glog.V(0).Infof("Starting NoExecuteTaintManager") + klog.V(0).Infof("Starting NoExecuteTaintManager") for i := 0; i < UpdateWorkerSize; i++ { tc.nodeUpdateChannels = append(tc.nodeUpdateChannels, make(chan nodeUpdateItem, NodeUpdateChannelSize)) @@ -356,7 +356,7 @@ func (tc *NoExecuteTaintManager) processPodOnNode( } allTolerated, usedTolerations := v1helper.GetMatchingTolerations(taints, tolerations) if !allTolerated { - glog.V(2).Infof("Not all taints are tolerated after update for Pod %v on %v", podNamespacedName.String(), nodeName) + klog.V(2).Infof("Not all taints are tolerated after update for Pod %v on %v", podNamespacedName.String(), nodeName) // We're canceling scheduled work (if any), as we're going to delete the Pod right away. tc.cancelWorkWithEvent(podNamespacedName) tc.taintEvictionQueue.AddWork(NewWorkArgs(podNamespacedName.Name, podNamespacedName.Namespace), time.Now(), time.Now()) @@ -365,7 +365,7 @@ func (tc *NoExecuteTaintManager) processPodOnNode( minTolerationTime := getMinTolerationTime(usedTolerations) // getMinTolerationTime returns negative value to denote infinite toleration. if minTolerationTime < 0 { - glog.V(4).Infof("New tolerations for %v tolerate forever. Scheduled deletion won't be cancelled if already scheduled.", podNamespacedName.String()) + klog.V(4).Infof("New tolerations for %v tolerate forever. 
Scheduled deletion won't be cancelled if already scheduled.", podNamespacedName.String()) return } @@ -388,7 +388,7 @@ func (tc *NoExecuteTaintManager) handlePodUpdate(podUpdate podUpdateItem) { if apierrors.IsNotFound(err) { // Delete podNamespacedName := types.NamespacedName{Namespace: podUpdate.podNamespace, Name: podUpdate.podName} - glog.V(4).Infof("Noticed pod deletion: %#v", podNamespacedName) + klog.V(4).Infof("Noticed pod deletion: %#v", podNamespacedName) tc.cancelWorkWithEvent(podNamespacedName) return } @@ -403,7 +403,7 @@ func (tc *NoExecuteTaintManager) handlePodUpdate(podUpdate podUpdateItem) { // Create or Update podNamespacedName := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name} - glog.V(4).Infof("Noticed pod update: %#v", podNamespacedName) + klog.V(4).Infof("Noticed pod update: %#v", podNamespacedName) nodeName := pod.Spec.NodeName if nodeName == "" { return @@ -427,7 +427,7 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) { if err != nil { if apierrors.IsNotFound(err) { // Delete - glog.V(4).Infof("Noticed node deletion: %#v", nodeUpdate.nodeName) + klog.V(4).Infof("Noticed node deletion: %#v", nodeUpdate.nodeName) tc.taintedNodesLock.Lock() defer tc.taintedNodesLock.Unlock() delete(tc.taintedNodes, nodeUpdate.nodeName) @@ -438,12 +438,12 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) { } // Create or Update - glog.V(4).Infof("Noticed node update: %#v", nodeUpdate) + klog.V(4).Infof("Noticed node update: %#v", nodeUpdate) taints := getNoExecuteTaints(node.Spec.Taints) func() { tc.taintedNodesLock.Lock() defer tc.taintedNodesLock.Unlock() - glog.V(4).Infof("Updating known taints on node %v: %v", node.Name, taints) + klog.V(4).Infof("Updating known taints on node %v: %v", node.Name, taints) if len(taints) == 0 { delete(tc.taintedNodes, node.Name) } else { @@ -452,7 +452,7 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) { }() pods, err := getPodsAssignedToNode(tc.client, node.Name) if err != nil { - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) return } if len(pods) == 0 { @@ -460,7 +460,7 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) { } // Short circuit, to make this controller a bit faster. if len(taints) == 0 { - glog.V(4).Infof("All taints were removed from the Node %v. Cancelling all evictions...", node.Name) + klog.V(4).Infof("All taints were removed from the Node %v. Cancelling all evictions...", node.Name) for i := range pods { tc.cancelWorkWithEvent(types.NamespacedName{Namespace: pods[i].Namespace, Name: pods[i].Name}) } diff --git a/pkg/controller/nodelifecycle/scheduler/timed_workers.go b/pkg/controller/nodelifecycle/scheduler/timed_workers.go index 2eef59b041bfd..d995fb22a3662 100644 --- a/pkg/controller/nodelifecycle/scheduler/timed_workers.go +++ b/pkg/controller/nodelifecycle/scheduler/timed_workers.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/types" - "github.com/golang/glog" + "k8s.io/klog" ) // WorkArgs keeps arguments that will be passed to the function executed by the worker. @@ -107,12 +107,12 @@ func (q *TimedWorkerQueue) getWrappedWorkerFunc(key string) func(args *WorkArgs) // AddWork adds a work to the WorkerQueue which will be executed not earlier than `fireAt`. 
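// A stripped-down analogue of the TimedWorkerQueue documented above, showing
// why AddWork logs and then skips keys that already have pending work: each
// key owns at most one timer. This is a simplified illustration, not the
// controller's implementation.
package main

import (
	"fmt"
	"sync"
	"time"
)

type timedQueue struct {
	mu      sync.Mutex
	workers map[string]*time.Timer
}

func (q *timedQueue) addWork(key string, fireAt time.Time, fn func()) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if _, exists := q.workers[key]; exists {
		fmt.Printf("work for %q already scheduled, skipping\n", key)
		return
	}
	q.workers[key] = time.AfterFunc(time.Until(fireAt), fn)
}

func main() {
	q := &timedQueue{workers: map[string]*time.Timer{}}
	q.addWork("default/pod-a", time.Now().Add(50*time.Millisecond), func() {
		fmt.Println("evicting default/pod-a")
	})
	q.addWork("default/pod-a", time.Now(), func() {}) // duplicate: skipped
	time.Sleep(100 * time.Millisecond)
}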
func (q *TimedWorkerQueue) AddWork(args *WorkArgs, createdAt time.Time, fireAt time.Time) { key := args.KeyFromWorkArgs() - glog.V(4).Infof("Adding TimedWorkerQueue item %v at %v to be fired at %v", key, createdAt, fireAt) + klog.V(4).Infof("Adding TimedWorkerQueue item %v at %v to be fired at %v", key, createdAt, fireAt) q.Lock() defer q.Unlock() if _, exists := q.workers[key]; exists { - glog.Warningf("Trying to add already existing work for %+v. Skipping.", args) + klog.Warningf("Trying to add already existing work for %+v. Skipping.", args) return } worker := CreateWorker(args, createdAt, fireAt, q.getWrappedWorkerFunc(key)) @@ -126,7 +126,7 @@ func (q *TimedWorkerQueue) CancelWork(key string) bool { worker, found := q.workers[key] result := false if found { - glog.V(4).Infof("Cancelling TimedWorkerQueue item %v at %v", key, time.Now()) + klog.V(4).Infof("Cancelling TimedWorkerQueue item %v at %v", key, time.Now()) if worker != nil { result = true worker.Cancel() diff --git a/pkg/controller/podautoscaler/BUILD b/pkg/controller/podautoscaler/BUILD index 801a5a522622d..7ff398dea32f8 100644 --- a/pkg/controller/podautoscaler/BUILD +++ b/pkg/controller/podautoscaler/BUILD @@ -40,7 +40,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index 1ac7c1f980030..b043564abe832 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -21,7 +21,6 @@ import ( "math" "time" - "github.com/golang/glog" autoscalingv1 "k8s.io/api/autoscaling/v1" autoscalingv2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/api/core/v1" @@ -46,6 +45,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/controller" metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" @@ -108,7 +108,7 @@ func NewHorizontalController( ) *HorizontalController { broadcaster := record.NewBroadcaster() - broadcaster.StartLogging(glog.Infof) + broadcaster.StartLogging(klog.Infof) broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")}) recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"}) @@ -153,8 +153,8 @@ func (a *HorizontalController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer a.queue.ShutDown() - glog.Infof("Starting HPA controller") - defer glog.Infof("Shutting down HPA controller") + klog.Infof("Starting HPA controller") + defer klog.Infof("Shutting down HPA controller") if !controller.WaitForCacheSync("HPA", stopCh, a.hpaListerSynced, a.podListerSynced) { return @@ -197,7 +197,7 @@ func (a *HorizontalController) deleteHPA(obj interface{}) { func (a *HorizontalController) worker() { for a.processNextWorkItem() { } - glog.Infof("horizontal pod autoscaler controller worker shutting down") + klog.Infof("horizontal pod autoscaler controller worker shutting down") } func (a *HorizontalController) processNextWorkItem() bool { @@ -306,7 +306,7 @@ func (a *HorizontalController) reconcileKey(key string) error { hpa, err := 
a.hpaLister.HorizontalPodAutoscalers(namespace).Get(name) if errors.IsNotFound(err) { - glog.Infof("Horizontal Pod Autoscaler %s has been deleted in %s", name, namespace) + klog.Infof("Horizontal Pod Autoscaler %s has been deleted in %s", name, namespace) delete(a.recommendations, key) return nil } @@ -553,7 +553,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho return fmt.Errorf("failed to compute desired number of replicas based on listed metrics for %s: %v", reference, err) } - glog.V(4).Infof("proposing %v desired replicas (based on %s from %s) for %s", metricDesiredReplicas, metricName, timestamp, reference) + klog.V(4).Infof("proposing %v desired replicas (based on %s from %s) for %s", metricDesiredReplicas, metricName, timestamp, reference) rescaleMetric := "" if metricDesiredReplicas > desiredReplicas { @@ -585,10 +585,10 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho } setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededRescale", "the HPA controller was able to update the target scale to %d", desiredReplicas) a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason) - glog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s", + klog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s", hpa.Name, currentReplicas, desiredReplicas, rescaleReason) } else { - glog.V(4).Infof("decided not to scale %s to %v (last scale time was %s)", reference, desiredReplicas, hpa.Status.LastScaleTime) + klog.V(4).Infof("decided not to scale %s to %v (last scale time was %s)", reference, desiredReplicas, hpa.Status.LastScaleTime) desiredReplicas = currentReplicas } @@ -770,7 +770,7 @@ func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAuto a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error()) return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err) } - glog.V(2).Infof("Successfully updated status for %s", hpa.Name) + klog.V(2).Infof("Successfully updated status for %s", hpa.Name) return nil } diff --git a/pkg/controller/podautoscaler/metrics/BUILD b/pkg/controller/podautoscaler/metrics/BUILD index f745c9ac0ef63..673b2859f8349 100644 --- a/pkg/controller/podautoscaler/metrics/BUILD +++ b/pkg/controller/podautoscaler/metrics/BUILD @@ -23,8 +23,8 @@ go_library( "//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1:go_default_library", "//staging/src/k8s.io/metrics/pkg/client/custom_metrics:go_default_library", "//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go b/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go index cd4a1a18ac124..0db405482575b 100644 --- a/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go +++ b/pkg/controller/podautoscaler/metrics/legacy_metrics_client.go @@ -22,8 +22,8 @@ import ( "strings" "time" - "github.com/golang/glog" heapster "k8s.io/heapster/metrics/api/v1/types" + "k8s.io/klog" metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1" autoscaling "k8s.io/api/autoscaling/v2beta2" @@ -73,7 +73,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name return nil, 
time.Time{}, fmt.Errorf("failed to get pod resource metrics: %v", err) } - glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw)) + klog.V(4).Infof("Heapster metrics result: %s", string(resultRaw)) metrics := metricsapi.PodMetricsList{} err = json.Unmarshal(resultRaw, &metrics) @@ -94,7 +94,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name resValue, found := c.Usage[v1.ResourceName(resource)] if !found { missing = true - glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name) + klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name) continue } podSum += resValue.MilliValue() @@ -150,7 +150,7 @@ func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err) } - glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw)) + klog.V(4).Infof("Heapster metrics result: %s", string(resultRaw)) if len(metrics.Items) != len(podNames) { // if we get too many metrics or too few metrics, we have no way of knowing which metric goes to which pod diff --git a/pkg/controller/podautoscaler/metrics/rest_metrics_client.go b/pkg/controller/podautoscaler/metrics/rest_metrics_client.go index 138001088e44b..1f84866fc6814 100644 --- a/pkg/controller/podautoscaler/metrics/rest_metrics_client.go +++ b/pkg/controller/podautoscaler/metrics/rest_metrics_client.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" autoscaling "k8s.io/api/autoscaling/v2beta2" "k8s.io/api/core/v1" @@ -81,7 +81,7 @@ func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, name resValue, found := c.Usage[v1.ResourceName(resource)] if !found { missing = true - glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name) + klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name) break // containers loop } podSum += resValue.MilliValue() diff --git a/pkg/controller/podgc/BUILD b/pkg/controller/podgc/BUILD index 16464f6bfd4ac..eff9284cb77ee 100644 --- a/pkg/controller/podgc/BUILD +++ b/pkg/controller/podgc/BUILD @@ -26,7 +26,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/podgc/gc_controller.go b/pkg/controller/podgc/gc_controller.go index dfaa015eba774..a288bc86b37b1 100644 --- a/pkg/controller/podgc/gc_controller.go +++ b/pkg/controller/podgc/gc_controller.go @@ -34,7 +34,7 @@ import ( "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -59,7 +59,7 @@ func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInfor kubeClient: kubeClient, terminatedPodThreshold: terminatedPodThreshold, deletePod: func(namespace, name string) error { - glog.Infof("PodGC is force deleting Pod: %v/%v", namespace, name) + klog.Infof("PodGC is force deleting Pod: %v/%v", namespace, name) return kubeClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)) }, } @@ -73,8 +73,8 @@ func NewPodGC(kubeClient clientset.Interface, podInformer 
coreinformers.PodInfor func (gcc *PodGCController) Run(stop <-chan struct{}) { defer utilruntime.HandleCrash() - glog.Infof("Starting GC controller") - defer glog.Infof("Shutting down GC controller") + klog.Infof("Starting GC controller") + defer klog.Infof("Shutting down GC controller") if !controller.WaitForCacheSync("GC", stop, gcc.podListerSynced) { return @@ -88,7 +88,7 @@ func (gcc *PodGCController) Run(stop <-chan struct{}) { func (gcc *PodGCController) gc() { pods, err := gcc.podLister.List(labels.Everything()) if err != nil { - glog.Errorf("Error while listing all Pods: %v", err) + klog.Errorf("Error while listing all Pods: %v", err) return } if gcc.terminatedPodThreshold > 0 { @@ -122,7 +122,7 @@ func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) { deleteCount = terminatedPodCount } if deleteCount > 0 { - glog.Infof("garbage collecting %v pods", deleteCount) + klog.Infof("garbage collecting %v pods", deleteCount) } var wait sync.WaitGroup @@ -141,7 +141,7 @@ func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) { // gcOrphaned deletes pods that are bound to nodes that don't exist. func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) { - glog.V(4).Infof("GC'ing orphaned") + klog.V(4).Infof("GC'ing orphaned") // We want to get list of Nodes from the etcd, to make sure that it's as fresh as possible. nodes, err := gcc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { @@ -159,29 +159,29 @@ func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) { if nodeNames.Has(pod.Spec.NodeName) { continue } - glog.V(2).Infof("Found orphaned Pod %v/%v assigned to the Node %v. Deleting.", pod.Namespace, pod.Name, pod.Spec.NodeName) + klog.V(2).Infof("Found orphaned Pod %v/%v assigned to the Node %v. Deleting.", pod.Namespace, pod.Name, pod.Spec.NodeName) if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil { utilruntime.HandleError(err) } else { - glog.V(0).Infof("Forced deletion of orphaned Pod %v/%v succeeded", pod.Namespace, pod.Name) + klog.V(0).Infof("Forced deletion of orphaned Pod %v/%v succeeded", pod.Namespace, pod.Name) } } } // gcUnscheduledTerminating deletes pods that are terminating and haven't been scheduled to a particular node. func (gcc *PodGCController) gcUnscheduledTerminating(pods []*v1.Pod) { - glog.V(4).Infof("GC'ing unscheduled pods which are terminating.") + klog.V(4).Infof("GC'ing unscheduled pods which are terminating.") for _, pod := range pods { if pod.DeletionTimestamp == nil || len(pod.Spec.NodeName) > 0 { continue } - glog.V(2).Infof("Found unscheduled terminating Pod %v/%v not assigned to any Node. Deleting.", pod.Namespace, pod.Name) + klog.V(2).Infof("Found unscheduled terminating Pod %v/%v not assigned to any Node. 
Deleting.", pod.Namespace, pod.Name) if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil { utilruntime.HandleError(err) } else { - glog.V(0).Infof("Forced deletion of unscheduled terminating Pod %v/%v succeeded", pod.Namespace, pod.Name) + klog.V(0).Infof("Forced deletion of unscheduled terminating Pod %v/%v succeeded", pod.Namespace, pod.Name) } } } diff --git a/pkg/controller/replicaset/BUILD b/pkg/controller/replicaset/BUILD index 5d3a179e5a420..e833fdf6e373d 100644 --- a/pkg/controller/replicaset/BUILD +++ b/pkg/controller/replicaset/BUILD @@ -38,7 +38,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/integer:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/replicaset/replica_set.go b/pkg/controller/replicaset/replica_set.go index f874398063648..03edbe1365c56 100644 --- a/pkg/controller/replicaset/replica_set.go +++ b/pkg/controller/replicaset/replica_set.go @@ -35,7 +35,6 @@ import ( "sync" "time" - "github.com/golang/glog" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -55,6 +54,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/integer" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" @@ -108,7 +108,7 @@ type ReplicaSetController struct { // NewReplicaSetController configures a replica set controller with the specified event recorder func NewReplicaSetController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) return NewBaseController(rsInformer, podInformer, kubeClient, burstReplicas, apps.SchemeGroupVersion.WithKind("ReplicaSet"), @@ -179,8 +179,8 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) { defer rsc.queue.ShutDown() controllerName := strings.ToLower(rsc.Kind) - glog.Infof("Starting %v controller", controllerName) - defer glog.Infof("Shutting down %v controller", controllerName) + klog.Infof("Starting %v controller", controllerName) + defer klog.Infof("Shutting down %v controller", controllerName) if !controller.WaitForCacheSync(rsc.Kind, stopCh, rsc.podListerSynced, rsc.rsListerSynced) { return @@ -246,7 +246,7 @@ func (rsc *ReplicaSetController) updateRS(old, cur interface{}) { // that bad as ReplicaSets that haven't met expectations yet won't // sync, and all the listing is done using local stores. if *(oldRS.Spec.Replicas) != *(curRS.Spec.Replicas) { - glog.V(4).Infof("%v %v updated. Desired pod count change: %d->%d", rsc.Kind, curRS.Name, *(oldRS.Spec.Replicas), *(curRS.Spec.Replicas)) + klog.V(4).Infof("%v %v updated. 
Desired pod count change: %d->%d", rsc.Kind, curRS.Name, *(oldRS.Spec.Replicas), *(curRS.Spec.Replicas)) } rsc.enqueueReplicaSet(cur) } @@ -272,7 +272,7 @@ func (rsc *ReplicaSetController) addPod(obj interface{}) { if err != nil { return } - glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod) + klog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod) rsc.expectations.CreationObserved(rsKey) rsc.enqueueReplicaSet(rs) return @@ -286,7 +286,7 @@ func (rsc *ReplicaSetController) addPod(obj interface{}) { if len(rss) == 0 { return } - glog.V(4).Infof("Orphan Pod %s created: %#v.", pod.Name, pod) + klog.V(4).Infof("Orphan Pod %s created: %#v.", pod.Name, pod) for _, rs := range rss { rsc.enqueueReplicaSet(rs) } @@ -335,7 +335,7 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) { if rs == nil { return } - glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta) + klog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta) rsc.enqueueReplicaSet(rs) // TODO: MinReadySeconds in the Pod will generate an Available condition to be added in // the Pod status which in turn will trigger a requeue of the owning replica set thus @@ -345,7 +345,7 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) { // Note that this still suffers from #29229, we are just moving the problem one level // "closer" to kubelet (from the deployment to the replica set controller). if !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) && rs.Spec.MinReadySeconds > 0 { - glog.V(2).Infof("%v %q will be enqueued after %ds for availability check", rsc.Kind, rs.Name, rs.Spec.MinReadySeconds) + klog.V(2).Infof("%v %q will be enqueued after %ds for availability check", rsc.Kind, rs.Name, rs.Spec.MinReadySeconds) // Add a second to avoid milliseconds skew in AddAfter. // See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info. rsc.enqueueReplicaSetAfter(rs, (time.Duration(rs.Spec.MinReadySeconds)*time.Second)+time.Second) @@ -360,7 +360,7 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) { if len(rss) == 0 { return } - glog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta) + klog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta) for _, rs := range rss { rsc.enqueueReplicaSet(rs) } @@ -402,7 +402,7 @@ func (rsc *ReplicaSetController) deletePod(obj interface{}) { if err != nil { return } - glog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %#v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod) + klog.V(4).Infof("Pod %s/%s deleted through %v, timestamp %+v: %#v.", pod.Namespace, pod.Name, utilruntime.GetCaller(), pod.DeletionTimestamp, pod) rsc.expectations.DeletionObserved(rsKey, controller.PodKey(pod)) rsc.enqueueReplicaSet(rs) } @@ -474,7 +474,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps // into a performance bottleneck. We should generate a UID for the pod // beforehand and store it via ExpectCreations. rsc.expectations.ExpectCreations(rsKey, diff) - glog.V(2).Infof("Too few replicas for %v %s/%s, need %d, creating %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff) + klog.V(2).Infof("Too few replicas for %v %s/%s, need %d, creating %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff) // Batch the pod creates. 
Batch sizes start at SlowStartInitialBatchSize // and double with each successful iteration in a kind of "slow start". // This handles attempts to start large numbers of pods that would @@ -511,7 +511,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps // The skipped pods will be retried later. The next controller resync will // retry the slow start process. if skippedPods := diff - successfulCreations; skippedPods > 0 { - glog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for %v %v/%v", skippedPods, rsc.Kind, rs.Namespace, rs.Name) + klog.V(2).Infof("Slow-start failure. Skipping creation of %d pods, decrementing expectations for %v %v/%v", skippedPods, rsc.Kind, rs.Namespace, rs.Name) for i := 0; i < skippedPods; i++ { // Decrement the expected number of creates because the informer won't observe this pod rsc.expectations.CreationObserved(rsKey) @@ -522,7 +522,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps if diff > rsc.burstReplicas { diff = rsc.burstReplicas } - glog.V(2).Infof("Too many replicas for %v %s/%s, need %d, deleting %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff) + klog.V(2).Infof("Too many replicas for %v %s/%s, need %d, deleting %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff) // Choose which Pods to delete, preferring those in earlier phases of startup. podsToDelete := getPodsToDelete(filteredPods, diff) @@ -544,7 +544,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps if err := rsc.podControl.DeletePod(rs.Namespace, targetPod.Name, rs); err != nil { // Decrement the expected number of deletes because the informer won't observe this deletion podKey := controller.PodKey(targetPod) - glog.V(2).Infof("Failed to delete %v, decrementing expectations for %v %s/%s", podKey, rsc.Kind, rs.Namespace, rs.Name) + klog.V(2).Infof("Failed to delete %v, decrementing expectations for %v %s/%s", podKey, rsc.Kind, rs.Namespace, rs.Name) rsc.expectations.DeletionObserved(rsKey, podKey) errCh <- err } @@ -572,7 +572,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing %v %q (%v)", rsc.Kind, key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing %v %q (%v)", rsc.Kind, key, time.Since(startTime)) }() namespace, name, err := cache.SplitMetaNamespaceKey(key) @@ -581,7 +581,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error { } rs, err := rsc.rsLister.ReplicaSets(namespace).Get(name) if errors.IsNotFound(err) { - glog.V(4).Infof("%v %v has been deleted", rsc.Kind, key) + klog.V(4).Infof("%v %v has been deleted", rsc.Kind, key) rsc.expectations.DeleteExpectations(key) return nil } diff --git a/pkg/controller/replicaset/replica_set_utils.go b/pkg/controller/replicaset/replica_set_utils.go index de915e522a3c1..a2f7795726a74 100644 --- a/pkg/controller/replicaset/replica_set_utils.go +++ b/pkg/controller/replicaset/replica_set_utils.go @@ -22,7 +22,7 @@ import ( "fmt" "reflect" - "github.com/golang/glog" + "k8s.io/klog" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" @@ -55,7 +55,7 @@ func updateReplicaSetStatus(c appsclient.ReplicaSetInterface, rs *apps.ReplicaSe var getErr, updateErr error var updatedRS *apps.ReplicaSet for i, rs := 0, rs; ; i++ { - glog.V(4).Infof(fmt.Sprintf("Updating status for %v: %s/%s, ", rs.Kind, rs.Namespace, rs.Name) + + klog.V(4).Infof(fmt.Sprintf("Updating status for %v: 
%s/%s, ", rs.Kind, rs.Namespace, rs.Name) + fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, *(rs.Spec.Replicas)) + fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) + fmt.Sprintf("readyReplicas %d->%d, ", rs.Status.ReadyReplicas, newStatus.ReadyReplicas) + diff --git a/pkg/controller/replication/BUILD b/pkg/controller/replication/BUILD index 635de92c6e056..344101c42b4a2 100644 --- a/pkg/controller/replication/BUILD +++ b/pkg/controller/replication/BUILD @@ -38,7 +38,7 @@ go_library( "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/replication/replication_controller.go b/pkg/controller/replication/replication_controller.go index 4b6ac5f4a7375..d59f62082671a 100644 --- a/pkg/controller/replication/replication_controller.go +++ b/pkg/controller/replication/replication_controller.go @@ -26,13 +26,13 @@ limitations under the License. package replication import ( - "github.com/golang/glog" "k8s.io/api/core/v1" coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/record" + "k8s.io/klog" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/replicaset" ) @@ -51,7 +51,7 @@ type ReplicationManager struct { // NewReplicationManager configures a replication manager with the specified event recorder func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer coreinformers.ReplicationControllerInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicationManager { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) return &ReplicationManager{ *replicaset.NewBaseController(informerAdapter{rcInformer}, podInformer, clientsetAdapter{kubeClient}, burstReplicas, diff --git a/pkg/controller/resourcequota/BUILD b/pkg/controller/resourcequota/BUILD index d09d8f4a76bdf..cce526c9bc84b 100644 --- a/pkg/controller/resourcequota/BUILD +++ b/pkg/controller/resourcequota/BUILD @@ -37,7 +37,7 @@ go_library( "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index 2212bfd4b1652..503a9adb1649e 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" @@ -180,7 +180,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) (*Resou // enqueueAll is called at the fullResyncPeriod interval to force a full recalculation of quota usage statistics func (rq 
*ResourceQuotaController) enqueueAll() { - defer glog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage") + defer klog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage") rqs, err := rq.rqLister.List(labels.Everything()) if err != nil { utilruntime.HandleError(fmt.Errorf("unable to enqueue all - error listing resource quotas: %v", err)) @@ -200,7 +200,7 @@ func (rq *ResourceQuotaController) enqueueAll() { func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { - glog.Errorf("Couldn't get key for object %+v: %v", obj, err) + klog.Errorf("Couldn't get key for object %+v: %v", obj, err) return } rq.queue.Add(key) @@ -209,7 +209,7 @@ func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) { func (rq *ResourceQuotaController) addQuota(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { - glog.Errorf("Couldn't get key for object %+v: %v", obj, err) + klog.Errorf("Couldn't get key for object %+v: %v", obj, err) return } @@ -261,7 +261,7 @@ func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface) return func() { for { if quit := workFunc(); quit { - glog.Infof("resource quota controller worker shutting down") + klog.Infof("resource quota controller worker shutting down") return } } @@ -273,8 +273,8 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer rq.queue.ShutDown() - glog.Infof("Starting resource quota controller") - defer glog.Infof("Shutting down resource quota controller") + klog.Infof("Starting resource quota controller") + defer klog.Infof("Shutting down resource quota controller") if rq.quotaMonitor != nil { go rq.quotaMonitor.Run(stopCh) @@ -298,7 +298,7 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) { func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime)) }() namespace, name, err := cache.SplitMetaNamespaceKey(key) @@ -307,11 +307,11 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err } quota, err := rq.rqLister.ResourceQuotas(namespace).Get(name) if errors.IsNotFound(err) { - glog.Infof("Resource quota has been deleted %v", key) + klog.Infof("Resource quota has been deleted %v", key) return nil } if err != nil { - glog.Infof("Unable to retrieve resource quota %v from store: %v", key, err) + klog.Infof("Unable to retrieve resource quota %v from store: %v", key, err) return err } return rq.syncResourceQuota(quota) @@ -426,12 +426,12 @@ func (rq *ResourceQuotaController) Sync(discoveryFunc NamespacedResourcesFunc, p // Decide whether discovery has reported a change. if reflect.DeepEqual(oldResources, newResources) { - glog.V(4).Infof("no resource updates from discovery, skipping resource quota sync") + klog.V(4).Infof("no resource updates from discovery, skipping resource quota sync") return } // Something has changed, so track the new state and perform a sync. 
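// A sketch of the discovery-diff gate used by the quota controller's Sync
// loop above: skip the resync entirely when the discovered resource set is
// unchanged. The gvr type and the resource names are illustrative
// placeholders, not the controller's real types.
package main

import (
	"fmt"
	"reflect"
)

type gvr struct{ Group, Version, Resource string }

func main() {
	oldResources := map[gvr]struct{}{{"", "v1", "pods"}: {}}
	newResources := map[gvr]struct{}{
		{"", "v1", "pods"}:     {},
		{"", "v1", "services"}: {},
	}
	if reflect.DeepEqual(oldResources, newResources) {
		fmt.Println("no resource updates from discovery, skipping quota sync")
		return
	}
	// Something changed: track the new state and trigger a full resync.
	fmt.Println("discovery changed, resyncing monitors for", len(newResources), "resources")
}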
- glog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources) + klog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources) oldResources = newResources // Ensure workers are paused to avoid processing events before informers diff --git a/pkg/controller/resourcequota/resource_quota_monitor.go b/pkg/controller/resourcequota/resource_quota_monitor.go index aa77fca731fdf..c3d9e1f3e6761 100644 --- a/pkg/controller/resourcequota/resource_quota_monitor.go +++ b/pkg/controller/resourcequota/resource_quota_monitor.go @@ -21,7 +21,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -173,11 +173,11 @@ func (qm *QuotaMonitor) controllerFor(resource schema.GroupVersionResource) (cac } shared, err := qm.informerFactory.ForResource(resource) if err == nil { - glog.V(4).Infof("QuotaMonitor using a shared informer for resource %q", resource.String()) + klog.V(4).Infof("QuotaMonitor using a shared informer for resource %q", resource.String()) shared.Informer().AddEventHandlerWithResyncPeriod(handlers, qm.resyncPeriod()) return shared.Informer().GetController(), nil } - glog.V(4).Infof("QuotaMonitor unable to use a shared informer for resource %q: %v", resource.String(), err) + klog.V(4).Infof("QuotaMonitor unable to use a shared informer for resource %q: %v", resource.String(), err) // TODO: if we can share storage with garbage collector, it may make sense to support other resources // until that time, aggregated api servers will have to run their own controller to reconcile their own quota. @@ -225,7 +225,7 @@ func (qm *QuotaMonitor) SyncMonitors(resources map[schema.GroupVersionResource]s listResourceFunc := generic.ListResourceUsingListerFunc(listerFunc, resource) evaluator = generic.NewObjectCountEvaluator(resource.GroupResource(), listResourceFunc, "") qm.registry.Add(evaluator) - glog.Infof("QuotaMonitor created object count evaluator for %s", resource.GroupResource()) + klog.Infof("QuotaMonitor created object count evaluator for %s", resource.GroupResource()) } // track the monitor @@ -240,7 +240,7 @@ func (qm *QuotaMonitor) SyncMonitors(resources map[schema.GroupVersionResource]s } } - glog.V(4).Infof("quota synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove)) + klog.V(4).Infof("quota synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove)) // NewAggregate returns nil if errs is 0-length return utilerrors.NewAggregate(errs) } @@ -272,7 +272,7 @@ func (qm *QuotaMonitor) StartMonitors() { started++ } } - glog.V(4).Infof("QuotaMonitor started %d new monitors, %d currently running", started, len(monitors)) + klog.V(4).Infof("QuotaMonitor started %d new monitors, %d currently running", started, len(monitors)) } // IsSynced returns true if any monitors exist AND all those monitors' @@ -298,8 +298,8 @@ func (qm *QuotaMonitor) IsSynced() bool { // Run sets the stop channel and starts monitor execution until stopCh is // closed. Any running monitors will be stopped before Run returns. func (qm *QuotaMonitor) Run(stopCh <-chan struct{}) { - glog.Infof("QuotaMonitor running") - defer glog.Infof("QuotaMonitor stopping") + klog.Infof("QuotaMonitor running") + defer klog.Infof("QuotaMonitor stopping") // Set up the stop channel. 
qm.monitorLock.Lock() @@ -323,7 +323,7 @@ func (qm *QuotaMonitor) Run(stopCh <-chan struct{}) { close(monitor.stopCh) } } - glog.Infof("QuotaMonitor stopped %d of %d monitors", stopped, len(monitors)) + klog.Infof("QuotaMonitor stopped %d of %d monitors", stopped, len(monitors)) } func (qm *QuotaMonitor) runProcessResourceChanges() { @@ -349,7 +349,7 @@ func (qm *QuotaMonitor) processResourceChanges() bool { utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err)) return true } - glog.V(4).Infof("QuotaMonitor process object: %s, namespace %s, name %s, uid %s, event type %v", event.gvr.String(), accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType) + klog.V(4).Infof("QuotaMonitor process object: %s, namespace %s, name %s, uid %s, event type %v", event.gvr.String(), accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType) qm.replenishmentFunc(event.gvr.GroupResource(), accessor.GetNamespace()) return true } diff --git a/pkg/controller/route/BUILD b/pkg/controller/route/BUILD index eada3d37d3bad..b756e1bc204d4 100644 --- a/pkg/controller/route/BUILD +++ b/pkg/controller/route/BUILD @@ -33,7 +33,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/route/route_controller.go b/pkg/controller/route/route_controller.go index 5ae499efc6e8c..a8fd29e39c8e4 100644 --- a/pkg/controller/route/route_controller.go +++ b/pkg/controller/route/route_controller.go @@ -23,7 +23,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -75,11 +75,11 @@ func New(routes cloudprovider.Routes, kubeClient clientset.Interface, nodeInform } if clusterCIDR == nil { - glog.Fatal("RouteController: Must specify clusterCIDR.") + klog.Fatal("RouteController: Must specify clusterCIDR.") } eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "route_controller"}) rc := &RouteController{ @@ -99,8 +99,8 @@ func New(routes cloudprovider.Routes, kubeClient clientset.Interface, nodeInform func (rc *RouteController) Run(stopCh <-chan struct{}, syncPeriod time.Duration) { defer utilruntime.HandleCrash() - glog.Info("Starting route controller") - defer glog.Info("Shutting down route controller") + klog.Info("Starting route controller") + defer klog.Info("Shutting down route controller") if !controller.WaitForCacheSync("route", stopCh, rc.nodeListerSynced) { return @@ -117,7 +117,7 @@ func (rc *RouteController) Run(stopCh <-chan struct{}, syncPeriod time.Duration) // trigger reconciliation for that node. go wait.NonSlidingUntil(func() { if err := rc.reconcileNodeRoutes(); err != nil { - glog.Errorf("Couldn't reconcile node routes: %v", err) + klog.Errorf("Couldn't reconcile node routes: %v", err) } }, syncPeriod, stopCh) @@ -173,7 +173,7 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R // Ensure that we don't have more than maxConcurrentRouteCreations // CreateRoute calls in flight. 
rateLimiter <- struct{}{} - glog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime)) + klog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime)) err := rc.routes.CreateRoute(context.TODO(), rc.clusterName, nameHint, route) <-rateLimiter @@ -189,14 +189,14 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R Namespace: "", }, v1.EventTypeWarning, "FailedToCreateRoute", msg) } - glog.V(4).Infof(msg) + klog.V(4).Infof(msg) return err } - glog.Infof("Created route for node %s %s with hint %s after %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime)) + klog.Infof("Created route for node %s %s with hint %s after %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime)) return nil }) if err != nil { - glog.Errorf("Could not create route %s %s for node %s: %v", nameHint, route.DestinationCIDR, nodeName, err) + klog.Errorf("Could not create route %s %s for node %s: %v", nameHint, route.DestinationCIDR, nodeName, err) } }(nodeName, nameHint, route) } else { @@ -216,11 +216,11 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R // Delete the route. go func(route *cloudprovider.Route, startTime time.Time) { defer wg.Done() - glog.Infof("Deleting route %s %s", route.Name, route.DestinationCIDR) + klog.Infof("Deleting route %s %s", route.Name, route.DestinationCIDR) if err := rc.routes.DeleteRoute(context.TODO(), rc.clusterName, route); err != nil { - glog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Since(startTime), err) + klog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Since(startTime), err) } else { - glog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Since(startTime)) + klog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Since(startTime)) } }(route, time.Now()) } @@ -254,13 +254,13 @@ func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, ro }) } if err != nil { - glog.V(4).Infof("Error updating node %s, retrying: %v", nodeName, err) + klog.V(4).Infof("Error updating node %s, retrying: %v", nodeName, err) } return err }) if err != nil { - glog.Errorf("Error updating node %s: %v", nodeName, err) + klog.Errorf("Error updating node %s: %v", nodeName, err) } return err @@ -269,7 +269,7 @@ func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, ro func (rc *RouteController) isResponsibleForRoute(route *cloudprovider.Route) bool { _, cidr, err := net.ParseCIDR(route.DestinationCIDR) if err != nil { - glog.Errorf("Ignoring route %s, unparsable CIDR: %v", route.Name, err) + klog.Errorf("Ignoring route %s, unparsable CIDR: %v", route.Name, err) return false } // Not responsible if this route's CIDR is not within our clusterCIDR diff --git a/pkg/controller/service/BUILD b/pkg/controller/service/BUILD index e2f0c1f92e773..70bc4cdb49f7b 100644 --- a/pkg/controller/service/BUILD +++ b/pkg/controller/service/BUILD @@ -33,7 +33,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git 
a/pkg/controller/service/service_controller.go b/pkg/controller/service/service_controller.go index 22583df344b6e..667611bc5b175 100644 --- a/pkg/controller/service/service_controller.go +++ b/pkg/controller/service/service_controller.go @@ -24,7 +24,6 @@ import ( "reflect" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/runtime" @@ -40,6 +39,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/controller" kubefeatures "k8s.io/kubernetes/pkg/features" @@ -110,7 +110,7 @@ func New( clusterName string, ) (*ServiceController, error) { broadcaster := record.NewBroadcaster() - broadcaster.StartLogging(glog.Infof) + broadcaster.StartLogging(klog.Infof) broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "service-controller"}) @@ -160,7 +160,7 @@ func New( func (s *ServiceController) enqueueService(obj interface{}) { key, err := controller.KeyFunc(obj) if err != nil { - glog.Errorf("Couldn't get key for object %#v: %v", obj, err) + klog.Errorf("Couldn't get key for object %#v: %v", obj, err) return } s.queue.Add(key) @@ -180,8 +180,8 @@ func (s *ServiceController) Run(stopCh <-chan struct{}, workers int) { defer runtime.HandleCrash() defer s.queue.ShutDown() - glog.Info("Starting service controller") - defer glog.Info("Shutting down service controller") + klog.Info("Starting service controller") + defer klog.Info("Shutting down service controller") if !controller.WaitForCacheSync("service", stopCh, s.serviceListerSynced, s.nodeListerSynced) { return @@ -287,7 +287,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S return fmt.Errorf("error getting LB for service %s: %v", key, err) } if exists { - glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key) + klog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key) s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer") if err := s.balancer.EnsureLoadBalancerDeleted(context.TODO(), s.clusterName, service); err != nil { return err @@ -297,7 +297,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S newState = &v1.LoadBalancerStatus{} } else { - glog.V(2).Infof("Ensuring LB for service %s", key) + klog.V(2).Infof("Ensuring LB for service %s", key) // TODO: We could do a dry-run here if wanted to avoid the spurious cloud-calls & events when we restart @@ -327,7 +327,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.S return nil } } else { - glog.V(2).Infof("Not persisting unchanged LoadBalancerStatus for service %s to registry.", key) + klog.V(2).Infof("Not persisting unchanged LoadBalancerStatus for service %s to registry.", key) } return nil @@ -344,7 +344,7 @@ func (s *ServiceController) persistUpdate(service *v1.Service) error { // out so that we can process the delete, which we should soon be receiving // if we haven't already. 
if errors.IsNotFound(err) { - glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v", + klog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v", service.Namespace, service.Name, err) return nil } @@ -353,7 +353,7 @@ func (s *ServiceController) persistUpdate(service *v1.Service) error { if errors.IsConflict(err) { return err } - glog.Warningf("Failed to persist updated LoadBalancerStatus to service '%s/%s' after creating its load balancer: %v", + klog.Warningf("Failed to persist updated LoadBalancerStatus to service '%s/%s' after creating its load balancer: %v", service.Namespace, service.Name, err) time.Sleep(clientRetryInterval) } @@ -613,7 +613,7 @@ func getNodeConditionPredicate() corelisters.NodeConditionPredicate { // We consider the node for load balancing only when its NodeReady condition status // is ConditionTrue if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue { - glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status) + klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status) return false } } @@ -626,7 +626,7 @@ func getNodeConditionPredicate() corelisters.NodeConditionPredicate { func (s *ServiceController) nodeSyncLoop() { newHosts, err := s.nodeLister.ListWithPredicate(getNodeConditionPredicate()) if err != nil { - glog.Errorf("Failed to retrieve current set of nodes from node lister: %v", err) + klog.Errorf("Failed to retrieve current set of nodes from node lister: %v", err) return } if nodeSlicesEqualForLB(newHosts, s.knownHosts) { @@ -636,7 +636,7 @@ func (s *ServiceController) nodeSyncLoop() { return } - glog.Infof("Detected change in list of current cluster nodes. New node set: %v", + klog.Infof("Detected change in list of current cluster nodes. New node set: %v", nodeNames(newHosts)) // Try updating all services, and save the ones that fail to try again next @@ -644,7 +644,7 @@ func (s *ServiceController) nodeSyncLoop() { s.servicesToUpdate = s.cache.allServices() numServices := len(s.servicesToUpdate) s.servicesToUpdate = s.updateLoadBalancerHosts(s.servicesToUpdate, newHosts) - glog.Infof("Successfully updated %d out of %d load balancers to direct traffic to the updated set of nodes", + klog.Infof("Successfully updated %d out of %d load balancers to direct traffic to the updated set of nodes", numServices-len(s.servicesToUpdate), numServices) s.knownHosts = newHosts @@ -660,7 +660,7 @@ func (s *ServiceController) updateLoadBalancerHosts(services []*v1.Service, host return } if err := s.lockedUpdateLoadBalancerHosts(service, hosts); err != nil { - glog.Errorf("External error while updating load balancer: %v.", err) + klog.Errorf("External error while updating load balancer: %v.", err) servicesToRetry = append(servicesToRetry, service) } }() @@ -689,7 +689,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *v1.Service, h // It's only an actual error if the load balancer still exists. 
if _, exists, err := s.balancer.GetLoadBalancer(context.TODO(), s.clusterName, service); err != nil { - glog.Errorf("External error while checking if load balancer %q exists: name, %v", s.balancer.GetLoadBalancerName(context.TODO(), s.clusterName, service), err) + klog.Errorf("External error while checking if load balancer %q exists: name, %v", s.balancer.GetLoadBalancerName(context.TODO(), s.clusterName, service), err) } else if !exists { return nil } @@ -713,7 +713,7 @@ func (s *ServiceController) syncService(key string) error { startTime := time.Now() var cachedService *cachedService defer func() { - glog.V(4).Infof("Finished syncing service %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing service %q (%v)", key, time.Since(startTime)) }() namespace, name, err := cache.SplitMetaNamespaceKey(key) @@ -726,10 +726,10 @@ func (s *ServiceController) syncService(key string) error { switch { case errors.IsNotFound(err): // service absence in store means watcher caught the deletion, ensure LB info is cleaned - glog.Infof("Service has been deleted %v. Attempting to cleanup load balancer resources", key) + klog.Infof("Service has been deleted %v. Attempting to cleanup load balancer resources", key) err = s.processServiceDeletion(key) case err != nil: - glog.Infof("Unable to retrieve service %v from store: %v", key, err) + klog.Infof("Unable to retrieve service %v from store: %v", key, err) default: cachedService = s.cache.getOrCreate(key) err = s.processServiceUpdate(cachedService, service, key) @@ -744,7 +744,7 @@ func (s *ServiceController) syncService(key string) error { func (s *ServiceController) processServiceDeletion(key string) error { cachedService, ok := s.cache.get(key) if !ok { - glog.Errorf("service %s not in cache even though the watcher thought it was. Ignoring the deletion", key) + klog.Errorf("service %s not in cache even though the watcher thought it was. 
Ignoring the deletion", key) return nil } return s.processLoadBalancerDelete(cachedService, key) diff --git a/pkg/controller/serviceaccount/BUILD b/pkg/controller/serviceaccount/BUILD index bf7efc43f0573..2e2fd79f0037b 100644 --- a/pkg/controller/serviceaccount/BUILD +++ b/pkg/controller/serviceaccount/BUILD @@ -34,7 +34,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -59,8 +59,8 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/gopkg.in/square/go-jose.v2/jwt:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/serviceaccount/serviceaccounts_controller.go b/pkg/controller/serviceaccount/serviceaccounts_controller.go index 8027e79ae1137..5f10312ae1772 100644 --- a/pkg/controller/serviceaccount/serviceaccounts_controller.go +++ b/pkg/controller/serviceaccount/serviceaccounts_controller.go @@ -20,7 +20,6 @@ import ( "fmt" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,6 +31,7 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -112,8 +112,8 @@ func (c *ServiceAccountsController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() - glog.Infof("Starting service account controller") - defer glog.Infof("Shutting down service account controller") + klog.Infof("Starting service account controller") + defer klog.Infof("Shutting down service account controller") if !controller.WaitForCacheSync("service account", stopCh, c.saListerSynced, c.nsListerSynced) { return @@ -183,7 +183,7 @@ func (c *ServiceAccountsController) processNextWorkItem() bool { func (c *ServiceAccountsController) syncNamespace(key string) error { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime)) }() ns, err := c.nsLister.Get(key) diff --git a/pkg/controller/serviceaccount/tokens_controller.go b/pkg/controller/serviceaccount/tokens_controller.go index 7b7f80ce43dbb..f93cd5822f594 100644 --- a/pkg/controller/serviceaccount/tokens_controller.go +++ b/pkg/controller/serviceaccount/tokens_controller.go @@ -21,7 +21,6 @@ import ( "fmt" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -36,6 +35,7 @@ import ( "k8s.io/client-go/tools/cache" clientretry "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/registry/core/secret" "k8s.io/kubernetes/pkg/serviceaccount" @@ -173,13 +173,13 @@ func (e *TokensController) Run(workers int, stopCh <-chan struct{}) { return } - glog.V(5).Infof("Starting workers") + klog.V(5).Infof("Starting workers") for i := 0; i < 
workers; i++ { go wait.Until(e.syncServiceAccount, 0, stopCh) go wait.Until(e.syncSecret, 0, stopCh) } <-stopCh - glog.V(1).Infof("Shutting down") + klog.V(1).Infof("Shutting down") } func (e *TokensController) queueServiceAccountSync(obj interface{}) { @@ -207,7 +207,7 @@ func (e *TokensController) retryOrForget(queue workqueue.RateLimitingInterface, return } - glog.V(4).Infof("retried %d times: %#v", requeueCount, key) + klog.V(4).Infof("retried %d times: %#v", requeueCount, key) queue.Forget(key) } @@ -237,28 +237,28 @@ func (e *TokensController) syncServiceAccount() { saInfo, err := parseServiceAccountKey(key) if err != nil { - glog.Error(err) + klog.Error(err) return } sa, err := e.getServiceAccount(saInfo.namespace, saInfo.name, saInfo.uid, false) switch { case err != nil: - glog.Error(err) + klog.Error(err) retry = true case sa == nil: // service account no longer exists, so delete related tokens - glog.V(4).Infof("syncServiceAccount(%s/%s), service account deleted, removing tokens", saInfo.namespace, saInfo.name) + klog.V(4).Infof("syncServiceAccount(%s/%s), service account deleted, removing tokens", saInfo.namespace, saInfo.name) sa = &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: saInfo.namespace, Name: saInfo.name, UID: saInfo.uid}} retry, err = e.deleteTokens(sa) if err != nil { - glog.Errorf("error deleting serviceaccount tokens for %s/%s: %v", saInfo.namespace, saInfo.name, err) + klog.Errorf("error deleting serviceaccount tokens for %s/%s: %v", saInfo.namespace, saInfo.name, err) } default: // ensure a token exists and is referenced by this service account retry, err = e.ensureReferencedToken(sa) if err != nil { - glog.Errorf("error synchronizing serviceaccount %s/%s: %v", saInfo.namespace, saInfo.name, err) + klog.Errorf("error synchronizing serviceaccount %s/%s: %v", saInfo.namespace, saInfo.name, err) } } } @@ -278,14 +278,14 @@ func (e *TokensController) syncSecret() { secretInfo, err := parseSecretQueueKey(key) if err != nil { - glog.Error(err) + klog.Error(err) return } secret, err := e.getSecret(secretInfo.namespace, secretInfo.name, secretInfo.uid, false) switch { case err != nil: - glog.Error(err) + klog.Error(err) retry = true case secret == nil: // If the service account exists @@ -294,7 +294,7 @@ func (e *TokensController) syncSecret() { if err := clientretry.RetryOnConflict(RemoveTokenBackoff, func() error { return e.removeSecretReference(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, secretInfo.name) }); err != nil { - glog.Error(err) + klog.Error(err) } } default: @@ -302,19 +302,19 @@ func (e *TokensController) syncSecret() { sa, saErr := e.getServiceAccount(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, true) switch { case saErr != nil: - glog.Error(saErr) + klog.Error(saErr) retry = true case sa == nil: // Delete token - glog.V(4).Infof("syncSecret(%s/%s), service account does not exist, deleting token", secretInfo.namespace, secretInfo.name) + klog.V(4).Infof("syncSecret(%s/%s), service account does not exist, deleting token", secretInfo.namespace, secretInfo.name) if retriable, err := e.deleteToken(secretInfo.namespace, secretInfo.name, secretInfo.uid); err != nil { - glog.Errorf("error deleting serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err) + klog.Errorf("error deleting serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err) retry = retriable } default: // Update token if needed if 
retriable, err := e.generateTokenIfNeeded(sa, secret); err != nil { - glog.Errorf("error populating serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err) + klog.Errorf("error populating serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err) retry = retriable } } @@ -376,7 +376,7 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou } if liveServiceAccount.ResourceVersion != serviceAccount.ResourceVersion { // Retry if our liveServiceAccount doesn't match our cache's resourceVersion (either the live lookup or our cache are stale) - glog.V(4).Infof("liveServiceAccount.ResourceVersion (%s) does not match cache (%s), retrying", liveServiceAccount.ResourceVersion, serviceAccount.ResourceVersion) + klog.V(4).Infof("liveServiceAccount.ResourceVersion (%s) does not match cache (%s), retrying", liveServiceAccount.ResourceVersion, serviceAccount.ResourceVersion) return true, nil } @@ -455,10 +455,10 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou if !addedReference { // we weren't able to use the token, try to clean it up. - glog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err) + klog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err) deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}} if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil { - glog.Error(deleteErr) // if we fail, just log it + klog.Error(deleteErr) // if we fail, just log it } } @@ -524,7 +524,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccou if liveSecret.ResourceVersion != cachedSecret.ResourceVersion { // our view of the secret is not up to date // we'll get notified of an update event later and get to try again - glog.V(2).Infof("secret %s/%s is not up to date, skipping token population", liveSecret.Namespace, liveSecret.Name) + klog.V(2).Infof("secret %s/%s is not up to date, skipping token population", liveSecret.Namespace, liveSecret.Name) return false, nil } diff --git a/pkg/controller/serviceaccount/tokens_controller_test.go b/pkg/controller/serviceaccount/tokens_controller_test.go index cf0c4f854fa13..e7b54cc009942 100644 --- a/pkg/controller/serviceaccount/tokens_controller_test.go +++ b/pkg/controller/serviceaccount/tokens_controller_test.go @@ -23,8 +23,8 @@ import ( "time" "github.com/davecgh/go-spew/spew" - "github.com/golang/glog" "gopkg.in/square/go-jose.v2/jwt" + "k8s.io/klog" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -568,7 +568,7 @@ func TestTokenCreation(t *testing.T) { } for k, tc := range testcases { - glog.Infof(k) + klog.Infof(k) // Re-seed to reset name generation utilrand.Seed(1) diff --git a/pkg/controller/statefulset/BUILD b/pkg/controller/statefulset/BUILD index d1c19f1aaa36f..40a9cf224aa85 100644 --- a/pkg/controller/statefulset/BUILD +++ b/pkg/controller/statefulset/BUILD @@ -41,7 +41,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git 
a/pkg/controller/statefulset/stateful_set.go b/pkg/controller/statefulset/stateful_set.go index e88a6dae255e9..43f07bf3362c3 100644 --- a/pkg/controller/statefulset/stateful_set.go +++ b/pkg/controller/statefulset/stateful_set.go @@ -41,7 +41,7 @@ import ( "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/history" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -86,7 +86,7 @@ func NewStatefulSetController( kubeClient clientset.Interface, ) *StatefulSetController { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "statefulset-controller"}) @@ -128,7 +128,7 @@ func NewStatefulSetController( oldPS := old.(*apps.StatefulSet) curPS := cur.(*apps.StatefulSet) if oldPS.Status.Replicas != curPS.Status.Replicas { - glog.V(4).Infof("Observed updated replica count for StatefulSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas) + klog.V(4).Infof("Observed updated replica count for StatefulSet: %v, %d->%d", curPS.Name, oldPS.Status.Replicas, curPS.Status.Replicas) } ssc.enqueueStatefulSet(cur) }, @@ -148,8 +148,8 @@ func (ssc *StatefulSetController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer ssc.queue.ShutDown() - glog.Infof("Starting stateful set controller") - defer glog.Infof("Shutting down statefulset controller") + klog.Infof("Starting stateful set controller") + defer klog.Infof("Shutting down statefulset controller") if !controller.WaitForCacheSync("stateful set", stopCh, ssc.podListerSynced, ssc.setListerSynced, ssc.pvcListerSynced, ssc.revListerSynced) { return @@ -179,7 +179,7 @@ func (ssc *StatefulSetController) addPod(obj interface{}) { if set == nil { return } - glog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels) + klog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels) ssc.enqueueStatefulSet(set) return } @@ -190,7 +190,7 @@ func (ssc *StatefulSetController) addPod(obj interface{}) { if len(sets) == 0 { return } - glog.V(4).Infof("Orphan Pod %s created, labels: %+v", pod.Name, pod.Labels) + klog.V(4).Infof("Orphan Pod %s created, labels: %+v", pod.Name, pod.Labels) for _, set := range sets { ssc.enqueueStatefulSet(set) } @@ -224,7 +224,7 @@ func (ssc *StatefulSetController) updatePod(old, cur interface{}) { if set == nil { return } - glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta) + klog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta) ssc.enqueueStatefulSet(set) return } @@ -236,7 +236,7 @@ func (ssc *StatefulSetController) updatePod(old, cur interface{}) { if len(sets) == 0 { return } - glog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta) + klog.V(4).Infof("Orphan Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta) for _, set := range sets { ssc.enqueueStatefulSet(set) } @@ -273,7 +273,7 @@ func (ssc *StatefulSetController) deletePod(obj interface{}) { if set == nil { return } - glog.V(4).Infof("Pod %s/%s deleted through %v.", pod.Namespace, pod.Name, utilruntime.GetCaller()) + klog.V(4).Infof("Pod %s/%s deleted through %v.", pod.Namespace, pod.Name, utilruntime.GetCaller()) 
ssc.enqueueStatefulSet(set) } @@ -415,7 +415,7 @@ func (ssc *StatefulSetController) worker() { func (ssc *StatefulSetController) sync(key string) error { startTime := time.Now() defer func() { - glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Since(startTime)) + klog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Since(startTime)) }() namespace, name, err := cache.SplitMetaNamespaceKey(key) @@ -424,7 +424,7 @@ func (ssc *StatefulSetController) sync(key string) error { } set, err := ssc.setLister.StatefulSets(namespace).Get(name) if errors.IsNotFound(err) { - glog.Infof("StatefulSet has been deleted %v", key) + klog.Infof("StatefulSet has been deleted %v", key) return nil } if err != nil { @@ -453,11 +453,11 @@ func (ssc *StatefulSetController) sync(key string) error { // syncStatefulSet syncs a tuple of (statefulset, []*v1.Pod). func (ssc *StatefulSetController) syncStatefulSet(set *apps.StatefulSet, pods []*v1.Pod) error { - glog.V(4).Infof("Syncing StatefulSet %v/%v with %d pods", set.Namespace, set.Name, len(pods)) + klog.V(4).Infof("Syncing StatefulSet %v/%v with %d pods", set.Namespace, set.Name, len(pods)) // TODO: investigate where we mutate the set during the update as it is not obvious. if err := ssc.control.UpdateStatefulSet(set.DeepCopy(), pods); err != nil { return err } - glog.V(4).Infof("Successfully synced StatefulSet %s/%s successful", set.Namespace, set.Name) + klog.V(4).Infof("Successfully synced StatefulSet %s/%s successful", set.Namespace, set.Name) return nil } diff --git a/pkg/controller/statefulset/stateful_set_control.go b/pkg/controller/statefulset/stateful_set_control.go index eb8664e5e1157..de193a137e1c3 100644 --- a/pkg/controller/statefulset/stateful_set_control.go +++ b/pkg/controller/statefulset/stateful_set_control.go @@ -20,7 +20,7 @@ import ( "math" "sort" - "github.com/golang/glog" + "k8s.io/klog" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" @@ -99,7 +99,7 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *apps.StatefulSet, p return err } - glog.V(4).Infof("StatefulSet %s/%s pod status replicas=%d ready=%d current=%d updated=%d", + klog.V(4).Infof("StatefulSet %s/%s pod status replicas=%d ready=%d current=%d updated=%d", set.Namespace, set.Name, status.Replicas, @@ -107,7 +107,7 @@ func (ssc *defaultStatefulSetControl) UpdateStatefulSet(set *apps.StatefulSet, p status.CurrentReplicas, status.UpdatedReplicas) - glog.V(4).Infof("StatefulSet %s/%s revisions current=%s update=%s", + klog.V(4).Infof("StatefulSet %s/%s revisions current=%s update=%s", set.Namespace, set.Name, status.CurrentRevision, @@ -351,7 +351,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( } if unhealthy > 0 { - glog.V(4).Infof("StatefulSet %s/%s has %d unhealthy Pods starting with %s", + klog.V(4).Infof("StatefulSet %s/%s has %d unhealthy Pods starting with %s", set.Namespace, set.Name, unhealthy, @@ -415,7 +415,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( // If we find a Pod that is currently terminating, we must wait until graceful deletion // completes before we continue to make progress. if isTerminating(replicas[i]) && monotonic { - glog.V(4).Infof( + klog.V(4).Infof( "StatefulSet %s/%s is waiting for Pod %s to Terminate", set.Namespace, set.Name, @@ -426,7 +426,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( // We must ensure that all for each Pod, when we create it, all of its predecessors, with respect to its // ordinal, are Running and Ready. 
if !isRunningAndReady(replicas[i]) && monotonic { - glog.V(4).Infof( + klog.V(4).Infof( "StatefulSet %s/%s is waiting for Pod %s to be Running and Ready", set.Namespace, set.Name, @@ -452,7 +452,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( for target := len(condemned) - 1; target >= 0; target-- { // wait for terminating pods to expire if isTerminating(condemned[target]) { - glog.V(4).Infof( + klog.V(4).Infof( "StatefulSet %s/%s is waiting for Pod %s to Terminate prior to scale down", set.Namespace, set.Name, @@ -465,14 +465,14 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( } // if we are in monotonic mode and the condemned target is not the first unhealthy Pod block if !isRunningAndReady(condemned[target]) && monotonic && condemned[target] != firstUnhealthyPod { - glog.V(4).Infof( + klog.V(4).Infof( "StatefulSet %s/%s is waiting for Pod %s to be Running and Ready prior to scale down", set.Namespace, set.Name, firstUnhealthyPod.Name) return &status, nil } - glog.V(2).Infof("StatefulSet %s/%s terminating Pod %s for scale down", + klog.V(2).Infof("StatefulSet %s/%s terminating Pod %s for scale down", set.Namespace, set.Name, condemned[target].Name) @@ -506,7 +506,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( // delete the Pod if it is not already terminating and does not match the update revision. if getPodRevision(replicas[target]) != updateRevision.Name && !isTerminating(replicas[target]) { - glog.V(2).Infof("StatefulSet %s/%s terminating Pod %s for update", + klog.V(2).Infof("StatefulSet %s/%s terminating Pod %s for update", set.Namespace, set.Name, replicas[target].Name) @@ -517,7 +517,7 @@ func (ssc *defaultStatefulSetControl) updateStatefulSet( // wait for unhealthy Pods on update if !isHealthy(replicas[target]) { - glog.V(4).Infof( + klog.V(4).Infof( "StatefulSet %s/%s is waiting for Pod %s to update", set.Namespace, set.Name, diff --git a/pkg/controller/testutil/BUILD b/pkg/controller/testutil/BUILD index ac20e1bbd719a..b0b348c13a3d1 100644 --- a/pkg/controller/testutil/BUILD +++ b/pkg/controller/testutil/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/reference:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/testutil/test_utils.go b/pkg/controller/testutil/test_utils.go index 7ee5e6be658af..069c2d61a8638 100644 --- a/pkg/controller/testutil/test_utils.go +++ b/pkg/controller/testutil/test_utils.go @@ -46,7 +46,7 @@ import ( utilnode "k8s.io/kubernetes/pkg/util/node" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -299,12 +299,12 @@ func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, su originalObjJS, err := json.Marshal(nodeCopy) if err != nil { - glog.Errorf("Failed to marshal %v", nodeCopy) + klog.Errorf("Failed to marshal %v", nodeCopy) return nil, nil } var originalNode v1.Node if err = json.Unmarshal(originalObjJS, &originalNode); err != nil { - glog.Errorf("Failed to unmarshal original object: %v", err) + klog.Errorf("Failed to unmarshal original object: %v", err) return nil, nil } @@ -313,31 +313,31 @@ func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, su case types.JSONPatchType: patchObj, err := jsonpatch.DecodePatch(data) if err != nil { - glog.Error(err.Error()) + 
klog.Error(err.Error()) return nil, nil } if patchedObjJS, err = patchObj.Apply(originalObjJS); err != nil { - glog.Error(err.Error()) + klog.Error(err.Error()) return nil, nil } case types.MergePatchType: if patchedObjJS, err = jsonpatch.MergePatch(originalObjJS, data); err != nil { - glog.Error(err.Error()) + klog.Error(err.Error()) return nil, nil } case types.StrategicMergePatchType: if patchedObjJS, err = strategicpatch.StrategicMergePatch(originalObjJS, data, originalNode); err != nil { - glog.Error(err.Error()) + klog.Error(err.Error()) return nil, nil } default: - glog.Errorf("unknown Content-Type header for patch: %v", pt) + klog.Errorf("unknown Content-Type header for patch: %v", pt) return nil, nil } var updatedNode v1.Node if err = json.Unmarshal(patchedObjJS, &updatedNode); err != nil { - glog.Errorf("Failed to unmarshal patched object: %v", err) + klog.Errorf("Failed to unmarshal patched object: %v", err) return nil, nil } @@ -382,7 +382,7 @@ func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp metav1.Time, defer f.Unlock() ref, err := ref.GetReference(legacyscheme.Scheme, obj) if err != nil { - glog.Errorf("Encountered error while getting reference: %v", err) + klog.Errorf("Encountered error while getting reference: %v", err) return } event := f.makeEvent(ref, eventtype, reason, message) diff --git a/pkg/controller/ttl/BUILD b/pkg/controller/ttl/BUILD index 900302a8a07df..3eff1a2ab62a9 100644 --- a/pkg/controller/ttl/BUILD +++ b/pkg/controller/ttl/BUILD @@ -24,7 +24,7 @@ go_library( "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/ttl/ttl_controller.go b/pkg/controller/ttl/ttl_controller.go index 2938ca8270b64..e9b6728e67eb9 100644 --- a/pkg/controller/ttl/ttl_controller.go +++ b/pkg/controller/ttl/ttl_controller.go @@ -47,7 +47,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/controller" - "github.com/golang/glog" + "k8s.io/klog" ) type TTLController struct { @@ -113,8 +113,8 @@ func (ttlc *TTLController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer ttlc.queue.ShutDown() - glog.Infof("Starting TTL controller") - defer glog.Infof("Shutting down TTL controller") + klog.Infof("Starting TTL controller") + defer klog.Infof("Shutting down TTL controller") if !controller.WaitForCacheSync("TTL", stopCh, ttlc.hasSynced) { return @@ -190,7 +190,7 @@ func (ttlc *TTLController) deleteNode(obj interface{}) { func (ttlc *TTLController) enqueueNode(node *v1.Node) { key, err := controller.KeyFunc(node) if err != nil { - glog.Errorf("Couldn't get key for object %+v", node) + klog.Errorf("Couldn't get key for object %+v", node) return } ttlc.queue.Add(key) @@ -235,7 +235,7 @@ func getIntFromAnnotation(node *v1.Node, annotationKey string) (int, bool) { } intValue, err := strconv.Atoi(annotationValue) if err != nil { - glog.Warningf("Cannot convert the value %q with annotation key %q for the node %q", + klog.Warningf("Cannot convert the value %q with annotation key %q for the node %q", annotationValue, annotationKey, node.Name) return 0, false } @@ -265,10 +265,10 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey } _, err = ttlc.kubeClient.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, 
patchBytes) if err != nil { - glog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err) + klog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err) return err } - glog.V(2).Infof("Changed ttl annotation for node %s to %d seconds", node.Name, value) + klog.V(2).Infof("Changed ttl annotation for node %s to %d seconds", node.Name, value) return nil } diff --git a/pkg/controller/ttlafterfinished/BUILD b/pkg/controller/ttlafterfinished/BUILD index 3eda34912f623..21c5ec3294b21 100644 --- a/pkg/controller/ttlafterfinished/BUILD +++ b/pkg/controller/ttlafterfinished/BUILD @@ -24,7 +24,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go b/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go index 99c5e625fdcfa..930d2df7d1ffa 100644 --- a/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go +++ b/pkg/controller/ttlafterfinished/ttlafterfinished_controller.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" batch "k8s.io/api/batch/v1" "k8s.io/api/core/v1" @@ -71,7 +71,7 @@ type Controller struct { // New creates an instance of Controller func New(jobInformer batchinformers.JobInformer, client clientset.Interface) *Controller { eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil { @@ -102,8 +102,8 @@ func (tc *Controller) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer tc.queue.ShutDown() - glog.Infof("Starting TTL after finished controller") - defer glog.Infof("Shutting down TTL after finished controller") + klog.Infof("Starting TTL after finished controller") + defer klog.Infof("Shutting down TTL after finished controller") if !controller.WaitForCacheSync("TTL after finished", stopCh, tc.jListerSynced) { return @@ -118,7 +118,7 @@ func (tc *Controller) Run(workers int, stopCh <-chan struct{}) { func (tc *Controller) addJob(obj interface{}) { job := obj.(*batch.Job) - glog.V(4).Infof("Adding job %s/%s", job.Namespace, job.Name) + klog.V(4).Infof("Adding job %s/%s", job.Namespace, job.Name) if job.DeletionTimestamp == nil && needsCleanup(job) { tc.enqueue(job) @@ -127,7 +127,7 @@ func (tc *Controller) addJob(obj interface{}) { func (tc *Controller) updateJob(old, cur interface{}) { job := cur.(*batch.Job) - glog.V(4).Infof("Updating job %s/%s", job.Namespace, job.Name) + klog.V(4).Infof("Updating job %s/%s", job.Namespace, job.Name) if job.DeletionTimestamp == nil && needsCleanup(job) { tc.enqueue(job) @@ -135,7 +135,7 @@ func (tc *Controller) updateJob(old, cur interface{}) { } func (tc *Controller) enqueue(job *batch.Job) { - glog.V(4).Infof("Add job %s/%s to cleanup", job.Namespace, job.Name) + klog.V(4).Infof("Add job %s/%s to cleanup", job.Namespace, job.Name) key, err := controller.KeyFunc(job) if err != nil { utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", job, err)) @@ -194,7 +194,7 @@ func (tc *Controller) processJob(key string) error { return 
err } - glog.V(4).Infof("Checking if Job %s/%s is ready for cleanup", namespace, name) + klog.V(4).Infof("Checking if Job %s/%s is ready for cleanup", namespace, name) // Ignore the Jobs that are already deleted or being deleted, or the ones that don't need clean up. job, err := tc.jLister.Jobs(namespace).Get(name) if errors.IsNotFound(err) { @@ -233,7 +233,7 @@ func (tc *Controller) processJob(key string) error { PropagationPolicy: &policy, Preconditions: &metav1.Preconditions{UID: &fresh.UID}, } - glog.V(4).Infof("Cleaning up Job %s/%s", namespace, name) + klog.V(4).Infof("Cleaning up Job %s/%s", namespace, name) return tc.client.BatchV1().Jobs(fresh.Namespace).Delete(fresh.Name, options) } @@ -284,10 +284,10 @@ func timeLeft(j *batch.Job, since *time.Time) (*time.Duration, error) { return nil, err } if finishAt.UTC().After(since.UTC()) { - glog.Warningf("Warning: Found Job %s/%s finished in the future. This is likely due to time skew in the cluster. Job cleanup will be deferred.", j.Namespace, j.Name) + klog.Warningf("Warning: Found Job %s/%s finished in the future. This is likely due to time skew in the cluster. Job cleanup will be deferred.", j.Namespace, j.Name) } remaining := expireAt.UTC().Sub(since.UTC()) - glog.V(4).Infof("Found Job %s/%s finished at %v, remaining TTL %v since %v, TTL will expire at %v", j.Namespace, j.Name, finishAt.UTC(), remaining, since.UTC(), expireAt.UTC()) + klog.V(4).Infof("Found Job %s/%s finished at %v, remaining TTL %v since %v, TTL will expire at %v", j.Namespace, j.Name, finishAt.UTC(), remaining, since.UTC(), expireAt.UTC()) return &remaining, nil } diff --git a/pkg/controller/util/node/BUILD b/pkg/controller/util/node/BUILD index fa48450d235cb..2dc7c49166dce 100644 --- a/pkg/controller/util/node/BUILD +++ b/pkg/controller/util/node/BUILD @@ -22,7 +22,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/util/node/controller_utils.go b/pkg/controller/util/node/controller_utils.go index ff7ffe582bcbc..9be6845d0b8fa 100644 --- a/pkg/controller/util/node/controller_utils.go +++ b/pkg/controller/util/node/controller_utils.go @@ -40,7 +40,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" nodepkg "k8s.io/kubernetes/pkg/util/node" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -92,7 +92,7 @@ func DeletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n continue } - glog.V(2).Infof("Starting deletion of pod %v/%v", pod.Namespace, pod.Name) + klog.V(2).Infof("Starting deletion of pod %v/%v", pod.Namespace, pod.Name) recorder.Eventf(&pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName) if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { return false, err @@ -138,7 +138,7 @@ func ForcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error // given node from master return true if success func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error { nodeName := node.Name - glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName) + klog.V(2).Infof("Update ready status of pods on node [%v]", nodeName) opts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()} 
pods, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(opts) if err != nil { @@ -155,10 +155,10 @@ func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error { for i, cond := range pod.Status.Conditions { if cond.Type == v1.PodReady { pod.Status.Conditions[i].Status = v1.ConditionFalse - glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name) + klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name) _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(&pod) if err != nil { - glog.Warningf("Failed to update status for pod %q: %v", format.Pod(&pod), err) + klog.Warningf("Failed to update status for pod %q: %v", format.Pod(&pod), err) errMsg = append(errMsg, fmt.Sprintf("%v", err)) } break @@ -209,7 +209,7 @@ func RecordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype UID: types.UID(nodeUID), Namespace: "", } - glog.V(2).Infof("Recording %s event message for node %s", event, nodeName) + klog.V(2).Infof("Recording %s event message for node %s", event, nodeName) recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event) } @@ -221,7 +221,7 @@ func RecordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, newSta UID: node.UID, Namespace: "", } - glog.V(2).Infof("Recording status change %s event message for node %s", newStatus, node.Name) + klog.V(2).Infof("Recording status change %s event message for node %s", newStatus, node.Name) // TODO: This requires a transaction, either both node status is updated // and event is recorded or neither should happen, see issue #6055. recorder.Eventf(ref, v1.EventTypeNormal, newStatus, "Node %s status is now: %s", node.Name, newStatus) @@ -245,7 +245,7 @@ func SwapNodeControllerTaint(kubeClient clientset.Interface, taintsToAdd, taints err)) return false } - glog.V(4).Infof("Added %+v Taint to Node %v", taintsToAdd, node.Name) + klog.V(4).Infof("Added %+v Taint to Node %v", taintsToAdd, node.Name) err = controller.RemoveTaintOffNode(kubeClient, node.Name, node, taintsToRemove...) 
if err != nil { @@ -257,7 +257,7 @@ func SwapNodeControllerTaint(kubeClient clientset.Interface, taintsToAdd, taints err)) return false } - glog.V(4).Infof("Made sure that Node %+v has no %v Taint", node.Name, taintsToRemove) + klog.V(4).Infof("Made sure that Node %+v has no %v Taint", node.Name, taintsToRemove) return true } @@ -293,12 +293,12 @@ func CreateDeleteNodeHandler(f func(node *v1.Node) error) func(obj interface{}) if !isNode { deletedState, ok := originalObj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Received unexpected object: %v", originalObj) + klog.Errorf("Received unexpected object: %v", originalObj) return } originalNode, ok = deletedState.Obj.(*v1.Node) if !ok { - glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) + klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) return } } diff --git a/pkg/controller/volume/attachdetach/BUILD b/pkg/controller/volume/attachdetach/BUILD index ed2db539700fc..f68704cfff164 100644 --- a/pkg/controller/volume/attachdetach/BUILD +++ b/pkg/controller/volume/attachdetach/BUILD @@ -40,7 +40,7 @@ go_library( "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller.go b/pkg/controller/volume/attachdetach/attach_detach_controller.go index e2b1b6632f988..f76e3224a6eb3 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller.go @@ -23,7 +23,6 @@ import ( "net" "time" - "github.com/golang/glog" authenticationv1 "k8s.io/api/authentication/v1" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -41,6 +40,7 @@ import ( "k8s.io/client-go/util/workqueue" cloudprovider "k8s.io/cloud-provider" csiclient "k8s.io/csi-api/pkg/client/clientset/versioned" + "k8s.io/klog" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics" @@ -143,7 +143,7 @@ func NewAttachDetachController( } eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach-controller"}) blkutil := volumepathhandler.NewBlockVolumePathHandler() @@ -312,8 +312,8 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) { defer runtime.HandleCrash() defer adc.pvcQueue.ShutDown() - glog.Infof("Starting attach detach controller") - defer glog.Infof("Shutting down attach detach controller") + klog.Infof("Starting attach detach controller") + defer klog.Infof("Shutting down attach detach controller") if !controller.WaitForCacheSync("attach detach", stopCh, adc.podsSynced, adc.nodesSynced, adc.pvcsSynced, adc.pvsSynced) { return @@ -321,11 +321,11 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) { err := adc.populateActualStateOfWorld() if err != nil { - glog.Errorf("Error populating the actual state of world: %v", err) + klog.Errorf("Error populating the actual state of world: %v", err) } err = 
 	adc.populateDesiredStateOfWorld()
 	if err != nil {
-		glog.Errorf("Error populating the desired state of world: %v", err)
+		klog.Errorf("Error populating the desired state of world: %v", err)
 	}
 	go adc.reconciler.Run(stopCh)
 	go adc.desiredStateOfWorldPopulator.Run(stopCh)
@@ -341,7 +341,7 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
 }
 
 func (adc *attachDetachController) populateActualStateOfWorld() error {
-	glog.V(5).Infof("Populating ActualStateOfworld")
+	klog.V(5).Infof("Populating ActualStateOfWorld")
 	nodes, err := adc.nodeLister.List(labels.Everything())
 	if err != nil {
 		return err
@@ -358,7 +358,7 @@ func (adc *attachDetachController) populateActualStateOfWorld() error {
 			// scans the pods and updates their volumes in the ActualStateOfWorld too.
 			err = adc.actualStateOfWorld.MarkVolumeAsAttached(uniqueName, nil /* VolumeSpec */, nodeName, attachedVolume.DevicePath)
 			if err != nil {
-				glog.Errorf("Failed to mark the volume as attached: %v", err)
+				klog.Errorf("Failed to mark the volume as attached: %v", err)
 				continue
 			}
 			adc.processVolumesInUse(nodeName, node.Status.VolumesInUse)
@@ -391,7 +391,7 @@ func (adc *attachDetachController) getNodeVolumeDevicePath(
 }
 
 func (adc *attachDetachController) populateDesiredStateOfWorld() error {
-	glog.V(5).Infof("Populating DesiredStateOfworld")
+	klog.V(5).Infof("Populating DesiredStateOfWorld")
 
 	pods, err := adc.podLister.List(labels.Everything())
 	if err != nil {
@@ -406,7 +406,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
 			// pod will be detached and the spec is irrelevant.
 			volumeSpec, err := util.CreateVolumeSpec(podVolume, podToAdd.Namespace, adc.pvcLister, adc.pvLister)
 			if err != nil {
-				glog.Errorf(
+				klog.Errorf(
 					"Error creating spec for volume %q, pod %q/%q: %v",
 					podVolume.Name,
 					podToAdd.Namespace,
@@ -417,7 +417,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
 			nodeName := types.NodeName(podToAdd.Spec.NodeName)
 			plugin, err := adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
 			if err != nil || plugin == nil {
-				glog.V(10).Infof(
+				klog.V(10).Infof(
 					"Skipping volume %q for pod %q/%q: it does not implement attacher interface. err=%v",
 					podVolume.Name,
 					podToAdd.Namespace,
@@ -427,7 +427,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
 			}
 			volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
 			if err != nil {
-				glog.Errorf(
+				klog.Errorf(
 					"Failed to find unique name for volume %q, pod %q/%q: %v",
 					podVolume.Name,
 					podToAdd.Namespace,
@@ -438,12 +438,12 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
 			if adc.actualStateOfWorld.VolumeNodeExists(volumeName, nodeName) {
 				devicePath, err := adc.getNodeVolumeDevicePath(volumeName, nodeName)
 				if err != nil {
-					glog.Errorf("Failed to find device path: %v", err)
+					klog.Errorf("Failed to find device path: %v", err)
 					continue
 				}
 				err = adc.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, devicePath)
 				if err != nil {
-					glog.Errorf("Failed to update volume spec for node %s: %v", nodeName, err)
+					klog.Errorf("Failed to update volume spec for node %s: %v", nodeName, err)
 				}
 			}
 		}
@@ -542,7 +542,7 @@ func (adc *attachDetachController) nodeDelete(obj interface{}) {
 	nodeName := types.NodeName(node.Name)
 	if err := adc.desiredStateOfWorld.DeleteNode(nodeName); err != nil {
 		// This might happen during drain, but we still want it to appear in our logs
-		glog.Infof("error removing node %q from desired-state-of-world: %v", nodeName, err)
+		klog.Infof("error removing node %q from desired-state-of-world: %v", nodeName, err)
 	}
 
 	adc.processVolumesInUse(nodeName, node.Status.VolumesInUse)
@@ -585,15 +585,15 @@ func (adc *attachDetachController) processNextItem() bool {
 }
 
 func (adc *attachDetachController) syncPVCByKey(key string) error {
-	glog.V(5).Infof("syncPVCByKey[%s]", key)
+	klog.V(5).Infof("syncPVCByKey[%s]", key)
 	namespace, name, err := kcache.SplitMetaNamespaceKey(key)
 	if err != nil {
-		glog.V(4).Infof("error getting namespace & name of pvc %q to get pvc from informer: %v", key, err)
+		klog.V(4).Infof("error getting namespace & name of pvc %q to get pvc from informer: %v", key, err)
 		return nil
 	}
 	pvc, err := adc.pvcLister.PersistentVolumeClaims(namespace).Get(name)
 	if apierrors.IsNotFound(err) {
-		glog.V(4).Infof("error getting pvc %q from informer: %v", key, err)
+		klog.V(4).Infof("error getting pvc %q from informer: %v", key, err)
 		return nil
 	}
 	if err != nil {
@@ -631,7 +631,7 @@ func (adc *attachDetachController) syncPVCByKey(key string) error {
 // mounted.
 func (adc *attachDetachController) processVolumesInUse(
 	nodeName types.NodeName, volumesInUse []v1.UniqueVolumeName) {
-	glog.V(4).Infof("processVolumesInUse for node %q", nodeName)
+	klog.V(4).Infof("processVolumesInUse for node %q", nodeName)
 	for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) {
 		mounted := false
 		for _, volumeInUse := range volumesInUse {
@@ -642,7 +642,7 @@ func (adc *attachDetachController) processVolumesInUse(
 		}
 		err := adc.actualStateOfWorld.SetVolumeMountedByNode(attachedVolume.VolumeName, nodeName, mounted)
 		if err != nil {
-			glog.Warningf(
+			klog.Warningf(
 				"SetVolumeMountedByNode(%q, %q, %v) returned an error: %v",
 				attachedVolume.VolumeName, nodeName, mounted, err)
 		}
@@ -731,7 +731,7 @@ func (adc *attachDetachController) GetServiceAccountTokenFunc() func(_, _ string
 
 func (adc *attachDetachController) DeleteServiceAccountTokenFunc() func(types.UID) {
 	return func(types.UID) {
-		glog.Errorf("DeleteServiceAccountToken unsupported in attachDetachController")
+		klog.Errorf("DeleteServiceAccountToken unsupported in attachDetachController")
 	}
 }
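A minimal sketch (illustrative only, not part of this patch) of the wiring the commit message refers to: unlike glog, which registered its flags in its own init(), klog needs an explicit InitFlags() call in each binary before the flags are parsed.

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc. on flag.CommandLine
	flag.Parse()
	defer klog.Flush() // write out any buffered log lines before exit

	klog.Infof("controller starting")
	klog.V(4).Infof("verbose detail, emitted only with -v=4 or higher")
}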
diff --git a/pkg/controller/volume/attachdetach/cache/BUILD b/pkg/controller/volume/attachdetach/cache/BUILD
index eadd50debaafe..a2890223e0a4d 100644
--- a/pkg/controller/volume/attachdetach/cache/BUILD
+++ b/pkg/controller/volume/attachdetach/cache/BUILD
@@ -20,7 +20,7 @@ go_library(
         "//pkg/volume/util/types:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go
index 7dfe739627479..f25fb008816db 100644
--- a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go
+++ b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go
@@ -26,7 +26,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -304,7 +304,7 @@ func (asw *actualStateOfWorld) AddVolumeNode(
 		// Update the fields for volume object except the nodes attached to the volumes.
 		volumeObj.devicePath = devicePath
 		volumeObj.spec = volumeSpec
-		glog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q",
+		klog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q",
 			volumeName,
 			nodeName,
 			devicePath)
@@ -321,7 +321,7 @@ func (asw *actualStateOfWorld) AddVolumeNode(
 			detachRequestedTime: time.Time{},
 		}
 	} else {
-		glog.V(5).Infof("Volume %q is already added to attachedVolume list to the node %q",
+		klog.V(5).Infof("Volume %q is already added to attachedVolume list to the node %q",
 			volumeName,
 			nodeName)
 	}
@@ -347,7 +347,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(
 	nodeObj.mountedByNode = mounted
 	volumeObj.nodesAttachedTo[nodeName] = nodeObj
-	glog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %t",
+	klog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %t",
 		volumeName,
 		nodeName,
 		mounted)
@@ -361,7 +361,7 @@ func (asw *actualStateOfWorld) ResetDetachRequestTime(
 	volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
 	if err != nil {
-		glog.Errorf("Failed to ResetDetachRequestTime with error: %v", err)
+		klog.Errorf("Failed to ResetDetachRequestTime with error: %v", err)
 		return
 	}
 	nodeObj.detachRequestedTime = time.Time{}
@@ -381,7 +381,7 @@ func (asw *actualStateOfWorld) SetDetachRequestTime(
 	if nodeObj.detachRequestedTime.IsZero() {
 		nodeObj.detachRequestedTime = time.Now()
 		volumeObj.nodesAttachedTo[nodeName] = nodeObj
-		glog.V(4).Infof("Set detach request time to current time for volume %v on node %q",
+		klog.V(4).Infof("Set detach request time to current time for volume %v on node %q",
 			volumeName,
 			nodeName)
 	}
@@ -441,7 +441,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
 	volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
 	// In case the volume/node entry is no longer in attachedVolume list, skip the rest
 	if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil {
-		glog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName)
+		klog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName)
 		return
 	}
 	nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
@@ -453,7 +453,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
 			volumesToReportAsAttached: make(map[v1.UniqueVolumeName]v1.UniqueVolumeName),
 		}
 		asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
-		glog.V(4).Infof("Add new node %q to nodesToUpdateStatusFor", nodeName)
+		klog.V(4).Infof("Add new node %q to nodesToUpdateStatusFor", nodeName)
 	}
 	_, nodeToUpdateVolumeExists := nodeToUpdate.volumesToReportAsAttached[volumeName]
@@ -461,7 +461,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
 		nodeToUpdate.statusUpdateNeeded = true
 		nodeToUpdate.volumesToReportAsAttached[volumeName] = volumeName
 		asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
-		glog.V(4).Infof("Report volume %q as attached to node %q", volumeName, nodeName)
+		klog.V(4).Infof("Report volume %q as attached to node %q", volumeName, nodeName)
 	}
 }
@@ -488,7 +488,7 @@ func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName types.NodeName
 	asw.Lock()
 	defer asw.Unlock()
 	if err := asw.updateNodeStatusUpdateNeeded(nodeName, true); err != nil {
-		glog.Warningf("Failed to update statusUpdateNeeded field in actual state of world: %v", err)
+		klog.Warningf("Failed to update statusUpdateNeeded field in actual state of world: %v", err)
 	}
 }
@@ -623,7 +623,7 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][
 		// of this node will be updated, so set the flag statusUpdateNeeded to false indicating
 		// the current status is already updated.
 		if err := asw.updateNodeStatusUpdateNeeded(nodeName, false); err != nil {
-			glog.Errorf("Failed to update statusUpdateNeeded field when getting volumes: %v", err)
+			klog.Errorf("Failed to update statusUpdateNeeded field when getting volumes: %v", err)
 		}
 	}
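A rough illustration of why the V(...) call sites above are safe to leave unguarded (a sketch against the klog v1 API vendored here, not code from this patch): klog.V(n) returns a Verbose value whose Infof is a no-op unless -v=n or higher is in effect, so only argument evaluation is paid when verbose logging is off. Expensive message construction can still be hoisted behind an explicit check, as the reconciler hunks further below do.

package main

import "k8s.io/klog"

func expensiveDump() string { return "..." } // hypothetical costly helper

func main() {
	klog.InitFlags(nil)
	// Formats and writes only when verbosity >= 4; arguments are always evaluated.
	klog.V(4).Infof("volume %q attached", "vol-1")
	// Hoist costly work behind an explicit guard; klog.V(n) is usable as a bool.
	if klog.V(4) {
		klog.Infof("state dump: %s", expensiveDump())
	}
}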
diff --git a/pkg/controller/volume/attachdetach/metrics/BUILD b/pkg/controller/volume/attachdetach/metrics/BUILD
index d07ac9b9576ec..0bf0087134d97 100644
--- a/pkg/controller/volume/attachdetach/metrics/BUILD
+++ b/pkg/controller/volume/attachdetach/metrics/BUILD
@@ -11,8 +11,8 @@ go_library(
         "//pkg/volume:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/volume/attachdetach/metrics/metrics.go b/pkg/controller/volume/attachdetach/metrics/metrics.go
index 8167dba5f107f..5247ab13867bf 100644
--- a/pkg/controller/volume/attachdetach/metrics/metrics.go
+++ b/pkg/controller/volume/attachdetach/metrics/metrics.go
@@ -19,10 +19,10 @@ package metrics
 import (
 	"sync"
 
-	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/prometheus"
 	"k8s.io/apimachinery/pkg/labels"
 	corelisters "k8s.io/client-go/listers/core/v1"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/util"
 	"k8s.io/kubernetes/pkg/volume"
@@ -119,7 +119,7 @@ func (collector *attachDetachStateCollector) Collect(ch chan<- prometheus.Metric
 			string(nodeName),
 			pluginName)
 		if err != nil {
-			glog.Warningf("Failed to create metric : %v", err)
+			klog.Warningf("Failed to create metric : %v", err)
 		}
 		ch <- metric
 	}
@@ -134,7 +134,7 @@ func (collector *attachDetachStateCollector) Collect(ch chan<- prometheus.Metric
 			pluginName,
 			string(stateName))
 		if err != nil {
-			glog.Warningf("Failed to create metric : %v", err)
+			klog.Warningf("Failed to create metric : %v", err)
 		}
 		ch <- metric
 	}
@@ -144,7 +144,7 @@ func (collector *attachDetachStateCollector) Collect(ch chan<- prometheus.Metric
 func (collector *attachDetachStateCollector) getVolumeInUseCount() volumeCount {
 	pods, err := collector.podLister.List(labels.Everything())
 	if err != nil {
-		glog.Errorf("Error getting pod list")
+		klog.Errorf("Error getting pod list")
 		return nil
 	}
 
diff --git a/pkg/controller/volume/attachdetach/populator/BUILD b/pkg/controller/volume/attachdetach/populator/BUILD
index dffe2bbe487f1..b216acf90f704 100644
--- a/pkg/controller/volume/attachdetach/populator/BUILD
+++ b/pkg/controller/volume/attachdetach/populator/BUILD
@@ -21,7 +21,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go b/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go
index 4065e25a80416..2d9005b97a141 100644
--- a/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go
+++ b/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator.go
@@ -22,7 +22,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/labels"
@@ -92,7 +92,7 @@ func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() {
 		// findAndAddActivePods is called periodically, independently of the main
 		// populator loop.
 		if time.Since(dswp.timeOfLastListPods) < dswp.listPodsRetryDuration {
-			glog.V(5).Infof(
+			klog.V(5).Infof(
 				"Skipping findAndAddActivePods(). Not permitted until %v (listPodsRetryDuration %v).",
 				dswp.timeOfLastListPods.Add(dswp.listPodsRetryDuration),
 				dswp.listPodsRetryDuration)
@@ -109,7 +109,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
 	for dswPodUID, dswPodToAdd := range dswp.desiredStateOfWorld.GetPodToAdd() {
 		dswPodKey, err := kcache.MetaNamespaceKeyFunc(dswPodToAdd.Pod)
 		if err != nil {
-			glog.Errorf("MetaNamespaceKeyFunc failed for pod %q (UID %q) with: %v", dswPodKey, dswPodUID, err)
+			klog.Errorf("MetaNamespaceKeyFunc failed for pod %q (UID %q) with: %v", dswPodKey, dswPodUID, err)
 			continue
 		}
@@ -124,7 +124,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
 		case errors.IsNotFound(err):
 			// if we can't find the pod, we need to delete it below
 		case err != nil:
-			glog.Errorf("podLister Get failed for pod %q (UID %q) with %v", dswPodKey, dswPodUID, err)
+			klog.Errorf("podLister Get failed for pod %q (UID %q) with %v", dswPodKey, dswPodUID, err)
 			continue
 		default:
 			volumeActionFlag := util.DetermineVolumeAction(
@@ -136,7 +136,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
 				informerPodUID := volutil.GetUniquePodName(informerPod)
 				// Check whether the unique identifier of the pod from dsw matches the one retrieved from pod informer
 				if informerPodUID == dswPodUID {
-					glog.V(10).Infof("Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID)
+					klog.V(10).Infof("Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID)
 					continue
 				}
 			}
@@ -144,7 +144,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
 		// the pod from dsw does not exist in pod informer, or it does not match the unique identifier retrieved
 		// from the informer, delete it from dsw
-		glog.V(1).Infof("Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", dswPodKey, dswPodUID)
+		klog.V(1).Infof("Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", dswPodKey, dswPodUID)
 		dswp.desiredStateOfWorld.DeletePod(dswPodUID, dswPodToAdd.VolumeName, dswPodToAdd.NodeName)
 	}
 }
@@ -152,7 +152,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndAddActivePods() {
 	pods, err := dswp.podLister.List(labels.Everything())
 	if err != nil {
-		glog.Errorf("podLister List failed: %v", err)
+		klog.Errorf("podLister List failed: %v", err)
 		return
 	}
 	dswp.timeOfLastListPods = time.Now()
 
diff --git a/pkg/controller/volume/attachdetach/reconciler/BUILD b/pkg/controller/volume/attachdetach/reconciler/BUILD
index dcdbf66d312ba..b09344c04fda2 100644
--- a/pkg/controller/volume/attachdetach/reconciler/BUILD
+++ b/pkg/controller/volume/attachdetach/reconciler/BUILD
@@ -22,7 +22,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/volume/attachdetach/reconciler/reconciler.go b/pkg/controller/volume/attachdetach/reconciler/reconciler.go
index c52c608c2a4a4..deb257e00f4fc 100644
--- a/pkg/controller/volume/attachdetach/reconciler/reconciler.go
+++ b/pkg/controller/volume/attachdetach/reconciler/reconciler.go
@@ -24,11 +24,11 @@ import (
 	"strings"
 	"time"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/metrics"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
@@ -110,11 +110,11 @@ func (rc *reconciler) reconciliationLoopFunc() func() {
 		rc.reconcile()
 
 		if rc.disableReconciliationSync {
-			glog.V(5).Info("Skipping reconciling attached volumes still attached since it is disabled via the command line.")
+			klog.V(5).Info("Skipping reconciling attached volumes still attached since it is disabled via the command line.")
 		} else if rc.syncDuration < time.Second {
-			glog.V(5).Info("Skipping reconciling attached volumes still attached since it is set to less than one second via the command line.")
+			klog.V(5).Info("Skipping reconciling attached volumes still attached since it is set to less than one second via the command line.")
 		} else if time.Since(rc.timeOfLastSync) > rc.syncDuration {
-			glog.V(5).Info("Starting reconciling attached volumes still attached")
+			klog.V(5).Info("Starting reconciling attached volumes still attached")
 			rc.sync()
 		}
 	}
@@ -184,21 +184,21 @@ func (rc *reconciler) reconcile() {
 			// may pass while at the same time the volume leaves the pending state, resulting in
 			// double detach attempts
 			if rc.attacherDetacher.IsOperationPending(attachedVolume.VolumeName, "") {
-				glog.V(10).Infof("Operation for volume %q is already running. Can't start detach for %q", attachedVolume.VolumeName, attachedVolume.NodeName)
+				klog.V(10).Infof("Operation for volume %q is already running. Can't start detach for %q", attachedVolume.VolumeName, attachedVolume.NodeName)
 				continue
 			}
 
 			// Set the detach request time
 			elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName)
 			if err != nil {
-				glog.Errorf("Cannot trigger detach because it fails to set detach request time with error %v", err)
+				klog.Errorf("Cannot trigger detach because it fails to set detach request time with error %v", err)
 				continue
 			}
 			// Check whether timeout has reached the maximum waiting time
 			timeout := elapsedTime > rc.maxWaitForUnmountDuration
 			// Check whether volume is still mounted. Skip detach if it is still mounted unless timeout
 			if attachedVolume.MountedByNode && !timeout {
-				glog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Cannot detach volume because it is still mounted", ""))
+				klog.V(12).Infof(attachedVolume.GenerateMsgDetailed("Cannot detach volume because it is still mounted", ""))
 				continue
 			}
 
@@ -206,7 +206,7 @@ func (rc *reconciler) reconcile() {
 			// If it fails to update node status, skip detach volume
 			err = rc.actualStateOfWorld.RemoveVolumeFromReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName)
 			if err != nil {
-				glog.V(5).Infof("RemoveVolumeFromReportAsAttached failed while removing volume %q from node %q with: %v",
+				klog.V(5).Infof("RemoveVolumeFromReportAsAttached failed while removing volume %q from node %q with: %v",
 					attachedVolume.VolumeName,
 					attachedVolume.NodeName,
 					err)
@@ -216,27 +216,27 @@ func (rc *reconciler) reconcile() {
 			err = rc.nodeStatusUpdater.UpdateNodeStatuses()
 			if err != nil {
 				// Skip detaching this volume if unable to update node status
-				glog.Errorf(attachedVolume.GenerateErrorDetailed("UpdateNodeStatuses failed while attempting to report volume as attached", err).Error())
+				klog.Errorf(attachedVolume.GenerateErrorDetailed("UpdateNodeStatuses failed while attempting to report volume as attached", err).Error())
 				continue
 			}
 
 			// Trigger detach volume which requires verifing safe to detach step
 			// If timeout is true, skip verifySafeToDetach check
-			glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting attacherDetacher.DetachVolume", ""))
+			klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting attacherDetacher.DetachVolume", ""))
 			verifySafeToDetach := !timeout
 			err = rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld)
 			if err == nil {
 				if !timeout {
-					glog.Infof(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", ""))
+					klog.Infof(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", ""))
 				} else {
 					metrics.RecordForcedDetachMetric()
-					glog.Warningf(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", fmt.Sprintf("This volume is not safe to detach, but maxWaitForUnmountDuration %v expired, force detaching", rc.maxWaitForUnmountDuration)))
+					klog.Warningf(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", fmt.Sprintf("This volume is not safe to detach, but maxWaitForUnmountDuration %v expired, force detaching", rc.maxWaitForUnmountDuration)))
 				}
 			}
 			if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
 				// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
 				// Log all other errors.
-				glog.Errorf(attachedVolume.GenerateErrorDetailed("attacherDetacher.DetachVolume failed to start", err).Error())
+				klog.Errorf(attachedVolume.GenerateErrorDetailed("attacherDetacher.DetachVolume failed to start", err).Error())
 			}
 		}
 	}
@@ -246,7 +246,7 @@ func (rc *reconciler) reconcile() {
 	// Update Node Status
 	err := rc.nodeStatusUpdater.UpdateNodeStatuses()
 	if err != nil {
-		glog.Warningf("UpdateNodeStatuses failed with: %v", err)
+		klog.Warningf("UpdateNodeStatuses failed with: %v", err)
 	}
 }
 
@@ -255,16 +255,16 @@ func (rc *reconciler) attachDesiredVolumes() {
 	for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
 		if rc.actualStateOfWorld.VolumeNodeExists(volumeToAttach.VolumeName, volumeToAttach.NodeName) {
 			// Volume/Node exists, touch it to reset detachRequestedTime
-			if glog.V(5) {
-				glog.Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
+			if klog.V(5) {
+				klog.Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
 			}
 			rc.actualStateOfWorld.ResetDetachRequestTime(volumeToAttach.VolumeName, volumeToAttach.NodeName)
 			continue
 		}
 		// Don't even try to start an operation if there is already one running
 		if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "") {
-			if glog.V(10) {
-				glog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
+			if klog.V(10) {
+				klog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
 			}
 			continue
 		}
@@ -281,17 +281,17 @@ func (rc *reconciler) attachDesiredVolumes() {
 		}
 
 		// Volume/Node doesn't exist, spawn a goroutine to attach it
-		if glog.V(5) {
-			glog.Infof(volumeToAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", ""))
+		if klog.V(5) {
+			klog.Infof(volumeToAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", ""))
 		}
 		err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
 		if err == nil {
-			glog.Infof(volumeToAttach.GenerateMsgDetailed("attacherDetacher.AttachVolume started", ""))
+			klog.Infof(volumeToAttach.GenerateMsgDetailed("attacherDetacher.AttachVolume started", ""))
 		}
 		if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
 			// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
 			// Log all other errors.
-			glog.Errorf(volumeToAttach.GenerateErrorDetailed("attacherDetacher.AttachVolume failed to start", err).Error())
+			klog.Errorf(volumeToAttach.GenerateErrorDetailed("attacherDetacher.AttachVolume failed to start", err).Error())
 		}
 	}
 }
@@ -326,7 +326,7 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
 		// Log detailed message to system admin
 		nodeList := strings.Join(otherNodesStr, ", ")
 		detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already exclusively attached to node %s and can't be attached to another", nodeList))
-		glog.Warningf(detailedMsg)
+		klog.Warningf(detailedMsg)
 		return
 	}
@@ -367,5 +367,5 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
 		podNames = append(podNames, pod.Namespace+"/"+pod.Name)
 	}
 	detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already used by pods %s on node %s", strings.Join(podNames, ", "), strings.Join(otherNodesStr, ", ")))
-	glog.Warningf(detailedMsg)
+	klog.Warningf(detailedMsg)
 }
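A small aside on the reportMultiAttachError hunks just above (a sketch, not a change made by this patch): Warningf treats the pre-built detailedMsg as a format string, so a literal '%' inside a volume or pod name would be misrendered, while the print-style klog.Warning variant logs the string verbatim.

package main

import "k8s.io/klog"

func main() {
	klog.InitFlags(nil)
	detailedMsg := `volume "pv-100%" is already attached` // contains a literal %
	klog.Warningf(detailedMsg) // '%' is interpreted as a format verb here
	klog.Warning(detailedMsg)  // print-style variant, logs the message as-is
}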
diff --git a/pkg/controller/volume/attachdetach/statusupdater/BUILD b/pkg/controller/volume/attachdetach/statusupdater/BUILD
index 97de6707a1696..b72929ecdaa0a 100644
--- a/pkg/controller/volume/attachdetach/statusupdater/BUILD
+++ b/pkg/controller/volume/attachdetach/statusupdater/BUILD
@@ -20,7 +20,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go
index b5cbb22446486..615444d76e06a 100644
--- a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go
+++ b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go
@@ -19,7 +19,7 @@ limitations under the License.
 package statusupdater
 
 import (
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -65,7 +65,7 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
 		if errors.IsNotFound(err) {
 			// If node does not exist, its status cannot be updated.
 			// Do nothing so that there is no retry until node is created.
-			glog.V(2).Infof(
+			klog.V(2).Infof(
 				"Could not update node status. Failed to find node %q in NodeInformer cache. Error: '%v'",
 				nodeName,
 				err)
@@ -73,7 +73,7 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
 		} else if err != nil {
 			// For all other errors, log error and reset flag statusUpdateNeeded
 			// back to true to indicate this node status needs to be updated again.
-			glog.V(2).Infof("Error retrieving nodes from node lister. Error: %v", err)
+			klog.V(2).Infof("Error retrieving nodes from node lister. Error: %v", err)
 			nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
 			continue
 		}
@@ -83,7 +83,7 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
 			// to indicate this node status needs to be updated again
 			nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
 
-			glog.V(2).Infof(
+			klog.V(2).Infof(
 				"Could not update node status for %q; re-marking for update. %v",
 				nodeName,
 				err)
@@ -103,6 +103,6 @@ func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj
 		return err
 	}
 
-	glog.V(4).Infof("Updating status %q for node %q succeeded. VolumesAttached: %v", patchBytes, nodeName, attachedVolumes)
+	klog.V(4).Infof("Updating status %q for node %q succeeded. VolumesAttached: %v", patchBytes, nodeName, attachedVolumes)
 	return nil
 }
diff --git a/pkg/controller/volume/attachdetach/testing/BUILD b/pkg/controller/volume/attachdetach/testing/BUILD
index bc06368aebc66..1d52155d522e0 100644
--- a/pkg/controller/volume/attachdetach/testing/BUILD
+++ b/pkg/controller/volume/attachdetach/testing/BUILD
@@ -19,7 +19,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//staging/src/k8s.io/client-go/testing:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/volume/attachdetach/testing/testvolumespec.go b/pkg/controller/volume/attachdetach/testing/testvolumespec.go
index 7dceaba27326c..a243e724b25ca 100644
--- a/pkg/controller/volume/attachdetach/testing/testvolumespec.go
+++ b/pkg/controller/volume/attachdetach/testing/testvolumespec.go
@@ -21,7 +21,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -29,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
 )
@@ -237,7 +237,7 @@ func (plugin *TestPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
 	plugin.pluginLock.Lock()
 	defer plugin.pluginLock.Unlock()
 	if spec == nil {
-		glog.Errorf("GetVolumeName called with nil volume spec")
+		klog.Errorf("GetVolumeName called with nil volume spec")
 		plugin.ErrorEncountered = true
 	}
 	return spec.Name(), nil
@@ -247,7 +247,7 @@ func (plugin *TestPlugin) CanSupport(spec *volume.Spec) bool {
 	plugin.pluginLock.Lock()
 	defer plugin.pluginLock.Unlock()
 	if spec == nil {
-		glog.Errorf("CanSupport called with nil volume spec")
+		klog.Errorf("CanSupport called with nil volume spec")
 		plugin.ErrorEncountered = true
 	}
 	return true
@@ -261,7 +261,7 @@ func (plugin *TestPlugin) NewMounter(spec *volume.Spec, podRef *v1.Pod, opts vol
 	plugin.pluginLock.Lock()
 	defer plugin.pluginLock.Unlock()
 	if spec == nil {
-		glog.Errorf("NewMounter called with nil volume spec")
+		klog.Errorf("NewMounter called with nil volume spec")
 		plugin.ErrorEncountered = true
 	}
 	return nil, nil
@@ -373,7 +373,7 @@ func (attacher *testPluginAttacher) Attach(spec *volume.Spec, nodeName types.Nod
 	defer attacher.pluginLock.Unlock()
 	if spec == nil {
 		*attacher.ErrorEncountered = true
-		glog.Errorf("Attach called with nil volume spec")
+		klog.Errorf("Attach called with nil volume spec")
 		return "", fmt.Errorf("Attach called with nil volume spec")
 	}
 	attacher.attachedVolumeMap[string(nodeName)] = append(attacher.attachedVolumeMap[string(nodeName)], spec.Name())
@@ -389,7 +389,7 @@ func (attacher *testPluginAttacher) WaitForAttach(spec *volume.Spec, devicePath
 	defer attacher.pluginLock.Unlock()
 	if spec == nil {
 		*attacher.ErrorEncountered = true
-		glog.Errorf("WaitForAttach called with nil volume spec")
+		klog.Errorf("WaitForAttach called with nil volume spec")
 		return "", fmt.Errorf("WaitForAttach called with nil volume spec")
 	}
 	fakePath := fmt.Sprintf("%s/%s", devicePath, spec.Name())
@@ -401,7 +401,7 @@ func (attacher *testPluginAttacher) GetDeviceMountPath(spec *volume.Spec) (strin
 	defer attacher.pluginLock.Unlock()
 	if spec == nil {
 		*attacher.ErrorEncountered = true
-		glog.Errorf("GetDeviceMountPath called with nil volume spec")
+		klog.Errorf("GetDeviceMountPath called with nil volume spec")
 		return "", fmt.Errorf("GetDeviceMountPath called with nil volume spec")
 	}
 	return "", nil
@@ -412,7 +412,7 @@ func (attacher *testPluginAttacher) MountDevice(spec *volume.Spec, devicePath st
 	defer attacher.pluginLock.Unlock()
 	if spec == nil {
 		*attacher.ErrorEncountered = true
-		glog.Errorf("MountDevice called with nil volume spec")
+		klog.Errorf("MountDevice called with nil volume spec")
 		return fmt.Errorf("MountDevice called with nil volume spec")
 	}
 	return nil
diff --git a/pkg/controller/volume/attachdetach/util/BUILD b/pkg/controller/volume/attachdetach/util/BUILD
index 93889f8193094..59c1db5809f60 100644
--- a/pkg/controller/volume/attachdetach/util/BUILD
+++ b/pkg/controller/volume/attachdetach/util/BUILD
@@ -16,7 +16,7 @@ go_library(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/volume/attachdetach/util/util.go b/pkg/controller/volume/attachdetach/util/util.go
index c17ab72990698..8affeb46f08be 100644
--- a/pkg/controller/volume/attachdetach/util/util.go
+++ b/pkg/controller/volume/attachdetach/util/util.go
@@ -19,10 +19,10 @@ package util
 import (
 	"fmt"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	corelisters "k8s.io/client-go/listers/core/v1"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 	"k8s.io/kubernetes/pkg/volume"
 	"k8s.io/kubernetes/pkg/volume/util"
@@ -32,7 +32,7 @@ import (
 // specified volume. It dereference any PVC to get PV objects, if needed.
 func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister) (*volume.Spec, error) {
 	if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
-		glog.V(10).Infof(
+		klog.V(10).Infof(
 			"Found PVC, ClaimName: %q/%q",
 			podNamespace,
 			pvcSource.ClaimName)
@@ -48,7 +48,7 @@ func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, pvcLister coreli
 				err)
 		}
 
-		glog.V(10).Infof(
+		klog.V(10).Infof(
 			"Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
 			podNamespace,
 			pvcSource.ClaimName,
@@ -66,7 +66,7 @@ func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, pvcLister coreli
 				err)
 		}
 
-		glog.V(10).Infof(
+		klog.V(10).Infof(
 			"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
 			volumeSpec.Name(),
 			pvName,
@@ -166,7 +166,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
 	}
 
 	if len(pod.Spec.Volumes) <= 0 {
-		glog.V(10).Infof("Skipping processing of pod %q/%q: it has no volumes.",
+		klog.V(10).Infof("Skipping processing of pod %q/%q: it has no volumes.",
 			pod.Namespace,
 			pod.Name)
 		return
@@ -174,7 +174,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
 	nodeName := types.NodeName(pod.Spec.NodeName)
 	if nodeName == "" {
-		glog.V(10).Infof(
+		klog.V(10).Infof(
 			"Skipping processing of pod %q/%q: it is not scheduled to a node.",
 			pod.Namespace,
 			pod.Name)
@@ -183,7 +183,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
 		// If the node the pod is scheduled to does not exist in the desired
 		// state of the world data structure, that indicates the node is not
 		// yet managed by the controller. Therefore, ignore the pod.
-		glog.V(4).Infof(
+		klog.V(4).Infof(
 			"Skipping processing of pod %q/%q: it is scheduled to node %q which is not managed by the controller.",
 			pod.Namespace,
 			pod.Name,
@@ -195,7 +195,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
 	for _, podVolume := range pod.Spec.Volumes {
 		volumeSpec, err := CreateVolumeSpec(podVolume, pod.Namespace, pvcLister, pvLister)
 		if err != nil {
-			glog.V(10).Infof(
+			klog.V(10).Infof(
 				"Error processing volume %q for pod %q/%q: %v",
 				podVolume.Name,
 				pod.Namespace,
@@ -207,7 +207,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
 		attachableVolumePlugin, err := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
 		if err != nil || attachableVolumePlugin == nil {
-			glog.V(10).Infof(
+			klog.V(10).Infof(
 				"Skipping volume %q for pod %q/%q: it does not implement attacher interface. err=%v",
 				podVolume.Name,
 				pod.Namespace,
@@ -222,7 +222,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
 			_, err := desiredStateOfWorld.AddPod(uniquePodName, pod, volumeSpec, nodeName)
 			if err != nil {
-				glog.V(10).Infof(
+				klog.V(10).Infof(
 					"Failed to add volume %q for pod %q/%q to desiredStateOfWorld. %v",
 					podVolume.Name,
 					pod.Namespace,
@@ -235,7 +235,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
 			uniqueVolumeName, err := util.GetUniqueVolumeNameFromSpec(attachableVolumePlugin, volumeSpec)
 			if err != nil {
-				glog.V(10).Infof(
+				klog.V(10).Infof(
 					"Failed to delete volume %q for pod %q/%q from desiredStateOfWorld. GetUniqueVolumeNameFromSpec failed with %v",
 					podVolume.Name,
 					pod.Namespace,
diff --git a/pkg/controller/volume/expand/BUILD b/pkg/controller/volume/expand/BUILD
index 424d4a48371f6..5b20360dc3894 100644
--- a/pkg/controller/volume/expand/BUILD
+++ b/pkg/controller/volume/expand/BUILD
@@ -39,7 +39,7 @@ go_library(
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/cloud-provider:go_default_library",
         "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/volume/expand/cache/BUILD b/pkg/controller/volume/expand/cache/BUILD
index e39803a409cee..a416cc859c5ec 100644
--- a/pkg/controller/volume/expand/cache/BUILD
+++ b/pkg/controller/volume/expand/cache/BUILD
@@ -19,7 +19,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/volume/expand/cache/volume_resize_map.go b/pkg/controller/volume/expand/cache/volume_resize_map.go
index d5392c1b65aef..76d85ec4d9277 100644
--- a/pkg/controller/volume/expand/cache/volume_resize_map.go
+++ b/pkg/controller/volume/expand/cache/volume_resize_map.go
@@ -21,13 +21,13 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	commontypes "k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/pkg/volume/util/types"
 )
@@ -97,7 +97,7 @@ func NewVolumeResizeMap(kubeClient clientset.Interface) VolumeResizeMap {
 // the PVC and hopefully after a no-op resize in volume plugin, PVC will be updated with right values as well.
 func (resizeMap *volumeResizeMap) AddPVCUpdate(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
 	if pv.Spec.ClaimRef == nil || pvc.Namespace != pv.Spec.ClaimRef.Namespace || pvc.Name != pv.Spec.ClaimRef.Name {
-		glog.V(4).Infof("Persistent Volume is not bound to PVC being updated : %s", util.ClaimToClaimKey(pvc))
+		klog.V(4).Infof("Persistent Volume is not bound to PVC being updated : %s", util.ClaimToClaimKey(pvc))
 		return
 	}
@@ -112,7 +112,7 @@ func (resizeMap *volumeResizeMap) AddPVCUpdate(pvc *v1.PersistentVolumeClaim, pv
 		return
 	}
 
-	glog.V(4).Infof("Adding pvc %s with Size %s/%s for resizing", util.ClaimToClaimKey(pvc), pvcSize.String(), pvcStatusSize.String())
+	klog.V(4).Infof("Adding pvc %s with Size %s/%s for resizing", util.ClaimToClaimKey(pvc), pvcSize.String(), pvcStatusSize.String())
 
 	pvcRequest := &PVCWithResizeRequest{
 		PVC: pvc,
@@ -144,7 +144,7 @@ func (resizeMap *volumeResizeMap) GetPVCsWithResizeRequest() []*PVCWithResizeReq
 // deleting a pvc in this map doesn't affect operations that are already inflight.
 func (resizeMap *volumeResizeMap) DeletePVC(pvc *v1.PersistentVolumeClaim) {
 	pvcUniqueName := types.UniquePVCName(pvc.UID)
-	glog.V(5).Infof("Removing PVC %v from resize map", pvcUniqueName)
+	klog.V(5).Infof("Removing PVC %v from resize map", pvcUniqueName)
 	resizeMap.Lock()
 	defer resizeMap.Unlock()
 	delete(resizeMap.pvcrs, pvcUniqueName)
@@ -156,7 +156,7 @@ func (resizeMap *volumeResizeMap) MarkAsResized(pvcr *PVCWithResizeRequest, newS
 
 	err := resizeMap.updatePVCCapacityAndConditions(pvcr, newSize, emptyCondition)
 	if err != nil {
-		glog.V(4).Infof("Error updating PV spec capacity for volume %q with : %v", pvcr.QualifiedName(), err)
+		klog.V(4).Infof("Error updating PV spec capacity for volume %q with : %v", pvcr.QualifiedName(), err)
 		return err
 	}
 	return nil
@@ -205,7 +205,7 @@ func (resizeMap *volumeResizeMap) UpdatePVSize(pvcr *PVCWithResizeRequest, newSi
 
 	_, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(pvClone.Name, commontypes.StrategicMergePatchType, patchBytes)
 	if updateErr != nil {
-		glog.V(4).Infof("Error updating pv %q with error : %v", pvClone.Name, updateErr)
+		klog.V(4).Infof("Error updating pv %q with error : %v", pvClone.Name, updateErr)
 		return updateErr
 	}
 	return nil
diff --git a/pkg/controller/volume/expand/expand_controller.go b/pkg/controller/volume/expand/expand_controller.go
index f40bf080a8706..d53b953524d6c 100644
--- a/pkg/controller/volume/expand/expand_controller.go
+++ b/pkg/controller/volume/expand/expand_controller.go
@@ -24,7 +24,7 @@ import (
 	"net"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	authenticationv1 "k8s.io/api/authentication/v1"
 	"k8s.io/api/core/v1"
@@ -118,7 +118,7 @@ func NewExpandController(
 	}
 
 	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(glog.Infof)
+	eventBroadcaster.StartLogging(klog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
 	expc.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"})
 	blkutil := volumepathhandler.NewBlockVolumePathHandler()
@@ -150,8 +150,8 @@ func NewExpandController(
 func (expc *expandController) Run(stopCh <-chan struct{}) {
 	defer runtime.HandleCrash()
 
-	glog.Infof("Starting expand controller")
-	defer glog.Infof("Shutting down expand controller")
+	klog.Infof("Starting expand controller")
+	defer klog.Infof("Shutting down expand controller")
 
 	if !controller.WaitForCacheSync("expand", stopCh, expc.pvcsSynced, expc.pvSynced) {
 		return
@@ -204,7 +204,7 @@ func (expc *expandController) pvcUpdate(oldObj, newObj interface{}) {
 	if newSize.Cmp(oldSize) > 0 {
 		pv, err := getPersistentVolume(newPVC, expc.pvLister)
 		if err != nil {
-			glog.V(5).Infof("Error getting Persistent Volume for PVC %q : %v", newPVC.UID, err)
+			klog.V(5).Infof("Error getting Persistent Volume for PVC %q : %v", newPVC.UID, err)
 			return
 		}
@@ -219,7 +219,7 @@ func (expc *expandController) pvcUpdate(oldObj, newObj interface{}) {
 			}
 			expc.recorder.Event(newPVC, eventType, events.ExternalExpanding, fmt.Sprintf("Ignoring the PVC: %v.", err))
-			glog.V(3).Infof("Ignoring the PVC %q (uid: %q) : %v.",
+			klog.V(3).Infof("Ignoring the PVC %q (uid: %q) : %v.",
 				util.GetPersistentVolumeClaimQualifiedName(newPVC), newPVC.UID, err)
 			return
 		}
@@ -319,7 +319,7 @@ func (expc *expandController) GetServiceAccountTokenFunc() func(_, _ string, _ *
 
 func (expc *expandController) DeleteServiceAccountTokenFunc() func(types.UID) {
 	return func(types.UID) {
-		glog.Errorf("DeleteServiceAccountToken unsupported in expandController")
+		klog.Errorf("DeleteServiceAccountToken unsupported in expandController")
 	}
 }
 
diff --git a/pkg/controller/volume/expand/pvc_populator.go b/pkg/controller/volume/expand/pvc_populator.go
index 4f29e2292e222..20ccd6350b080 100644
--- a/pkg/controller/volume/expand/pvc_populator.go
+++ b/pkg/controller/volume/expand/pvc_populator.go
@@ -24,7 +24,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -84,7 +84,7 @@ func (populator *pvcPopulator) Run(stopCh <-chan struct{}) {
 func (populator *pvcPopulator) Sync() {
 	pvcs, err := populator.pvcLister.List(labels.Everything())
 	if err != nil {
-		glog.Errorf("Listing PVCs failed in populator : %v", err)
+		klog.Errorf("Listing PVCs failed in populator : %v", err)
 		return
 	}
 
@@ -92,7 +92,7 @@ func (populator *pvcPopulator) Sync() {
 		pv, err := getPersistentVolume(pvc, populator.pvLister)
 		if err != nil {
-			glog.V(5).Infof("Error getting persistent volume for PVC %q : %v", pvc.UID, err)
+			klog.V(5).Infof("Error getting persistent volume for PVC %q : %v", pvc.UID, err)
 			continue
 		}
@@ -110,7 +110,7 @@ func (populator *pvcPopulator) Sync() {
 		}
 		populator.recorder.Event(pvc, eventType, events.ExternalExpanding, fmt.Sprintf("Ignoring the PVC: %v.", err))
-		glog.V(3).Infof("Ignoring the PVC %q (uid: %q) : %v.",
+		klog.V(3).Infof("Ignoring the PVC %q (uid: %q) : %v.",
 			util.GetPersistentVolumeClaimQualifiedName(pvc), pvc.UID, err)
 			continue
 		}
diff --git a/pkg/controller/volume/expand/sync_volume_resize.go b/pkg/controller/volume/expand/sync_volume_resize.go
index 9ad10eb277134..c355a96747295 100644
--- a/pkg/controller/volume/expand/sync_volume_resize.go
+++ b/pkg/controller/volume/expand/sync_volume_resize.go
@@ -19,11 +19,11 @@ package expand
 import (
 	"time"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/controller/volume/expand/cache"
 	"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
 	"k8s.io/kubernetes/pkg/volume/util"
@@ -66,7 +66,7 @@ func (rc *syncResize) Sync() {
 		uniqueVolumeKey := v1.UniqueVolumeName(pvcWithResizeRequest.UniquePVCKey())
 		updatedClaim, err := markPVCResizeInProgress(pvcWithResizeRequest, rc.kubeClient)
 		if err != nil {
-			glog.V(5).Infof("Error setting PVC %s in progress with error : %v", pvcWithResizeRequest.QualifiedName(), err)
+			klog.V(5).Infof("Error setting PVC %s in progress with error : %v", pvcWithResizeRequest.QualifiedName(), err)
 			continue
 		}
 		if updatedClaim != nil {
@@ -74,15 +74,15 @@ func (rc *syncResize) Sync() {
 		}
 
 		if rc.opsExecutor.IsOperationPending(uniqueVolumeKey, "") {
-			glog.V(10).Infof("Operation for PVC %v is already pending", pvcWithResizeRequest.QualifiedName())
+			klog.V(10).Infof("Operation for PVC %v is already pending", pvcWithResizeRequest.QualifiedName())
 			continue
 		}
 		growFuncError := rc.opsExecutor.ExpandVolume(pvcWithResizeRequest, rc.resizeMap)
 		if growFuncError != nil && !exponentialbackoff.IsExponentialBackoff(growFuncError) {
-			glog.Errorf("Error growing pvc %s with %v", pvcWithResizeRequest.QualifiedName(), growFuncError)
+			klog.Errorf("Error growing pvc %s with %v", pvcWithResizeRequest.QualifiedName(), growFuncError)
 		}
 		if growFuncError == nil {
-			glog.V(5).Infof("Started opsExecutor.ExpandVolume for volume %s", pvcWithResizeRequest.QualifiedName())
+			klog.V(5).Infof("Started opsExecutor.ExpandVolume for volume %s", pvcWithResizeRequest.QualifiedName())
 		}
 	}
 }
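One hunk above is worth a note: eventBroadcaster.StartLogging(glog.Infof) becomes StartLogging(klog.Infof) with no other change, because StartLogging accepts any printf-style function and klog.Infof keeps the signature glog.Infof had. A minimal sketch of that wiring (illustrative only, assuming client-go's record package as vendored here):

package main

import (
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	broadcaster := record.NewBroadcaster()
	// StartLogging takes a func(format string, args ...interface{}),
	// so klog.Infof drops in exactly where glog.Infof was.
	broadcaster.StartLogging(klog.Infof)
}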
diff --git a/pkg/controller/volume/persistentvolume/BUILD b/pkg/controller/volume/persistentvolume/BUILD
index 7ac8a718840fa..65fd1613486d8 100644
--- a/pkg/controller/volume/persistentvolume/BUILD
+++ b/pkg/controller/volume/persistentvolume/BUILD
@@ -58,8 +58,8 @@ go_library(
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
         "//staging/src/k8s.io/cloud-provider:go_default_library",
         "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -106,7 +106,7 @@ go_test(
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/client-go/tools/reference:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
diff --git a/pkg/controller/volume/persistentvolume/framework_test.go b/pkg/controller/volume/persistentvolume/framework_test.go
index 4626e661e2aa4..a4fd34c552eaa 100644
--- a/pkg/controller/volume/persistentvolume/framework_test.go
+++ b/pkg/controller/volume/persistentvolume/framework_test.go
@@ -27,7 +27,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1"
@@ -159,7 +159,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
 	r.lock.Lock()
 	defer r.lock.Unlock()
 
-	glog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource())
+	klog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource())
 
 	// Inject error when requested
 	err = r.injectReactError(action)
@@ -183,7 +183,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
 		r.volumes[volume.Name] = volume
 		r.changedObjects = append(r.changedObjects, volume)
 		r.changedSinceLastSync++
-		glog.V(4).Infof("created volume %s", volume.Name)
+		klog.V(4).Infof("created volume %s", volume.Name)
 		return true, volume, nil
 
 	case action.Matches("update", "persistentvolumes"):
@@ -209,7 +209,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
 		r.volumes[volume.Name] = volume
 		r.changedObjects = append(r.changedObjects, volume)
 		r.changedSinceLastSync++
-		glog.V(4).Infof("saved updated volume %s", volume.Name)
+		klog.V(4).Infof("saved updated volume %s", volume.Name)
 		return true, volume, nil
 
 	case action.Matches("update", "persistentvolumeclaims"):
@@ -235,23 +235,23 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
 		r.claims[claim.Name] = claim
 		r.changedObjects = append(r.changedObjects, claim)
 		r.changedSinceLastSync++
-		glog.V(4).Infof("saved updated claim %s", claim.Name)
+		klog.V(4).Infof("saved updated claim %s", claim.Name)
 		return true, claim, nil
 
 	case action.Matches("get", "persistentvolumes"):
 		name := action.(core.GetAction).GetName()
 		volume, found := r.volumes[name]
 		if found {
-			glog.V(4).Infof("GetVolume: found %s", volume.Name)
+			klog.V(4).Infof("GetVolume: found %s", volume.Name)
 			return true, volume, nil
 		} else {
-			glog.V(4).Infof("GetVolume: volume %s not found", name)
+			klog.V(4).Infof("GetVolume: volume %s not found", name)
 			return true, nil, fmt.Errorf("Cannot find volume %s", name)
 		}
 
 	case action.Matches("delete", "persistentvolumes"):
 		name := action.(core.DeleteAction).GetName()
-		glog.V(4).Infof("deleted volume %s", name)
+		klog.V(4).Infof("deleted volume %s", name)
 		_, found := r.volumes[name]
 		if found {
 			delete(r.volumes, name)
@@ -263,7 +263,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
 	case action.Matches("delete", "persistentvolumeclaims"):
 		name := action.(core.DeleteAction).GetName()
-		glog.V(4).Infof("deleted claim %s", name)
+		klog.V(4).Infof("deleted claim %s", name)
 		_, found := r.volumes[name]
 		if found {
 			delete(r.claims, name)
@@ -286,11 +286,11 @@ func (r *volumeReactor) injectReactError(action core.Action) error {
 	}
 
 	for i, expected := range r.errors {
-		glog.V(4).Infof("trying to match %q %q with %q %q", expected.verb, expected.resource, action.GetVerb(), action.GetResource())
+		klog.V(4).Infof("trying to match %q %q with %q %q", expected.verb, expected.resource, action.GetVerb(), action.GetResource())
 		if action.Matches(expected.verb, expected.resource) {
 			// That's the action we're waiting for, remove it from injectedErrors
 			r.errors = append(r.errors[:i], r.errors[i+1:]...)
-			glog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.verb, expected.resource, expected.error)
+			klog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.verb, expected.resource, expected.error)
 			return expected.error
 		}
 	}
@@ -379,14 +379,14 @@ func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeCo
 		select {
 		case event, ok := <-fakeRecorder.Events:
 			if ok {
-				glog.V(5).Infof("event recorder got event %s", event)
+				klog.V(5).Infof("event recorder got event %s", event)
 				gotEvents = append(gotEvents, event)
 			} else {
-				glog.V(5).Infof("event recorder finished")
+				klog.V(5).Infof("event recorder finished")
 				finished = true
 			}
 		case _, _ = <-timer.C:
-			glog.V(5).Infof("event recorder timeout")
+			klog.V(5).Infof("event recorder timeout")
 			finished = true
 		}
 	}
@@ -426,10 +426,10 @@ func (r *volumeReactor) popChange() interface{} {
 		switch obj.(type) {
 		case *v1.PersistentVolume:
 			vol, _ := obj.(*v1.PersistentVolume)
-			glog.V(4).Infof("reactor queue: %s", vol.Name)
+			klog.V(4).Infof("reactor queue: %s", vol.Name)
 		case *v1.PersistentVolumeClaim:
 			claim, _ := obj.(*v1.PersistentVolumeClaim)
-			glog.V(4).Infof("reactor queue: %s", claim.Name)
+			klog.V(4).Infof("reactor queue: %s", claim.Name)
 		}
 	}
@@ -898,7 +898,7 @@ func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(c
 	// Inject a hook before async operation starts
 	ctrl.preOperationHook = func(operationName string) {
 		// Inside the hook, run the function to inject
-		glog.V(4).Infof("reactor: scheduleOperation reached, injecting call")
+		klog.V(4).Infof("reactor: scheduleOperation reached, injecting call")
 		injectBeforeOperation(ctrl, reactor)
 	}
@@ -945,7 +945,7 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *volumeReacto
 // 3. Compare resulting volumes and claims with expected volumes and claims.
 func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {
 	for _, test := range tests {
-		glog.V(4).Infof("starting test %q", test.name)
+		klog.V(4).Infof("starting test %q", test.name)
 
 		// Initialize the controller
 		client := &fake.Clientset{}
@@ -1008,7 +1008,7 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag
 // Some limit of calls in enforced to prevent endless loops.
 func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) {
 	for _, test := range tests {
-		glog.V(4).Infof("starting multisync test %q", test.name)
+		klog.V(4).Infof("starting multisync test %q", test.name)
 
 		// Initialize the controller
 		client := &fake.Clientset{}
@@ -1046,7 +1046,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 		counter := 0
 		for {
 			counter++
-			glog.V(4).Infof("test %q: iteration %d", test.name, counter)
+			klog.V(4).Infof("test %q: iteration %d", test.name, counter)
 
 			if counter > 100 {
 				t.Errorf("Test %q failed: too many iterations", test.name)
@@ -1064,7 +1064,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 				// Simulate "periodic sync" of everything (until it produces
 				// no changes).
 				firstSync = false
-				glog.V(4).Infof("test %q: simulating periodical sync of all claims and volumes", test.name)
+				klog.V(4).Infof("test %q: simulating periodical sync of all claims and volumes", test.name)
 				reactor.syncAll()
 			} else {
 				// Last sync did not produce any updates, the test reached
@@ -1085,7 +1085,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 					if err != nil {
 						if err == versionConflictError {
 							// Ignore version errors
-							glog.V(4).Infof("test intentionaly ignores version error.")
+							klog.V(4).Infof("test intentionally ignores version error.")
 						} else {
 							t.Errorf("Error calling syncClaim: %v", err)
 							// Finish the loop on the first error
@@ -1102,7 +1102,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 					if err != nil {
 						if err == versionConflictError {
 							// Ignore version errors
-							glog.V(4).Infof("test intentionaly ignores version error.")
+							klog.V(4).Infof("test intentionally ignores version error.")
 						} else {
 							t.Errorf("Error calling syncVolume: %v", err)
 							// Finish the loop on the first error
@@ -1114,7 +1114,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
 			}
 		}
 		evaluateTestResults(ctrl, reactor, test, t)
-		glog.V(4).Infof("test %q finished after %d iterations", test.name, counter)
+		klog.V(4).Infof("test %q finished after %d iterations", test.name, counter)
 	}
 }
@@ -1185,7 +1185,7 @@ func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (vol
 func (plugin *mockVolumePlugin) NewProvisioner(options vol.VolumeOptions) (vol.Provisioner, error) {
 	if len(plugin.provisionCalls) > 0 {
 		// mockVolumePlugin directly implements Provisioner interface
-		glog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner")
+		klog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner")
 		plugin.provisionOptions = options
 		return plugin, nil
 	} else {
@@ -1201,7 +1201,7 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
 	var pv *v1.PersistentVolume
 	call := plugin.provisionCalls[plugin.provisionCallCounter]
 	if !reflect.DeepEqual(call.expectedParameters, plugin.provisionOptions.Parameters) {
-		glog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters)
+		klog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters)
 		return nil, fmt.Errorf("Mock plugin error: invalid provisioner call")
 	}
 	if call.ret == nil {
@@ -1229,7 +1229,7 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
 	}
 	plugin.provisionCallCounter++
-	glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret)
+	klog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret)
 	return pv, call.ret
 }
@@ -1238,7 +1238,7 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
 func (plugin *mockVolumePlugin) NewDeleter(spec *vol.Spec) (vol.Deleter, error) {
 	if len(plugin.deleteCalls) > 0 {
 		// mockVolumePlugin directly implements Deleter interface
-		glog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter")
+		klog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter")
 		return plugin, nil
 	} else {
 		return nil, fmt.Errorf("Mock plugin error: no deleteCalls configured")
@@ -1251,7 +1251,7 @@ func (plugin *mockVolumePlugin) Delete() error {
 	}
 	ret := plugin.deleteCalls[plugin.deleteCallCounter]
 	plugin.deleteCallCounter++
-	glog.V(4).Infof("mock plugin Delete call nr. %d, returning %v", plugin.deleteCallCounter, ret)
+	klog.V(4).Infof("mock plugin Delete call nr. %d, returning %v", plugin.deleteCallCounter, ret)
 	return ret
 }
@@ -1277,6 +1277,6 @@ func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventReco
 	}
 	ret := plugin.recycleCalls[plugin.recycleCallCounter]
 	plugin.recycleCallCounter++
-	glog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret)
+	klog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret)
 	return ret
 }
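The commit message notes that some tests needed an explicit InitFlags() in their init() methods; a hypothetical sketch of that pattern (package placement and flag value are illustrative, not taken from this patch):

package persistentvolume // hypothetical placement, alongside framework_test.go

import (
	"flag"

	"k8s.io/klog"
)

func init() {
	// glog registered -v and friends in its own init(); klog does not,
	// so tests that rely on those flags must register them explicitly.
	klog.InitFlags(nil)
	flag.Set("logtostderr", "true") // illustrative default for test output
}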
*pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric) float64(number), namespace) if err != nil { - glog.Warningf("Create unbound pvc number metric failed: %v", err) + klog.Warningf("Create unbound pvc number metric failed: %v", err) continue } ch <- metric diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index e759c457e25bc..13d7a875e8df4 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -48,7 +48,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util/recyclerclient" - "github.com/golang/glog" + "k8s.io/klog" ) // ================================================================== @@ -238,7 +238,7 @@ type PersistentVolumeController struct { // For easier readability, it was split into syncUnboundClaim and syncBoundClaim // methods. func (ctrl *PersistentVolumeController) syncClaim(claim *v1.PersistentVolumeClaim) error { - glog.V(4).Infof("synchronizing PersistentVolumeClaim[%s]: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) + klog.V(4).Infof("synchronizing PersistentVolumeClaim[%s]: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) if !metav1.HasAnnotation(claim.ObjectMeta, annBindCompleted) { return ctrl.syncUnboundClaim(claim) @@ -330,11 +330,11 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol // [Unit test set 1] volume, err := ctrl.volumes.findBestMatchForClaim(claim, delayBinding) if err != nil { - glog.V(2).Infof("synchronizing unbound PersistentVolumeClaim[%s]: Error finding PV for claim: %v", claimToClaimKey(claim), err) + klog.V(2).Infof("synchronizing unbound PersistentVolumeClaim[%s]: Error finding PV for claim: %v", claimToClaimKey(claim), err) return fmt.Errorf("Error finding PV for claim %q: %v", claimToClaimKey(claim), err) } if volume == nil { - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim)) // No PV could be found // OBSERVATION: pvc is "Pending", will retry switch { @@ -358,7 +358,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol } else /* pv != nil */ { // Found a PV for this claim // OBSERVATION: pvc is "Pending", pv is "Available" - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), volume.Name, getVolumeStatusForLogging(volume)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), volume.Name, getVolumeStatusForLogging(volume)) if err = ctrl.bind(volume, claim); err != nil { // On any error saving the volume or the claim, subsequent // syncClaim will finish the binding. @@ -370,7 +370,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol } else /* pvc.Spec.VolumeName != nil */ { // [Unit test set 2] // User asked for a specific PV. 
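The hunks above and below swap glog.V(n) for klog.V(n) call-for-call, so the numeric verbosity gates are unchanged. As a minimal stand-alone sketch (not part of the patch; the package, flag value, and messages are illustrative, while klog.InitFlags, klog.Flush, and klog.V are the stock k8s.io/klog API), this is how the -v flag decides whether V(4) traces like the ones in syncUnboundClaim are emitted:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc. on flag.CommandLine
	flag.Set("v", "4")  // same effect as passing -v=4 on the command line
	flag.Parse()
	defer klog.Flush()

	klog.V(4).Infof("emitted: -v=4 enables V(4) and below")
	klog.V(5).Infof("suppressed: would need -v=5 or higher")
}

Run as-is this prints only the first message; raising the verbosity to 5 would enable the second one as well.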
- glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested", claimToClaimKey(claim), claim.Spec.VolumeName) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested", claimToClaimKey(claim), claim.Spec.VolumeName) obj, found, err := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName) if err != nil { return err @@ -379,7 +379,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol // User asked for a PV that does not exist. // OBSERVATION: pvc is "Pending" // Retry later. - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and not found, will try again next time", claimToClaimKey(claim), claim.Spec.VolumeName) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and not found, will try again next time", claimToClaimKey(claim), claim.Spec.VolumeName) if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil { return err } @@ -389,13 +389,13 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol if !ok { return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj) } - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) if volume.Spec.ClaimRef == nil { // User asked for a PV that is not claimed // OBSERVATION: pvc is "Pending", pv is "Available" - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume is unbound, binding", claimToClaimKey(claim)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume is unbound, binding", claimToClaimKey(claim)) if err = checkVolumeSatisfyClaim(volume, claim); err != nil { - glog.V(4).Infof("Can't bind the claim to volume %q: %v", volume.Name, err) + klog.V(4).Infof("Can't bind the claim to volume %q: %v", volume.Name, err) //send an event msg := fmt.Sprintf("Cannot bind to requested volume %q: %s", volume.Name, err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.VolumeMismatch, msg) @@ -413,7 +413,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol } else if isVolumeBoundToClaim(volume, claim) { // User asked for a PV that is claimed by this PVC // OBSERVATION: pvc is "Pending", pv is "Bound" - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound, finishing the binding", claimToClaimKey(claim)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound, finishing the binding", claimToClaimKey(claim)) // Finish the volume binding by adding claim UID. 
if err = ctrl.bind(volume, claim); err != nil { @@ -425,7 +425,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol // User asked for a PV that is claimed by someone else // OBSERVATION: pvc is "Pending", pv is "Bound" if !metav1.HasAnnotation(claim.ObjectMeta, annBoundByController) { - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim by user, will retry later", claimToClaimKey(claim)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim by user, will retry later", claimToClaimKey(claim)) // User asked for a specific PV, retry later if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil { return err @@ -434,7 +434,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol } else { // This should never happen because someone had to remove // annBindCompleted annotation on the claim. - glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim %q by controller, THIS SHOULD NEVER HAPPEN", claimToClaimKey(claim), claimrefToClaimKey(volume.Spec.ClaimRef)) + klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim %q by controller, THIS SHOULD NEVER HAPPEN", claimToClaimKey(claim), claimrefToClaimKey(volume.Spec.ClaimRef)) return fmt.Errorf("Invalid binding of claim %q to volume %q: volume already claimed by %q", claimToClaimKey(claim), claim.Spec.VolumeName, claimrefToClaimKey(volume.Spec.ClaimRef)) } } @@ -472,13 +472,13 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, obj) } - glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) + klog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) if volume.Spec.ClaimRef == nil { // Claim is bound but volume has come unbound. // Or, a claim was bound and the controller has not received updated // volume yet. We can't distinguish these cases. // Bind the volume again and set all states to Bound. - glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume is unbound, fixing", claimToClaimKey(claim)) + klog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume is unbound, fixing", claimToClaimKey(claim)) if err = ctrl.bind(volume, claim); err != nil { // Objects not saved, next syncPV or syncClaim will try again return err @@ -489,7 +489,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum // NOTE: syncPV can handle this so it can be left out. // NOTE: bind() call here will do nothing in most cases as // everything should be already set. - glog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: claim is already correctly bound", claimToClaimKey(claim)) + klog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: claim is already correctly bound", claimToClaimKey(claim)) if err = ctrl.bind(volume, claim); err != nil { // Objects not saved, next syncPV or syncClaim will try again return err @@ -512,12 +512,12 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum // created, updated or periodically synced. 
We do not differentiate between // these events. func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) error { - glog.V(4).Infof("synchronizing PersistentVolume[%s]: %s", volume.Name, getVolumeStatusForLogging(volume)) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: %s", volume.Name, getVolumeStatusForLogging(volume)) // [Unit test set 4] if volume.Spec.ClaimRef == nil { // Volume is unused - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is unused", volume.Name) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is unused", volume.Name) if _, err := ctrl.updateVolumePhase(volume, v1.VolumeAvailable, ""); err != nil { // Nothing was saved; we will fall back into the same // condition in the next call to this method @@ -529,7 +529,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) if volume.Spec.ClaimRef.UID == "" { // The PV is reserved for a PVC; that PVC has not yet been // bound to this PV; the PVC sync will handle it. - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is pre-bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is pre-bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) if _, err := ctrl.updateVolumePhase(volume, v1.VolumeAvailable, ""); err != nil { // Nothing was saved; we will fall back into the same // condition in the next call to this method @@ -537,7 +537,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) } return nil } - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) // Get the PVC by _name_ var claim *v1.PersistentVolumeClaim claimName := claimrefToClaimKey(volume.Spec.ClaimRef) @@ -570,7 +570,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) } } if !found { - glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s not found", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s not found", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) // Fall through with claim = nil } else { var ok bool @@ -578,12 +578,12 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) if !ok { return fmt.Errorf("Cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, obj) } - glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s found: %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef), getClaimStatusForLogging(claim)) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s found: %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef), getClaimStatusForLogging(claim)) } if claim != nil && claim.UID != volume.Spec.ClaimRef.UID { // The claim that the PV was pointing to was deleted, and another // with the same name created. - glog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s has different UID, the old one must have been deleted", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s has different UID, the old one must have been deleted", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) // Treat the volume as bound to a missing claim. 
claim = nil } @@ -598,7 +598,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) // volume. if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed { // Also, log this only once: - glog.V(2).Infof("volume %q is released and reclaim policy %q will be executed", volume.Name, volume.Spec.PersistentVolumeReclaimPolicy) + klog.V(2).Infof("volume %q is released and reclaim policy %q will be executed", volume.Name, volume.Spec.PersistentVolumeReclaimPolicy) if volume, err = ctrl.updateVolumePhase(volume, v1.VolumeReleased, ""); err != nil { // Nothing was saved; we will fall back into the same condition // in the next call to this method @@ -626,10 +626,10 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) if metav1.HasAnnotation(volume.ObjectMeta, annBoundByController) { // The binding is not completed; let PVC sync handle it - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume not bound yet, waiting for syncClaim to fix it", volume.Name) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume not bound yet, waiting for syncClaim to fix it", volume.Name) } else { // Dangling PV; try to re-establish the link in the PVC sync - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume was bound and got unbound (by user?), waiting for syncClaim to fix it", volume.Name) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume was bound and got unbound (by user?), waiting for syncClaim to fix it", volume.Name) } // In both cases, the volume is Bound and the claim is Pending. // Next syncClaim will fix it. To speed it up, we enqueue the claim @@ -642,7 +642,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) return nil } else if claim.Spec.VolumeName == volume.Name { // Volume is bound to a claim properly, update status if necessary - glog.V(4).Infof("synchronizing PersistentVolume[%s]: all is bound", volume.Name) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: all is bound", volume.Name) if _, err = ctrl.updateVolumePhase(volume, v1.VolumeBound, ""); err != nil { // Nothing was saved; we will fall back into the same // condition in the next call to this method @@ -659,7 +659,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) // the user know. Don't overwrite existing Failed status! if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed { // Also, log this only once: - glog.V(2).Infof("dynamically volume %q is released and it will be deleted", volume.Name) + klog.V(2).Infof("dynamically provisioned volume %q is released and it will be deleted", volume.Name) if volume, err = ctrl.updateVolumePhase(volume, v1.VolumeReleased, ""); err != nil { // Nothing was saved; we will fall back into the same condition // in the next call to this method @@ -679,14 +679,14 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) // This is part of the normal operation of the controller; the // controller tried to use this volume for a claim but the claim // was fulfilled by another volume. We did this; fix it.
- glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by controller to a claim that is bound to another volume, unbinding", volume.Name) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by controller to a claim that is bound to another volume, unbinding", volume.Name) if err = ctrl.unbindVolume(volume); err != nil { return err } return nil } else { // The PV must have been created with this ptr; leave it alone. - glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", volume.Name) + klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", volume.Name) // This just updates the volume phase and clears // volume.Spec.ClaimRef.UID. It leaves the volume pre-bound // to the claim. @@ -706,7 +706,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume) // phase - phase to set // volume - volume which Capacity is set into claim.Status.Capacity func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) { - glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s", claimToClaimKey(claim), phase) + klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s", claimToClaimKey(claim), phase) dirty := false @@ -751,21 +751,21 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo if !dirty { // Nothing to do. - glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: phase %s already set", claimToClaimKey(claim), phase) + klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: phase %s already set", claimToClaimKey(claim), phase) return claim, nil } newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone) if err != nil { - glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err) + klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err) return newClaim, err } _, err = ctrl.storeClaimUpdate(newClaim) if err != nil { - glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: cannot update internal cache: %v", claimToClaimKey(claim), err) + klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: cannot update internal cache: %v", claimToClaimKey(claim), err) return newClaim, err } - glog.V(2).Infof("claim %q entered phase %q", claimToClaimKey(claim), phase) + klog.V(2).Infof("claim %q entered phase %q", claimToClaimKey(claim), phase) return newClaim, nil } @@ -778,10 +778,10 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo // volume - volume which Capacity is set into claim.Status.Capacity // eventtype, reason, message - event to send, see EventRecorder.Event() func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume, eventtype, reason, message string) (*v1.PersistentVolumeClaim, error) { - glog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: set phase %s", claimToClaimKey(claim), phase) + klog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: set phase %s", claimToClaimKey(claim), phase) if 
claim.Status.Phase == phase { // Nothing to do. - glog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: phase %s already set", claimToClaimKey(claim), phase) + klog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: phase %s already set", claimToClaimKey(claim), phase) return claim, nil } @@ -792,7 +792,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(claim *v1.Per // Emit the event only when the status change happens, not every time // syncClaim is called. - glog.V(3).Infof("claim %q changed status to %q: %s", claimToClaimKey(claim), phase, message) + klog.V(3).Infof("claim %q changed status to %q: %s", claimToClaimKey(claim), phase, message) ctrl.eventRecorder.Event(newClaim, eventtype, reason, message) return newClaim, nil @@ -800,10 +800,10 @@ func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(claim *v1.Per // updateVolumePhase saves new volume phase to API server. func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentVolume, phase v1.PersistentVolumePhase, message string) (*v1.PersistentVolume, error) { - glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s", volume.Name, phase) + klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s", volume.Name, phase) if volume.Status.Phase == phase { // Nothing to do. - glog.V(4).Infof("updating PersistentVolume[%s]: phase %s already set", volume.Name, phase) + klog.V(4).Infof("updating PersistentVolume[%s]: phase %s already set", volume.Name, phase) return volume, nil } @@ -813,15 +813,15 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(volumeClone) if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err) + klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err) return newVol, err } _, err = ctrl.storeVolumeUpdate(newVol) if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err) + klog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err) return newVol, err } - glog.V(2).Infof("volume %q entered phase %q", volume.Name, phase) + klog.V(2).Infof("volume %q entered phase %q", volume.Name, phase) return newVol, err } @@ -829,10 +829,10 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV // given event on the volume. It saves the phase and emits the event only when // the phase has actually changed from the version saved in API server. func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.PersistentVolume, phase v1.PersistentVolumePhase, eventtype, reason, message string) (*v1.PersistentVolume, error) { - glog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: set phase %s", volume.Name, phase) + klog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: set phase %s", volume.Name, phase) if volume.Status.Phase == phase { // Nothing to do. - glog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: phase %s already set", volume.Name, phase) + klog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: phase %s already set", volume.Name, phase) return volume, nil } @@ -843,7 +843,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.Pe // Emit the event only when the status change happens, not every time // syncClaim is called. 
- glog.V(3).Infof("volume %q changed status to %q: %s", volume.Name, phase, message) + klog.V(3).Infof("volume %q changed status to %q: %s", volume.Name, phase, message) ctrl.eventRecorder.Event(newVol, eventtype, reason, message) return newVol, nil @@ -852,7 +852,7 @@ func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.Pe // bindVolumeToClaim modifies given volume to be bound to a claim and saves it to // API server. The claim is not modified in this method! func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) { - glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q", volume.Name, claimToClaimKey(claim)) + klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q", volume.Name, claimToClaimKey(claim)) volumeClone, dirty, err := ctrl.getBindVolumeToClaim(volume, claim) if err != nil { @@ -864,27 +864,27 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV return ctrl.updateBindVolumeToClaim(volumeClone, claim, true) } - glog.V(4).Infof("updating PersistentVolume[%s]: already bound to %q", volume.Name, claimToClaimKey(claim)) + klog.V(4).Infof("updating PersistentVolume[%s]: already bound to %q", volume.Name, claimToClaimKey(claim)) return volume, nil } // bindVolumeToClaim modifies given volume to be bound to a claim and saves it to // API server. The claim is not modified in this method! func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, claim *v1.PersistentVolumeClaim, updateCache bool) (*v1.PersistentVolume, error) { - glog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volumeClone.Name) + klog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volumeClone.Name) newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone) if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimToClaimKey(claim), err) + klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimToClaimKey(claim), err) return newVol, err } if updateCache { _, err = ctrl.storeVolumeUpdate(newVol) if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volumeClone.Name, err) + klog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volumeClone.Name, err) return newVol, err } } - glog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", newVol.Name, claimToClaimKey(claim)) + klog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", newVol.Name, claimToClaimKey(claim)) return newVol, nil } @@ -928,7 +928,7 @@ func (ctrl *PersistentVolumeController) getBindVolumeToClaim(volume *v1.Persiste // bindClaimToVolume modifies the given claim to be bound to a volume and // saves it to API server. The volume is not modified in this method! 
func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) { - glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q", claimToClaimKey(claim), volume.Name) + klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q", claimToClaimKey(claim), volume.Name) dirty := false @@ -960,22 +960,22 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo } if dirty { - glog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) + klog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone) if err != nil { - glog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err) + klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err) return newClaim, err } _, err = ctrl.storeClaimUpdate(newClaim) if err != nil { - glog.V(4).Infof("updating PersistentVolumeClaim[%s]: cannot update internal cache: %v", claimToClaimKey(claim), err) + klog.V(4).Infof("updating PersistentVolumeClaim[%s]: cannot update internal cache: %v", claimToClaimKey(claim), err) return newClaim, err } - glog.V(4).Infof("updating PersistentVolumeClaim[%s]: bound to %q", claimToClaimKey(claim), volume.Name) + klog.V(4).Infof("updating PersistentVolumeClaim[%s]: bound to %q", claimToClaimKey(claim), volume.Name) return newClaim, nil } - glog.V(4).Infof("updating PersistentVolumeClaim[%s]: already bound to %q", claimToClaimKey(claim), volume.Name) + klog.V(4).Infof("updating PersistentVolumeClaim[%s]: already bound to %q", claimToClaimKey(claim), volume.Name) return claim, nil } @@ -990,35 +990,35 @@ func (ctrl *PersistentVolumeController) bind(volume *v1.PersistentVolume, claim var updatedClaim *v1.PersistentVolumeClaim var updatedVolume *v1.PersistentVolume - glog.V(4).Infof("binding volume %q to claim %q", volume.Name, claimToClaimKey(claim)) + klog.V(4).Infof("binding volume %q to claim %q", volume.Name, claimToClaimKey(claim)) if updatedVolume, err = ctrl.bindVolumeToClaim(volume, claim); err != nil { - glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume: %v", volume.Name, claimToClaimKey(claim), err) + klog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume: %v", volume.Name, claimToClaimKey(claim), err) return err } volume = updatedVolume if updatedVolume, err = ctrl.updateVolumePhase(volume, v1.VolumeBound, ""); err != nil { - glog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume status: %v", volume.Name, claimToClaimKey(claim), err) + klog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume status: %v", volume.Name, claimToClaimKey(claim), err) return err } volume = updatedVolume if updatedClaim, err = ctrl.bindClaimToVolume(claim, volume); err != nil { - glog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim: %v", volume.Name, claimToClaimKey(claim), err) + klog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim: %v", volume.Name, claimToClaimKey(claim), err) return err } claim = updatedClaim if updatedClaim, err = ctrl.updateClaimStatus(claim, v1.ClaimBound, volume); err != nil { - glog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim status: %v", volume.Name, 
claimToClaimKey(claim), err) + klog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim status: %v", volume.Name, claimToClaimKey(claim), err) return err } claim = updatedClaim - glog.V(4).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) - glog.V(4).Infof("volume %q status after binding: %s", volume.Name, getVolumeStatusForLogging(volume)) - glog.V(4).Infof("claim %q status after binding: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) + klog.V(4).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) + klog.V(4).Infof("volume %q status after binding: %s", volume.Name, getVolumeStatusForLogging(volume)) + klog.V(4).Infof("claim %q status after binding: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) return nil } @@ -1029,7 +1029,7 @@ func (ctrl *PersistentVolumeController) bind(volume *v1.PersistentVolume, claim // It returns on first error, it's up to the caller to implement some retry // mechanism. func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume) error { - glog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + klog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) // Save the PV only when any modification is necessary. volumeClone := volume.DeepCopy() @@ -1050,15 +1050,15 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone) if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err) + klog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err) return err } _, err = ctrl.storeVolumeUpdate(newVol) if err != nil { - glog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err) + klog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err) return err } - glog.V(4).Infof("updating PersistentVolume[%s]: rolled back", newVol.Name) + klog.V(4).Infof("updating PersistentVolume[%s]: rolled back", newVol.Name) // Update the status _, err = ctrl.updateVolumePhase(newVol, v1.VolumeAvailable, "") @@ -1070,10 +1070,10 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolume) error { switch volume.Spec.PersistentVolumeReclaimPolicy { case v1.PersistentVolumeReclaimRetain: - glog.V(4).Infof("reclaimVolume[%s]: policy is Retain, nothing to do", volume.Name) + klog.V(4).Infof("reclaimVolume[%s]: policy is Retain, nothing to do", volume.Name) case v1.PersistentVolumeReclaimRecycle: - glog.V(4).Infof("reclaimVolume[%s]: policy is Recycle", volume.Name) + klog.V(4).Infof("reclaimVolume[%s]: policy is Recycle", volume.Name) opName := fmt.Sprintf("recycle-%s[%s]", volume.Name, string(volume.UID)) ctrl.scheduleOperation(opName, func() error { ctrl.recycleVolumeOperation(volume) @@ -1081,7 +1081,7 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolum }) case v1.PersistentVolumeReclaimDelete: - glog.V(4).Infof("reclaimVolume[%s]: policy is Delete", volume.Name) + klog.V(4).Infof("reclaimVolume[%s]: policy is Delete", volume.Name) opName := fmt.Sprintf("delete-%s[%s]", volume.Name, string(volume.UID)) startTime := time.Now() 
ctrl.scheduleOperation(opName, func() error { @@ -1103,33 +1103,33 @@ func (ctrl *PersistentVolumeController) reclaimVolum // recycleVolumeOperation recycles a volume. This method is // running in standalone goroutine and already has all necessary locks. func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.PersistentVolume) { - glog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name) + klog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name) // This method may have been waiting for a volume lock for some time. // Previous recycleVolumeOperation might just have saved an updated version, // so read current volume state now. newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) if err != nil { - glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) + klog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) return } needsReclaim, err := ctrl.isVolumeReleased(newVolume) if err != nil { - glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err) + klog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err) return } if !needsReclaim { - glog.V(3).Infof("volume %q no longer needs recycling, skipping", volume.Name) + klog.V(3).Infof("volume %q no longer needs recycling, skipping", volume.Name) return } pods, used, err := ctrl.isVolumeUsed(newVolume) if err != nil { - glog.V(3).Infof("can't recycle volume %q: %v", volume.Name, err) + klog.V(3).Infof("can't recycle volume %q: %v", volume.Name, err) return } if used { msg := fmt.Sprintf("Volume is used by pods: %s", strings.Join(pods, ",")) - glog.V(3).Infof("can't recycle volume %q: %s", volume.Name, msg) + klog.V(3).Infof("can't recycle volume %q: %s", volume.Name, msg) ctrl.eventRecorder.Event(volume, v1.EventTypeNormal, events.VolumeFailedRecycle, msg) return } @@ -1144,7 +1144,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis if err != nil { // No recycler found. Emit an event and mark the volume Failed.
if _, err = ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedRecycle, "No recycler plugin found for the volume!"); err != nil { - glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + klog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) // Save failed, retry on the next deletion attempt return } @@ -1160,7 +1160,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis // Recycler failed strerr := fmt.Sprintf("Recycle failed: %s", err) if _, err = ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedRecycle, strerr); err != nil { - glog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + klog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) // Save failed, retry on the next deletion attempt return } @@ -1169,7 +1169,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis return } - glog.V(2).Infof("volume %q recycled", volume.Name) + klog.V(2).Infof("volume %q recycled", volume.Name) // Send an event ctrl.eventRecorder.Event(volume, v1.EventTypeNormal, events.VolumeRecycled, "Volume recycled") // Make the volume available again @@ -1178,7 +1178,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis // recycle the volume again on next update. We _could_ maintain a cache // of "recently recycled volumes" and avoid unnecessary recycling, this // is left out as future optimization. - glog.V(3).Infof("recycleVolumeOperation [%s]: failed to make recycled volume 'Available' (%v), we will recycle the volume again", volume.Name, err) + klog.V(3).Infof("recycleVolumeOperation [%s]: failed to make recycled volume 'Available' (%v), we will recycle the volume again", volume.Name, err) return } return @@ -1187,30 +1187,30 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis // deleteVolumeOperation deletes a volume. This method is running in standalone // goroutine and already has all necessary locks. func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.PersistentVolume) (string, error) { - glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name) + klog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name) // This method may have been waiting for a volume lock for some time. // Previous deleteVolumeOperation might just have saved an updated version, so // read current volume state now. newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{}) if err != nil { - glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) + klog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) return "", nil } needsReclaim, err := ctrl.isVolumeReleased(newVolume) if err != nil { - glog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err) + klog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err) return "", nil } if !needsReclaim { - glog.V(3).Infof("volume %q no longer needs deletion, skipping", volume.Name) + klog.V(3).Infof("volume %q no longer needs deletion, skipping", volume.Name) return "", nil } pluginName, deleted, err := ctrl.doDeleteVolume(volume) if err != nil { // Delete failed, update the volume and emit an event. 
- glog.V(3).Infof("deletion of volume %q failed: %v", volume.Name, err) + klog.V(3).Infof("deletion of volume %q failed: %v", volume.Name, err) if vol.IsDeletedVolumeInUse(err) { // The plugin needs more time, don't mark the volume as Failed // and send Normal event only @@ -1219,7 +1219,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist // The plugin failed, mark the volume as Failed and send Warning // event if _, err := ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedDelete, err.Error()); err != nil { - glog.V(4).Infof("deleteVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + klog.V(4).Infof("deleteVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) // Save failed, retry on the next deletion attempt return pluginName, err } @@ -1234,14 +1234,14 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist return pluginName, nil } - glog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name) + klog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name) // Delete the volume if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(volume.Name, nil); err != nil { // Oops, could not delete the volume and therefore the controller will // try to delete the volume again on next update. We _could_ maintain a // cache of "recently deleted volumes" and avoid unnecessary deletion, // this is left out as future optimization. - glog.V(3).Infof("failed to delete volume %q from database: %v", volume.Name, err) + klog.V(3).Infof("failed to delete volume %q from database: %v", volume.Name, err) return pluginName, nil } return pluginName, nil @@ -1254,13 +1254,13 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *v1.PersistentVo // A volume needs reclaim if it has ClaimRef and appropriate claim does not // exist. if volume.Spec.ClaimRef == nil { - glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is nil", volume.Name) + klog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is nil", volume.Name) return false, nil } if volume.Spec.ClaimRef.UID == "" { // This is a volume bound by user and the controller has not finished // binding to the real claim yet. - glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is not bound", volume.Name) + klog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is not bound", volume.Name) return false, nil } @@ -1287,11 +1287,11 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *v1.PersistentVo return true, nil } - glog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is still valid, volume is not released", volume.Name) + klog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is still valid, volume is not released", volume.Name) return false, nil } - glog.V(2).Infof("isVolumeReleased[%s]: volume is released", volume.Name) + klog.V(2).Infof("isVolumeReleased[%s]: volume is released", volume.Name) return true, nil } @@ -1326,7 +1326,7 @@ func (ctrl *PersistentVolumeController) isVolumeUsed(pv *v1.PersistentVolume) ([ // 'false' when the volume cannot be deleted because of the deleter is external. No // error should be reported in this case. 
func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolume) (string, bool, error) { - glog.V(4).Infof("doDeleteVolume [%s]", volume.Name) + klog.V(4).Infof("doDeleteVolume [%s]", volume.Name) var err error plugin, err := ctrl.findDeletablePlugin(volume) @@ -1335,13 +1335,13 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu } if plugin == nil { // External deleter is requested, do nothing - glog.V(3).Infof("external deleter for volume %q requested, ignoring", volume.Name) + klog.V(3).Infof("external deleter for volume %q requested, ignoring", volume.Name) return "", false, nil } // Plugin found pluginName := plugin.GetPluginName() - glog.V(5).Infof("found a deleter plugin %q for volume %q", pluginName, volume.Name) + klog.V(5).Infof("found a deleter plugin %q for volume %q", pluginName, volume.Name) spec := vol.NewSpecFromPersistentVolume(volume, false) deleter, err := plugin.NewDeleter(spec) if err != nil { @@ -1357,7 +1357,7 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu return pluginName, false, err } - glog.V(2).Infof("volume %q deleted", volume.Name) + klog.V(2).Infof("volume %q deleted", volume.Name) return pluginName, true, nil } @@ -1367,7 +1367,7 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *v1.PersistentVolum if !ctrl.enableDynamicProvisioning { return nil } - glog.V(4).Infof("provisionClaim[%s]: started", claimToClaimKey(claim)) + klog.V(4).Infof("provisionClaim[%s]: started", claimToClaimKey(claim)) opName := fmt.Sprintf("provision-%s[%s]", claimToClaimKey(claim), string(claim.UID)) startTime := time.Now() ctrl.scheduleOperation(opName, func() error { @@ -1383,12 +1383,12 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *v1.PersistentVolum // standalone goroutine and already has all necessary locks. func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) (string, error) { claimClass := v1helper.GetPersistentVolumeClaimClass(claim) - glog.V(4).Infof("provisionClaimOperation [%s] started, class: %q", claimToClaimKey(claim), claimClass) + klog.V(4).Infof("provisionClaimOperation [%s] started, class: %q", claimToClaimKey(claim), claimClass) plugin, storageClass, err := ctrl.findProvisionablePlugin(claim) if err != nil { ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, err.Error()) - glog.V(2).Infof("error finding provisioning plugin for claim %s: %v", claimToClaimKey(claim), err) + klog.V(2).Infof("error finding provisioning plugin for claim %s: %v", claimToClaimKey(claim), err) // The controller will retry provisioning the volume in every // syncVolume() call. 
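provisionClaimOperation above keys everything off the claim's storage class, resolved through v1helper.GetPersistentVolumeClaimClass. For readers outside the tree, the resolution order can be sketched roughly as follows (claimClass is a hypothetical helper; the deprecated beta annotation key is taken from the upstream v1 helpers and, as an assumption stated here, still wins over the spec field for backward compatibility):

import v1 "k8s.io/api/core/v1"

// claimClass mirrors, roughly, how a claim's storage class is resolved:
// the deprecated beta annotation takes precedence over the spec field.
func claimClass(claim *v1.PersistentVolumeClaim) string {
	if class, found := claim.Annotations["volume.beta.kubernetes.io/storage-class"]; found {
		return class
	}
	if claim.Spec.StorageClassName != nil {
		return *claim.Spec.StorageClassName
	}
	return ""
}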
return "", err @@ -1403,7 +1403,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis newClaim, err := ctrl.setClaimProvisioner(claim, storageClass) if err != nil { // Save failed, the controller will retry in the next sync - glog.V(2).Infof("error saving claim %s: %v", claimToClaimKey(claim), err) + klog.V(2).Infof("error saving claim %s: %v", claimToClaimKey(claim), err) return pluginName, err } claim = newClaim @@ -1414,7 +1414,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis // and wait for the external provisioner msg := fmt.Sprintf("waiting for a volume to be created, either by external provisioner %q or manually created by system administrator", storageClass.Provisioner) ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.ExternalProvisioning, msg) - glog.V(3).Infof("provisioning claim %q: %s", claimToClaimKey(claim), msg) + klog.V(3).Infof("provisioning claim %q: %s", claimToClaimKey(claim), msg) return pluginName, nil } @@ -1428,7 +1428,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) if err == nil && volume != nil { // Volume has been already provisioned, nothing to do. - glog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim)) + klog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim)) return pluginName, err } @@ -1436,7 +1436,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis // provisioned) claimRef, err := ref.GetReference(scheme.Scheme, claim) if err != nil { - glog.V(3).Infof("unexpected error getting claim reference: %v", err) + klog.V(3).Infof("unexpected error getting claim reference: %v", err) return pluginName, err } @@ -1460,7 +1460,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis // of PV would be rejected by validation anyway if !plugin.SupportsMountOption() && len(options.MountOptions) > 0 { strerr := fmt.Sprintf("Mount options are not supported by the provisioner but StorageClass %q has mount options %v", storageClass.Name, options.MountOptions) - glog.V(2).Infof("Mount options are not supported by the provisioner but claim %q's StorageClass %q has mount options %v", claimToClaimKey(claim), storageClass.Name, options.MountOptions) + klog.V(2).Infof("Mount options are not supported by the provisioner but claim %q's StorageClass %q has mount options %v", claimToClaimKey(claim), storageClass.Name, options.MountOptions) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, fmt.Errorf("provisioner %q doesn't support mount options", plugin.GetPluginName()) } @@ -1469,7 +1469,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis provisioner, err := plugin.NewProvisioner(options) if err != nil { strerr := fmt.Sprintf("Failed to create provisioner: %v", err) - glog.V(2).Infof("failed to create provisioner for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) + klog.V(2).Infof("failed to create provisioner for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, err } @@ -1479,7 +1479,7 @@ func (ctrl *PersistentVolumeController) 
provisionClaimOperation(claim *v1.Persis selectedNode, err = ctrl.NodeLister.Get(nodeName) if err != nil { strerr := fmt.Sprintf("Failed to get target node: %v", err) - glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err) + klog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, err } @@ -1496,12 +1496,12 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis ctrl.rescheduleProvisioning(claim) strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err) - glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) + klog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, err } - glog.V(3).Infof("volume %q for claim %q created", volume.Name, claimToClaimKey(claim)) + klog.V(3).Infof("volume %q for claim %q created", volume.Name, claimToClaimKey(claim)) // Create Kubernetes PV object for the volume. if volume.Name == "" { @@ -1518,26 +1518,26 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis // Try to create the PV object several times for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { - glog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name) + klog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name) var newVol *v1.PersistentVolume if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(volume); err == nil || apierrs.IsAlreadyExists(err) { // Save succeeded. if err != nil { - glog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim)) + klog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim)) err = nil } else { - glog.V(3).Infof("volume %q for claim %q saved", volume.Name, claimToClaimKey(claim)) + klog.V(3).Infof("volume %q for claim %q saved", volume.Name, claimToClaimKey(claim)) _, updateErr := ctrl.storeVolumeUpdate(newVol) if updateErr != nil { // We will get an "volume added" event soon, this is not a big error - glog.V(4).Infof("provisionClaimOperation [%s]: cannot update internal cache: %v", volume.Name, updateErr) + klog.V(4).Infof("provisionClaimOperation [%s]: cannot update internal cache: %v", volume.Name, updateErr) } } break } // Save failed, try again after a while. - glog.V(3).Infof("failed to save volume %q for claim %q: %v", volume.Name, claimToClaimKey(claim), err) + klog.V(3).Infof("failed to save volume %q for claim %q: %v", volume.Name, claimToClaimKey(claim), err) time.Sleep(ctrl.createProvisionedPVInterval) } @@ -1547,7 +1547,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis // Emit some event here and try to delete the storage asset several // times. strerr := fmt.Sprintf("Error creating provisioned PV object for claim %s: %v. 
Deleting the volume.", claimToClaimKey(claim), err) - glog.V(3).Info(strerr) + klog.V(3).Info(strerr) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) var deleteErr error @@ -1556,18 +1556,18 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis _, deleted, deleteErr = ctrl.doDeleteVolume(volume) if deleteErr == nil && deleted { // Delete succeeded - glog.V(4).Infof("provisionClaimOperation [%s]: cleaning volume %s succeeded", claimToClaimKey(claim), volume.Name) + klog.V(4).Infof("provisionClaimOperation [%s]: cleaning volume %s succeeded", claimToClaimKey(claim), volume.Name) break } if !deleted { // This is unreachable code, the volume was provisioned by an // internal plugin and therefore there MUST be an internal // plugin that deletes it. - glog.Errorf("Error finding internal deleter for volume plugin %q", plugin.GetPluginName()) + klog.Errorf("Error finding internal deleter for volume plugin %q", plugin.GetPluginName()) break } // Delete failed, try again after a while. - glog.V(3).Infof("failed to delete volume %q: %v", volume.Name, deleteErr) + klog.V(3).Infof("failed to delete volume %q: %v", volume.Name, deleteErr) time.Sleep(ctrl.createProvisionedPVInterval) } @@ -1575,11 +1575,11 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis // Delete failed several times. There is an orphaned volume and there // is nothing we can do about it. strerr := fmt.Sprintf("Error cleaning provisioned volume for claim %s: %v. Please delete manually.", claimToClaimKey(claim), deleteErr) - glog.V(2).Info(strerr) + klog.V(2).Info(strerr) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningCleanupFailed, strerr) } } else { - glog.V(2).Infof("volume %q provisioned for claim %q", volume.Name, claimToClaimKey(claim)) + klog.V(2).Infof("volume %q provisioned for claim %q", volume.Name, claimToClaimKey(claim)) msg := fmt.Sprintf("Successfully provisioned volume %s using %s", volume.Name, plugin.GetPluginName()) ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.ProvisioningSucceeded, msg) } @@ -1600,12 +1600,12 @@ func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.Persist delete(newClaim.Annotations, annSelectedNode) // Try to update the PVC object if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(newClaim); err != nil { - glog.V(4).Infof("Failed to delete annotation 'annSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err) + klog.V(4).Infof("Failed to delete annotation 'annSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err) return } if _, err := ctrl.storeClaimUpdate(newClaim); err != nil { // We will get an "claim updated" event soon, this is not a big error - glog.V(4).Infof("Updating PersistentVolumeClaim %q: cannot update internal cache: %v", claimToClaimKey(newClaim), err) + klog.V(4).Infof("Updating PersistentVolumeClaim %q: cannot update internal cache: %v", claimToClaimKey(newClaim), err) } } @@ -1618,7 +1618,7 @@ func (ctrl *PersistentVolumeController) getProvisionedVolumeNameForClaim(claim * // scheduleOperation starts given asynchronous operation on given volume. It // makes sure the operation is already not running. 
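scheduleOperation, whose body follows, funnels every asynchronous recycle, delete, and provision operation through a GoRoutineMap, which runs at most one operation per name at a time. The same dispatch can be sketched like this (dispatch is a hypothetical name; goroutinemap and exponentialbackoff are the vendored packages this file already uses):

import (
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/util/goroutinemap"
	"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
)

// dispatch mirrors the error handling in scheduleOperation: a name that is
// already running is skipped, a backed-off name is postponed, anything else
// is a real scheduling error.
func dispatch(runningOps goroutinemap.GoRoutineMap, name string, op func() error) {
	switch err := runningOps.Run(name, op); {
	case err == nil:
		// operation started
	case goroutinemap.IsAlreadyExists(err):
		klog.V(4).Infof("operation %q is already running, skipping", name)
	case exponentialbackoff.IsExponentialBackoff(err):
		klog.V(4).Infof("operation %q postponed due to exponential backoff", name)
	default:
		klog.Errorf("error scheduling operation %q: %v", name, err)
	}
}

A map constructed with goroutinemap.NewGoRoutineMap(true) adds exponential backoff for operations that keep failing, which is what the second case reacts to.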
func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, operation func() error) { - glog.V(4).Infof("scheduleOperation[%s]", operationName) + klog.V(4).Infof("scheduleOperation[%s]", operationName) // Poke test code that an operation is just about to get started. if ctrl.preOperationHook != nil { @@ -1629,11 +1629,11 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, if err != nil { switch { case goroutinemap.IsAlreadyExists(err): - glog.V(4).Infof("operation %q is already running, skipping", operationName) + klog.V(4).Infof("operation %q is already running, skipping", operationName) case exponentialbackoff.IsExponentialBackoff(err): - glog.V(4).Infof("operation %q postponed due to exponential backoff", operationName) + klog.V(4).Infof("operation %q postponed due to exponential backoff", operationName) default: - glog.Errorf("error scheduling operation %q: %v", operationName, err) + klog.Errorf("error scheduling operation %q: %v", operationName, err) } } } diff --git a/pkg/controller/volume/persistentvolume/pv_controller_base.go b/pkg/controller/volume/persistentvolume/pv_controller_base.go index aaf97de0cb5e0..e83f975da34a5 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller_base.go +++ b/pkg/controller/volume/persistentvolume/pv_controller_base.go @@ -44,7 +44,7 @@ import ( "k8s.io/kubernetes/pkg/util/goroutinemap" vol "k8s.io/kubernetes/pkg/volume" - "github.com/golang/glog" + "k8s.io/klog" ) // This file contains the controller base functionality, i.e. framework to @@ -73,7 +73,7 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error) eventRecorder := p.EventRecorder if eventRecorder == nil { broadcaster := record.NewBroadcaster() - broadcaster.StartLogging(glog.Infof) + broadcaster.StartLogging(klog.Infof) broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: p.KubeClient.CoreV1().Events("")}) eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"}) } @@ -134,27 +134,27 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error) func (ctrl *PersistentVolumeController) initializeCaches(volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) { volumeList, err := volumeLister.List(labels.Everything()) if err != nil { - glog.Errorf("PersistentVolumeController can't initialize caches: %v", err) + klog.Errorf("PersistentVolumeController can't initialize caches: %v", err) return } for _, volume := range volumeList { volumeClone := volume.DeepCopy() if _, err = ctrl.storeVolumeUpdate(volumeClone); err != nil { - glog.Errorf("error updating volume cache: %v", err) + klog.Errorf("error updating volume cache: %v", err) } } claimList, err := claimLister.List(labels.Everything()) if err != nil { - glog.Errorf("PersistentVolumeController can't initialize caches: %v", err) + klog.Errorf("PersistentVolumeController can't initialize caches: %v", err) return } for _, claim := range claimList { if _, err = ctrl.storeClaimUpdate(claim.DeepCopy()); err != nil { - glog.Errorf("error updating claim cache: %v", err) + klog.Errorf("error updating claim cache: %v", err) } } - glog.V(4).Infof("controller initialized") + klog.V(4).Infof("controller initialized") } // enqueueWork adds volume or claim to given work queue. 
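The broadcaster.StartLogging(klog.Infof) swap in NewController above is a pure drop-in: StartLogging accepts any func(format string, args ...interface{}), and klog.Infof matches that signature exactly as glog.Infof did. A minimal sketch of the recorder wiring (newRecorder is a hypothetical name; the real constructor also calls StartRecordingToSink so events reach the API server):

import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
)

// newRecorder sketches the event-recorder wiring from NewController above;
// every recorded event is also mirrored into the klog log.
func newRecorder() record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(klog.Infof)
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})
}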
@@ -165,10 +165,10 @@ func (ctrl *PersistentVolumeController) enqueueWork(queue workqueue.Interface, o } objName, err := controller.KeyFunc(obj) if err != nil { - glog.Errorf("failed to get key from object: %v", err) + klog.Errorf("failed to get key from object: %v", err) return } - glog.V(5).Infof("enqueued %q for sync", objName) + klog.V(5).Infof("enqueued %q for sync", objName) queue.Add(objName) } @@ -187,7 +187,7 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume // is an old version. new, err := ctrl.storeVolumeUpdate(volume) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) } if !new { return @@ -198,9 +198,9 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume if errors.IsConflict(err) { // Version conflict error happens quite often and the controller // recovers from it easily. - glog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err) + klog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err) } else { - glog.Errorf("could not sync volume %q: %+v", volume.Name, err) + klog.Errorf("could not sync volume %q: %+v", volume.Name, err) } } } @@ -208,7 +208,7 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume // deleteVolume runs in worker thread and handles "volume deleted" event. func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume) { _ = ctrl.volumes.store.Delete(volume) - glog.V(4).Infof("volume %q deleted", volume.Name) + klog.V(4).Infof("volume %q deleted", volume.Name) if volume.Spec.ClaimRef == nil { return @@ -217,7 +217,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume // claim here in response to volume deletion prevents the claim from // waiting until the next sync period for its Lost status. claimKey := claimrefToClaimKey(volume.Spec.ClaimRef) - glog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey) + klog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey) ctrl.claimQueue.Add(claimKey) } @@ -228,7 +228,7 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl // an old version. new, err := ctrl.storeClaimUpdate(claim) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) } if !new { return @@ -238,9 +238,9 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl if errors.IsConflict(err) { // Version conflict error happens quite often and the controller // recovers from it easily. - glog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err) + klog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err) } else { - glog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err) + klog.Errorf("could not sync claim %q: %+v", claimToClaimKey(claim), err) } } } @@ -248,17 +248,17 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl // deleteClaim runs in worker thread and handles "claim deleted" event.
func (ctrl *PersistentVolumeController) deleteClaim(claim *v1.PersistentVolumeClaim) { _ = ctrl.claims.Delete(claim) - glog.V(4).Infof("claim %q deleted", claimToClaimKey(claim)) + klog.V(4).Infof("claim %q deleted", claimToClaimKey(claim)) volumeName := claim.Spec.VolumeName if volumeName == "" { - glog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim)) + klog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim)) return } // sync the volume when its claim is deleted. Explicitly sync'ing the // volume here in response to claim deletion prevents the volume from // waiting until the next sync period for its Release. - glog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName) + klog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName) ctrl.volumeQueue.Add(volumeName) } @@ -268,8 +268,8 @@ func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) { defer ctrl.claimQueue.ShutDown() defer ctrl.volumeQueue.ShutDown() - glog.Infof("Starting persistent volume controller") - defer glog.Infof("Shutting down persistent volume controller") + klog.Infof("Starting persistent volume controller") + defer klog.Infof("Shutting down persistent volume controller") if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) { return @@ -296,11 +296,11 @@ func (ctrl *PersistentVolumeController) volumeWorker() { } defer ctrl.volumeQueue.Done(keyObj) key := keyObj.(string) - glog.V(5).Infof("volumeWorker[%s]", key) + klog.V(5).Infof("volumeWorker[%s]", key) _, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - glog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err) + klog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err) return false } volume, err := ctrl.volumeLister.Get(name) @@ -311,7 +311,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() { return false } if !errors.IsNotFound(err) { - glog.V(2).Infof("error getting volume %q from informer: %v", key, err) + klog.V(2).Infof("error getting volume %q from informer: %v", key, err) return false } @@ -319,18 +319,18 @@ func (ctrl *PersistentVolumeController) volumeWorker() { // "delete" volumeObj, found, err := ctrl.volumes.store.GetByKey(key) if err != nil { - glog.V(2).Infof("error getting volume %q from cache: %v", key, err) + klog.V(2).Infof("error getting volume %q from cache: %v", key, err) return false } if !found { // The controller has already processed the delete event and // deleted the volume from its cache - glog.V(2).Infof("deletion of volume %q was already processed", key) + klog.V(2).Infof("deletion of volume %q was already processed", key) return false } volume, ok := volumeObj.(*v1.PersistentVolume) if !ok { - glog.Errorf("expected volume, got %+v", volumeObj) + klog.Errorf("expected volume, got %+v", volumeObj) return false } ctrl.deleteVolume(volume) @@ -338,7 +338,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() { } for { if quit := workFunc(); quit { - glog.Infof("volume worker queue shutting down") + klog.Infof("volume worker queue shutting down") return } } @@ -354,11 +354,11 @@ func (ctrl *PersistentVolumeController) claimWorker() { } defer ctrl.claimQueue.Done(keyObj) key := keyObj.(string) - glog.V(5).Infof("claimWorker[%s]", key) + klog.V(5).Infof("claimWorker[%s]", key) namespace, 
name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - glog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err) + klog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err) return false } claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name) @@ -369,25 +369,25 @@ func (ctrl *PersistentVolumeController) claimWorker() { return false } if !errors.IsNotFound(err) { - glog.V(2).Infof("error getting claim %q from informer: %v", key, err) + klog.V(2).Infof("error getting claim %q from informer: %v", key, err) return false } // The claim is not in informer cache, the event must have been "delete" claimObj, found, err := ctrl.claims.GetByKey(key) if err != nil { - glog.V(2).Infof("error getting claim %q from cache: %v", key, err) + klog.V(2).Infof("error getting claim %q from cache: %v", key, err) return false } if !found { // The controller has already processed the delete event and // deleted the claim from its cache - glog.V(2).Infof("deletion of claim %q was already processed", key) + klog.V(2).Infof("deletion of claim %q was already processed", key) return false } claim, ok := claimObj.(*v1.PersistentVolumeClaim) if !ok { - glog.Errorf("expected claim, got %+v", claimObj) + klog.Errorf("expected claim, got %+v", claimObj) return false } ctrl.deleteClaim(claim) @@ -395,7 +395,7 @@ func (ctrl *PersistentVolumeController) claimWorker() { } for { if quit := workFunc(); quit { - glog.Infof("claim worker queue shutting down") + klog.Infof("claim worker queue shutting down") return } } @@ -405,11 +405,11 @@ func (ctrl *PersistentVolumeController) claimWorker() { // all consumers of PV/PVC shared informer to have a short resync period, // therefore we do our own. func (ctrl *PersistentVolumeController) resync() { - glog.V(4).Infof("resyncing PV controller") + klog.V(4).Infof("resyncing PV controller") pvcs, err := ctrl.claimLister.List(labels.NewSelector()) if err != nil { - glog.Warningf("cannot list claims: %s", err) + klog.Warningf("cannot list claims: %s", err) return } for _, pvc := range pvcs { @@ -418,7 +418,7 @@ func (ctrl *PersistentVolumeController) resync() { pvs, err := ctrl.volumeLister.List(labels.NewSelector()) if err != nil { - glog.Warningf("cannot list persistent volumes: %s", err) + klog.Warningf("cannot list persistent volumes: %s", err) return } for _, pv := range pvs { @@ -504,7 +504,7 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo if !found { // This is a new object - glog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion()) + klog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion()) if err = store.Add(obj); err != nil { return false, fmt.Errorf("Error adding %s %q to controller cache: %v", className, objName, err) } @@ -528,11 +528,11 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo // Throw away only older version, let the same version pass - we do want to // get periodic sync events. 
if oldObjResourceVersion > objResourceVersion { - glog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion()) + klog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion()) return false, nil } - glog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion()) + klog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion()) if err = store.Update(obj); err != nil { return false, fmt.Errorf("Error updating %s %q in controller cache: %v", className, objName, err) } diff --git a/pkg/controller/volume/persistentvolume/pv_controller_test.go b/pkg/controller/volume/persistentvolume/pv_controller_test.go index cc37287967482..5f1c5ad761860 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller_test.go +++ b/pkg/controller/volume/persistentvolume/pv_controller_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -96,7 +96,7 @@ func TestControllerSync(t *testing.T) { } for _, test := range tests { - glog.V(4).Infof("starting test %q", test.name) + klog.V(4).Infof("starting test %q", test.name) // Initialize the controller client := &fake.Clientset{} @@ -140,7 +140,7 @@ func TestControllerSync(t *testing.T) { time.Sleep(10 * time.Millisecond) } - glog.V(4).Infof("controller synced, starting test") + klog.V(4).Infof("controller synced, starting test") // Call the tested function err = test.test(ctrl, reactor, test) diff --git a/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go b/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go index 0e4ade0faf1a6..292ee8f6ff2cd 100644 --- a/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go +++ b/pkg/controller/volume/persistentvolume/scheduler_assume_cache.go @@ -21,7 +21,7 @@ import ( "strconv" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -152,7 +152,7 @@ func (c *assumeCache) add(obj interface{}) { name, err := cache.MetaNamespaceKeyFunc(obj) if err != nil { - glog.Errorf("add failed: %v", &errObjectName{err}) + klog.Errorf("add failed: %v", &errObjectName{err}) return } @@ -162,27 +162,27 @@ func (c *assumeCache) add(obj interface{}) { if objInfo, _ := c.getObjInfo(name); objInfo != nil { newVersion, err := c.getObjVersion(name, obj) if err != nil { - glog.Errorf("add: couldn't get object version: %v", err) + klog.Errorf("add: couldn't get object version: %v", err) return } storedVersion, err := c.getObjVersion(name, objInfo.latestObj) if err != nil { - glog.Errorf("add: couldn't get stored object version: %v", err) + klog.Errorf("add: couldn't get stored object version: %v", err) return } // Only update object if version is newer. // This is so we don't override assumed objects due to informer resync. 
if newVersion <= storedVersion { - glog.V(10).Infof("Skip adding %v %v to assume cache because version %v is not newer than %v", c.description, name, newVersion, storedVersion) + klog.V(10).Infof("Skip adding %v %v to assume cache because version %v is not newer than %v", c.description, name, newVersion, storedVersion) return } } objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj} c.store.Update(objInfo) - glog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj) + klog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj) } func (c *assumeCache) update(oldObj interface{}, newObj interface{}) { @@ -196,7 +196,7 @@ func (c *assumeCache) delete(obj interface{}) { name, err := cache.MetaNamespaceKeyFunc(obj) if err != nil { - glog.Errorf("delete failed: %v", &errObjectName{err}) + klog.Errorf("delete failed: %v", &errObjectName{err}) return } @@ -206,7 +206,7 @@ func (c *assumeCache) delete(obj interface{}) { objInfo := &objInfo{name: name} err = c.store.Delete(objInfo) if err != nil { - glog.Errorf("delete: failed to delete %v %v: %v", c.description, name, err) + klog.Errorf("delete: failed to delete %v %v: %v", c.description, name, err) } } @@ -257,14 +257,14 @@ func (c *assumeCache) List(indexObj interface{}) []interface{} { allObjs := []interface{}{} objs, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj}) if err != nil { - glog.Errorf("list index error: %v", err) + klog.Errorf("list index error: %v", err) return nil } for _, obj := range objs { objInfo, ok := obj.(*objInfo) if !ok { - glog.Errorf("list error: %v", &errWrongType{"objInfo", obj}) + klog.Errorf("list error: %v", &errWrongType{"objInfo", obj}) continue } allObjs = append(allObjs, objInfo.latestObj) @@ -302,7 +302,7 @@ func (c *assumeCache) Assume(obj interface{}) error { // Only update the cached object objInfo.latestObj = obj - glog.V(4).Infof("Assumed %v %q, version %v", c.description, name, newVersion) + klog.V(4).Infof("Assumed %v %q, version %v", c.description, name, newVersion) return nil } @@ -313,10 +313,10 @@ func (c *assumeCache) Restore(objName string) { objInfo, err := c.getObjInfo(objName) if err != nil { // This could be expected if object got deleted - glog.V(5).Infof("Restore %v %q warning: %v", c.description, objName, err) + klog.V(5).Infof("Restore %v %q warning: %v", c.description, objName, err) } else { objInfo.latestObj = objInfo.apiObj - glog.V(4).Infof("Restored %v %q", c.description, objName) + klog.V(4).Infof("Restored %v %q", c.description, objName) } } @@ -366,7 +366,7 @@ func (c *pvAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume for _, obj := range objs { pv, ok := obj.(*v1.PersistentVolume) if !ok { - glog.Errorf("ListPVs: %v", &errWrongType{"v1.PersistentVolume", obj}) + klog.Errorf("ListPVs: %v", &errWrongType{"v1.PersistentVolume", obj}) } pvs = append(pvs, pv) } diff --git a/pkg/controller/volume/persistentvolume/scheduler_binder.go b/pkg/controller/volume/persistentvolume/scheduler_binder.go index 8ef8b2adc1a07..85fd71cf4ebec 100644 --- a/pkg/controller/volume/persistentvolume/scheduler_binder.go +++ b/pkg/controller/volume/persistentvolume/scheduler_binder.go @@ -21,7 +21,7 @@ import ( "sort" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -144,7 +144,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume podName := getPodName(pod) // Warning: Below log needs high verbosity as it can be printed several 
times (#60933). - glog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name) + klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name) // Initialize to true for pods that don't have volumes unboundVolumesSatisfied = true @@ -204,7 +204,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error) { podName := getPodName(assumedPod) - glog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName) + klog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName) start := time.Now() defer func() { VolumeSchedulingStageLatency.WithLabelValues("assume").Observe(time.Since(start).Seconds()) @@ -214,7 +214,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al }() if allBound := b.arePodVolumesBound(assumedPod); allBound { - glog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName) + klog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName) return true, nil } @@ -227,7 +227,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al newBindings := []*bindingInfo{} for _, binding := range claimsToBind { newPV, dirty, err := b.ctrl.getBindVolumeToClaim(binding.pv, binding.pvc) - glog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v", + klog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v", podName, binding.pv.Name, binding.pvc.Name, @@ -280,7 +280,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al // by the PV controller. func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) (err error) { podName := getPodName(assumedPod) - glog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName) + klog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName) start := time.Now() defer func() { @@ -346,7 +346,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl // Do the actual prebinding. Let the PV controller take care of the rest // There is no API rollback if the actual binding fails for _, binding = range bindings { - glog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name) + klog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name) // TODO: does it hurt if we make an api call and nothing needs to be updated? if _, err := b.ctrl.updateBindVolumeToClaim(binding.pv, binding.pvc, false); err != nil { return err @@ -357,7 +357,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl // Update claims objects to trigger volume provisioning. 
Let the PV controller take care of the rest // PV controller is expected to signal back by removing related annotations if actual provisioning fails for _, claim = range claimsToProvision { - glog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim)) + klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim)) if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil { return err } @@ -426,7 +426,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claim } // All pvs and pvcs that we operated on are bound - glog.V(4).Infof("All PVCs for pod %q are bound", podName) + klog.V(4).Infof("All PVCs for pod %q are bound", podName) return true, nil } @@ -455,15 +455,15 @@ func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.Persiste pvName := pvc.Spec.VolumeName if pvName != "" { if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) { - glog.V(5).Infof("PVC %q is fully bound to PV %q", pvcKey, pvName) + klog.V(5).Infof("PVC %q is fully bound to PV %q", pvcKey, pvName) return true, pvc, nil } else { - glog.V(5).Infof("PVC %q is not fully bound to PV %q", pvcKey, pvName) + klog.V(5).Infof("PVC %q is not fully bound to PV %q", pvcKey, pvName) return false, pvc, nil } } - glog.V(5).Infof("PVC %q is not bound", pvcKey) + klog.V(5).Infof("PVC %q is not bound", pvcKey) return false, pvc, nil } @@ -523,13 +523,13 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node err = volumeutil.CheckNodeAffinity(pv, node.Labels) if err != nil { - glog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err) + klog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err) return false, nil } - glog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName) + klog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName) } - glog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name) + klog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name) return true, nil } @@ -561,7 +561,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI return false, nil, err } if bindingInfo.pv == nil { - glog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, pvcName, node.Name) + klog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, pvcName, node.Name) unboundClaims = append(unboundClaims, bindingInfo.pvc) foundMatches = false continue @@ -570,7 +570,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI // matching PV needs to be excluded so we don't select it again chosenPVs[bindingInfo.pv.Name] = bindingInfo.pv matchedClaims = append(matchedClaims, bindingInfo) - glog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, pvcName, node.Name, podName) + klog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, pvcName, node.Name, podName) } // Mark cache with all the matches for each PVC for this node @@ -579,7 +579,7 @@ } if foundMatches { - glog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name) + klog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name) } return @@
-605,13 +605,13 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v } provisioner := class.Provisioner if provisioner == "" || provisioner == notSupportedProvisioner { - glog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName) + klog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName) return false, nil } // Check if the node can satisfy the topology requirement in the class if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) { - glog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, pvcName) + klog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, pvcName) return false, nil } @@ -621,7 +621,7 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v provisionedClaims = append(provisionedClaims, claim) } - glog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name) + klog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name) // Mark cache with all the PVCs that need provisioning for this node b.podBindingCache.UpdateProvisionedPVCs(pod, node.Name, provisionedClaims) diff --git a/pkg/controller/volume/persistentvolume/scheduler_binder_test.go b/pkg/controller/volume/persistentvolume/scheduler_binder_test.go index 8b669caa5b1a4..b5b9f14d7f013 100644 --- a/pkg/controller/volume/persistentvolume/scheduler_binder_test.go +++ b/pkg/controller/volume/persistentvolume/scheduler_binder_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -742,7 +742,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) { } for name, scenario := range scenarios { - glog.V(5).Infof("Running test case %q", name) + klog.V(5).Infof("Running test case %q", name) // Setup testEnv := newTestBinder(t) @@ -964,7 +964,7 @@ func TestAssumePodVolumes(t *testing.T) { } for name, scenario := range scenarios { - glog.V(5).Infof("Running test case %q", name) + klog.V(5).Infof("Running test case %q", name) // Setup testEnv := newTestBinder(t) @@ -1094,7 +1094,7 @@ func TestBindAPIUpdate(t *testing.T) { }, } for name, scenario := range scenarios { - glog.V(4).Infof("Running test case %q", name) + klog.V(4).Infof("Running test case %q", name) // Setup testEnv := newTestBinder(t) @@ -1253,7 +1253,7 @@ func TestCheckBindings(t *testing.T) { } for name, scenario := range scenarios { - glog.V(4).Infof("Running test case %q", name) + klog.V(4).Infof("Running test case %q", name) // Setup pod := makePod(nil) @@ -1386,7 +1386,7 @@ func TestBindPodVolumes(t *testing.T) { } for name, scenario := range scenarios { - glog.V(4).Infof("Running test case %q", name) + klog.V(4).Infof("Running test case %q", name) // Setup pod := makePod(nil) @@ -1407,7 +1407,7 @@ func TestBindPodVolumes(t *testing.T) { if scenario.delayFunc != nil { go func() { time.Sleep(5 * time.Second) - glog.V(5).Infof("Running delay function") + klog.V(5).Infof("Running delay function") scenario.delayFunc(t, testEnv, pod, scenario.binding.pv, scenario.binding.pvc) }() } diff --git a/pkg/controller/volume/persistentvolume/volume_host.go b/pkg/controller/volume/persistentvolume/volume_host.go index 298d5737a6283..6e4fc3fdd38b4 100644 --- 
a/pkg/controller/volume/persistentvolume/volume_host.go +++ b/pkg/controller/volume/persistentvolume/volume_host.go @@ -20,7 +20,6 @@ import ( "fmt" "net" - "github.com/golang/glog" authenticationv1 "k8s.io/api/authentication/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -28,6 +27,7 @@ import ( "k8s.io/client-go/tools/record" cloudprovider "k8s.io/cloud-provider" csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" vol "k8s.io/kubernetes/pkg/volume" ) @@ -112,7 +112,7 @@ func (ctrl *PersistentVolumeController) GetServiceAccountTokenFunc() func(_, _ s func (ctrl *PersistentVolumeController) DeleteServiceAccountTokenFunc() func(types.UID) { return func(types.UID) { - glog.Errorf("DeleteServiceAccountToken unsupported in PersistentVolumeController") + klog.Errorf("DeleteServiceAccountToken unsupported in PersistentVolumeController") } } diff --git a/pkg/controller/volume/pvcprotection/BUILD b/pkg/controller/volume/pvcprotection/BUILD index 363aac9dbcf3f..041d1fb4b9517 100644 --- a/pkg/controller/volume/pvcprotection/BUILD +++ b/pkg/controller/volume/pvcprotection/BUILD @@ -20,7 +20,7 @@ go_library( "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -41,7 +41,7 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go index 8de612fffdfce..a832476c55233 100644 --- a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go +++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go @@ -20,7 +20,6 @@ import ( "fmt" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" @@ -31,6 +30,7 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/slice" @@ -96,8 +96,8 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() - glog.Infof("Starting PVC protection controller") - defer glog.Infof("Shutting down PVC protection controller") + klog.Infof("Starting PVC protection controller") + defer klog.Infof("Shutting down PVC protection controller") if !controller.WaitForCacheSync("PVC protection", stopCh, c.pvcListerSynced, c.podListerSynced) { return @@ -142,15 +142,15 @@ func (c *Controller) processNextWorkItem() bool { } func (c *Controller) processPVC(pvcNamespace, pvcName string) error { - glog.V(4).Infof("Processing PVC %s/%s", pvcNamespace, pvcName) + klog.V(4).Infof("Processing PVC %s/%s", pvcNamespace, pvcName) startTime := time.Now() defer func() { - glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Since(startTime)) + klog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, 
time.Since(startTime)) }() pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName) if apierrs.IsNotFound(err) { - glog.V(4).Infof("PVC %s/%s not found, ignoring", pvcNamespace, pvcName) + klog.V(4).Infof("PVC %s/%s not found, ignoring", pvcNamespace, pvcName) return nil } if err != nil { @@ -188,10 +188,10 @@ func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error { claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer) _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone) if err != nil { - glog.V(3).Infof("Error adding protection finalizer to PVC %s/%s: %v", pvc.Namespace, pvc.Name, err) + klog.V(3).Infof("Error adding protection finalizer to PVC %s/%s: %v", pvc.Namespace, pvc.Name, err) return err } - glog.V(3).Infof("Added protection finalizer to PVC %s/%s", pvc.Namespace, pvc.Name) + klog.V(3).Infof("Added protection finalizer to PVC %s/%s", pvc.Namespace, pvc.Name) return nil } @@ -200,10 +200,10 @@ func (c *Controller) removeFinalizer(pvc *v1.PersistentVolumeClaim) error { claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil) _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone) if err != nil { - glog.V(3).Infof("Error removing protection finalizer from PVC %s/%s: %v", pvc.Namespace, pvc.Name, err) + klog.V(3).Infof("Error removing protection finalizer from PVC %s/%s: %v", pvc.Namespace, pvc.Name, err) return err } - glog.V(3).Infof("Removed protection finalizer from PVC %s/%s", pvc.Namespace, pvc.Name) + klog.V(3).Infof("Removed protection finalizer from PVC %s/%s", pvc.Namespace, pvc.Name) return nil } @@ -218,7 +218,7 @@ func (c *Controller) isBeingUsed(pvc *v1.PersistentVolumeClaim) (bool, error) { // prevents scheduling pods with deletion timestamp, so we can be // pretty sure it won't be scheduled in parallel to this check. // Therefore this pod does not block the PVC from deletion. 
- glog.V(4).Infof("Skipping unscheduled pod %s when checking PVC %s/%s", pod.Name, pvc.Namespace, pvc.Name) + klog.V(4).Infof("Skipping unscheduled pod %s when checking PVC %s/%s", pod.Name, pvc.Namespace, pvc.Name) continue } for _, volume := range pod.Spec.Volumes { @@ -226,13 +226,13 @@ func (c *Controller) isBeingUsed(pvc *v1.PersistentVolumeClaim) (bool, error) { continue } if volume.PersistentVolumeClaim.ClaimName == pvc.Name { - glog.V(2).Infof("Keeping PVC %s/%s, it is used by pod %s/%s", pvc.Namespace, pvc.Name, pod.Namespace, pod.Name) + klog.V(2).Infof("Keeping PVC %s/%s, it is used by pod %s/%s", pvc.Namespace, pvc.Name, pod.Namespace, pod.Name) return true, nil } } } - glog.V(3).Infof("PVC %s/%s is unused", pvc.Namespace, pvc.Name) + klog.V(3).Infof("PVC %s/%s is unused", pvc.Namespace, pvc.Name) return false, nil } @@ -248,7 +248,7 @@ func (c *Controller) pvcAddedUpdated(obj interface{}) { utilruntime.HandleError(fmt.Errorf("Couldn't get key for Persistent Volume Claim %#v: %v", pvc, err)) return } - glog.V(4).Infof("Got event on PVC %s", key) + klog.V(4).Infof("Got event on PVC %s", key) if needToAddFinalizer(pvc) || isDeletionCandidate(pvc) { c.queue.Add(key) @@ -276,7 +276,7 @@ func (c *Controller) podAddedDeletedUpdated(obj interface{}, deleted bool) { return } - glog.V(4).Infof("Got event on pod %s/%s", pod.Namespace, pod.Name) + klog.V(4).Infof("Got event on pod %s/%s", pod.Namespace, pod.Name) // Enqueue all PVCs that the pod uses for _, volume := range pod.Spec.Volumes { diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller_test.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller_test.go index 288be8f20bfb2..c8eb87bf6522b 100644 --- a/pkg/controller/volume/pvcprotection/pvc_protection_controller_test.go +++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller_test.go @@ -23,7 +23,7 @@ import ( "time" "github.com/davecgh/go-spew/spew" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -374,7 +374,7 @@ func TestPVCProtectionController(t *testing.T) { break } if ctrl.queue.Len() > 0 { - glog.V(5).Infof("Test %q: %d events queue, processing one", test.name, ctrl.queue.Len()) + klog.V(5).Infof("Test %q: %d events queue, processing one", test.name, ctrl.queue.Len()) ctrl.processNextWorkItem() } if ctrl.queue.Len() > 0 { @@ -385,7 +385,7 @@ func TestPVCProtectionController(t *testing.T) { if currentActionCount < len(test.expectedActions) { // Do not log evey wait, only when the action count changes. if lastReportedActionCount < currentActionCount { - glog.V(5).Infof("Test %q: got %d actions out of %d, waiting for the rest", test.name, currentActionCount, len(test.expectedActions)) + klog.V(5).Infof("Test %q: got %d actions out of %d, waiting for the rest", test.name, currentActionCount, len(test.expectedActions)) lastReportedActionCount = currentActionCount } // The test expected more to happen, wait for the actions. 
diff --git a/pkg/controller/volume/pvprotection/BUILD b/pkg/controller/volume/pvprotection/BUILD index a19318bd0a368..cdea877177098 100644 --- a/pkg/controller/volume/pvprotection/BUILD +++ b/pkg/controller/volume/pvprotection/BUILD @@ -19,7 +19,7 @@ go_library( "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -54,6 +54,6 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller.go b/pkg/controller/volume/pvprotection/pv_protection_controller.go index bc19eb5e52763..d14a7a06d1a07 100644 --- a/pkg/controller/volume/pvprotection/pv_protection_controller.go +++ b/pkg/controller/volume/pvprotection/pv_protection_controller.go @@ -20,7 +20,6 @@ import ( "fmt" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -30,6 +29,7 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/slice" @@ -78,8 +78,8 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() - glog.Infof("Starting PV protection controller") - defer glog.Infof("Shutting down PV protection controller") + klog.Infof("Starting PV protection controller") + defer klog.Infof("Shutting down PV protection controller") if !controller.WaitForCacheSync("PV protection", stopCh, c.pvListerSynced) { return @@ -120,15 +120,15 @@ func (c *Controller) processNextWorkItem() bool { } func (c *Controller) processPV(pvName string) error { - glog.V(4).Infof("Processing PV %s", pvName) + klog.V(4).Infof("Processing PV %s", pvName) startTime := time.Now() defer func() { - glog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Since(startTime)) + klog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Since(startTime)) }() pv, err := c.pvLister.Get(pvName) if apierrs.IsNotFound(err) { - glog.V(4).Infof("PV %s not found, ignoring", pvName) + klog.V(4).Infof("PV %s not found, ignoring", pvName) return nil } if err != nil { @@ -163,10 +163,10 @@ func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error { pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer) _, err := c.client.CoreV1().PersistentVolumes().Update(pvClone) if err != nil { - glog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err) + klog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err) return err } - glog.V(3).Infof("Added protection finalizer to PV %s", pv.Name) + klog.V(3).Infof("Added protection finalizer to PV %s", pv.Name) return nil } @@ -175,10 +175,10 @@ func (c *Controller) removeFinalizer(pv *v1.PersistentVolume) error { pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil) _, err 
:= c.client.CoreV1().PersistentVolumes().Update(pvClone) if err != nil { - glog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err) + klog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err) return err } - glog.V(3).Infof("Removed protection finalizer from PV %s", pv.Name) + klog.V(3).Infof("Removed protection finalizer from PV %s", pv.Name) return nil } @@ -200,7 +200,7 @@ func (c *Controller) pvAddedUpdated(obj interface{}) { utilruntime.HandleError(fmt.Errorf("PV informer returned non-PV object: %#v", obj)) return } - glog.V(4).Infof("Got event on PV %s", pv.Name) + klog.V(4).Infof("Got event on PV %s", pv.Name) if needToAddFinalizer(pv) || isDeletionCandidate(pv) { c.queue.Add(pv.Name) diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller_test.go b/pkg/controller/volume/pvprotection/pv_protection_controller_test.go index ba7b342a3d837..75caba4dab19b 100644 --- a/pkg/controller/volume/pvprotection/pv_protection_controller_test.go +++ b/pkg/controller/volume/pvprotection/pv_protection_controller_test.go @@ -23,7 +23,7 @@ import ( "time" "github.com/davecgh/go-spew/spew" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -246,7 +246,7 @@ func TestPVProtectionController(t *testing.T) { break } if ctrl.queue.Len() > 0 { - glog.V(5).Infof("Test %q: %d events queue, processing one", test.name, ctrl.queue.Len()) + klog.V(5).Infof("Test %q: %d events queue, processing one", test.name, ctrl.queue.Len()) ctrl.processNextWorkItem() } if ctrl.queue.Len() > 0 { @@ -257,7 +257,7 @@ if currentActionCount < len(test.expectedActions) { // Do not log every wait, only when the action count changes. if lastReportedActionCount < currentActionCount { - glog.V(5).Infof("Test %q: got %d actions out of %d, waiting for the rest", test.name, currentActionCount, len(test.expectedActions)) + klog.V(5).Infof("Test %q: got %d actions out of %d, waiting for the rest", test.name, currentActionCount, len(test.expectedActions)) lastReportedActionCount = currentActionCount } // The test expected more to happen, wait for the actions.
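Aside: processPV and processPVC above share an entry/exit idiom that the rename leaves intact: a V(4) line on entry, then a deferred V(4) line reporting the elapsed wall time. A standalone sketch of the same pattern (processItem is a hypothetical stand-in for the controller methods):

package main

import (
	"flag"
	"time"

	"k8s.io/klog"
)

// processItem logs on entry and defers a matching exit line that includes
// the measured duration, mirroring processPV/processPVC in the hunks above.
func processItem(name string) {
	klog.V(4).Infof("Processing %s", name)
	startTime := time.Now()
	defer func() {
		klog.V(4).Infof("Finished processing %s (%v)", name, time.Since(startTime))
	}()
	time.Sleep(10 * time.Millisecond) // stand-in for the real reconcile work
}

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	processItem("example-pv")
	klog.Flush()
}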
diff --git a/pkg/credentialprovider/BUILD b/pkg/credentialprovider/BUILD index f83f75be26baf..0dfa2dfdc59c2 100644 --- a/pkg/credentialprovider/BUILD +++ b/pkg/credentialprovider/BUILD @@ -18,7 +18,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/credentialprovider", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/credentialprovider/aws/BUILD b/pkg/credentialprovider/aws/BUILD index 1985724355887..5161307a71231 100644 --- a/pkg/credentialprovider/aws/BUILD +++ b/pkg/credentialprovider/aws/BUILD @@ -16,7 +16,7 @@ go_library( "//vendor/github.com/aws/aws-sdk-go/aws/request:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library", "//vendor/github.com/aws/aws-sdk-go/service/ecr:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/credentialprovider/aws/aws_credentials.go b/pkg/credentialprovider/aws/aws_credentials.go index 862c2c62c6f09..89869e76eaa69 100644 --- a/pkg/credentialprovider/aws/aws_credentials.go +++ b/pkg/credentialprovider/aws/aws_credentials.go @@ -26,7 +26,7 @@ import ( "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ecr" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/credentialprovider" ) @@ -47,7 +47,7 @@ func awsHandlerLogger(req *request.Request) { name = req.Operation.Name } - glog.V(3).Infof("AWS request: %s:%s in %s", service, name, *region) + klog.V(3).Infof("AWS request: %s:%s in %s", service, name, *region) } // An interface for testing purposes. @@ -101,7 +101,7 @@ func registryURL(region string) string { // This should be called only if using the AWS cloud provider. // This way, we avoid timeouts waiting for a non-existent provider. func RegisterCredentialsProvider(region string) { - glog.V(4).Infof("registering credentials provider for AWS region %q", region) + klog.V(4).Infof("registering credentials provider for AWS region %q", region) credentialprovider.RegisterCredentialProvider("aws-ecr-"+region, &lazyEcrProvider{ @@ -122,7 +122,7 @@ func (p *lazyEcrProvider) Enabled() bool { // provider only when we actually need it the first time. func (p *lazyEcrProvider) LazyProvide() *credentialprovider.DockerConfigEntry { if p.actualProvider == nil { - glog.V(2).Infof("Creating ecrProvider for %s", p.region) + klog.V(2).Infof("Creating ecrProvider for %s", p.region) p.actualProvider = &credentialprovider.CachingDockerConfigProvider{ Provider: newEcrProvider(p.region, nil), // Refresh credentials a little earlier than expiration time @@ -161,7 +161,7 @@ func newEcrProvider(region string, getter tokenGetter) *ecrProvider { // use ECR somehow? 
func (p *ecrProvider) Enabled() bool { if p.region == "" { - glog.Errorf("Called ecrProvider.Enabled() with no region set") + klog.Errorf("Called ecrProvider.Enabled() with no region set") return false } @@ -191,11 +191,11 @@ func (p *ecrProvider) Provide() credentialprovider.DockerConfig { params := &ecr.GetAuthorizationTokenInput{} output, err := p.getter.GetAuthorizationToken(params) if err != nil { - glog.Errorf("while requesting ECR authorization token %v", err) + klog.Errorf("while requesting ECR authorization token %v", err) return cfg } if output == nil { - glog.Errorf("Got back no ECR token") + klog.Errorf("Got back no ECR token") return cfg } @@ -204,7 +204,7 @@ func (p *ecrProvider) Provide() credentialprovider.DockerConfig { data.AuthorizationToken != nil { decodedToken, err := base64.StdEncoding.DecodeString(aws.StringValue(data.AuthorizationToken)) if err != nil { - glog.Errorf("while decoding token for endpoint %v %v", data.ProxyEndpoint, err) + klog.Errorf("while decoding token for endpoint %v %v", data.ProxyEndpoint, err) return cfg } parts := strings.SplitN(string(decodedToken), ":", 2) @@ -217,7 +217,7 @@ func (p *ecrProvider) Provide() credentialprovider.DockerConfig { Email: "not@val.id", } - glog.V(3).Infof("Adding credentials for user %s in %s", user, p.region) + klog.V(3).Infof("Adding credentials for user %s in %s", user, p.region) // Add our config entry for this region's registry URLs cfg[p.regionURL] = entry diff --git a/pkg/credentialprovider/azure/BUILD b/pkg/credentialprovider/azure/BUILD index 1bd18ed5a2b30..aa1b29b2c7871 100644 --- a/pkg/credentialprovider/azure/BUILD +++ b/pkg/credentialprovider/azure/BUILD @@ -21,8 +21,8 @@ go_library( "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", "//vendor/github.com/dgrijalva/jwt-go:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/pkg/credentialprovider/azure/azure_credentials.go b/pkg/credentialprovider/azure/azure_credentials.go index 1a50bea9a5766..28e43048d2294 100644 --- a/pkg/credentialprovider/azure/azure_credentials.go +++ b/pkg/credentialprovider/azure/azure_credentials.go @@ -27,8 +27,8 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" "sigs.k8s.io/yaml" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth" @@ -133,7 +133,7 @@ func (a *acrProvider) loadConfig(rdr io.Reader) error { var err error a.config, err = parseConfig(rdr) if err != nil { - glog.Errorf("Failed to load azure credential file: %v", err) + klog.Errorf("Failed to load azure credential file: %v", err) } a.environment, err = auth.ParseAzureEnvironment(a.config.Cloud) @@ -146,26 +146,26 @@ func (a *acrProvider) loadConfig(rdr io.Reader) error { func (a *acrProvider) Enabled() bool { if a.file == nil || len(*a.file) == 0 { - glog.V(5).Infof("Azure config unspecified, disabling") + klog.V(5).Infof("Azure config unspecified, disabling") return false } f, err := os.Open(*a.file) if err != nil { - glog.Errorf("Failed to load config from file: %s", *a.file) + klog.Errorf("Failed to load config from file: %s", *a.file) return false } defer f.Close() err = a.loadConfig(f) if err != nil { - 
glog.Errorf("Failed to load config from file: %s", *a.file) + klog.Errorf("Failed to load config from file: %s", *a.file) return false } a.servicePrincipalToken, err = auth.GetServicePrincipalToken(a.config, a.environment) if err != nil { - glog.Errorf("Failed to create service principal token: %v", err) + klog.Errorf("Failed to create service principal token: %v", err) return false } @@ -179,16 +179,16 @@ func (a *acrProvider) Provide() credentialprovider.DockerConfig { defer cancel() if a.config.UseManagedIdentityExtension { - glog.V(4).Infof("listing registries") + klog.V(4).Infof("listing registries") result, err := a.registryClient.List(ctx) if err != nil { - glog.Errorf("Failed to list registries: %v", err) + klog.Errorf("Failed to list registries: %v", err) return cfg } for ix := range result { loginServer := getLoginServer(result[ix]) - glog.V(2).Infof("loginServer: %s", loginServer) + klog.V(2).Infof("loginServer: %s", loginServer) cred, err := getACRDockerEntryFromARMToken(a, loginServer) if err != nil { continue @@ -216,22 +216,22 @@ func getLoginServer(registry containerregistry.Registry) string { func getACRDockerEntryFromARMToken(a *acrProvider, loginServer string) (*credentialprovider.DockerConfigEntry, error) { armAccessToken := a.servicePrincipalToken.OAuthToken() - glog.V(4).Infof("discovering auth redirects for: %s", loginServer) + klog.V(4).Infof("discovering auth redirects for: %s", loginServer) directive, err := receiveChallengeFromLoginServer(loginServer) if err != nil { - glog.Errorf("failed to receive challenge: %s", err) + klog.Errorf("failed to receive challenge: %s", err) return nil, err } - glog.V(4).Infof("exchanging an acr refresh_token") + klog.V(4).Infof("exchanging an acr refresh_token") registryRefreshToken, err := performTokenExchange( loginServer, directive, a.config.TenantID, armAccessToken) if err != nil { - glog.Errorf("failed to perform token exchange: %s", err) + klog.Errorf("failed to perform token exchange: %s", err) return nil, err } - glog.V(4).Infof("adding ACR docker config entry for: %s", loginServer) + klog.V(4).Infof("adding ACR docker config entry for: %s", loginServer) return &credentialprovider.DockerConfigEntry{ Username: dockerTokenLoginUsernameGUID, Password: registryRefreshToken, diff --git a/pkg/credentialprovider/config.go b/pkg/credentialprovider/config.go index 433f28b15448f..a43e8c2b15d4a 100644 --- a/pkg/credentialprovider/config.go +++ b/pkg/credentialprovider/config.go @@ -27,7 +27,7 @@ import ( "strings" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // DockerConfigJson represents ~/.docker/config.json file info @@ -95,21 +95,21 @@ func ReadDockercfgFile(searchPaths []string) (cfg DockerConfig, err error) { for _, configPath := range searchPaths { absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configFileName)) if err != nil { - glog.Errorf("while trying to canonicalize %s: %v", configPath, err) + klog.Errorf("while trying to canonicalize %s: %v", configPath, err) continue } - glog.V(4).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation) + klog.V(4).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation) contents, err := ioutil.ReadFile(absDockerConfigFileLocation) if os.IsNotExist(err) { continue } if err != nil { - glog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) + klog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) continue } cfg, err := readDockerConfigFileFromBytes(contents) if err == nil { - 
glog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation) + klog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation) return cfg, nil } } @@ -125,18 +125,18 @@ func ReadDockerConfigJSONFile(searchPaths []string) (cfg DockerConfig, err error for _, configPath := range searchPaths { absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configJsonFileName)) if err != nil { - glog.Errorf("while trying to canonicalize %s: %v", configPath, err) + klog.Errorf("while trying to canonicalize %s: %v", configPath, err) continue } - glog.V(4).Infof("looking for %s at %s", configJsonFileName, absDockerConfigFileLocation) + klog.V(4).Infof("looking for %s at %s", configJsonFileName, absDockerConfigFileLocation) cfg, err = ReadSpecificDockerConfigJsonFile(absDockerConfigFileLocation) if err != nil { if !os.IsNotExist(err) { - glog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) + klog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) } continue } - glog.V(4).Infof("found valid %s at %s", configJsonFileName, absDockerConfigFileLocation) + klog.V(4).Infof("found valid %s at %s", configJsonFileName, absDockerConfigFileLocation) return cfg, nil } return nil, fmt.Errorf("couldn't find valid %s after checking in %v", configJsonFileName, searchPaths) @@ -188,7 +188,7 @@ func ReadUrl(url string, client *http.Client, header *http.Header) (body []byte, defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - glog.V(2).Infof("body of failing http response: %v", resp.Body) + klog.V(2).Infof("body of failing http response: %v", resp.Body) return nil, &HttpError{ StatusCode: resp.StatusCode, Url: url, @@ -213,7 +213,7 @@ func ReadDockerConfigFileFromUrl(url string, client *http.Client, header *http.H func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error) { if err = json.Unmarshal(contents, &cfg); err != nil { - glog.Errorf("while trying to parse blob %q: %v", contents, err) + klog.Errorf("while trying to parse blob %q: %v", contents, err) return nil, err } return @@ -222,7 +222,7 @@ func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error func readDockerConfigJsonFileFromBytes(contents []byte) (cfg DockerConfig, err error) { var cfgJson DockerConfigJson if err = json.Unmarshal(contents, &cfgJson); err != nil { - glog.Errorf("while trying to parse blob %q: %v", contents, err) + klog.Errorf("while trying to parse blob %q: %v", contents, err) return nil, err } cfg = cfgJson.Auths diff --git a/pkg/credentialprovider/gcp/BUILD b/pkg/credentialprovider/gcp/BUILD index 6b34743bca787..1d61eaaef33cc 100644 --- a/pkg/credentialprovider/gcp/BUILD +++ b/pkg/credentialprovider/gcp/BUILD @@ -16,7 +16,7 @@ go_library( deps = [ "//pkg/credentialprovider:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/credentialprovider/gcp/metadata.go b/pkg/credentialprovider/gcp/metadata.go index 4c668d0b38475..7620f0c040fd6 100644 --- a/pkg/credentialprovider/gcp/metadata.go +++ b/pkg/credentialprovider/gcp/metadata.go @@ -23,8 +23,8 @@ import ( "strings" "time" - "github.com/golang/glog" utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/klog" "k8s.io/kubernetes/pkg/credentialprovider" ) @@ -117,7 +117,7 @@ func init() { func onGCEVM() bool { data, err := ioutil.ReadFile(gceProductNameFile) if err != nil { - 
glog.V(2).Infof("Error while reading product_name: %v", err) + klog.V(2).Infof("Error while reading product_name: %v", err) return false } name := strings.TrimSpace(string(data)) @@ -139,7 +139,7 @@ func (g *dockerConfigKeyProvider) Provide() credentialprovider.DockerConfig { // Read the contents of the google-dockercfg metadata key and // parse them as an alternate .dockercfg if cfg, err := credentialprovider.ReadDockerConfigFileFromUrl(dockerConfigKey, g.Client, metadataHeader); err != nil { - glog.Errorf("while reading 'google-dockercfg' metadata: %v", err) + klog.Errorf("while reading 'google-dockercfg' metadata: %v", err) } else { return cfg } @@ -156,17 +156,17 @@ func (g *dockerConfigUrlKeyProvider) LazyProvide() *credentialprovider.DockerCon func (g *dockerConfigUrlKeyProvider) Provide() credentialprovider.DockerConfig { // Read the contents of the google-dockercfg-url key and load a .dockercfg from there if url, err := credentialprovider.ReadUrl(dockerConfigUrlKey, g.Client, metadataHeader); err != nil { - glog.Errorf("while reading 'google-dockercfg-url' metadata: %v", err) + klog.Errorf("while reading 'google-dockercfg-url' metadata: %v", err) } else { if strings.HasPrefix(string(url), "http") { if cfg, err := credentialprovider.ReadDockerConfigFileFromUrl(string(url), g.Client, nil); err != nil { - glog.Errorf("while reading 'google-dockercfg-url'-specified url: %s, %v", string(url), err) + klog.Errorf("while reading 'google-dockercfg-url'-specified url: %s, %v", string(url), err) } else { return cfg } } else { // TODO(mattmoor): support reading alternate scheme URLs (e.g. gs:// or s3://) - glog.Errorf("Unsupported URL scheme: %s", string(url)) + klog.Errorf("Unsupported URL scheme: %s", string(url)) } } @@ -209,7 +209,7 @@ func (g *containerRegistryProvider) Enabled() bool { value := runWithBackoff(func() ([]byte, error) { value, err := credentialprovider.ReadUrl(serviceAccounts, g.Client, metadataHeader) if err != nil { - glog.V(2).Infof("Failed to Get service accounts from gce metadata server: %v", err) + klog.V(2).Infof("Failed to Get service accounts from gce metadata server: %v", err) } return value, err }) @@ -225,20 +225,20 @@ func (g *containerRegistryProvider) Enabled() bool { } } if !defaultServiceAccountExists { - glog.V(2).Infof("'default' service account does not exist. Found following service accounts: %q", string(value)) + klog.V(2).Infof("'default' service account does not exist. 
Found the following service accounts: %q", string(value)) return false } url := metadataScopes + "?alt=json" value = runWithBackoff(func() ([]byte, error) { value, err := credentialprovider.ReadUrl(url, g.Client, metadataHeader) if err != nil { - glog.V(2).Infof("Failed to Get scopes in default service account from gce metadata server: %v", err) + klog.V(2).Infof("Failed to Get scopes in default service account from gce metadata server: %v", err) } return value, err }) var scopes []string if err := json.Unmarshal(value, &scopes); err != nil { - glog.Errorf("Failed to unmarshal scopes: %v", err) + klog.Errorf("Failed to unmarshal scopes: %v", err) return false } for _, v := range scopes { @@ -247,7 +247,7 @@ func (g *containerRegistryProvider) Enabled() bool { return true } } - glog.Warningf("Google container registry is disabled, no storage scope is available: %s", value) + klog.Warningf("Google container registry is disabled, no storage scope is available: %s", value) return false } @@ -268,19 +268,19 @@ func (g *containerRegistryProvider) Provide() credentialprovider.DockerConfig { tokenJsonBlob, err := credentialprovider.ReadUrl(metadataToken, g.Client, metadataHeader) if err != nil { - glog.Errorf("while reading access token endpoint: %v", err) + klog.Errorf("while reading access token endpoint: %v", err) return cfg } email, err := credentialprovider.ReadUrl(metadataEmail, g.Client, metadataHeader) if err != nil { - glog.Errorf("while reading email endpoint: %v", err) + klog.Errorf("while reading email endpoint: %v", err) return cfg } var parsedBlob tokenBlob if err := json.Unmarshal([]byte(tokenJsonBlob), &parsedBlob); err != nil { - glog.Errorf("while parsing json blob %s: %v", tokenJsonBlob, err) + klog.Errorf("while parsing json blob %s: %v", tokenJsonBlob, err) return cfg } diff --git a/pkg/credentialprovider/keyring.go b/pkg/credentialprovider/keyring.go index 8a6f563d08532..6f5fad5fc4783 100644 --- a/pkg/credentialprovider/keyring.go +++ b/pkg/credentialprovider/keyring.go @@ -23,7 +23,7 @@ import ( "sort" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" ) @@ -113,7 +113,7 @@ func (dk *BasicDockerKeyring) Add(cfg DockerConfig) { } parsed, err := url.Parse(value) if err != nil { - glog.Errorf("Entry %q in dockercfg invalid (%v), ignoring", loc, err) + klog.Errorf("Entry %q in dockercfg invalid (%v), ignoring", loc, err) continue } diff --git a/pkg/credentialprovider/plugins.go b/pkg/credentialprovider/plugins.go index c817fefa2b7e3..5ea3a000e8bcf 100644 --- a/pkg/credentialprovider/plugins.go +++ b/pkg/credentialprovider/plugins.go @@ -21,7 +21,7 @@ import ( "sort" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // All registered credential providers.
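Aside: the next hunk renames the logging inside RegisterCredentialProvider but keeps its contract: registering the same provider name twice is fatal, and a successful registration is only a V(4) debug line. A condensed sketch of that guarded-map pattern (the map value is reduced to a placeholder and register is a hypothetical name; only the log messages come from this patch):

package main

import (
	"flag"
	"sync"

	"k8s.io/klog"
)

var (
	providersMutex sync.Mutex
	providers      = map[string]struct{}{}
)

// register mirrors RegisterCredentialProvider: take the lock, abort the
// process via Fatalf on a duplicate name, otherwise record the provider.
func register(name string) {
	providersMutex.Lock()
	defer providersMutex.Unlock()
	if _, found := providers[name]; found {
		klog.Fatalf("Credential provider %q was registered twice", name)
	}
	klog.V(4).Infof("Registered credential provider %q", name)
	providers[name] = struct{}{}
}

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	register("aws-ecr-us-east-1")
	klog.Flush()
}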
@@ -38,9 +38,9 @@ func RegisterCredentialProvider(name string, provider DockerConfigProvider) { defer providersMutex.Unlock() _, found := providers[name] if found { - glog.Fatalf("Credential provider %q was registered twice", name) + klog.Fatalf("Credential provider %q was registered twice", name) } - glog.V(4).Infof("Registered credential provider %q", name) + klog.V(4).Infof("Registered credential provider %q", name) providers[name] = provider } @@ -61,7 +61,7 @@ func NewDockerKeyring() DockerKeyring { for _, key := range stringKeys { provider := providers[key] if provider.Enabled() { - glog.V(4).Infof("Registering credential provider: %v", key) + klog.V(4).Infof("Registering credential provider: %v", key) keyring.Providers = append(keyring.Providers, provider) } } diff --git a/pkg/credentialprovider/provider.go b/pkg/credentialprovider/provider.go index 422696e9b0b2f..16b4e601a10a6 100644 --- a/pkg/credentialprovider/provider.go +++ b/pkg/credentialprovider/provider.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" ) // DockerConfigProvider is the interface that registered extensions implement @@ -83,7 +83,7 @@ func (d *defaultDockerConfigProvider) Provide() DockerConfig { if cfg, err := ReadDockerConfigFile(); err == nil { return cfg } else if !os.IsNotExist(err) { - glog.V(4).Infof("Unable to parse Docker config file: %v", err) + klog.V(4).Infof("Unable to parse Docker config file: %v", err) } return DockerConfig{} } @@ -113,7 +113,7 @@ func (d *CachingDockerConfigProvider) Provide() DockerConfig { return d.cacheDockerConfig } - glog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String()) + klog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String()) d.cacheDockerConfig = d.Provider.Provide() d.expiration = time.Now().Add(d.Lifetime) return d.cacheDockerConfig diff --git a/pkg/credentialprovider/rancher/BUILD b/pkg/credentialprovider/rancher/BUILD index af1f490b1038f..02ab95c7fce8f 100644 --- a/pkg/credentialprovider/rancher/BUILD +++ b/pkg/credentialprovider/rancher/BUILD @@ -25,8 +25,8 @@ go_library( importpath = "k8s.io/kubernetes/pkg/credentialprovider/rancher", deps = [ "//pkg/credentialprovider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/rancher/go-rancher/client:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/credentialprovider/rancher/rancher_registry_credentials.go b/pkg/credentialprovider/rancher/rancher_registry_credentials.go index 3143560dfe959..d122308ed82bf 100644 --- a/pkg/credentialprovider/rancher/rancher_registry_credentials.go +++ b/pkg/credentialprovider/rancher/rancher_registry_credentials.go @@ -20,8 +20,8 @@ import ( "os" "time" - "github.com/golang/glog" "github.com/rancher/go-rancher/client" + "k8s.io/klog" "k8s.io/kubernetes/pkg/credentialprovider" ) @@ -102,13 +102,13 @@ func (g *rancherCredentialsGetter) getCredentials() []registryCredential { var registryCreds []registryCredential credColl, err := g.client.RegistryCredential.List(client.NewListOpts()) if err != nil { - glog.Errorf("Failed to pull registry credentials from rancher %v", err) + klog.Errorf("Failed to pull registry credentials from rancher %v", err) return registryCreds } for _, cred := range credColl.Data { registry := &client.Registry{} if err = g.client.GetLink(cred.Resource, "registry", registry); err != nil { - glog.Errorf("Failed to pull registry from rancher %v", err) + klog.Errorf("Failed to pull 
registry from rancher %v", err) return registryCreds } registryCred := registryCredential{ diff --git a/pkg/kubeapiserver/admission/BUILD b/pkg/kubeapiserver/admission/BUILD index fb9a80a8e924c..abe660e71779f 100644 --- a/pkg/kubeapiserver/admission/BUILD +++ b/pkg/kubeapiserver/admission/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/restmapper:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubeapiserver/admission/config.go b/pkg/kubeapiserver/admission/config.go index 526b2094e799a..a47e4559377a5 100644 --- a/pkg/kubeapiserver/admission/config.go +++ b/pkg/kubeapiserver/admission/config.go @@ -21,7 +21,7 @@ import ( "net/http" "time" - "github.com/golang/glog" + "k8s.io/klog" utilwait "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/admission" @@ -54,7 +54,7 @@ func (c *Config) New(proxyTransport *http.Transport, serviceResolver webhook.Ser var err error cloudConfig, err = ioutil.ReadFile(c.CloudConfigFile) if err != nil { - glog.Fatalf("Error reading from cloud configuration file %s: %#v", c.CloudConfigFile, err) + klog.Fatalf("Error reading from cloud configuration file %s: %#v", c.CloudConfigFile, err) } } internalClient, err := internalclientset.NewForConfig(c.LoopbackClientConfig) diff --git a/pkg/kubeapiserver/options/BUILD b/pkg/kubeapiserver/options/BUILD index 0a9373a0ea05d..9ee6eb989f7fa 100644 --- a/pkg/kubeapiserver/options/BUILD +++ b/pkg/kubeapiserver/options/BUILD @@ -68,8 +68,8 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubeapiserver/options/authentication.go b/pkg/kubeapiserver/options/authentication.go index 2c149cbf3202f..c65dff800b959 100644 --- a/pkg/kubeapiserver/options/authentication.go +++ b/pkg/kubeapiserver/options/authentication.go @@ -22,8 +22,8 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/authentication/authenticator" @@ -350,10 +350,10 @@ func (s *BuiltInAuthenticationOptions) ToAuthenticationConfig() kubeauthenticato if len(s.WebHook.ConfigFile) > 0 && s.WebHook.CacheTTL > 0 { if s.TokenSuccessCacheTTL > 0 && s.WebHook.CacheTTL < s.TokenSuccessCacheTTL { - glog.Warningf("the webhook cache ttl of %s is shorter than the overall cache ttl of %s for successful token authentication attempts.", s.WebHook.CacheTTL, s.TokenSuccessCacheTTL) + klog.Warningf("the webhook cache ttl of %s is shorter than the overall cache ttl of %s for successful token authentication attempts.", s.WebHook.CacheTTL, s.TokenSuccessCacheTTL) } if s.TokenFailureCacheTTL > 0 && s.WebHook.CacheTTL < s.TokenFailureCacheTTL { - glog.Warningf("the webhook cache ttl of %s is shorter than the overall cache ttl of %s for failed token authentication attempts.", s.WebHook.CacheTTL, s.TokenFailureCacheTTL) + klog.Warningf("the webhook cache ttl of %s is shorter than the overall cache ttl of %s for failed token authentication attempts.", s.WebHook.CacheTTL, s.TokenFailureCacheTTL) } } } @@ -397,7 +397,7 @@ func (o 
*BuiltInAuthenticationOptions) ApplyAuthorization(authorization *BuiltIn // authorization ModeAlwaysAllow cannot be combined with AnonymousAuth. // in such a case the AnonymousAuth is stomped to false and you get a message if o.Anonymous.Allow && sets.NewString(authorization.Modes...).Has(authzmodes.ModeAlwaysAllow) { - glog.Warningf("AnonymousAuth is not allowed with the AlwaysAllow authorizer. Resetting AnonymousAuth to false. You should use a different authorizer") + klog.Warningf("AnonymousAuth is not allowed with the AlwaysAllow authorizer. Resetting AnonymousAuth to false. You should use a different authorizer") o.Anonymous.Allow = false } } diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index fbd9e945497eb..9a287ffa91a89 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -98,7 +98,7 @@ go_library( "//staging/src/k8s.io/client-go/util/integer:go_default_library", "//staging/src/k8s.io/client-go/util/jsonpath:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/vbom.ml/util/sortorder:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/annotate/BUILD b/pkg/kubectl/cmd/annotate/BUILD index 97cbb1f00eb59..728e940743f98 100644 --- a/pkg/kubectl/cmd/annotate/BUILD +++ b/pkg/kubectl/cmd/annotate/BUILD @@ -20,8 +20,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/annotate/annotate.go b/pkg/kubectl/cmd/annotate/annotate.go index 00f4551a493c9..56fe5603fb47b 100644 --- a/pkg/kubectl/cmd/annotate/annotate.go +++ b/pkg/kubectl/cmd/annotate/annotate.go @@ -22,8 +22,8 @@ import ( "io" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -271,7 +271,7 @@ func (o AnnotateOptions) RunAnnotate() error { return err } if err := o.Recorder.Record(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } if err := o.updateAnnotations(obj); err != nil { return err @@ -283,7 +283,7 @@ func (o AnnotateOptions) RunAnnotate() error { patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) createdPatch := err == nil if err != nil { - glog.V(2).Infof("couldn't compute patch: %v", err) + klog.V(2).Infof("couldn't compute patch: %v", err) } mapping := info.ResourceMapping() diff --git a/pkg/kubectl/cmd/apply/BUILD b/pkg/kubectl/cmd/apply/BUILD index edde8b09e5387..8f9549825817f 100644 --- a/pkg/kubectl/cmd/apply/BUILD +++ b/pkg/kubectl/cmd/apply/BUILD @@ -38,9 +38,9 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//staging/src/k8s.io/client-go/discovery:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/jonboulle/clockwork:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", 
"//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], diff --git a/pkg/kubectl/cmd/apply/apply.go b/pkg/kubectl/cmd/apply/apply.go index de8274169eadc..4f1e3d6d0409e 100644 --- a/pkg/kubectl/cmd/apply/apply.go +++ b/pkg/kubectl/cmd/apply/apply.go @@ -22,7 +22,6 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/jonboulle/clockwork" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" @@ -43,6 +42,7 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" + "k8s.io/klog" oapi "k8s.io/kube-openapi/pkg/util/proto" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/delete" @@ -347,7 +347,7 @@ func (o *ApplyOptions) Run() error { } if err := o.Recorder.Record(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } // Get the modified configuration of the object. Embed the result diff --git a/pkg/kubectl/cmd/attach/BUILD b/pkg/kubectl/cmd/attach/BUILD index dde0b4da1048f..ae86f8d5bfda9 100644 --- a/pkg/kubectl/cmd/attach/BUILD +++ b/pkg/kubectl/cmd/attach/BUILD @@ -18,8 +18,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/attach/attach.go b/pkg/kubectl/cmd/attach/attach.go index df5f24790d0b0..c0bfd7831636f 100644 --- a/pkg/kubectl/cmd/attach/attach.go +++ b/pkg/kubectl/cmd/attach/attach.go @@ -22,8 +22,8 @@ import ( "net/url" "time" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -326,7 +326,7 @@ func (o *AttachOptions) containerToAttachTo(pod *corev1.Pod) (*corev1.Container, fmt.Fprintf(o.ErrOut, "%s\n", o.SuggestedCmdUsage) } - glog.V(4).Infof("defaulting container name to %s", pod.Spec.Containers[0].Name) + klog.V(4).Infof("defaulting container name to %s", pod.Spec.Containers[0].Name) return &pod.Spec.Containers[0], nil } diff --git a/pkg/kubectl/cmd/auth/BUILD b/pkg/kubectl/cmd/auth/BUILD index 756d269206790..9be5ff830ac52 100644 --- a/pkg/kubectl/cmd/auth/BUILD +++ b/pkg/kubectl/cmd/auth/BUILD @@ -32,8 +32,8 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/auth/reconcile.go b/pkg/kubectl/cmd/auth/reconcile.go index 089639705bb20..e2dafca743d93 100644 --- a/pkg/kubectl/cmd/auth/reconcile.go +++ b/pkg/kubectl/cmd/auth/reconcile.go @@ -20,8 +20,8 @@ import ( "errors" "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" @@ -251,7 +251,7 @@ func (o *ReconcileOptions) RunReconcile() error { return fmt.Errorf("only rbac.authorization.k8s.io/v1 is supported: not %T", t) default: - glog.V(1).Infof("skipping %#v", 
info.Object.GetObjectKind()) + klog.V(1).Infof("skipping %#v", info.Object.GetObjectKind()) // skip ignored resources } diff --git a/pkg/kubectl/cmd/autoscale/BUILD b/pkg/kubectl/cmd/autoscale/BUILD index 1c9aea0718ec5..fdb1dce6c240b 100644 --- a/pkg/kubectl/cmd/autoscale/BUILD +++ b/pkg/kubectl/cmd/autoscale/BUILD @@ -20,8 +20,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/autoscaling/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/autoscale/autoscale.go b/pkg/kubectl/cmd/autoscale/autoscale.go index 78b641a509e4b..b88d77c1d9f2a 100644 --- a/pkg/kubectl/cmd/autoscale/autoscale.go +++ b/pkg/kubectl/cmd/autoscale/autoscale.go @@ -19,8 +19,8 @@ package autoscale import ( "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" autoscalingv1 "k8s.io/api/autoscaling/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -234,7 +234,7 @@ func (o *AutoscaleOptions) Run() error { } if err := o.Recorder.Record(hpa); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } if o.dryRun { diff --git a/pkg/kubectl/cmd/convert/BUILD b/pkg/kubectl/cmd/convert/BUILD index 493542234fd4d..05cb8c4edba50 100644 --- a/pkg/kubectl/cmd/convert/BUILD +++ b/pkg/kubectl/cmd/convert/BUILD @@ -18,8 +18,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/convert/convert.go b/pkg/kubectl/cmd/convert/convert.go index 2faaf59f4fef9..a233b8cd1cbbb 100644 --- a/pkg/kubectl/cmd/convert/convert.go +++ b/pkg/kubectl/cmd/convert/convert.go @@ -19,8 +19,8 @@ package convert import ( "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -226,7 +226,7 @@ func asVersionedObject(infos []*resource.Info, forceList bool, specifiedOutputVe if len(actualVersion.Version) > 0 { defaultVersionInfo = fmt.Sprintf("Defaulting to %q", actualVersion.Version) } - glog.V(1).Infof("info: the output version specified is invalid. %s\n", defaultVersionInfo) + klog.V(1).Infof("info: the output version specified is invalid. 
%s\n", defaultVersionInfo) } return object, nil } diff --git a/pkg/kubectl/cmd/create/BUILD b/pkg/kubectl/cmd/create/BUILD index 3f043b50b5ec1..49ce594021eed 100644 --- a/pkg/kubectl/cmd/create/BUILD +++ b/pkg/kubectl/cmd/create/BUILD @@ -47,8 +47,8 @@ go_library( "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/create/create.go b/pkg/kubectl/cmd/create/create.go index b96b62eca8325..1a426bae61611 100644 --- a/pkg/kubectl/cmd/create/create.go +++ b/pkg/kubectl/cmd/create/create.go @@ -24,8 +24,8 @@ import ( "runtime" "strings" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -247,7 +247,7 @@ func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error { } if err := o.Recorder.Record(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } if !o.DryRun { diff --git a/pkg/kubectl/cmd/delete/BUILD b/pkg/kubectl/cmd/delete/BUILD index 1768dad67a0f6..f1b922a72d64a 100644 --- a/pkg/kubectl/cmd/delete/BUILD +++ b/pkg/kubectl/cmd/delete/BUILD @@ -21,8 +21,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/delete/delete.go b/pkg/kubectl/cmd/delete/delete.go index b34bc9fa751a9..84b6db50bfe4b 100644 --- a/pkg/kubectl/cmd/delete/delete.go +++ b/pkg/kubectl/cmd/delete/delete.go @@ -21,8 +21,8 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -259,7 +259,7 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { responseMetadata, err := meta.Accessor(response) if err != nil { // we don't have UID, but we didn't fail the delete, next best thing is just skipping the UID - glog.V(1).Info(err) + klog.V(1).Info(err) return nil } uidMap[resourceLocation] = responseMetadata.GetUID() @@ -301,7 +301,7 @@ func (o *DeleteOptions) DeleteResult(r *resource.Result) error { if errors.IsForbidden(err) || errors.IsMethodNotSupported(err) { // if we're forbidden from waiting, we shouldn't fail. // if the resource doesn't support a verb we need, we shouldn't fail. 
- glog.V(1).Info(err) + klog.V(1).Info(err) return nil } return err diff --git a/pkg/kubectl/cmd/expose/BUILD b/pkg/kubectl/cmd/expose/BUILD index 71c17b46e654a..bd71a3589e98c 100644 --- a/pkg/kubectl/cmd/expose/BUILD +++ b/pkg/kubectl/cmd/expose/BUILD @@ -24,8 +24,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/expose/expose.go b/pkg/kubectl/cmd/expose/expose.go index c826e2a7b747c..2727e61c1bfc1 100644 --- a/pkg/kubectl/cmd/expose/expose.go +++ b/pkg/kubectl/cmd/expose/expose.go @@ -20,8 +20,8 @@ import ( "regexp" "strings" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -321,7 +321,7 @@ func (o *ExposeServiceOptions) RunExpose(cmd *cobra.Command, args []string) erro } if err := o.Recorder.Record(object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } if o.DryRun { diff --git a/pkg/kubectl/cmd/get/BUILD b/pkg/kubectl/cmd/get/BUILD index a14665b829c0b..523fe5043cc29 100644 --- a/pkg/kubectl/cmd/get/BUILD +++ b/pkg/kubectl/cmd/get/BUILD @@ -49,8 +49,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/watch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/get/get.go b/pkg/kubectl/cmd/get/get.go index 2739b3bc8c438..05409948d97ba 100644 --- a/pkg/kubectl/cmd/get/get.go +++ b/pkg/kubectl/cmd/get/get.go @@ -23,8 +23,8 @@ import ( "io" "net/url" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" kapierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -471,7 +471,7 @@ func (o *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e } else { // if we are unable to decode server response into a v1beta1.Table, // fallback to client-side printing with whatever info the server returned. - glog.V(2).Infof("Unable to decode server response into a Table. Falling back to hardcoded types: %v", err) + klog.V(2).Infof("Unable to decode server response into a Table. Falling back to hardcoded types: %v", err) } } @@ -555,7 +555,7 @@ func (o *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e internalObj, err := legacyscheme.Scheme.ConvertToVersion(info.Object, info.Mapping.GroupVersionKind.GroupKind().WithVersion(runtime.APIVersionInternal).GroupVersion()) if err != nil { // if there's an error, try to print what you have (mirrors old behavior). 
- glog.V(1).Info(err) + klog.V(1).Info(err) printer.PrintObj(info.Object, w) } else { printer.PrintObj(internalObj, w) @@ -704,7 +704,7 @@ func (o *GetOptions) watch(f cmdutil.Factory, cmd *cobra.Command, args []string) func attemptToConvertToInternal(obj runtime.Object, converter runtime.ObjectConvertor, targetVersion schema.GroupVersion) runtime.Object { internalObject, err := converter.ConvertToVersion(obj, targetVersion) if err != nil { - glog.V(1).Infof("Unable to convert %T to %v: %v", obj, targetVersion, err) + klog.V(1).Infof("Unable to convert %T to %v: %v", obj, targetVersion, err) return obj } return internalObject diff --git a/pkg/kubectl/cmd/label/BUILD b/pkg/kubectl/cmd/label/BUILD index 7e9abb28d4d2f..549899a766145 100644 --- a/pkg/kubectl/cmd/label/BUILD +++ b/pkg/kubectl/cmd/label/BUILD @@ -22,8 +22,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/label/label.go b/pkg/kubectl/cmd/label/label.go index ae512805ae259..11251a278d21d 100644 --- a/pkg/kubectl/cmd/label/label.go +++ b/pkg/kubectl/cmd/label/label.go @@ -22,8 +22,8 @@ import ( "strings" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -288,7 +288,7 @@ func (o *LabelOptions) RunLabel() error { return err } if err := o.Recorder.Record(obj); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } newObj, err := json.Marshal(obj) if err != nil { @@ -298,7 +298,7 @@ func (o *LabelOptions) RunLabel() error { patchBytes, err := jsonpatch.CreateMergePatch(oldData, newObj) createdPatch := err == nil if err != nil { - glog.V(2).Infof("couldn't compute patch: %v", err) + klog.V(2).Infof("couldn't compute patch: %v", err) } mapping := info.ResourceMapping() diff --git a/pkg/kubectl/cmd/patch/BUILD b/pkg/kubectl/cmd/patch/BUILD index ef65d083b9eff..683b0abef238c 100644 --- a/pkg/kubectl/cmd/patch/BUILD +++ b/pkg/kubectl/cmd/patch/BUILD @@ -22,8 +22,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/patch/patch.go b/pkg/kubectl/cmd/patch/patch.go index 37eb4f51c630b..cb8eb0c8c0ba9 100644 --- a/pkg/kubectl/cmd/patch/patch.go +++ b/pkg/kubectl/cmd/patch/patch.go @@ -22,8 +22,8 @@ import ( "strings" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -229,10 +229,10 @@ func (o *PatchOptions) RunPatch() error { // if the recorder makes a change, compute and create another patch if mergePatch, err := o.Recorder.MakeRecordMergePatch(patchedObj); err != nil { - glog.V(4).Infof("error recording current 
command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } else if len(mergePatch) > 0 { if recordedObj, err := helper.Patch(info.Namespace, info.Name, types.MergePatchType, mergePatch, nil); err != nil { - glog.V(4).Infof("error recording reason: %v", err) + klog.V(4).Infof("error recording reason: %v", err) } else { patchedObj = recordedObj } diff --git a/pkg/kubectl/cmd/proxy/BUILD b/pkg/kubectl/cmd/proxy/BUILD index 3edb46af21613..b5c08df67368a 100644 --- a/pkg/kubectl/cmd/proxy/BUILD +++ b/pkg/kubectl/cmd/proxy/BUILD @@ -11,8 +11,8 @@ go_library( "//pkg/kubectl/util/i18n:go_default_library", "//pkg/kubectl/util/templates:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/proxy/proxy.go b/pkg/kubectl/cmd/proxy/proxy.go index 6701960675724..bf54bb21fb86d 100644 --- a/pkg/kubectl/cmd/proxy/proxy.go +++ b/pkg/kubectl/cmd/proxy/proxy.go @@ -24,9 +24,9 @@ import ( "os" "strings" - "github.com/golang/glog" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/klog" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/proxy" "k8s.io/kubernetes/pkg/kubectl/util/i18n" @@ -119,9 +119,9 @@ func RunProxy(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error { if staticDir != "" { fileInfo, err := os.Stat(staticDir) if err != nil { - glog.Warning("Failed to stat static file directory "+staticDir+": ", err) + klog.Warning("Failed to stat static file directory "+staticDir+": ", err) } else if !fileInfo.IsDir() { - glog.Warning("Static file directory " + staticDir + " is not a directory") + klog.Warning("Static file directory " + staticDir + " is not a directory") } } @@ -137,7 +137,7 @@ func RunProxy(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error { } if cmdutil.GetFlagBool(cmd, "disable-filter") { if path == "" { - glog.Warning("Request filter disabled, your proxy is vulnerable to XSRF attacks, please be cautious") + klog.Warning("Request filter disabled, your proxy is vulnerable to XSRF attacks, please be cautious") } filter = nil } @@ -155,9 +155,9 @@ func RunProxy(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error { l, err = server.ListenUnix(path) } if err != nil { - glog.Fatal(err) + klog.Fatal(err) } fmt.Fprintf(out, "Starting to serve on %s\n", l.Addr().String()) - glog.Fatal(server.ServeOnListener(l)) + klog.Fatal(server.ServeOnListener(l)) return nil } diff --git a/pkg/kubectl/cmd/replace/BUILD b/pkg/kubectl/cmd/replace/BUILD index 6e58729442fd9..864f5a3474735 100644 --- a/pkg/kubectl/cmd/replace/BUILD +++ b/pkg/kubectl/cmd/replace/BUILD @@ -18,8 +18,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/replace/replace.go b/pkg/kubectl/cmd/replace/replace.go index 1a72fd6a0889a..dcc8ddcb87818 100644 --- a/pkg/kubectl/cmd/replace/replace.go +++ b/pkg/kubectl/cmd/replace/replace.go @@ -25,7 +25,7 @@ import ( "github.com/spf13/cobra" - "github.com/golang/glog" + "k8s.io/klog" 
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -225,7 +225,7 @@ func (o *ReplaceOptions) Run() error { } if err := o.Recorder.Record(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } // Serialize the object with the annotation applied. @@ -316,7 +316,7 @@ func (o *ReplaceOptions) forceReplace() error { } if err := o.Recorder.Record(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } obj, err := resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object, nil) diff --git a/pkg/kubectl/cmd/rollingupdate/BUILD b/pkg/kubectl/cmd/rollingupdate/BUILD index ea784dc334ba5..cd16b37319b69 100644 --- a/pkg/kubectl/cmd/rollingupdate/BUILD +++ b/pkg/kubectl/cmd/rollingupdate/BUILD @@ -23,8 +23,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/scale:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/rollingupdate/rollingupdate.go b/pkg/kubectl/cmd/rollingupdate/rollingupdate.go index 4947e72030865..b39ad2049f6e1 100644 --- a/pkg/kubectl/cmd/rollingupdate/rollingupdate.go +++ b/pkg/kubectl/cmd/rollingupdate/rollingupdate.go @@ -21,8 +21,8 @@ import ( "fmt" "time" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1" @@ -292,7 +292,7 @@ func (o *RollingUpdateOptions) Run() error { uncastVersionedObj, err := scheme.Scheme.ConvertToVersion(infos[0].Object, corev1.SchemeGroupVersion) if err != nil { - glog.V(4).Infof("Object %T is not a ReplicationController", infos[0].Object) + klog.V(4).Infof("Object %T is not a ReplicationController", infos[0].Object) return fmt.Errorf("%s contains a %v not a ReplicationController", filename, infos[0].Object.GetObjectKind().GroupVersionKind()) } switch t := uncastVersionedObj.(type) { @@ -301,7 +301,7 @@ func (o *RollingUpdateOptions) Run() error { newRc = t } if newRc == nil { - glog.V(4).Infof("Object %T is not a ReplicationController", infos[0].Object) + klog.V(4).Infof("Object %T is not a ReplicationController", infos[0].Object) return fmt.Errorf("%s contains a %v not a ReplicationController", filename, infos[0].Object.GetObjectKind().GroupVersionKind()) } } diff --git a/pkg/kubectl/cmd/run/BUILD b/pkg/kubectl/cmd/run/BUILD index bffca3654e9d9..180ba1b6e2c16 100644 --- a/pkg/kubectl/cmd/run/BUILD +++ b/pkg/kubectl/cmd/run/BUILD @@ -33,8 +33,8 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/watch:go_default_library", "//vendor/github.com/docker/distribution/reference:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/run/run.go b/pkg/kubectl/cmd/run/run.go index d8da7818361d9..b6ee373edfce0 100644 --- a/pkg/kubectl/cmd/run/run.go +++ b/pkg/kubectl/cmd/run/run.go @@ -22,8 +22,8 @@ import ( "time" "github.com/docker/distribution/reference" - "github.com/golang/glog" 
"github.com/spf13/cobra" + "k8s.io/klog" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -674,7 +674,7 @@ func (o *RunOptions) createGeneratedObject(f cmdutil.Factory, cmd *cobra.Command } if err := o.Recorder.Record(obj); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } actualObj := obj diff --git a/pkg/kubectl/cmd/scale/BUILD b/pkg/kubectl/cmd/scale/BUILD index 1c30a322743cd..5c76a02f140c4 100644 --- a/pkg/kubectl/cmd/scale/BUILD +++ b/pkg/kubectl/cmd/scale/BUILD @@ -25,8 +25,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/batch/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/scale/scale.go b/pkg/kubectl/cmd/scale/scale.go index 1144e307565b5..997d37cefbb9e 100644 --- a/pkg/kubectl/cmd/scale/scale.go +++ b/pkg/kubectl/cmd/scale/scale.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" @@ -240,7 +240,7 @@ func (o *ScaleOptions) RunScale() error { // if the recorder makes a change, compute and create another patch if mergePatch, err := o.Recorder.MakeRecordMergePatch(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } else if len(mergePatch) > 0 { client, err := o.unstructuredClientForMapping(mapping) if err != nil { @@ -248,7 +248,7 @@ func (o *ScaleOptions) RunScale() error { } helper := resource.NewHelper(client, mapping) if _, err := helper.Patch(info.Namespace, info.Name, types.MergePatchType, mergePatch, nil); err != nil { - glog.V(4).Infof("error recording reason: %v", err) + klog.V(4).Infof("error recording reason: %v", err) } } diff --git a/pkg/kubectl/cmd/set/BUILD b/pkg/kubectl/cmd/set/BUILD index cf756dbef418e..aa428c673c24c 100644 --- a/pkg/kubectl/cmd/set/BUILD +++ b/pkg/kubectl/cmd/set/BUILD @@ -41,8 +41,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/set/set_image.go b/pkg/kubectl/cmd/set/set_image.go index 3a18f36bec54f..38677a803f086 100644 --- a/pkg/kubectl/cmd/set/set_image.go +++ b/pkg/kubectl/cmd/set/set_image.go @@ -19,8 +19,8 @@ package set import ( "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -256,7 +256,7 @@ func (o *SetImageOptions) Run() error { } // record this change (for rollout history) if err := o.Recorder.Record(obj); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } return runtime.Encode(scheme.DefaultJSONEncoder(), obj) diff --git a/pkg/kubectl/cmd/set/set_resources.go b/pkg/kubectl/cmd/set/set_resources.go index 
6b94b6a4febed..b7328a0cb4578 100644 --- a/pkg/kubectl/cmd/set/set_resources.go +++ b/pkg/kubectl/cmd/set/set_resources.go @@ -19,8 +19,8 @@ package set import ( "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -250,7 +250,7 @@ func (o *SetResourcesOptions) Run() error { } // record this change (for rollout history) if err := o.Recorder.Record(obj); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } return runtime.Encode(scheme.DefaultJSONEncoder(), obj) diff --git a/pkg/kubectl/cmd/set/set_selector.go b/pkg/kubectl/cmd/set/set_selector.go index 9babe08a2ad44..167ae74717038 100644 --- a/pkg/kubectl/cmd/set/set_selector.go +++ b/pkg/kubectl/cmd/set/set_selector.go @@ -19,8 +19,8 @@ package set import ( "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -171,7 +171,7 @@ func (o *SetSelectorOptions) RunSelector() error { // record this change (for rollout history) if err := o.Recorder.Record(patch.Info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } return runtime.Encode(scheme.DefaultJSONEncoder(), info.Object) diff --git a/pkg/kubectl/cmd/set/set_serviceaccount.go b/pkg/kubectl/cmd/set/set_serviceaccount.go index 45b0ae3057d6e..eced76f7e9c3c 100644 --- a/pkg/kubectl/cmd/set/set_serviceaccount.go +++ b/pkg/kubectl/cmd/set/set_serviceaccount.go @@ -20,8 +20,8 @@ import ( "errors" "fmt" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -183,7 +183,7 @@ func (o *SetServiceAccountOptions) Run() error { } // record this change (for rollout history) if err := o.Recorder.Record(obj); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } return runtime.Encode(scheme.DefaultJSONEncoder(), obj) diff --git a/pkg/kubectl/cmd/taint/BUILD b/pkg/kubectl/cmd/taint/BUILD index 19767bf0ed78c..e5e42effbb96f 100644 --- a/pkg/kubectl/cmd/taint/BUILD +++ b/pkg/kubectl/cmd/taint/BUILD @@ -24,8 +24,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/taint/taint.go b/pkg/kubectl/cmd/taint/taint.go index d752b7b46e78b..02f5651307983 100644 --- a/pkg/kubectl/cmd/taint/taint.go +++ b/pkg/kubectl/cmd/taint/taint.go @@ -21,8 +21,8 @@ import ( "fmt" "strings" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -255,7 +255,7 @@ func (o TaintOptions) RunTaint() error { patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, obj) createdPatch := err == nil if err != nil { - glog.V(2).Infof("couldn't compute patch: %v", err) + klog.V(2).Infof("couldn't compute patch: %v", err) } mapping := info.ResourceMapping() diff --git a/pkg/kubectl/cmd/top/BUILD b/pkg/kubectl/cmd/top/BUILD index 2a879a973b106..0cec30b8f7b76 100644 --- 
a/pkg/kubectl/cmd/top/BUILD +++ b/pkg/kubectl/cmd/top/BUILD @@ -23,9 +23,9 @@ go_library( "//staging/src/k8s.io/metrics/pkg/apis/metrics:go_default_library", "//staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library", "//staging/src/k8s.io/metrics/pkg/client/clientset/versioned:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/top/top_pod.go b/pkg/kubectl/cmd/top/top_pod.go index c8f87d558ef67..5c9a3712761a1 100644 --- a/pkg/kubectl/cmd/top/top_pod.go +++ b/pkg/kubectl/cmd/top/top_pod.go @@ -34,9 +34,9 @@ import ( metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1" metricsclientset "k8s.io/metrics/pkg/client/clientset/versioned" - "github.com/golang/glog" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/klog" ) type TopPodOptions struct { @@ -256,10 +256,10 @@ func checkPodAge(pod *v1.Pod) error { age := time.Since(pod.CreationTimestamp.Time) if age > metricsCreationDelay { message := fmt.Sprintf("Metrics not available for pod %s/%s, age: %s", pod.Namespace, pod.Name, age.String()) - glog.Warningf(message) + klog.Warning(message) return errors.New(message) } else { - glog.V(2).Infof("Metrics not yet available for pod %s/%s, age: %s", pod.Namespace, pod.Name, age.String()) + klog.V(2).Infof("Metrics not yet available for pod %s/%s, age: %s", pod.Namespace, pod.Name, age.String()) return nil } } diff --git a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index bad60cd16c852..91f282405b565 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -39,9 +39,9 @@ go_library( "//staging/src/k8s.io/client-go/scale:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/util/editor/BUILD b/pkg/kubectl/cmd/util/editor/BUILD index 464d84b1ff47c..ac104a73023d0 100644 --- a/pkg/kubectl/cmd/util/editor/BUILD +++ b/pkg/kubectl/cmd/util/editor/BUILD @@ -35,8 +35,8 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions/resource:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/cobra:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/cmd/util/editor/editoptions.go b/pkg/kubectl/cmd/util/editor/editoptions.go index ab43141cf7cbf..2b2add4585941 100644 --- a/pkg/kubectl/cmd/util/editor/editoptions.go +++ b/pkg/kubectl/cmd/util/editor/editoptions.go @@ -29,8 +29,8 @@ import ( "strings" "github.com/evanphx/json-patch" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -287,7 +287,7 @@ func (o *EditOptions) Run() error { if len(results.file) > 0 { os.Remove(results.file) } - glog.V(4).Infof("User edited:\n%s", string(edited)) + klog.V(4).Infof("User edited:\n%s", string(edited)) // Apply 
validation schema, err := o.f.Validator(o.EnableValidation) @@ -606,12 +606,12 @@ func (o *EditOptions) visitToPatch(originalInfos []*resource.Info, patchVisitor patchType = types.MergePatchType patch, err = jsonpatch.CreateMergePatch(originalJS, editedJS) if err != nil { - glog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) + klog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) return err } for _, precondition := range preconditions { if !precondition(patch) { - glog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) + klog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) return fmt.Errorf("%s", "At least one of apiVersion, kind and name was changed") } } @@ -621,7 +621,7 @@ func (o *EditOptions) visitToPatch(originalInfos []*resource.Info, patchVisitor patchType = types.StrategicMergePatchType patch, err = strategicpatch.CreateTwoWayMergePatch(originalJS, editedJS, versionedObject, preconditions...) if err != nil { - glog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) + klog.V(4).Infof("Unable to calculate diff, no merge is possible: %v", err) if mergepatch.IsPreconditionFailed(err) { return fmt.Errorf("%s", "At least one of apiVersion, kind and name was changed") } @@ -674,7 +674,7 @@ func (o *EditOptions) visitAnnotation(annotationVisitor resource.Visitor) error } } if err := o.Recorder.Record(info.Object); err != nil { - glog.V(4).Infof("error recording current command: %v", err) + klog.V(4).Infof("error recording current command: %v", err) } return nil diff --git a/pkg/kubectl/cmd/util/editor/editor.go b/pkg/kubectl/cmd/util/editor/editor.go index a53e8c0970a4a..e7229870ec7a3 100644 --- a/pkg/kubectl/cmd/util/editor/editor.go +++ b/pkg/kubectl/cmd/util/editor/editor.go @@ -27,7 +27,7 @@ import ( "runtime" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubectl/util/term" ) @@ -124,7 +124,7 @@ func (e Editor) Launch(path string) error { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr cmd.Stdin = os.Stdin - glog.V(5).Infof("Opening file with editor %v", args) + klog.V(5).Infof("Opening file with editor %v", args) if err := (term.TTY{In: os.Stdin, TryDev: true}).Safe(cmd.Run); err != nil { if err, ok := err.(*exec.Error); ok { if err.Err == exec.ErrNotFound { diff --git a/pkg/kubectl/cmd/util/helpers.go b/pkg/kubectl/cmd/util/helpers.go index da9e25d04de0b..9a392cde59b00 100644 --- a/pkg/kubectl/cmd/util/helpers.go +++ b/pkg/kubectl/cmd/util/helpers.go @@ -27,9 +27,9 @@ import ( "time" "github.com/evanphx/json-patch" - "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/pflag" + "k8s.io/klog" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -88,10 +88,10 @@ func DefaultBehaviorOnFatal() { } // fatal prints the message (if provided) and then exits. If V(2) or greater, -// glog.Fatal is invoked for extended information. +// klog.Fatal is invoked for extended information. func fatal(msg string, code int) { - if glog.V(2) { - glog.FatalDepth(2, msg) + if klog.V(2) { + klog.FatalDepth(2, msg) } if len(msg) > 0 { // add newline if needed @@ -189,13 +189,13 @@ func statusCausesToAggrError(scs []metav1.StatusCause) utilerrors.Aggregate { // StandardErrorMessage translates common errors into a human readable message, or returns // false if the error is not one of the recognized types. It may also log extended -// information to glog. +// information to klog. 
// // This method is generic to the command in use and may be used by non-Kubectl // commands. func StandardErrorMessage(err error) (string, bool) { if debugErr, ok := err.(debugError); ok { - glog.V(4).Infof(debugErr.DebugError()) + klog.V(4).Infof(debugErr.DebugError()) } status, isStatus := err.(kerrors.APIStatus) switch { @@ -213,7 +213,7 @@ func StandardErrorMessage(err error) (string, bool) { } switch t := err.(type) { case *url.Error: - glog.V(4).Infof("Connection error: %s %s: %v", t.Op, t.URL, t.Err) + klog.V(4).Infof("Connection error: %s %s: %v", t.Op, t.URL, t.Err) switch { case strings.Contains(t.Err.Error(), "connection refused"): host := t.URL @@ -300,7 +300,7 @@ func IsFilenameSliceEmpty(filenames []string) bool { func GetFlagString(cmd *cobra.Command, flag string) string { s, err := cmd.Flags().GetString(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return s } @@ -309,7 +309,7 @@ func GetFlagString(cmd *cobra.Command, flag string) string { func GetFlagStringSlice(cmd *cobra.Command, flag string) []string { s, err := cmd.Flags().GetStringSlice(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return s } @@ -318,7 +318,7 @@ func GetFlagStringSlice(cmd *cobra.Command, flag string) []string { func GetFlagStringArray(cmd *cobra.Command, flag string) []string { s, err := cmd.Flags().GetStringArray(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return s } @@ -326,7 +326,7 @@ func GetFlagStringArray(cmd *cobra.Command, flag string) []string { func GetFlagBool(cmd *cobra.Command, flag string) bool { b, err := cmd.Flags().GetBool(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return b } @@ -335,7 +335,7 @@ func GetFlagBool(cmd *cobra.Command, flag string) bool { func GetFlagInt(cmd *cobra.Command, flag string) int { i, err := cmd.Flags().GetInt(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return i } @@ -344,7 +344,7 @@ func GetFlagInt(cmd *cobra.Command, flag string) int { func GetFlagInt32(cmd *cobra.Command, flag string) int32 { i, err := cmd.Flags().GetInt32(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return i } @@ -353,7 +353,7 @@ func GetFlagInt32(cmd *cobra.Command, flag string) int32 { func GetFlagInt64(cmd *cobra.Command, flag string) int64 { i, err := cmd.Flags().GetInt64(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return i } @@ -361,7 +361,7 @@ func GetFlagInt64(cmd *cobra.Command, flag string) int64 { func GetFlagDuration(cmd *cobra.Command, flag string) time.Duration { d, err := cmd.Flags().GetDuration(flag) if err != nil { - glog.Fatalf("error accessing flag %s for command %s: %v", 
flag, cmd.Name(), err) + klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err) } return d } diff --git a/pkg/kubectl/proxy/BUILD b/pkg/kubectl/proxy/BUILD index 7d836c711bfda..a272308651a30 100644 --- a/pkg/kubectl/proxy/BUILD +++ b/pkg/kubectl/proxy/BUILD @@ -26,7 +26,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/proxy:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/transport:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/proxy/proxy_server.go b/pkg/kubectl/proxy/proxy_server.go index 7a8fee0e59c32..85fe8fc8fd83c 100644 --- a/pkg/kubectl/proxy/proxy_server.go +++ b/pkg/kubectl/proxy/proxy_server.go @@ -26,11 +26,11 @@ import ( "strings" "time" - "github.com/golang/glog" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/proxy" "k8s.io/client-go/rest" "k8s.io/client-go/transport" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubectl/util" ) @@ -87,7 +87,7 @@ func MakeRegexpArray(str string) ([]*regexp.Regexp, error) { func MakeRegexpArrayOrDie(str string) []*regexp.Regexp { result, err := MakeRegexpArray(str) if err != nil { - glog.Fatalf("Error compiling re: %v", err) + klog.Fatalf("Error compiling re: %v", err) } return result } @@ -95,7 +95,7 @@ func MakeRegexpArrayOrDie(str string) []*regexp.Regexp { func matchesRegexp(str string, regexps []*regexp.Regexp) bool { for _, re := range regexps { if re.MatchString(str) { - glog.V(6).Infof("%v matched %s", str, re) + klog.V(6).Infof("%v matched %s", str, re) return true } } @@ -135,11 +135,11 @@ func extractHost(header string) (host string) { func (f *FilterServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) { host := extractHost(req.Host) if f.accept(req.Method, req.URL.Path, host) { - glog.V(3).Infof("Filter accepting %v %v %v", req.Method, req.URL.Path, host) + klog.V(3).Infof("Filter accepting %v %v %v", req.Method, req.URL.Path, host) f.delegate.ServeHTTP(rw, req) return } - glog.V(3).Infof("Filter rejecting %v %v %v", req.Method, req.URL.Path, host) + klog.V(3).Infof("Filter rejecting %v %v %v", req.Method, req.URL.Path, host) http.Error(rw, http.StatusText(http.StatusForbidden), http.StatusForbidden) } @@ -151,7 +151,7 @@ type Server struct { type responder struct{} func (r *responder) Error(w http.ResponseWriter, req *http.Request, err error) { - glog.Errorf("Error while proxying request: %v", err) + klog.Errorf("Error while proxying request: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) } diff --git a/pkg/kubectl/sorter.go b/pkg/kubectl/sorter.go index d2d33acf92b55..2e58f8c536439 100644 --- a/pkg/kubectl/sorter.go +++ b/pkg/kubectl/sorter.go @@ -22,7 +22,7 @@ import ( "reflect" "sort" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" @@ -272,12 +272,12 @@ func (r *RuntimeSort) Less(i, j int) bool { iValues, err = findJSONPathResults(parser, iObj) if err != nil { - glog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, r.field, err) + klog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, r.field, err) } jValues, err = findJSONPathResults(parser, jObj) if err != nil { - glog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, r.field, err) + klog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, r.field, err) } if len(iValues) == 0 || len(iValues[0]) == 0 { @@ -291,7 +291,7 @@ func (r 
*RuntimeSort) Less(i, j int) bool { less, err := isLess(iField, jField) if err != nil { - glog.Fatalf("Field %s in %T is an unsortable type: %s, err: %v", r.field, iObj, iField.Kind().String(), err) + klog.Fatalf("Field %s in %T is an unsortable type: %s, err: %v", r.field, iObj, iField.Kind().String(), err) } return less } @@ -324,7 +324,7 @@ func (t *TableSorter) Less(i, j int) bool { iValues := t.parsedRows[i] jValues := t.parsedRows[j] if len(iValues) == 0 || len(iValues[0]) == 0 || len(jValues) == 0 || len(jValues[0]) == 0 { - glog.Fatalf("couldn't find any field with path %q in the list of objects", t.field) + klog.Fatalf("couldn't find any field with path %q in the list of objects", t.field) } iField := iValues[0][0] @@ -332,7 +332,7 @@ func (t *TableSorter) Less(i, j int) bool { less, err := isLess(iField, jField) if err != nil { - glog.Fatalf("Field %s in %T is an unsortable type: %s, err: %v", t.field, t.parsedRows, iField.Kind().String(), err) + klog.Fatalf("Field %s in %T is an unsortable type: %s, err: %v", t.field, t.parsedRows, iField.Kind().String(), err) } return less } @@ -348,13 +348,13 @@ func NewTableSorter(table *metav1beta1.Table, field string) *TableSorter { parser := jsonpath.New("sorting").AllowMissingKeys(true) err := parser.Parse(field) if err != nil { - glog.Fatalf("sorting error: %v\n", err) + klog.Fatalf("sorting error: %v\n", err) } for i := range table.Rows { parsedRow, err := findJSONPathResults(parser, table.Rows[i].Object.Object) if err != nil { - glog.Fatalf("Failed to get values for %#v using %s (%#v)", parsedRow, field, err) + klog.Fatalf("Failed to get values for %#v using %s (%#v)", parsedRow, field, err) } parsedRows = append(parsedRows, parsedRow) } diff --git a/pkg/kubectl/util/i18n/BUILD b/pkg/kubectl/util/i18n/BUILD index fd48ae70cb952..7cfcf07abed38 100644 --- a/pkg/kubectl/util/i18n/BUILD +++ b/pkg/kubectl/util/i18n/BUILD @@ -13,7 +13,7 @@ go_library( deps = [ "//pkg/kubectl/generated:go_default_library", "//vendor/github.com/chai2010/gettext-go/gettext:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/util/i18n/i18n.go b/pkg/kubectl/util/i18n/i18n.go index 69d742ca2434d..a4ff9ac036ded 100644 --- a/pkg/kubectl/util/i18n/i18n.go +++ b/pkg/kubectl/util/i18n/i18n.go @@ -27,7 +27,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl/generated" "github.com/chai2010/gettext-go/gettext" - "github.com/golang/glog" + "k8s.io/klog" ) var knownTranslations = map[string][]string{ @@ -61,12 +61,12 @@ func loadSystemLanguage() string { } if langStr == "" { - glog.V(3).Infof("Couldn't find the LC_ALL, LC_MESSAGES or LANG environment variables, defaulting to en_US") + klog.V(3).Infof("Couldn't find the LC_ALL, LC_MESSAGES or LANG environment variables, defaulting to en_US") return "default" } pieces := strings.Split(langStr, ".") if len(pieces) != 2 { - glog.V(3).Infof("Unexpected system language (%s), defaulting to en_US", langStr) + klog.V(3).Infof("Unexpected system language (%s), defaulting to en_US", langStr) return "default" } return pieces[0] @@ -83,7 +83,7 @@ func findLanguage(root string, getLanguageFn func() string) string { } } } - glog.V(3).Infof("Couldn't find translations for %s, using default", langStr) + klog.V(3).Infof("Couldn't find translations for %s, using default", langStr) return "default" } @@ -101,7 +101,7 @@ func LoadTranslations(root string, getLanguageFn func() string) error { fmt.Sprintf("%s/%s/LC_MESSAGES/k8s.mo", root, langStr), } - 
glog.V(3).Infof("Setting language to %s", langStr) + klog.V(3).Infof("Setting language to %s", langStr) // TODO: list the directory and load all files. buf := new(bytes.Buffer) w := zip.NewWriter(buf) diff --git a/pkg/kubectl/util/logs/BUILD b/pkg/kubectl/util/logs/BUILD index 4a896ce16b6a4..9813e0015db8b 100644 --- a/pkg/kubectl/util/logs/BUILD +++ b/pkg/kubectl/util/logs/BUILD @@ -11,8 +11,8 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubectl/util/logs", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubectl/util/logs/logs.go b/pkg/kubectl/util/logs/logs.go index 2b33372b5ab3e..200416453e1a1 100644 --- a/pkg/kubectl/util/logs/logs.go +++ b/pkg/kubectl/util/logs/logs.go @@ -21,41 +21,42 @@ import ( "log" "time" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" ) var logFlushFreq = pflag.Duration("log-flush-frequency", 5*time.Second, "Maximum number of seconds between log flushes") // TODO(thockin): This is temporary until we agree on log dirs and put those into each cmd. func init() { + klog.InitFlags(flag.CommandLine) flag.Set("logtostderr", "true") } -// GlogWriter serves as a bridge between the standard log package and the glog package. -type GlogWriter struct{} +// KlogWriter serves as a bridge between the standard log package and the glog package. +type KlogWriter struct{} // Write implements the io.Writer interface. -func (writer GlogWriter) Write(data []byte) (n int, err error) { - glog.InfoDepth(1, string(data)) +func (writer KlogWriter) Write(data []byte) (n int, err error) { + klog.InfoDepth(1, string(data)) return len(data), nil } // InitLogs initializes logs the way we want for kubernetes. func InitLogs() { - log.SetOutput(GlogWriter{}) + log.SetOutput(KlogWriter{}) log.SetFlags(0) // The default glog flush interval is 5 seconds. - go wait.Until(glog.Flush, *logFlushFreq, wait.NeverStop) + go wait.Until(klog.Flush, *logFlushFreq, wait.NeverStop) } // FlushLogs flushes logs immediately. func FlushLogs() { - glog.Flush() + klog.Flush() } -// NewLogger creates a new log.Logger which sends logs to glog.Info. +// NewLogger creates a new log.Logger which sends logs to klog.Info. 
func NewLogger(prefix string) *log.Logger { - return log.New(GlogWriter{}, prefix, 0) + return log.New(KlogWriter{}, prefix, 0) } diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index b70e9933aaa07..290ef529ed721 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -143,11 +143,11 @@ go_library( "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", "//third_party/forked/golang/expansion:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", "//vendor/github.com/google/cadvisor/events:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/kubelet/cadvisor/BUILD b/pkg/kubelet/cadvisor/BUILD index 7199ed3dfd719..fb3f66a1ce1ff 100644 --- a/pkg/kubelet/cadvisor/BUILD +++ b/pkg/kubelet/cadvisor/BUILD @@ -31,13 +31,13 @@ go_library( "//vendor/github.com/google/cadvisor/info/v2:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/cache/memory:go_default_library", "//vendor/github.com/google/cadvisor/container:go_default_library", "//vendor/github.com/google/cadvisor/fs:go_default_library", "//vendor/github.com/google/cadvisor/manager:go_default_library", "//vendor/github.com/google/cadvisor/metrics:go_default_library", "//vendor/github.com/google/cadvisor/utils/sysfs:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ "//pkg/kubelet/winstats:go_default_library", diff --git a/pkg/kubelet/cadvisor/cadvisor_linux.go b/pkg/kubelet/cadvisor/cadvisor_linux.go index f4a3e8864dfbc..d15907fe86948 100644 --- a/pkg/kubelet/cadvisor/cadvisor_linux.go +++ b/pkg/kubelet/cadvisor/cadvisor_linux.go @@ -26,7 +26,6 @@ import ( "path" "time" - "github.com/golang/glog" "github.com/google/cadvisor/cache/memory" cadvisormetrics "github.com/google/cadvisor/container" "github.com/google/cadvisor/events" @@ -35,6 +34,7 @@ import ( "github.com/google/cadvisor/manager" "github.com/google/cadvisor/metrics" "github.com/google/cadvisor/utils/sysfs" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -67,7 +67,7 @@ func init() { f.DefValue = defaultValue f.Value.Set(defaultValue) } else { - glog.Errorf("Expected cAdvisor flag %q not found", name) + klog.Errorf("Expected cAdvisor flag %q not found", name) } } } @@ -199,7 +199,7 @@ func (cc *cadvisorClient) getFsInfo(label string) (cadvisorapiv2.FsInfo, error) } // TODO(vmarmol): Handle this better when a label has more than one image filesystem. if len(res) > 1 { - glog.Warningf("More than one filesystem labeled %q: %#v. Only using the first one", label, res) + klog.Warningf("More than one filesystem labeled %q: %#v. 
Only using the first one", label, res) } return res[0], nil diff --git a/pkg/kubelet/certificate/BUILD b/pkg/kubelet/certificate/BUILD index fe65998402853..5c85202a4b777 100644 --- a/pkg/kubelet/certificate/BUILD +++ b/pkg/kubelet/certificate/BUILD @@ -26,8 +26,8 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/util/certificate:go_default_library", "//staging/src/k8s.io/client-go/util/connrotation:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/certificate/bootstrap/BUILD b/pkg/kubelet/certificate/bootstrap/BUILD index b0f21d503013c..0e0cdf77aaa6d 100644 --- a/pkg/kubelet/certificate/bootstrap/BUILD +++ b/pkg/kubelet/certificate/bootstrap/BUILD @@ -34,7 +34,7 @@ go_library( "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/client-go/util/certificate:go_default_library", "//staging/src/k8s.io/client-go/util/certificate/csr:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/certificate/bootstrap/bootstrap.go b/pkg/kubelet/certificate/bootstrap/bootstrap.go index e8e87dcdc0fa4..cf8f4265be248 100644 --- a/pkg/kubelet/certificate/bootstrap/bootstrap.go +++ b/pkg/kubelet/certificate/bootstrap/bootstrap.go @@ -24,7 +24,7 @@ import ( "path/filepath" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/types" @@ -54,11 +54,11 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, return err } if ok { - glog.V(2).Infof("Kubeconfig %s exists and is valid, skipping bootstrap", kubeconfigPath) + klog.V(2).Infof("Kubeconfig %s exists and is valid, skipping bootstrap", kubeconfigPath) return nil } - glog.V(2).Info("Using bootstrap kubeconfig to generate TLS client cert, key and kubeconfig file") + klog.V(2).Info("Using bootstrap kubeconfig to generate TLS client cert, key and kubeconfig file") bootstrapClientConfig, err := loadRESTClientConfig(bootstrapPath) if err != nil { @@ -89,7 +89,7 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, // managed by the store. privKeyPath := filepath.Join(certDir, tmpPrivateKeyFile) if !verifyKeyData(keyData) { - glog.V(2).Infof("No valid private key and/or certificate found, reusing existing private key or creating a new one") + klog.V(2).Infof("No valid private key and/or certificate found, reusing existing private key or creating a new one") // Note: always call LoadOrGenerateKeyFile so that private key is // reused on next startup if CSR request fails. 
keyData, _, err = certutil.LoadOrGenerateKeyFile(privKeyPath) @@ -99,7 +99,7 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, } if err := waitForServer(*bootstrapClientConfig, 1*time.Minute); err != nil { - glog.Warningf("Error waiting for apiserver to come up: %v", err) + klog.Warningf("Error waiting for apiserver to come up: %v", err) } certData, err := csr.RequestNodeCertificate(bootstrapClient.CertificateSigningRequests(), keyData, nodeName) @@ -110,7 +110,7 @@ func LoadClientCert(kubeconfigPath string, bootstrapPath string, certDir string, return err } if err := os.Remove(privKeyPath); err != nil && !os.IsNotExist(err) { - glog.V(2).Infof("failed cleaning up private key file %q: %v", privKeyPath, err) + klog.V(2).Infof("failed cleaning up private key file %q: %v", privKeyPath, err) } pemPath := store.CurrentPath() @@ -232,7 +232,7 @@ func waitForServer(cfg restclient.Config, deadline time.Duration) error { var connected bool wait.JitterUntil(func() { if _, err := cli.Get().AbsPath("/healthz").Do().Raw(); err != nil { - glog.Infof("Failed to connect to apiserver: %v", err) + klog.Infof("Failed to connect to apiserver: %v", err) return } cancel() diff --git a/pkg/kubelet/certificate/transport.go b/pkg/kubelet/certificate/transport.go index 436bb8b4c9688..442f4a8a379cf 100644 --- a/pkg/kubelet/certificate/transport.go +++ b/pkg/kubelet/certificate/transport.go @@ -23,7 +23,7 @@ import ( "net/http" "time" - "github.com/golang/glog" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" @@ -105,18 +105,18 @@ func addCertRotation(stopCh <-chan struct{}, period time.Duration, clientConfig // the certificate has been deleted from disk or is otherwise corrupt if now.After(lastCertAvailable.Add(exitAfter)) { if clientCertificateManager.ServerHealthy() { - glog.Fatalf("It has been %s since a valid client cert was found and the server is responsive, exiting.", exitAfter) + klog.Fatalf("It has been %s since a valid client cert was found and the server is responsive, exiting.", exitAfter) } else { - glog.Errorf("It has been %s since a valid client cert was found, but the server is not responsive. A restart may be necessary to retrieve new initial credentials.", exitAfter) + klog.Errorf("It has been %s since a valid client cert was found, but the server is not responsive. A restart may be necessary to retrieve new initial credentials.", exitAfter) } } } else { // the certificate is expired if now.After(curr.Leaf.NotAfter) { if clientCertificateManager.ServerHealthy() { - glog.Fatalf("The currently active client certificate has expired and the server is responsive, exiting.") + klog.Fatalf("The currently active client certificate has expired and the server is responsive, exiting.") } else { - glog.Errorf("The currently active client certificate has expired, but the server is not responsive. A restart may be necessary to retrieve new initial credentials.") + klog.Errorf("The currently active client certificate has expired, but the server is not responsive. A restart may be necessary to retrieve new initial credentials.") } } lastCertAvailable = now @@ -129,7 +129,7 @@ func addCertRotation(stopCh <-chan struct{}, period time.Duration, clientConfig } lastCert = curr - glog.Infof("certificate rotation detected, shutting down client connections to start using new credentials") + klog.Infof("certificate rotation detected, shutting down client connections to start using new credentials") // The cert has been rotated. 
Close all existing connections to force the client // to reperform its TLS handshake with new cert. // diff --git a/pkg/kubelet/checkpoint/BUILD b/pkg/kubelet/checkpoint/BUILD index a57fff4cdbda8..d8d1fe7fd30df 100644 --- a/pkg/kubelet/checkpoint/BUILD +++ b/pkg/kubelet/checkpoint/BUILD @@ -10,7 +10,7 @@ go_library( "//pkg/kubelet/checkpointmanager:go_default_library", "//pkg/kubelet/checkpointmanager/checksum:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/checkpoint/checkpoint.go b/pkg/kubelet/checkpoint/checkpoint.go index bf84178dce8c5..f1fa9bdb7ae6e 100644 --- a/pkg/kubelet/checkpoint/checkpoint.go +++ b/pkg/kubelet/checkpoint/checkpoint.go @@ -20,7 +20,7 @@ import ( "encoding/json" "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/apis/core" @@ -93,14 +93,14 @@ func LoadPods(cpm checkpointmanager.CheckpointManager) ([]*v1.Pod, error) { checkpointKeys := []string{} checkpointKeys, err = cpm.ListCheckpoints() if err != nil { - glog.Errorf("Failed to list checkpoints: %v", err) + klog.Errorf("Failed to list checkpoints: %v", err) } for _, key := range checkpointKeys { checkpoint := NewPodCheckpoint(nil) err := cpm.GetCheckpoint(key, checkpoint) if err != nil { - glog.Errorf("Failed to retrieve checkpoint for pod %q: %v", key, err) + klog.Errorf("Failed to retrieve checkpoint for pod %q: %v", key, err) continue } pods = append(pods, checkpoint.GetPod()) diff --git a/pkg/kubelet/cloudresource/BUILD b/pkg/kubelet/cloudresource/BUILD index 2390abeddfc9c..1ab5c46dec238 100644 --- a/pkg/kubelet/cloudresource/BUILD +++ b/pkg/kubelet/cloudresource/BUILD @@ -10,7 +10,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/cloudresource/cloud_request_manager.go b/pkg/kubelet/cloudresource/cloud_request_manager.go index 8435c00a61eba..dbc05f094dffc 100644 --- a/pkg/kubelet/cloudresource/cloud_request_manager.go +++ b/pkg/kubelet/cloudresource/cloud_request_manager.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" cloudprovider "k8s.io/cloud-provider" - "github.com/golang/glog" + "k8s.io/klog" ) var nodeAddressesRetryPeriod = 5 * time.Second @@ -85,7 +85,7 @@ func (manager *cloudResourceSyncManager) NodeAddresses() ([]v1.NodeAddress, erro for { nodeAddresses, err := manager.getNodeAddressSafe() if len(nodeAddresses) == 0 && err == nil { - glog.V(5).Infof("Waiting for %v for cloud provider to provide node addresses", nodeAddressesRetryPeriod) + klog.V(5).Infof("Waiting for %v for cloud provider to provide node addresses", nodeAddressesRetryPeriod) time.Sleep(nodeAddressesRetryPeriod) continue } @@ -94,7 +94,7 @@ func (manager *cloudResourceSyncManager) NodeAddresses() ([]v1.NodeAddress, erro } func (manager *cloudResourceSyncManager) collectNodeAddresses(ctx context.Context, nodeName types.NodeName) { - glog.V(5).Infof("Requesting node addresses from cloud provider for node %q", nodeName) + klog.V(5).Infof("Requesting node addresses from cloud provider for node %q", nodeName) instances, ok := manager.cloud.Instances() if !ok { @@ -110,10 +110,10 @@ func (manager *cloudResourceSyncManager) 
collectNodeAddresses(ctx context.Contex nodeAddresses, err := instances.NodeAddresses(ctx, nodeName) if err != nil { manager.setNodeAddressSafe(nil, fmt.Errorf("failed to get node address from cloud provider: %v", err)) - glog.V(2).Infof("Node addresses from cloud provider for node %q not collected", nodeName) + klog.V(2).Infof("Node addresses from cloud provider for node %q not collected", nodeName) } else { manager.setNodeAddressSafe(nodeAddresses, nil) - glog.V(5).Infof("Node addresses from cloud provider for node %q collected", nodeName) + klog.V(5).Infof("Node addresses from cloud provider for node %q collected", nodeName) } } diff --git a/pkg/kubelet/cm/BUILD b/pkg/kubelet/cm/BUILD index dca92c084e766..12e67142dc8c0 100644 --- a/pkg/kubelet/cm/BUILD +++ b/pkg/kubelet/cm/BUILD @@ -40,7 +40,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ "//pkg/kubelet/cadvisor:go_default_library", diff --git a/pkg/kubelet/cm/cgroup_manager_linux.go b/pkg/kubelet/cm/cgroup_manager_linux.go index a66b9cc58901b..f1c81369367fb 100644 --- a/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/pkg/kubelet/cm/cgroup_manager_linux.go @@ -25,11 +25,11 @@ import ( "time" units "github.com/docker/go-units" - "github.com/golang/glog" libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups" cgroupfs "github.com/opencontainers/runc/libcontainer/cgroups/fs" cgroupsystemd "github.com/opencontainers/runc/libcontainer/cgroups/systemd" libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -271,7 +271,7 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool { } if len(missingPaths) > 0 { - glog.V(4).Infof("The Cgroup %v has some missing paths: %v", name, missingPaths) + klog.V(4).Infof("The Cgroup %v has some missing paths: %v", name, missingPaths) return false } @@ -350,7 +350,7 @@ func setSupportedSubsystems(cgroupConfig *libcontainerconfigs.Cgroup) error { return fmt.Errorf("Failed to find subsystem mount for required subsystem: %v", sys.Name()) } // the cgroup is not mounted, but its not required so continue... 
- glog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name()) + klog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name()) continue } if err := sys.Set(cgroupConfig.Paths[sys.Name()], cgroupConfig); err != nil { @@ -512,7 +512,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int { // WalkFunc which is called for each file and directory in the pod cgroup dir visitor := func(path string, info os.FileInfo, err error) error { if err != nil { - glog.V(4).Infof("cgroup manager encountered error scanning cgroup path %q: %v", path, err) + klog.V(4).Infof("cgroup manager encountered error scanning cgroup path %q: %v", path, err) return filepath.SkipDir } if !info.IsDir() { @@ -520,7 +520,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int { } pids, err = getCgroupProcs(path) if err != nil { - glog.V(4).Infof("cgroup manager encountered error getting procs for cgroup path %q: %v", path, err) + klog.V(4).Infof("cgroup manager encountered error getting procs for cgroup path %q: %v", path, err) return filepath.SkipDir } pidsToKill.Insert(pids...) @@ -530,7 +530,7 @@ func (m *cgroupManagerImpl) Pids(name CgroupName) []int { // container cgroups haven't been GCed yet. Get attached processes to // all such unwanted containers under the pod cgroup if err = filepath.Walk(dir, visitor); err != nil { - glog.V(4).Infof("cgroup manager encountered error scanning pids for directory: %q: %v", dir, err) + klog.V(4).Infof("cgroup manager encountered error scanning pids for directory: %q: %v", dir, err) } } return pidsToKill.List() @@ -558,7 +558,7 @@ func getStatsSupportedSubsystems(cgroupPaths map[string]string) (*libcontainercg return nil, fmt.Errorf("Failed to find subsystem mount for required subsystem: %v", sys.Name()) } // the cgroup is not mounted, but its not required so continue... - glog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name()) + klog.V(6).Infof("Unable to find subsystem mount for optional subsystem: %v", sys.Name()) continue } if err := sys.GetStats(cgroupPaths[sys.Name()], stats); err != nil { diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 96506163e4dae..8253df72bd15e 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -29,10 +29,10 @@ import ( "sync" "time" - "github.com/golang/glog" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -180,11 +180,11 @@ func validateSystemRequirements(mountUtil mount.Interface) (features, error) { // CPU cgroup is required and so it expected to be mounted at this point. 
periodExists, err := utilfile.FileExists(path.Join(cpuMountPoint, "cpu.cfs_period_us")) if err != nil { - glog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err) + klog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err) } quotaExists, err := utilfile.FileExists(path.Join(cpuMountPoint, "cpu.cfs_quota_us")) if err != nil { - glog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err) + klog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err) } if quotaExists && periodExists { f.cpuHardcapping = true @@ -244,12 +244,12 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I if !cgroupManager.Exists(cgroupRoot) { return nil, fmt.Errorf("invalid configuration: cgroup-root %q doesn't exist: %v", cgroupRoot, err) } - glog.Infof("container manager verified user specified cgroup-root exists: %v", cgroupRoot) + klog.Infof("container manager verified user specified cgroup-root exists: %v", cgroupRoot) // Include the top level cgroup for enforcing node allocatable into cgroup-root. // This way, all sub modules can avoid having to understand the concept of node allocatable. cgroupRoot = NewCgroupName(cgroupRoot, defaultNodeAllocatableCgroupName) } - glog.Infof("Creating Container Manager object based on Node Config: %+v", nodeConfig) + klog.Infof("Creating Container Manager object based on Node Config: %+v", nodeConfig) qosContainerManager, err := NewQOSContainerManager(subsystems, cgroupRoot, nodeConfig, cgroupManager) if err != nil { @@ -268,7 +268,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I qosContainerManager: qosContainerManager, } - glog.Infof("Creating device plugin manager: %t", devicePluginEnabled) + klog.Infof("Creating device plugin manager: %t", devicePluginEnabled) if devicePluginEnabled { cm.deviceManager, err = devicemanager.NewManagerImpl() } else { @@ -288,7 +288,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I nodeConfig.KubeletRootDir, ) if err != nil { - glog.Errorf("failed to initialize cpu manager: %v", err) + klog.Errorf("failed to initialize cpu manager: %v", err) return nil, err } } @@ -370,9 +370,9 @@ func setupKernelTunables(option KernelTunableBehavior) error { case KernelTunableError: errList = append(errList, fmt.Errorf("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)) case KernelTunableWarn: - glog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val) + klog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val) case KernelTunableModify: - glog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val) + klog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val) err = sysctl.SetSysctl(flag, expectedValue) if err != nil { errList = append(errList, err) @@ -424,13 +424,13 @@ func (cm *containerManagerImpl) setupNode(activePods ActivePodsFunc) error { // the cgroup for docker and serve stats for the runtime. // TODO(#27097): Fix this after NodeSpec is clearly defined. 
cm.periodicTasks = append(cm.periodicTasks, func() { - glog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration") + klog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration") cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile) if err != nil { - glog.Error(err) + klog.Error(err) return } - glog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont) + klog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont) cm.Lock() defer cm.Unlock() cm.RuntimeCgroupsName = cont @@ -467,12 +467,12 @@ func (cm *containerManagerImpl) setupNode(activePods ActivePodsFunc) error { } else { cm.periodicTasks = append(cm.periodicTasks, func() { if err := ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, nil); err != nil { - glog.Error(err) + klog.Error(err) return } cont, err := getContainer(os.Getpid()) if err != nil { - glog.Errorf("failed to find cgroups of kubelet - %v", err) + klog.Errorf("failed to find cgroups of kubelet - %v", err) return } cm.Lock() @@ -579,7 +579,7 @@ func (cm *containerManagerImpl) Start(node *v1.Node, for _, cont := range cm.systemContainers { if cont.ensureStateFunc != nil { if err := cont.ensureStateFunc(cont.manager); err != nil { - glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err) + klog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err) } } } @@ -652,12 +652,12 @@ func isProcessRunningInHost(pid int) (bool, error) { if err != nil { return false, fmt.Errorf("failed to find pid namespace of init process") } - glog.V(10).Infof("init pid ns is %q", initPidNs) + klog.V(10).Infof("init pid ns is %q", initPidNs) processPidNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", pid)) if err != nil { return false, fmt.Errorf("failed to find pid namespace of process %q", pid) } - glog.V(10).Infof("Pid %d pid ns is %q", pid, processPidNs) + klog.V(10).Infof("Pid %d pid ns is %q", pid, processPidNs) return initPidNs == processPidNs, nil } @@ -699,7 +699,7 @@ func getPidsForProcess(name, pidFile string) ([]int, error) { // Return error from getPidFromPidFile since that should have worked // and is the real source of the problem. - glog.V(4).Infof("unable to get pid from %s: %v", pidFile, err) + klog.V(4).Infof("unable to get pid from %s: %v", pidFile, err) return []int{}, err } @@ -737,7 +737,7 @@ func ensureProcessInContainerWithOOMScore(pid int, oomScoreAdj int, manager *fs. return err } else if !runningInHost { // Process is running inside a container. Don't touch that. - glog.V(2).Infof("pid %d is not running in the host namespaces", pid) + klog.V(2).Infof("pid %d is not running in the host namespaces", pid) return nil } @@ -758,9 +758,9 @@ func ensureProcessInContainerWithOOMScore(pid int, oomScoreAdj int, manager *fs. 
// Also apply oom-score-adj to processes oomAdjuster := oom.NewOOMAdjuster() - glog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid) + klog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid) if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil { - glog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err) + klog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err) errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d: %v", oomScoreAdj, pid, err)) } return utilerrors.NewAggregate(errs) @@ -800,10 +800,10 @@ func getContainer(pid int) (string, error) { // in addition, you would not get memory or cpu accounting for the runtime unless accounting was enabled on its unit (or globally). if systemd, found := cgs["name=systemd"]; found { if systemd != cpu { - glog.Warningf("CPUAccounting not enabled for pid: %d", pid) + klog.Warningf("CPUAccounting not enabled for pid: %d", pid) } if systemd != memory { - glog.Warningf("MemoryAccounting not enabled for pid: %d", pid) + klog.Warningf("MemoryAccounting not enabled for pid: %d", pid) } return systemd, nil } @@ -841,14 +841,14 @@ func ensureSystemCgroups(rootCgroupPath string, manager *fs.Manager) error { pids = append(pids, pid) } - glog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids)) + klog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids)) // Check if we have moved all the non-kernel PIDs. if len(pids) == 0 { break } - glog.Infof("Moving non-kernel processes: %v", pids) + klog.Infof("Moving non-kernel processes: %v", pids) for _, pid := range pids { err := manager.Apply(pid) if err != nil { diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go index 8f948c64d2af3..47fdd617bf074 100644 --- a/pkg/kubelet/cm/container_manager_stub.go +++ b/pkg/kubelet/cm/container_manager_stub.go @@ -17,8 +17,8 @@ limitations under the License. 
package cm import ( - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/resource" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" @@ -36,7 +36,7 @@ type containerManagerStub struct{} var _ ContainerManager = &containerManagerStub{} func (cm *containerManagerStub) Start(_ *v1.Node, _ ActivePodsFunc, _ config.SourcesReady, _ status.PodStatusProvider, _ internalapi.RuntimeService) error { - glog.V(2).Infof("Starting stub container manager") + klog.V(2).Infof("Starting stub container manager") return nil } diff --git a/pkg/kubelet/cm/container_manager_windows.go b/pkg/kubelet/cm/container_manager_windows.go index aadc51d0f4637..409875eb33453 100644 --- a/pkg/kubelet/cm/container_manager_windows.go +++ b/pkg/kubelet/cm/container_manager_windows.go @@ -24,11 +24,11 @@ package cm import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" + "k8s.io/klog" kubefeatures "k8s.io/kubernetes/pkg/features" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" "k8s.io/kubernetes/pkg/kubelet/cadvisor" @@ -56,7 +56,7 @@ func (cm *containerManagerImpl) Start(node *v1.Node, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, runtimeService internalapi.RuntimeService) error { - glog.V(2).Infof("Starting Windows container manager") + klog.V(2).Infof("Starting Windows container manager") if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LocalStorageCapacityIsolation) { rootfs, err := cm.cadvisorInterface.RootFsInfo() diff --git a/pkg/kubelet/cm/cpumanager/BUILD b/pkg/kubelet/cm/cpumanager/BUILD index 4b3baa3184ec7..4e597c9b3882e 100644 --- a/pkg/kubelet/cm/cpumanager/BUILD +++ b/pkg/kubelet/cm/cpumanager/BUILD @@ -22,8 +22,8 @@ go_library( "//pkg/kubelet/status:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/cm/cpumanager/cpu_assignment.go b/pkg/kubelet/cm/cpumanager/cpu_assignment.go index 3f5f33bafedab..be6babab14c50 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_assignment.go +++ b/pkg/kubelet/cm/cpumanager/cpu_assignment.go @@ -20,7 +20,7 @@ import ( "fmt" "sort" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" @@ -160,7 +160,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num // least a socket's-worth of CPUs. for _, s := range acc.freeSockets() { if acc.needs(acc.topo.CPUsPerSocket()) { - glog.V(4).Infof("[cpumanager] takeByTopology: claiming socket [%d]", s) + klog.V(4).Infof("[cpumanager] takeByTopology: claiming socket [%d]", s) acc.take(acc.details.CPUsInSocket(s)) if acc.isSatisfied() { return acc.result, nil @@ -172,7 +172,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num // a core's-worth of CPUs. 
for _, c := range acc.freeCores() { if acc.needs(acc.topo.CPUsPerCore()) { - glog.V(4).Infof("[cpumanager] takeByTopology: claiming core [%d]", c) + klog.V(4).Infof("[cpumanager] takeByTopology: claiming core [%d]", c) acc.take(acc.details.CPUsInCore(c)) if acc.isSatisfied() { return acc.result, nil @@ -184,7 +184,7 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num // on the same sockets as the whole cores we have already taken in this // allocation. for _, c := range acc.freeCPUs() { - glog.V(4).Infof("[cpumanager] takeByTopology: claiming CPU [%d]", c) + klog.V(4).Infof("[cpumanager] takeByTopology: claiming CPU [%d]", c) if acc.needs(1) { acc.take(cpuset.NewCPUSet(c)) } diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go index 0754a4da09255..4ccddd554da5f 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -22,10 +22,10 @@ import ( "sync" "time" - "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" @@ -110,7 +110,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo if err != nil { return nil, err } - glog.Infof("[cpumanager] detected CPU topology: %v", topo) + klog.Infof("[cpumanager] detected CPU topology: %v", topo) reservedCPUs, ok := nodeAllocatableReservation[v1.ResourceCPU] if !ok { // The static policy cannot initialize without this information. @@ -132,7 +132,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo policy = NewStaticPolicy(topo, numReservedCPUs) default: - glog.Errorf("[cpumanager] Unknown policy \"%s\", falling back to default policy \"%s\"", cpuPolicyName, PolicyNone) + klog.Errorf("[cpumanager] Unknown policy \"%s\", falling back to default policy \"%s\"", cpuPolicyName, PolicyNone) policy = NewNonePolicy() } @@ -152,8 +152,8 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo } func (m *manager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) { - glog.Infof("[cpumanager] starting with %s policy", m.policy.Name()) - glog.Infof("[cpumanager] reconciling every %v", m.reconcilePeriod) + klog.Infof("[cpumanager] starting with %s policy", m.policy.Name()) + klog.Infof("[cpumanager] reconciling every %v", m.reconcilePeriod) m.activePods = activePods m.podStatusProvider = podStatusProvider @@ -170,7 +170,7 @@ func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) e m.Lock() err := m.policy.AddContainer(m.state, p, c, containerID) if err != nil { - glog.Errorf("[cpumanager] AddContainer error: %v", err) + klog.Errorf("[cpumanager] AddContainer error: %v", err) m.Unlock() return err } @@ -180,17 +180,17 @@ func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) e if !cpus.IsEmpty() { err = m.updateContainerCPUSet(containerID, cpus) if err != nil { - glog.Errorf("[cpumanager] AddContainer error: %v", err) + klog.Errorf("[cpumanager] AddContainer error: %v", err) m.Lock() err := m.policy.RemoveContainer(m.state, containerID) if err != nil { - glog.Errorf("[cpumanager] AddContainer rollback state error: %v", err) + klog.Errorf("[cpumanager] AddContainer rollback state error: %v", err) } m.Unlock() } return err } - 
glog.V(5).Infof("[cpumanager] update container resources is skipped due to cpu set is empty") + klog.V(5).Infof("[cpumanager] update container resources is skipped due to cpu set is empty") return nil } @@ -200,7 +200,7 @@ func (m *manager) RemoveContainer(containerID string) error { err := m.policy.RemoveContainer(m.state, containerID) if err != nil { - glog.Errorf("[cpumanager] RemoveContainer error: %v", err) + klog.Errorf("[cpumanager] RemoveContainer error: %v", err) return err } return nil @@ -226,14 +226,14 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec for _, container := range allContainers { status, ok := m.podStatusProvider.GetPodStatus(pod.UID) if !ok { - glog.Warningf("[cpumanager] reconcileState: skipping pod; status not found (pod: %s, container: %s)", pod.Name, container.Name) + klog.Warningf("[cpumanager] reconcileState: skipping pod; status not found (pod: %s, container: %s)", pod.Name, container.Name) failure = append(failure, reconciledContainer{pod.Name, container.Name, ""}) break } containerID, err := findContainerIDByName(&status, container.Name) if err != nil { - glog.Warningf("[cpumanager] reconcileState: skipping container; ID not found in status (pod: %s, container: %s, error: %v)", pod.Name, container.Name, err) + klog.Warningf("[cpumanager] reconcileState: skipping container; ID not found in status (pod: %s, container: %s, error: %v)", pod.Name, container.Name, err) failure = append(failure, reconciledContainer{pod.Name, container.Name, ""}) continue } @@ -244,10 +244,10 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec // - container has been removed from state by RemoveContainer call (DeletionTimestamp is set) if _, ok := m.state.GetCPUSet(containerID); !ok { if status.Phase == v1.PodRunning && pod.DeletionTimestamp == nil { - glog.V(4).Infof("[cpumanager] reconcileState: container is not present in state - trying to add (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) + klog.V(4).Infof("[cpumanager] reconcileState: container is not present in state - trying to add (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) err := m.AddContainer(pod, &container, containerID) if err != nil { - glog.Errorf("[cpumanager] reconcileState: failed to add container (pod: %s, container: %s, container id: %s, error: %v)", pod.Name, container.Name, containerID, err) + klog.Errorf("[cpumanager] reconcileState: failed to add container (pod: %s, container: %s, container id: %s, error: %v)", pod.Name, container.Name, containerID, err) failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID}) continue } @@ -261,15 +261,15 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec cset := m.state.GetCPUSetOrDefault(containerID) if cset.IsEmpty() { // NOTE: This should not happen outside of tests. 
- glog.Infof("[cpumanager] reconcileState: skipping container; assigned cpuset is empty (pod: %s, container: %s)", pod.Name, container.Name) + klog.Infof("[cpumanager] reconcileState: skipping container; assigned cpuset is empty (pod: %s, container: %s)", pod.Name, container.Name) failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID}) continue } - glog.V(4).Infof("[cpumanager] reconcileState: updating container (pod: %s, container: %s, container id: %s, cpuset: \"%v\")", pod.Name, container.Name, containerID, cset) + klog.V(4).Infof("[cpumanager] reconcileState: updating container (pod: %s, container: %s, container id: %s, cpuset: \"%v\")", pod.Name, container.Name, containerID, cset) err = m.updateContainerCPUSet(containerID, cset) if err != nil { - glog.Errorf("[cpumanager] reconcileState: failed to update container (pod: %s, container: %s, container id: %s, cpuset: \"%v\", error: %v)", pod.Name, container.Name, containerID, cset, err) + klog.Errorf("[cpumanager] reconcileState: failed to update container (pod: %s, container: %s, container id: %s, cpuset: \"%v\", error: %v)", pod.Name, container.Name, containerID, cset, err) failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID}) continue } diff --git a/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go b/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go index f4af5667ea684..8240bbd497d90 100644 --- a/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go +++ b/pkg/kubelet/cm/cpumanager/fake_cpu_manager.go @@ -17,8 +17,8 @@ limitations under the License. package cpumanager import ( - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" "k8s.io/kubernetes/pkg/kubelet/status" ) @@ -28,21 +28,21 @@ type fakeManager struct { } func (m *fakeManager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) { - glog.Info("[fake cpumanager] Start()") + klog.Info("[fake cpumanager] Start()") } func (m *fakeManager) Policy() Policy { - glog.Info("[fake cpumanager] Policy()") + klog.Info("[fake cpumanager] Policy()") return NewNonePolicy() } func (m *fakeManager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) error { - glog.Infof("[fake cpumanager] AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) + klog.Infof("[fake cpumanager] AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) return nil } func (m *fakeManager) RemoveContainer(containerID string) error { - glog.Infof("[fake cpumanager] RemoveContainer (container id: %s)", containerID) + klog.Infof("[fake cpumanager] RemoveContainer (container id: %s)", containerID) return nil } diff --git a/pkg/kubelet/cm/cpumanager/policy_none.go b/pkg/kubelet/cm/cpumanager/policy_none.go index 26e3335f73afd..294edc6bf31d7 100644 --- a/pkg/kubelet/cm/cpumanager/policy_none.go +++ b/pkg/kubelet/cm/cpumanager/policy_none.go @@ -17,8 +17,8 @@ limitations under the License. 
package cpumanager import ( - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" ) @@ -39,7 +39,7 @@ func (p *nonePolicy) Name() string { } func (p *nonePolicy) Start(s state.State) { - glog.Info("[cpumanager] none policy: Start") + klog.Info("[cpumanager] none policy: Start") } func (p *nonePolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error { diff --git a/pkg/kubelet/cm/cpumanager/policy_static.go b/pkg/kubelet/cm/cpumanager/policy_static.go index 651e5fe79c9ba..d72a44ad0e9cc 100644 --- a/pkg/kubelet/cm/cpumanager/policy_static.go +++ b/pkg/kubelet/cm/cpumanager/policy_static.go @@ -19,8 +19,8 @@ package cpumanager import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" @@ -94,7 +94,7 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int) Policy panic(fmt.Sprintf("[cpumanager] unable to reserve the required amount of CPUs (size of %s did not equal %d)", reserved, numReservedCPUs)) } - glog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved) + klog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved) return &staticPolicy{ topology: topology, @@ -108,7 +108,7 @@ func (p *staticPolicy) Name() string { func (p *staticPolicy) Start(s state.State) { if err := p.validateState(s); err != nil { - glog.Errorf("[cpumanager] static policy invalid state: %s\n", err.Error()) + klog.Errorf("[cpumanager] static policy invalid state: %s\n", err.Error()) panic("[cpumanager] - please drain node and remove policy state file") } } @@ -172,17 +172,17 @@ func (p *staticPolicy) assignableCPUs(s state.State) cpuset.CPUSet { func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Container, containerID string) error { if numCPUs := guaranteedCPUs(pod, container); numCPUs != 0 { - glog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) + klog.Infof("[cpumanager] static policy: AddContainer (pod: %s, container: %s, container id: %s)", pod.Name, container.Name, containerID) // container belongs in an exclusively allocated pool if _, ok := s.GetCPUSet(containerID); ok { - glog.Infof("[cpumanager] static policy: container already present in state, skipping (container: %s, container id: %s)", container.Name, containerID) + klog.Infof("[cpumanager] static policy: container already present in state, skipping (container: %s, container id: %s)", container.Name, containerID) return nil } cpuset, err := p.allocateCPUs(s, numCPUs) if err != nil { - glog.Errorf("[cpumanager] unable to allocate %d CPUs (container id: %s, error: %v)", numCPUs, containerID, err) + klog.Errorf("[cpumanager] unable to allocate %d CPUs (container id: %s, error: %v)", numCPUs, containerID, err) return err } s.SetCPUSet(containerID, cpuset) @@ -192,7 +192,7 @@ func (p *staticPolicy) AddContainer(s state.State, pod *v1.Pod, container *v1.Co } func (p *staticPolicy) RemoveContainer(s state.State, containerID string) error { - glog.Infof("[cpumanager] static policy: RemoveContainer (container id: %s)", containerID) + klog.Infof("[cpumanager] static policy: RemoveContainer (container id: %s)", containerID) if toRelease, ok 
:= s.GetCPUSet(containerID); ok { s.Delete(containerID) // Mutate the shared pool, adding released cpus. @@ -202,7 +202,7 @@ func (p *staticPolicy) RemoveContainer(s state.State, containerID string) error } func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int) (cpuset.CPUSet, error) { - glog.Infof("[cpumanager] allocateCpus: (numCPUs: %d)", numCPUs) + klog.Infof("[cpumanager] allocateCpus: (numCPUs: %d)", numCPUs) result, err := takeByTopology(p.topology, p.assignableCPUs(s), numCPUs) if err != nil { return cpuset.NewCPUSet(), err @@ -210,7 +210,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int) (cpuset.CPUSet, // Remove allocated CPUs from the shared CPUSet. s.SetDefaultCPUSet(s.GetDefaultCPUSet().Difference(result)) - glog.Infof("[cpumanager] allocateCPUs: returning \"%v\"", result) + klog.Infof("[cpumanager] allocateCPUs: returning \"%v\"", result) return result, nil } diff --git a/pkg/kubelet/cm/cpumanager/state/BUILD b/pkg/kubelet/cm/cpumanager/state/BUILD index d39211962e8dc..6a95d1af163e1 100644 --- a/pkg/kubelet/cm/cpumanager/state/BUILD +++ b/pkg/kubelet/cm/cpumanager/state/BUILD @@ -16,7 +16,7 @@ go_library( "//pkg/kubelet/checkpointmanager/checksum:go_default_library", "//pkg/kubelet/checkpointmanager/errors:go_default_library", "//pkg/kubelet/cm/cpuset:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go b/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go index 6d92573b86652..badafab43d81a 100644 --- a/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go +++ b/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go @@ -21,7 +21,7 @@ import ( "path" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" @@ -97,8 +97,8 @@ func (sc *stateCheckpoint) restoreState() error { sc.cache.SetDefaultCPUSet(tmpDefaultCPUSet) sc.cache.SetCPUAssignments(tmpAssignments) - glog.V(2).Info("[cpumanager] state checkpoint: restored state from checkpoint") - glog.V(2).Infof("[cpumanager] state checkpoint: defaultCPUSet: %s", tmpDefaultCPUSet.String()) + klog.V(2).Info("[cpumanager] state checkpoint: restored state from checkpoint") + klog.V(2).Infof("[cpumanager] state checkpoint: defaultCPUSet: %s", tmpDefaultCPUSet.String()) return nil } diff --git a/pkg/kubelet/cm/cpumanager/state/state_file.go b/pkg/kubelet/cm/cpumanager/state/state_file.go index 6c2353cf10f5d..90d16693dc3d3 100644 --- a/pkg/kubelet/cm/cpumanager/state/state_file.go +++ b/pkg/kubelet/cm/cpumanager/state/state_file.go @@ -19,8 +19,8 @@ package state import ( "encoding/json" "fmt" - "github.com/golang/glog" "io/ioutil" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" "os" "sync" @@ -79,7 +79,7 @@ func (sf *stateFile) tryRestoreState() error { // If the state file does not exist or has zero length, write a new file. 
if os.IsNotExist(err) || len(content) == 0 { sf.storeState() - glog.Infof("[cpumanager] state file: created new state file \"%s\"", sf.stateFilePath) + klog.Infof("[cpumanager] state file: created new state file \"%s\"", sf.stateFilePath) return nil } @@ -92,7 +92,7 @@ func (sf *stateFile) tryRestoreState() error { var readState stateFileData if err = json.Unmarshal(content, &readState); err != nil { - glog.Errorf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath) + klog.Errorf("[cpumanager] state file: could not unmarshal, corrupted state file - \"%s\"", sf.stateFilePath) return err } @@ -101,13 +101,13 @@ func (sf *stateFile) tryRestoreState() error { } if tmpDefaultCPUSet, err = cpuset.Parse(readState.DefaultCPUSet); err != nil { - glog.Errorf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet) + klog.Errorf("[cpumanager] state file: could not parse state file - [defaultCpuSet:\"%s\"]", readState.DefaultCPUSet) return err } for containerID, cpuString := range readState.Entries { if tmpContainerCPUSet, err = cpuset.Parse(cpuString); err != nil { - glog.Errorf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString) + klog.Errorf("[cpumanager] state file: could not parse state file - container id: %s, cpuset: \"%s\"", containerID, cpuString) return err } tmpAssignments[containerID] = tmpContainerCPUSet @@ -116,8 +116,8 @@ func (sf *stateFile) tryRestoreState() error { sf.cache.SetDefaultCPUSet(tmpDefaultCPUSet) sf.cache.SetCPUAssignments(tmpAssignments) - glog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath) - glog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String()) + klog.V(2).Infof("[cpumanager] state file: restored state from state file \"%s\"", sf.stateFilePath) + klog.V(2).Infof("[cpumanager] state file: defaultCPUSet: %s", tmpDefaultCPUSet.String()) return nil } diff --git a/pkg/kubelet/cm/cpumanager/state/state_mem.go b/pkg/kubelet/cm/cpumanager/state/state_mem.go index 816c89868e58c..77c5f4a525cc0 100644 --- a/pkg/kubelet/cm/cpumanager/state/state_mem.go +++ b/pkg/kubelet/cm/cpumanager/state/state_mem.go @@ -19,7 +19,7 @@ package state import ( "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ) @@ -33,7 +33,7 @@ var _ State = &stateMemory{} // NewMemoryState creates new State for keeping track of cpu/pod assignment func NewMemoryState() State { - glog.Infof("[cpumanager] initializing new in-memory state store") + klog.Infof("[cpumanager] initializing new in-memory state store") return &stateMemory{ assignments: ContainerCPUAssignments{}, defaultCPUSet: cpuset.NewCPUSet(), @@ -73,7 +73,7 @@ func (s *stateMemory) SetCPUSet(containerID string, cset cpuset.CPUSet) { defer s.Unlock() s.assignments[containerID] = cset - glog.Infof("[cpumanager] updated desired cpuset (container id: %s, cpuset: \"%s\")", containerID, cset) + klog.Infof("[cpumanager] updated desired cpuset (container id: %s, cpuset: \"%s\")", containerID, cset) } func (s *stateMemory) SetDefaultCPUSet(cset cpuset.CPUSet) { @@ -81,7 +81,7 @@ func (s *stateMemory) SetDefaultCPUSet(cset cpuset.CPUSet) { defer s.Unlock() s.defaultCPUSet = cset - glog.Infof("[cpumanager] updated default cpuset: \"%s\"", cset) + klog.Infof("[cpumanager] updated default cpuset: \"%s\"", cset) } func (s *stateMemory) SetCPUAssignments(a ContainerCPUAssignments) { @@ -89,7 
+89,7 @@ func (s *stateMemory) SetCPUAssignments(a ContainerCPUAssignments) { defer s.Unlock() s.assignments = a.Clone() - glog.Infof("[cpumanager] updated cpuset assignments: \"%v\"", a) + klog.Infof("[cpumanager] updated cpuset assignments: \"%v\"", a) } func (s *stateMemory) Delete(containerID string) { @@ -97,7 +97,7 @@ func (s *stateMemory) Delete(containerID string) { defer s.Unlock() delete(s.assignments, containerID) - glog.V(2).Infof("[cpumanager] deleted cpuset assignment (container id: %s)", containerID) + klog.V(2).Infof("[cpumanager] deleted cpuset assignment (container id: %s)", containerID) } func (s *stateMemory) ClearState() { @@ -106,5 +106,5 @@ func (s *stateMemory) ClearState() { s.defaultCPUSet = cpuset.CPUSet{} s.assignments = make(ContainerCPUAssignments) - glog.V(2).Infof("[cpumanager] cleared state") + klog.V(2).Infof("[cpumanager] cleared state") } diff --git a/pkg/kubelet/cm/cpumanager/topology/BUILD b/pkg/kubelet/cm/cpumanager/topology/BUILD index e9f6407855482..18ab41a33f4cf 100644 --- a/pkg/kubelet/cm/cpumanager/topology/BUILD +++ b/pkg/kubelet/cm/cpumanager/topology/BUILD @@ -10,8 +10,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/kubelet/cm/cpuset:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/cm/cpumanager/topology/topology.go b/pkg/kubelet/cm/cpumanager/topology/topology.go index 2491abe6c06aa..adb0aa7fc70bf 100644 --- a/pkg/kubelet/cm/cpumanager/topology/topology.go +++ b/pkg/kubelet/cm/cpumanager/topology/topology.go @@ -20,8 +20,8 @@ import ( "fmt" "sort" - "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" ) @@ -156,7 +156,7 @@ func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) { numPhysicalCores += len(socket.Cores) for _, core := range socket.Cores { if coreID, err = getUniqueCoreID(core.Threads); err != nil { - glog.Errorf("could not get unique coreID for socket: %d core %d threads: %v", + klog.Errorf("could not get unique coreID for socket: %d core %d threads: %v", socket.Id, core.Id, core.Threads) return nil, err } diff --git a/pkg/kubelet/cm/cpuset/BUILD b/pkg/kubelet/cm/cpuset/BUILD index 89126d680e327..9b81edc1961f2 100644 --- a/pkg/kubelet/cm/cpuset/BUILD +++ b/pkg/kubelet/cm/cpuset/BUILD @@ -5,7 +5,7 @@ go_library( srcs = ["cpuset.go"], importpath = "k8s.io/kubernetes/pkg/kubelet/cm/cpuset", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) go_test( diff --git a/pkg/kubelet/cm/cpuset/cpuset.go b/pkg/kubelet/cm/cpuset/cpuset.go index ccf577d819cb7..d87efc7859381 100644 --- a/pkg/kubelet/cm/cpuset/cpuset.go +++ b/pkg/kubelet/cm/cpuset/cpuset.go @@ -19,7 +19,7 @@ package cpuset import ( "bytes" "fmt" - "github.com/golang/glog" + "k8s.io/klog" "reflect" "sort" "strconv" @@ -221,7 +221,7 @@ func (s CPUSet) String() string { func MustParse(s string) CPUSet { res, err := Parse(s) if err != nil { - glog.Fatalf("unable to parse [%s] as CPUSet: %v", s, err) + klog.Fatalf("unable to parse [%s] as CPUSet: %v", s, err) } return res } diff --git a/pkg/kubelet/cm/devicemanager/BUILD b/pkg/kubelet/cm/devicemanager/BUILD index 719dbbd65e170..9a14643b71c08 100644 --- a/pkg/kubelet/cm/devicemanager/BUILD +++ b/pkg/kubelet/cm/devicemanager/BUILD @@ -28,8 +28,8 
@@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/cm/devicemanager/endpoint.go b/pkg/kubelet/cm/devicemanager/endpoint.go index eed50de6561ac..624643e5efb06 100644 --- a/pkg/kubelet/cm/devicemanager/endpoint.go +++ b/pkg/kubelet/cm/devicemanager/endpoint.go @@ -23,8 +23,8 @@ import ( "sync" "time" - "github.com/golang/glog" "google.golang.org/grpc" + "k8s.io/klog" pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" ) @@ -59,7 +59,7 @@ type endpointImpl struct { func newEndpointImpl(socketPath, resourceName string, callback monitorCallback) (*endpointImpl, error) { client, c, err := dial(socketPath) if err != nil { - glog.Errorf("Can't create new endpoint with path %s err %v", socketPath, err) + klog.Errorf("Can't create new endpoint with path %s err %v", socketPath, err) return nil, err } @@ -95,7 +95,7 @@ func (e *endpointImpl) callback(resourceName string, devices []pluginapi.Device) func (e *endpointImpl) run() { stream, err := e.client.ListAndWatch(context.Background(), &pluginapi.Empty{}) if err != nil { - glog.Errorf(errListAndWatch, e.resourceName, err) + klog.Errorf(errListAndWatch, e.resourceName, err) return } @@ -103,12 +103,12 @@ func (e *endpointImpl) run() { for { response, err := stream.Recv() if err != nil { - glog.Errorf(errListAndWatch, e.resourceName, err) + klog.Errorf(errListAndWatch, e.resourceName, err) return } devs := response.Devices - glog.V(2).Infof("State pushed for device plugin %s", e.resourceName) + klog.V(2).Infof("State pushed for device plugin %s", e.resourceName) var newDevs []pluginapi.Device for _, d := range devs { diff --git a/pkg/kubelet/cm/devicemanager/manager.go b/pkg/kubelet/cm/devicemanager/manager.go index 8064b572b39a2..2a326aba9f8b9 100644 --- a/pkg/kubelet/cm/devicemanager/manager.go +++ b/pkg/kubelet/cm/devicemanager/manager.go @@ -25,8 +25,8 @@ import ( "sync" "time" - "github.com/golang/glog" "google.golang.org/grpc" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -105,7 +105,7 @@ func NewManagerImpl() (*ManagerImpl, error) { } func newManagerImpl(socketPath string) (*ManagerImpl, error) { - glog.V(2).Infof("Creating Device Plugin manager at %s", socketPath) + klog.V(2).Infof("Creating Device Plugin manager at %s", socketPath) if socketPath == "" || !filepath.IsAbs(socketPath) { return nil, fmt.Errorf(errBadSocket+" %s", socketPath) @@ -169,7 +169,7 @@ func (m *ManagerImpl) removeContents(dir string) error { } stat, err := os.Stat(filePath) if err != nil { - glog.Errorf("Failed to stat file %s: %v", filePath, err) + klog.Errorf("Failed to stat file %s: %v", filePath, err) continue } if stat.IsDir() { @@ -192,7 +192,7 @@ func (m *ManagerImpl) checkpointFile() string { // podDevices and allocatedDevices information from checkpoint-ed state and // starts device plugin registration service. 
func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady) error { - glog.V(2).Infof("Starting Device Plugin manager") + klog.V(2).Infof("Starting Device Plugin manager") m.activePods = activePods m.sourcesReady = sourcesReady @@ -200,7 +200,7 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc // Loads in allocatedDevices information from disk. err := m.readCheckpoint() if err != nil { - glog.Warningf("Continue after failing to read checkpoint file. Device allocation info may NOT be up-to-date. Err: %v", err) + klog.Warningf("Continue after failing to read checkpoint file. Device allocation info may NOT be up-to-date. Err: %v", err) } socketPath := filepath.Join(m.socketdir, m.socketname) @@ -209,12 +209,12 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc // Removes all stale sockets in m.socketdir. Device plugins can monitor // this and use it as a signal to re-register with the new Kubelet. if err := m.removeContents(m.socketdir); err != nil { - glog.Errorf("Fail to clean up stale contents under %s: %v", m.socketdir, err) + klog.Errorf("Fail to clean up stale contents under %s: %v", m.socketdir, err) } s, err := net.Listen("unix", socketPath) if err != nil { - glog.Errorf(errListenSocket+" %v", err) + klog.Errorf(errListenSocket+" %v", err) return err } @@ -227,7 +227,7 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc m.server.Serve(s) }() - glog.V(2).Infof("Serving device plugin registration server on %q", socketPath) + klog.V(2).Infof("Serving device plugin registration server on %q", socketPath) return nil } @@ -235,10 +235,10 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc // GetWatcherHandler returns the plugin handler func (m *ManagerImpl) GetWatcherHandler() watcher.PluginHandler { if f, err := os.Create(m.socketdir + "DEPRECATION"); err != nil { - glog.Errorf("Failed to create deprecation file at %s", m.socketdir) + klog.Errorf("Failed to create deprecation file at %s", m.socketdir) } else { f.Close() - glog.V(4).Infof("created deprecation file %s", f.Name()) + klog.V(4).Infof("created deprecation file %s", f.Name()) } return watcher.PluginHandler(m) @@ -246,7 +246,7 @@ func (m *ManagerImpl) GetWatcherHandler() watcher.PluginHandler { // ValidatePlugin validates a plugin if the version is correct and the name has the format of an extended resource func (m *ManagerImpl) ValidatePlugin(pluginName string, endpoint string, versions []string) error { - glog.V(2).Infof("Got Plugin %s at endpoint %s with versions %v", pluginName, endpoint, versions) + klog.V(2).Infof("Got Plugin %s at endpoint %s with versions %v", pluginName, endpoint, versions) if !m.isVersionCompatibleWithPlugin(versions) { return fmt.Errorf("manager version, %s, is not among plugin supported versions %v", pluginapi.Version, versions) @@ -263,7 +263,7 @@ func (m *ManagerImpl) ValidatePlugin(pluginName string, endpoint string, version // TODO: Start the endpoint and wait for the First ListAndWatch call // before registering the plugin func (m *ManagerImpl) RegisterPlugin(pluginName string, endpoint string) error { - glog.V(2).Infof("Registering Plugin %s at endpoint %s", pluginName, endpoint) + klog.V(2).Infof("Registering Plugin %s at endpoint %s", pluginName, endpoint) e, err := newEndpointImpl(endpoint, pluginName, m.callback) if err != nil { @@ -342,7 +342,7 @@ func (m *ManagerImpl) Allocate(node *schedulercache.NodeInfo, attrs *lifecycle.P // 
Register registers a device plugin. func (m *ManagerImpl) Register(ctx context.Context, r *pluginapi.RegisterRequest) (*pluginapi.Empty, error) { - glog.Infof("Got registration request from device plugin with resource name %q", r.ResourceName) + klog.Infof("Got registration request from device plugin with resource name %q", r.ResourceName) metrics.DevicePluginRegistrationCount.WithLabelValues(r.ResourceName).Inc() var versionCompatible bool for _, v := range pluginapi.SupportedVersions { @@ -353,13 +353,13 @@ func (m *ManagerImpl) Register(ctx context.Context, r *pluginapi.RegisterRequest } if !versionCompatible { errorString := fmt.Sprintf(errUnsupportedVersion, r.Version, pluginapi.SupportedVersions) - glog.Infof("Bad registration request from device plugin with resource name %q: %s", r.ResourceName, errorString) + klog.Infof("Bad registration request from device plugin with resource name %q: %s", r.ResourceName, errorString) return &pluginapi.Empty{}, fmt.Errorf(errorString) } if !v1helper.IsExtendedResourceName(v1.ResourceName(r.ResourceName)) { errorString := fmt.Sprintf(errInvalidResourceName, r.ResourceName) - glog.Infof("Bad registration request from device plugin: %s", errorString) + klog.Infof("Bad registration request from device plugin: %s", errorString) return &pluginapi.Empty{}, fmt.Errorf(errorString) } @@ -396,7 +396,7 @@ func (m *ManagerImpl) registerEndpoint(resourceName string, options *pluginapi.D defer m.mutex.Unlock() m.endpoints[resourceName] = endpointInfo{e: e, opts: options} - glog.V(2).Infof("Registered endpoint %v", e) + klog.V(2).Infof("Registered endpoint %v", e) } func (m *ManagerImpl) runEndpoint(resourceName string, e endpoint) { @@ -410,13 +410,13 @@ func (m *ManagerImpl) runEndpoint(resourceName string, e endpoint) { m.markResourceUnhealthy(resourceName) } - glog.V(2).Infof("Endpoint (%s, %v) became unhealthy", resourceName, e) + klog.V(2).Infof("Endpoint (%s, %v) became unhealthy", resourceName, e) } func (m *ManagerImpl) addEndpoint(r *pluginapi.RegisterRequest) { new, err := newEndpointImpl(filepath.Join(m.socketdir, r.Endpoint), r.ResourceName, m.callback) if err != nil { - glog.Errorf("Failed to dial device plugin with request %v: %v", r, err) + klog.Errorf("Failed to dial device plugin with request %v: %v", r, err) return } m.registerEndpoint(r.ResourceName, r.Options, new) @@ -426,7 +426,7 @@ func (m *ManagerImpl) addEndpoint(r *pluginapi.RegisterRequest) { } func (m *ManagerImpl) markResourceUnhealthy(resourceName string) { - glog.V(2).Infof("Mark all resources Unhealthy for resource %s", resourceName) + klog.V(2).Infof("Mark all resources Unhealthy for resource %s", resourceName) healthyDevices := sets.NewString() if _, ok := m.healthyDevices[resourceName]; ok { healthyDevices = m.healthyDevices[resourceName] @@ -463,7 +463,7 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) // should always be consistent. Otherwise, we run with the risk // of failing to garbage collect non-existing resources or devices. 
if !ok { - glog.Errorf("unexpected: healthyDevices and endpoints are out of sync") + klog.Errorf("unexpected: healthyDevices and endpoints are out of sync") } delete(m.endpoints, resourceName) delete(m.healthyDevices, resourceName) @@ -478,7 +478,7 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string) eI, ok := m.endpoints[resourceName] if (ok && eI.e.stopGracePeriodExpired()) || !ok { if !ok { - glog.Errorf("unexpected: unhealthyDevices and endpoints are out of sync") + klog.Errorf("unexpected: unhealthyDevices and endpoints are out of sync") } delete(m.endpoints, resourceName) delete(m.unhealthyDevices, resourceName) @@ -524,7 +524,7 @@ func (m *ManagerImpl) readCheckpoint() error { err := m.checkpointManager.GetCheckpoint(kubeletDeviceManagerCheckpoint, cp) if err != nil { if err == errors.ErrCheckpointNotFound { - glog.Warningf("Failed to retrieve checkpoint for %q: %v", kubeletDeviceManagerCheckpoint, err) + klog.Warningf("Failed to retrieve checkpoint for %q: %v", kubeletDeviceManagerCheckpoint, err) return nil } return err @@ -561,7 +561,7 @@ func (m *ManagerImpl) updateAllocatedDevices(activePods []*v1.Pod) { if len(podsToBeRemoved) <= 0 { return } - glog.V(3).Infof("pods to be removed: %v", podsToBeRemoved.List()) + klog.V(3).Infof("pods to be removed: %v", podsToBeRemoved.List()) m.podDevices.delete(podsToBeRemoved.List()) // Regenerated allocatedDevices after we update pod allocation information. m.allocatedDevices = m.podDevices.devices() @@ -577,7 +577,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi // This can happen if a container restarts for example. devices := m.podDevices.containerDevices(podUID, contName, resource) if devices != nil { - glog.V(3).Infof("Found pre-allocated devices for resource %s container %q in Pod %q: %v", resource, contName, podUID, devices.List()) + klog.V(3).Infof("Found pre-allocated devices for resource %s container %q in Pod %q: %v", resource, contName, podUID, devices.List()) needed = needed - devices.Len() // A pod's resource is not expected to change once admitted by the API server, // so just fail loudly here. We can revisit this part if this no longer holds. @@ -589,7 +589,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi // No change, no work. return nil, nil } - glog.V(3).Infof("Needs to allocate %d %q for pod %q container %q", needed, resource, podUID, contName) + klog.V(3).Infof("Needs to allocate %d %q for pod %q container %q", needed, resource, podUID, contName) // Needs to allocate additional devices. if _, ok := m.healthyDevices[resource]; !ok { return nil, fmt.Errorf("can't allocate unregistered device %s", resource) @@ -640,7 +640,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont for k, v := range container.Resources.Limits { resource := string(k) needed := int(v.Value()) - glog.V(3).Infof("needs %d %s", needed, resource) + klog.V(3).Infof("needs %d %s", needed, resource) if !m.isDevicePluginResource(resource) { continue } @@ -684,7 +684,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont devs := allocDevices.UnsortedList() // TODO: refactor this part of code to just append a ContainerAllocationRequest // in a passed in AllocateRequest pointer, and issues a single Allocate call per pod. 
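The TODO just above describes collapsing the per-resource calls into a single Allocate RPC per pod. A rough sketch of what that batched request could look like with the v1beta1 types; this is illustrative only, and the patch itself does not make that change (devicesPerContainer is assumed).

// Hypothetical pod-level batching: accumulate one ContainerAllocateRequest
// per container and resource, then issue a single Allocate call.
req := &pluginapi.AllocateRequest{}
for _, devs := range devicesPerContainer {
	req.ContainerRequests = append(req.ContainerRequests,
		&pluginapi.ContainerAllocateRequest{DevicesIDs: devs})
}
resp, err := client.Allocate(context.Background(), req)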
- glog.V(3).Infof("Making allocation request for devices %v for device plugin %s", devs, resource) + klog.V(3).Infof("Making allocation request for devices %v for device plugin %s", devs, resource) resp, err := eI.e.allocate(devs) metrics.DevicePluginAllocationLatency.WithLabelValues(resource).Observe(metrics.SinceInMicroseconds(startRPCTime)) if err != nil { @@ -743,7 +743,7 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s if eI.opts == nil || !eI.opts.PreStartRequired { m.mutex.Unlock() - glog.V(4).Infof("Plugin options indicate to skip PreStartContainer for resource: %s", resource) + klog.V(4).Infof("Plugin options indicate to skip PreStartContainer for resource: %s", resource) return nil } @@ -755,7 +755,7 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s m.mutex.Unlock() devs := devices.UnsortedList() - glog.V(4).Infof("Issuing an PreStartContainer call for container, %s, of pod %s", contName, podUID) + klog.V(4).Infof("Issuing a PreStartContainer call for container, %s, of pod %s", contName, podUID) _, err := eI.e.preStartContainer(devs) if err != nil { return fmt.Errorf("device plugin PreStartContainer rpc failed with err: %v", err) } diff --git a/pkg/kubelet/cm/devicemanager/pod_devices.go b/pkg/kubelet/cm/devicemanager/pod_devices.go index 7df0e201c7119..d3d0cc00b1ecb 100644 --- a/pkg/kubelet/cm/devicemanager/pod_devices.go +++ b/pkg/kubelet/cm/devicemanager/pod_devices.go @@ -17,7 +17,7 @@ limitations under the License. package devicemanager import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" @@ -135,13 +135,13 @@ func (pdev podDevices) toCheckpointData() []checkpoint.PodDevicesEntry { for resource, devices := range resources { devIds := devices.deviceIds.UnsortedList() if devices.allocResp == nil { - glog.Errorf("Can't marshal allocResp for %v %v %v: allocation response is missing", podUID, conName, resource) + klog.Errorf("Can't marshal allocResp for %v %v %v: allocation response is missing", podUID, conName, resource) continue } allocResp, err := devices.allocResp.Marshal() if err != nil { - glog.Errorf("Can't marshal allocResp for %v %v %v: %v", podUID, conName, resource, err) + klog.Errorf("Can't marshal allocResp for %v %v %v: %v", podUID, conName, resource, err) continue } data = append(data, checkpoint.PodDevicesEntry{ @@ -159,7 +159,7 @@ func (pdev podDevices) toCheckpointData() []checkpoint.PodDevicesEntry { // Populates podDevices from the passed in checkpointData.
func (pdev podDevices) fromCheckpointData(data []checkpoint.PodDevicesEntry) { for _, entry := range data { - glog.V(2).Infof("Get checkpoint entry: %v %v %v %v %v\n", + klog.V(2).Infof("Get checkpoint entry: %v %v %v %v %v\n", entry.PodUID, entry.ContainerName, entry.ResourceName, entry.DeviceIDs, entry.AllocResp) devIDs := sets.NewString() for _, devID := range entry.DeviceIDs { @@ -168,7 +168,7 @@ func (pdev podDevices) fromCheckpointData(data []checkpoint.PodDevicesEntry) { allocResp := &pluginapi.ContainerAllocateResponse{} err := allocResp.Unmarshal(entry.AllocResp) if err != nil { - glog.Errorf("Can't unmarshal allocResp for %v %v %v: %v", entry.PodUID, entry.ContainerName, entry.ResourceName, err) + klog.Errorf("Can't unmarshal allocResp for %v %v %v: %v", entry.PodUID, entry.ContainerName, entry.ResourceName, err) continue } pdev.insert(entry.PodUID, entry.ContainerName, entry.ResourceName, devIDs, allocResp) @@ -203,13 +203,13 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic // Updates RunContainerOptions.Envs. for k, v := range resp.Envs { if e, ok := envsMap[k]; ok { - glog.V(4).Infof("Skip existing env %s %s", k, v) + klog.V(4).Infof("Skip existing env %s %s", k, v) if e != v { - glog.Errorf("Environment variable %s has conflicting setting: %s and %s", k, e, v) + klog.Errorf("Environment variable %s has conflicting setting: %s and %s", k, e, v) } continue } - glog.V(4).Infof("Add env %s %s", k, v) + klog.V(4).Infof("Add env %s %s", k, v) envsMap[k] = v opts.Envs = append(opts.Envs, kubecontainer.EnvVar{Name: k, Value: v}) } @@ -217,14 +217,14 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic // Updates RunContainerOptions.Devices. for _, dev := range resp.Devices { if d, ok := devsMap[dev.ContainerPath]; ok { - glog.V(4).Infof("Skip existing device %s %s", dev.ContainerPath, dev.HostPath) + klog.V(4).Infof("Skip existing device %s %s", dev.ContainerPath, dev.HostPath) if d != dev.HostPath { - glog.Errorf("Container device %s has conflicting mapping host devices: %s and %s", + klog.Errorf("Container device %s has conflicting mapping host devices: %s and %s", dev.ContainerPath, d, dev.HostPath) } continue } - glog.V(4).Infof("Add device %s %s", dev.ContainerPath, dev.HostPath) + klog.V(4).Infof("Add device %s %s", dev.ContainerPath, dev.HostPath) devsMap[dev.ContainerPath] = dev.HostPath opts.Devices = append(opts.Devices, kubecontainer.DeviceInfo{ PathOnHost: dev.HostPath, @@ -236,14 +236,14 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic // Updates RunContainerOptions.Mounts. 
for _, mount := range resp.Mounts { if m, ok := mountsMap[mount.ContainerPath]; ok { - glog.V(4).Infof("Skip existing mount %s %s", mount.ContainerPath, mount.HostPath) + klog.V(4).Infof("Skip existing mount %s %s", mount.ContainerPath, mount.HostPath) if m != mount.HostPath { - glog.Errorf("Container mount %s has conflicting mapping host mounts: %s and %s", + klog.Errorf("Container mount %s has conflicting mapping host mounts: %s and %s", mount.ContainerPath, m, mount.HostPath) } continue } - glog.V(4).Infof("Add mount %s %s", mount.ContainerPath, mount.HostPath) + klog.V(4).Infof("Add mount %s %s", mount.ContainerPath, mount.HostPath) mountsMap[mount.ContainerPath] = mount.HostPath opts.Mounts = append(opts.Mounts, kubecontainer.Mount{ Name: mount.ContainerPath, @@ -258,13 +258,13 @@ func (pdev podDevices) deviceRunContainerOptions(podUID, contName string) *Devic // Updates for Annotations for k, v := range resp.Annotations { if e, ok := annotationsMap[k]; ok { - glog.V(4).Infof("Skip existing annotation %s %s", k, v) + klog.V(4).Infof("Skip existing annotation %s %s", k, v) if e != v { - glog.Errorf("Annotation %s has conflicting setting: %s and %s", k, e, v) + klog.Errorf("Annotation %s has conflicting setting: %s and %s", k, e, v) } continue } - glog.V(4).Infof("Add annotation %s %s", k, v) + klog.V(4).Infof("Add annotation %s %s", k, v) annotationsMap[k] = v opts.Annotations = append(opts.Annotations, kubecontainer.Annotation{Name: k, Value: v}) } diff --git a/pkg/kubelet/cm/node_container_manager_linux.go b/pkg/kubelet/cm/node_container_manager_linux.go index 52fb19983396b..8d7f16f1d190b 100644 --- a/pkg/kubelet/cm/node_container_manager_linux.go +++ b/pkg/kubelet/cm/node_container_manager_linux.go @@ -23,11 +23,11 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" kubefeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/events" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" @@ -48,7 +48,7 @@ func (cm *containerManagerImpl) createNodeAllocatableCgroups() error { return nil } if err := cm.cgroupManager.Create(cgroupConfig); err != nil { - glog.Errorf("Failed to create %q cgroup", cm.cgroupRoot) + klog.Errorf("Failed to create %q cgroup", cm.cgroupRoot) return err } return nil @@ -66,7 +66,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { nodeAllocatable = cm.getNodeAllocatableAbsolute() } - glog.V(4).Infof("Attempting to enforce Node Allocatable with config: %+v", nc) + klog.V(4).Infof("Attempting to enforce Node Allocatable with config: %+v", nc) cgroupConfig := &CgroupConfig{ Name: cm.cgroupRoot, @@ -103,7 +103,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { } // Now apply kube reserved and system reserved limits if required. 
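Before the per-cgroup enforcement below: the Node Allocatable value being protected here is capacity minus the reservations. A simplified sketch of that arithmetic with resource.Quantity; the variable names are illustrative, not from this patch.

// Sketch: Allocatable = Capacity - KubeReserved - SystemReserved - HardEvictionThreshold.
allocatable := capacity.DeepCopy()
allocatable.Sub(kubeReserved)
allocatable.Sub(systemReserved)
allocatable.Sub(hardEvictionThreshold)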
if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) { - glog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved) + klog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved) if err := enforceExistingCgroup(cm.cgroupManager, ParseCgroupfsToCgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil { message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err) cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message) @@ -112,7 +112,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error { cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName) } if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) { - glog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved) + klog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved) if err := enforceExistingCgroup(cm.cgroupManager, ParseCgroupfsToCgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil { message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err) cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message) @@ -132,7 +132,7 @@ func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1. if cgroupConfig.ResourceParameters == nil { return fmt.Errorf("%q cgroup is not config properly", cgroupConfig.Name) } - glog.V(4).Infof("Enforcing limits on cgroup %q with %d cpu shares and %d bytes of memory", cName, cgroupConfig.ResourceParameters.CpuShares, cgroupConfig.ResourceParameters.Memory) + klog.V(4).Infof("Enforcing limits on cgroup %q with %d cpu shares and %d bytes of memory", cName, cgroupConfig.ResourceParameters.CpuShares, cgroupConfig.ResourceParameters.Memory) if !cgroupManager.Exists(cgroupConfig.Name) { return fmt.Errorf("%q cgroup does not exist", cgroupConfig.Name) } diff --git a/pkg/kubelet/cm/pod_container_manager_linux.go b/pkg/kubelet/cm/pod_container_manager_linux.go index 844f63986de53..b91c5babe92c8 100644 --- a/pkg/kubelet/cm/pod_container_manager_linux.go +++ b/pkg/kubelet/cm/pod_container_manager_linux.go @@ -23,11 +23,11 @@ import ( "path" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" kubefeatures "k8s.io/kubernetes/pkg/features" ) @@ -137,7 +137,7 @@ func (m *podContainerManagerImpl) killOnePid(pid int) error { // Hate parsing strings, but // vendor/github.com/opencontainers/runc/libcontainer/ // also does this. - glog.V(3).Infof("process with pid %v no longer exists", pid) + klog.V(3).Infof("process with pid %v no longer exists", pid) return nil } return err @@ -159,18 +159,18 @@ func (m *podContainerManagerImpl) tryKillingCgroupProcesses(podCgroup CgroupName // We try killing all the pids multiple times for i := 0; i < 5; i++ { if i != 0 { - glog.V(3).Infof("Attempt %v failed to kill all unwanted process. 
Retyring", i) + klog.V(3).Infof("Attempt %v failed to kill all unwanted processes. Retrying", i) } errlist = []error{} for _, pid := range pidsToKill { - glog.V(3).Infof("Attempt to kill process with pid: %v", pid) + klog.V(3).Infof("Attempt to kill process with pid: %v", pid) if err := m.killOnePid(pid); err != nil { - glog.V(3).Infof("failed to kill process with pid: %v", pid) + klog.V(3).Infof("failed to kill process with pid: %v", pid) errlist = append(errlist, err) } } if len(errlist) == 0 { - glog.V(3).Infof("successfully killed all unwanted processes.") + klog.V(3).Infof("successfully killed all unwanted processes.") return nil } } @@ -181,7 +181,7 @@ func (m *podContainerManagerImpl) Destroy(podCgroup CgroupName) error { // Try killing all the processes attached to the pod cgroup if err := m.tryKillingCgroupProcesses(podCgroup); err != nil { - glog.V(3).Infof("failed to kill all the processes attached to the %v cgroups", podCgroup) + klog.V(3).Infof("failed to kill all the processes attached to the %v cgroups", podCgroup) return fmt.Errorf("failed to kill all the processes attached to the %v cgroups : %v", podCgroup, err) } @@ -269,7 +269,7 @@ func (m *podContainerManagerImpl) GetAllPodsFromCgroups() (map[types.UID]CgroupN parts := strings.Split(basePath, podCgroupNamePrefix) // the uid is missing, so we log the unexpected cgroup not of form pod if len(parts) != 2 { - glog.Errorf("pod cgroup manager ignoring unexpected cgroup %v because it is not a pod", cgroupfsPath) + klog.Errorf("pod cgroup manager ignoring unexpected cgroup %v because it is not a pod", cgroupfsPath) continue } podUID := parts[1] diff --git a/pkg/kubelet/cm/qos_container_manager_linux.go b/pkg/kubelet/cm/qos_container_manager_linux.go index 2cfc198c3a5bb..cebc9756e4388 100644 --- a/pkg/kubelet/cm/qos_container_manager_linux.go +++ b/pkg/kubelet/cm/qos_container_manager_linux.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/wait" @@ -138,7 +138,7 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis go wait.Until(func() { err := m.UpdateCgroups() if err != nil { - glog.Warningf("[ContainerManager] Failed to reserve QoS requests: %v", err) + klog.Warningf("[ContainerManager] Failed to reserve QoS requests: %v", err) } }, periodicQOSCgroupUpdateInterval, wait.NeverStop) @@ -222,17 +222,17 @@ func (m *qosContainerManagerImpl) setMemoryReserve(configs map[v1.PodQOSClass]*C resources := m.getNodeAllocatable() allocatableResource, ok := resources[v1.ResourceMemory] if !ok { - glog.V(2).Infof("[Container Manager] Allocatable memory value could not be determined. Not setting QOS memory limts.") + klog.V(2).Infof("[Container Manager] Allocatable memory value could not be determined. Not setting QOS memory limits.") return } allocatable := allocatableResource.Value() if allocatable == 0 { - glog.V(2).Infof("[Container Manager] Memory allocatable reported as 0, might be in standalone mode. Not setting QOS memory limts.") + klog.V(2).Infof("[Container Manager] Memory allocatable reported as 0, might be in standalone mode. 
Not setting QOS memory limits.") return } for qos, limits := range qosMemoryRequests { - glog.V(2).Infof("[Container Manager] %s pod requests total %d bytes (reserve %d%%)", qos, limits, percentReserve) + klog.V(2).Infof("[Container Manager] %s pod requests total %d bytes (reserve %d%%)", qos, limits, percentReserve) } // Calculate QOS memory limits @@ -252,7 +252,7 @@ func (m *qosContainerManagerImpl) retrySetMemoryReserve(configs map[v1.PodQOSCla for qos, config := range configs { stats, err := m.cgroupManager.GetResourceStats(config.Name) if err != nil { - glog.V(2).Infof("[Container Manager] %v", err) + klog.V(2).Infof("[Container Manager] %v", err) return } usage := stats.MemoryStats.Usage @@ -312,7 +312,7 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error { } } if updateSuccess { - glog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration") + klog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration") return nil } @@ -330,12 +330,12 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error { for _, config := range qosConfigs { err := m.cgroupManager.Update(config) if err != nil { - glog.Errorf("[ContainerManager]: Failed to update QoS cgroup configuration") + klog.Errorf("[ContainerManager]: Failed to update QoS cgroup configuration") return err } } - glog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration") + klog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration") return nil } diff --git a/pkg/kubelet/config/BUILD b/pkg/kubelet/config/BUILD index 82cb3a86ecde8..8b408d66a2e22 100644 --- a/pkg/kubelet/config/BUILD +++ b/pkg/kubelet/config/BUILD @@ -43,8 +43,8 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", diff --git a/pkg/kubelet/config/common.go b/pkg/kubelet/config/common.go index 8fb4199f831a6..933ee29e78638 100644 --- a/pkg/kubelet/config/common.go +++ b/pkg/kubelet/config/common.go @@ -40,7 +40,7 @@ import ( kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/util/hash" - "github.com/golang/glog" + "k8s.io/klog" ) // Generate a pod name that is unique among nodes by appending the nodeName. @@ -59,16 +59,16 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName types.Node } hash.DeepHashObject(hasher, pod) pod.UID = types.UID(hex.EncodeToString(hasher.Sum(nil)[0:])) - glog.V(5).Infof("Generated UID %q pod %q from %s", pod.UID, pod.Name, source) + klog.V(5).Infof("Generated UID %q pod %q from %s", pod.UID, pod.Name, source) } pod.Name = generatePodName(pod.Name, nodeName) - glog.V(5).Infof("Generated Name %q for UID %q from URL %s", pod.Name, pod.UID, source) + klog.V(5).Infof("Generated Name %q for UID %q from URL %s", pod.Name, pod.UID, source) if pod.Namespace == "" { pod.Namespace = metav1.NamespaceDefault } - glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source) + klog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source) // Set the Host field to indicate this pod is scheduled on the current node. 
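A note on wiring: the V(5) lines above only emit when verbosity is raised via flags, and unlike glog, klog does not register its flags in an init() hook, so each converted binary has to add them explicitly. A minimal, hypothetical main showing the setup:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // nil registers -v, -logtostderr, etc. on flag.CommandLine
	flag.Parse()
	defer klog.Flush()

	klog.V(5).Infof("only shown when started with -v=5 or higher")
}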
pod.Spec.NodeName = string(nodeName) @@ -132,7 +132,7 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *v } v1Pod := &v1.Pod{} if err := k8s_api_v1.Convert_core_Pod_To_v1_Pod(newPod, v1Pod, nil); err != nil { - glog.Errorf("Pod %q failed to convert to v1", newPod.Name) + klog.Errorf("Pod %q failed to convert to v1", newPod.Name) return true, nil, err } return true, v1Pod, nil diff --git a/pkg/kubelet/config/config.go b/pkg/kubelet/config/config.go index 51c2e29c54625..75d42e0e73135 100644 --- a/pkg/kubelet/config/config.go +++ b/pkg/kubelet/config/config.go @@ -21,11 +21,11 @@ import ( "reflect" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/record" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/checkpoint" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -97,7 +97,7 @@ func (c *PodConfig) SeenAllSources(seenSources sets.String) bool { if c.pods == nil { return false } - glog.V(5).Infof("Looking for %v, have seen %v", c.sources.List(), seenSources) + klog.V(5).Infof("Looking for %v, have seen %v", c.sources.List(), seenSources) return seenSources.HasAll(c.sources.List()...) && c.pods.seenSources(c.sources.List()...) } @@ -279,16 +279,16 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de switch update.Op { case kubetypes.ADD, kubetypes.UPDATE, kubetypes.DELETE: if update.Op == kubetypes.ADD { - glog.V(4).Infof("Adding new pods from source %s : %v", source, update.Pods) + klog.V(4).Infof("Adding new pods from source %s : %v", source, update.Pods) } else if update.Op == kubetypes.DELETE { - glog.V(4).Infof("Graceful deleting pods from source %s : %v", source, update.Pods) + klog.V(4).Infof("Graceful deleting pods from source %s : %v", source, update.Pods) } else { - glog.V(4).Infof("Updating pods from source %s : %v", source, update.Pods) + klog.V(4).Infof("Updating pods from source %s : %v", source, update.Pods) } updatePodsFunc(update.Pods, pods, pods) case kubetypes.REMOVE: - glog.V(4).Infof("Removing pods from source %s : %v", source, update.Pods) + klog.V(4).Infof("Removing pods from source %s : %v", source, update.Pods) for _, value := range update.Pods { if existing, found := pods[value.UID]; found { // this is a delete @@ -300,7 +300,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de } case kubetypes.SET: - glog.V(4).Infof("Setting pods for source %s", source) + klog.V(4).Infof("Setting pods for source %s", source) s.markSourceSet(source) // Clear the old map entries by just creating a new map oldPods := pods @@ -313,13 +313,13 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de } } case kubetypes.RESTORE: - glog.V(4).Infof("Restoring pods for source %s", source) + klog.V(4).Infof("Restoring pods for source %s", source) for _, value := range update.Pods { restorePods = append(restorePods, value) } default: - glog.Warningf("Received invalid update type: %v", update) + klog.Warningf("Received invalid update type: %v", update) } @@ -354,7 +354,7 @@ func filterInvalidPods(pods []*v1.Pod, source string, recorder record.EventRecor // This function only checks if there is any naming conflict. 
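One more idiom klog preserves from glog, visible throughout the merge path above: V(n) can be used directly as a boolean guard, which avoids evaluating expensive log arguments when the level is disabled. A small illustrative sketch; expensiveDump is hypothetical:

// klog.Verbose has an underlying bool type, so V(4) works as an if condition.
if klog.V(4) {
	klog.Infof("pod merge state: %s", expensiveDump(pods))
}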
name := kubecontainer.GetPodFullName(pod) if names.Has(name) { - glog.Warningf("Pod[%d] (%s) from %s failed validation due to duplicate pod name %q, ignoring", i+1, format.Pod(pod), source, pod.Name) + klog.Warningf("Pod[%d] (%s) from %s failed validation due to duplicate pod name %q, ignoring", i+1, format.Pod(pod), source, pod.Name) recorder.Eventf(pod, v1.EventTypeWarning, events.FailedValidation, "Error validating pod %s from %s due to duplicate pod name %q, ignoring", format.Pod(pod), source, pod.Name) continue } else { @@ -411,7 +411,7 @@ func isAnnotationMapEqual(existingMap, candidateMap map[string]string) bool { // recordFirstSeenTime records the first seen time of this pod. func recordFirstSeenTime(pod *v1.Pod) { - glog.V(4).Infof("Receiving a new pod %q", format.Pod(pod)) + klog.V(4).Infof("Receiving a new pod %q", format.Pod(pod)) pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = kubetypes.NewTimestamp().GetString() } diff --git a/pkg/kubelet/config/file.go b/pkg/kubelet/config/file.go index 683dc9a3c94ef..5eee61d3cf458 100644 --- a/pkg/kubelet/config/file.go +++ b/pkg/kubelet/config/file.go @@ -26,7 +26,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -65,7 +65,7 @@ func NewSourceFile(path string, nodeName types.NodeName, period time.Duration, u path = strings.TrimRight(path, string(os.PathSeparator)) config := newSourceFile(path, nodeName, period, updates) - glog.V(1).Infof("Watching path %q", path) + klog.V(1).Infof("Watching path %q", path) config.run() } @@ -95,17 +95,17 @@ func (s *sourceFile) run() { go func() { // Read path immediately to speed up startup. if err := s.listConfig(); err != nil { - glog.Errorf("Unable to read config path %q: %v", s.path, err) + klog.Errorf("Unable to read config path %q: %v", s.path, err) } for { select { case <-listTicker.C: if err := s.listConfig(); err != nil { - glog.Errorf("Unable to read config path %q: %v", s.path, err) + klog.Errorf("Unable to read config path %q: %v", s.path, err) } case e := <-s.watchEvents: if err := s.consumeWatchEvent(e); err != nil { - glog.Errorf("Unable to process watch event: %v", err) + klog.Errorf("Unable to process watch event: %v", err) } } } @@ -173,31 +173,31 @@ func (s *sourceFile) extractFromDir(name string) ([]*v1.Pod, error) { for _, path := range dirents { statInfo, err := os.Stat(path) if err != nil { - glog.Errorf("Can't get metadata for %q: %v", path, err) + klog.Errorf("Can't get metadata for %q: %v", path, err) continue } switch { case statInfo.Mode().IsDir(): - glog.Errorf("Not recursing into manifest path %q", path) + klog.Errorf("Not recursing into manifest path %q", path) case statInfo.Mode().IsRegular(): pod, err := s.extractFromFile(path) if err != nil { if !os.IsNotExist(err) { - glog.Errorf("Can't process manifest file %q: %v", path, err) + klog.Errorf("Can't process manifest file %q: %v", path, err) } } else { pods = append(pods, pod) } default: - glog.Errorf("Manifest path %q is not a directory or file: %v", path, statInfo.Mode()) + klog.Errorf("Manifest path %q is not a directory or file: %v", path, statInfo.Mode()) } } return pods, nil } func (s *sourceFile) extractFromFile(filename string) (pod *v1.Pod, err error) { - glog.V(3).Infof("Reading config file %q", filename) + klog.V(3).Infof("Reading config file %q", filename) defer func() { if err == nil && pod != nil { objKey, keyErr := cache.MetaNamespaceKeyFunc(pod) diff --git a/pkg/kubelet/config/file_linux.go 
b/pkg/kubelet/config/file_linux.go index 4e6b1a5a23761..98803ec44927d 100644 --- a/pkg/kubelet/config/file_linux.go +++ b/pkg/kubelet/config/file_linux.go @@ -26,8 +26,8 @@ import ( "strings" "time" - "github.com/golang/glog" "golang.org/x/exp/inotify" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -58,7 +58,7 @@ func (s *sourceFile) startWatch() { } if err := s.doWatch(); err != nil { - glog.Errorf("Unable to read config path %q: %v", s.path, err) + klog.Errorf("Unable to read config path %q: %v", s.path, err) if _, retryable := err.(*retryableError); !retryable { backOff.Next(backOffId, time.Now()) } @@ -103,13 +103,13 @@ func (s *sourceFile) doWatch() error { func (s *sourceFile) produceWatchEvent(e *inotify.Event) error { // Ignore file start with dots if strings.HasPrefix(filepath.Base(e.Name), ".") { - glog.V(4).Infof("Ignored pod manifest: %s, because it starts with dots", e.Name) + klog.V(4).Infof("Ignored pod manifest: %s, because it starts with dots", e.Name) return nil } var eventType podEventType switch { case (e.Mask & inotify.IN_ISDIR) > 0: - glog.Errorf("Not recursing into manifest path %q", s.path) + klog.Errorf("Not recursing into manifest path %q", s.path) return nil case (e.Mask & inotify.IN_CREATE) > 0: eventType = podAdd diff --git a/pkg/kubelet/config/file_unsupported.go b/pkg/kubelet/config/file_unsupported.go index 4bee74f544df0..d46b5f361deb7 100644 --- a/pkg/kubelet/config/file_unsupported.go +++ b/pkg/kubelet/config/file_unsupported.go @@ -22,11 +22,11 @@ package config import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" ) func (s *sourceFile) startWatch() { - glog.Errorf("Watching source file is unsupported in this build") + klog.Errorf("Watching source file is unsupported in this build") } func (s *sourceFile) consumeWatchEvent(e *watchEvent) error { diff --git a/pkg/kubelet/config/http.go b/pkg/kubelet/config/http.go index 0ef43e062a54e..1eb8ad042a923 100644 --- a/pkg/kubelet/config/http.go +++ b/pkg/kubelet/config/http.go @@ -29,8 +29,8 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" ) type sourceURL struct { @@ -54,7 +54,7 @@ func NewSourceURL(url string, header http.Header, nodeName types.NodeName, perio // read the manifest URL passed to kubelet. client: &http.Client{Timeout: 10 * time.Second}, } - glog.V(1).Infof("Watching URL %s", url) + klog.V(1).Infof("Watching URL %s", url) go wait.Until(config.run, period, wait.NeverStop) } @@ -63,16 +63,16 @@ func (s *sourceURL) run() { // Don't log this multiple times per minute. The first few entries should be // enough to get the point across. if s.failureLogs < 3 { - glog.Warningf("Failed to read pods from URL: %v", err) + klog.Warningf("Failed to read pods from URL: %v", err) } else if s.failureLogs == 3 { - glog.Warningf("Failed to read pods from URL. Dropping verbosity of this message to V(4): %v", err) + klog.Warningf("Failed to read pods from URL. 
Dropping verbosity of this message to V(4): %v", err) } else { - glog.V(4).Infof("Failed to read pods from URL: %v", err) + klog.V(4).Infof("Failed to read pods from URL: %v", err) } s.failureLogs++ } else { if s.failureLogs > 0 { - glog.Info("Successfully read pods from URL.") + klog.Info("Successfully read pods from URL.") s.failureLogs = 0 } } diff --git a/pkg/kubelet/container/BUILD b/pkg/kubelet/container/BUILD index d7b267083338a..3995b79805a8d 100644 --- a/pkg/kubelet/container/BUILD +++ b/pkg/kubelet/container/BUILD @@ -34,7 +34,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//third_party/forked/golang/expansion:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/container/container_gc.go b/pkg/kubelet/container/container_gc.go index 72fa4bd722ee7..790596b2289e4 100644 --- a/pkg/kubelet/container/container_gc.go +++ b/pkg/kubelet/container/container_gc.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" ) // Specified a policy for garbage collecting containers. @@ -82,6 +82,6 @@ func (cgc *realContainerGC) GarbageCollect() error { } func (cgc *realContainerGC) DeleteAllUnusedContainers() error { - glog.Infof("attempting to delete unused containers") + klog.Infof("attempting to delete unused containers") return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), true) } diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index 01fe7129d38f6..10d848a2d871b 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -21,7 +21,7 @@ import ( "hash/fnv" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -75,13 +75,13 @@ func ShouldContainerBeRestarted(container *v1.Container, pod *v1.Pod, podStatus } // Check RestartPolicy for dead container if pod.Spec.RestartPolicy == v1.RestartPolicyNever { - glog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod)) + klog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod)) return false } if pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure { // Check the exit code. if status.ExitCode == 0 { - glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod)) + klog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod)) return false } } @@ -311,7 +311,7 @@ func MakePortMappings(container *v1.Container) (ports []PortMapping) { // Protect against exposing the same protocol-port more than once in a container. 
if _, ok := names[pm.Name]; ok { - glog.Warningf("Port name conflicted, %q is defined more than once", pm.Name) + klog.Warningf("Port name conflicted, %q is defined more than once", pm.Name) continue } ports = append(ports, pm) diff --git a/pkg/kubelet/container/runtime.go b/pkg/kubelet/container/runtime.go index 4859342abcbc5..2d9fcd33fd99b 100644 --- a/pkg/kubelet/container/runtime.go +++ b/pkg/kubelet/container/runtime.go @@ -25,11 +25,11 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/remotecommand" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/volume" ) @@ -203,7 +203,7 @@ func BuildContainerID(typ, ID string) ContainerID { func ParseContainerID(containerID string) ContainerID { var id ContainerID if err := id.ParseString(containerID); err != nil { - glog.Error(err) + klog.Error(err) } return id } diff --git a/pkg/kubelet/dockershim/BUILD b/pkg/kubelet/dockershim/BUILD index d835f2570609a..5915ed0212458 100644 --- a/pkg/kubelet/dockershim/BUILD +++ b/pkg/kubelet/dockershim/BUILD @@ -67,7 +67,7 @@ go_library( "//vendor/github.com/docker/docker/api/types/strslice:go_default_library", "//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library", "//vendor/github.com/docker/go-connections/nat:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:windows": [ diff --git a/pkg/kubelet/dockershim/cm/BUILD b/pkg/kubelet/dockershim/cm/BUILD index 754952527a985..80736ce241050 100644 --- a/pkg/kubelet/dockershim/cm/BUILD +++ b/pkg/kubelet/dockershim/cm/BUILD @@ -33,9 +33,9 @@ go_library( "//pkg/kubelet/qos:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ "//pkg/kubelet/dockershim/libdocker:go_default_library", diff --git a/pkg/kubelet/dockershim/cm/container_manager_linux.go b/pkg/kubelet/dockershim/cm/container_manager_linux.go index 8484fd2d87396..b59c6a08364cc 100644 --- a/pkg/kubelet/dockershim/cm/container_manager_linux.go +++ b/pkg/kubelet/dockershim/cm/container_manager_linux.go @@ -25,11 +25,11 @@ import ( "strconv" "time" - "github.com/golang/glog" "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" kubecm "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/qos" @@ -83,19 +83,19 @@ func (m *containerManager) Start() error { func (m *containerManager) doWork() { v, err := m.client.Version() if err != nil { - glog.Errorf("Unable to get docker version: %v", err) + klog.Errorf("Unable to get docker version: %v", err) return } version, err := utilversion.ParseGeneric(v.APIVersion) if err != nil { - glog.Errorf("Unable to parse docker version %q: %v", v.APIVersion, err) + klog.Errorf("Unable to parse docker version %q: %v", v.APIVersion, err) return 
} // EnsureDockerInContainer does two things. // 1. Ensure processes run in the cgroups if m.cgroupsManager is not nil. // 2. Ensure processes have the OOM score applied. if err := kubecm.EnsureDockerInContainer(version, dockerOOMScoreAdj, m.cgroupsManager); err != nil { - glog.Errorf("Unable to ensure the docker processes run in the desired containers: %v", err) + klog.Errorf("Unable to ensure the docker processes run in the desired containers: %v", err) } } @@ -104,7 +104,7 @@ func createCgroupManager(name string) (*fs.Manager, error) { memoryCapacity, err := getMemoryCapacity() if err != nil { - glog.Errorf("Failed to get the memory capacity on machine: %v", err) + klog.Errorf("Failed to get the memory capacity on machine: %v", err) } else { memoryLimit = memoryCapacity * dockerMemoryLimitThresholdPercent / 100 } @@ -112,7 +112,7 @@ func createCgroupManager(name string) (*fs.Manager, error) { if err != nil || memoryLimit < minDockerMemoryLimit { memoryLimit = minDockerMemoryLimit } - glog.V(2).Infof("Configure resource-only container %q with memory limit: %d", name, memoryLimit) + klog.V(2).Infof("Configure resource-only container %q with memory limit: %d", name, memoryLimit) allowAllDevices := true cm := &fs.Manager{ diff --git a/pkg/kubelet/dockershim/docker_container.go b/pkg/kubelet/dockershim/docker_container.go index 6343e9be19759..3c6b9b4849721 100644 --- a/pkg/kubelet/dockershim/docker_container.go +++ b/pkg/kubelet/dockershim/docker_container.go @@ -27,7 +27,7 @@ import ( dockercontainer "github.com/docker/docker/api/types/container" dockerfilters "github.com/docker/docker/api/types/filters" dockerstrslice "github.com/docker/docker/api/types/strslice" - "github.com/golang/glog" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" @@ -71,7 +71,7 @@ func (ds *dockerService) ListContainers(_ context.Context, r *runtimeapi.ListCon converted, err := toRuntimeAPIContainer(&c) if err != nil { - glog.V(4).Infof("Unable to convert docker to runtime API container: %v", err) + klog.V(4).Infof("Unable to convert docker to runtime API container: %v", err) continue } @@ -191,7 +191,7 @@ func (ds *dockerService) createContainerLogSymlink(containerID string) error { } if path == "" { - glog.V(5).Infof("Container %s log path isn't specified, will not create the symlink", containerID) + klog.V(5).Infof("Container %s log path isn't specified, will not create the symlink", containerID) return nil } @@ -199,7 +199,7 @@ func (ds *dockerService) createContainerLogSymlink(containerID string) error { // Only create the symlink when container log path is specified and log file exists. 
// Delete possibly existing file first if err = ds.os.Remove(path); err == nil { - glog.Warningf("Deleted previously existing symlink file: %q", path) + klog.Warningf("Deleted previously existing symlink file: %q", path) } if err = ds.os.Symlink(realPath, path); err != nil { return fmt.Errorf("failed to create symbolic link %q to the container log file %q for container %q: %v", @@ -208,14 +208,14 @@ func (ds *dockerService) createContainerLogSymlink(containerID string) error { } else { supported, err := ds.IsCRISupportedLogDriver() if err != nil { - glog.Warningf("Failed to check supported logging driver by CRI: %v", err) + klog.Warningf("Failed to check supported logging driver by CRI: %v", err) return nil } if supported { - glog.Warningf("Cannot create symbolic link because container log file doesn't exist!") + klog.Warningf("Cannot create symbolic link because container log file doesn't exist!") } else { - glog.V(5).Infof("Unsupported logging driver by CRI") + klog.V(5).Infof("Unsupported logging driver by CRI") } } diff --git a/pkg/kubelet/dockershim/docker_image.go b/pkg/kubelet/dockershim/docker_image.go index e4c450bc8b0c6..c1089a037a3d5 100644 --- a/pkg/kubelet/dockershim/docker_image.go +++ b/pkg/kubelet/dockershim/docker_image.go @@ -25,7 +25,7 @@ import ( dockerfilters "github.com/docker/docker/api/types/filters" "github.com/docker/docker/pkg/jsonmessage" - "github.com/golang/glog" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" ) @@ -52,7 +52,7 @@ func (ds *dockerService) ListImages(_ context.Context, r *runtimeapi.ListImagesR for _, i := range images { apiImage, err := imageToRuntimeAPIImage(&i) if err != nil { - glog.V(5).Infof("Failed to convert docker API image %+v to runtime API image: %v", i, err) + klog.V(5).Infof("Failed to convert docker API image %+v to runtime API image: %v", i, err) continue } result = append(result, apiImage) diff --git a/pkg/kubelet/dockershim/docker_image_windows.go b/pkg/kubelet/dockershim/docker_image_windows.go index 8fd6d2c869e06..e9df2f663fa6e 100644 --- a/pkg/kubelet/dockershim/docker_image_windows.go +++ b/pkg/kubelet/dockershim/docker_image_windows.go @@ -22,7 +22,7 @@ import ( "context" "time" - "github.com/golang/glog" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/winstats" @@ -32,14 +32,14 @@ import ( func (ds *dockerService) ImageFsInfo(_ context.Context, _ *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) { info, err := ds.client.Info() if err != nil { - glog.Errorf("Failed to get docker info: %v", err) + klog.Errorf("Failed to get docker info: %v", err) return nil, err } statsClient := &winstats.StatsClient{} fsinfo, err := statsClient.GetDirFsInfo(info.DockerRootDir) if err != nil { - glog.Errorf("Failed to get dir fsInfo for %q: %v", info.DockerRootDir, err) + klog.Errorf("Failed to get dir fsInfo for %q: %v", info.DockerRootDir, err) return nil, err } diff --git a/pkg/kubelet/dockershim/docker_sandbox.go b/pkg/kubelet/dockershim/docker_sandbox.go index 744ebdf1c8e25..0443793d8db15 100644 --- a/pkg/kubelet/dockershim/docker_sandbox.go +++ b/pkg/kubelet/dockershim/docker_sandbox.go @@ -27,8 +27,8 @@ import ( dockertypes "github.com/docker/docker/api/types" dockercontainer "github.com/docker/docker/api/types/container" dockerfilters "github.com/docker/docker/api/types/filters" - "github.com/golang/glog" utilerrors "k8s.io/apimachinery/pkg/util/errors" + 
"k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors" @@ -225,11 +225,11 @@ func (ds *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopP if checkpointErr != errors.ErrCheckpointNotFound { err := ds.checkpointManager.RemoveCheckpoint(podSandboxID) if err != nil { - glog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, err) + klog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, err) } } if libdocker.IsContainerNotFoundError(statusErr) { - glog.Warningf("Both sandbox container and checkpoint for id %q could not be found. "+ + klog.Warningf("Both sandbox container and checkpoint for id %q could not be found. "+ "Proceed without further sandbox information.", podSandboxID) } else { return nil, utilerrors.NewAggregate([]error{ @@ -264,7 +264,7 @@ func (ds *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopP if err := ds.client.StopContainer(podSandboxID, defaultSandboxGracePeriod); err != nil { // Do not return error if the container does not exist if !libdocker.IsContainerNotFoundError(err) { - glog.Errorf("Failed to stop sandbox %q: %v", podSandboxID, err) + klog.Errorf("Failed to stop sandbox %q: %v", podSandboxID, err) errList = append(errList, err) } else { // remove the checkpoint for any sandbox that is not found in the runtime @@ -381,7 +381,7 @@ func (ds *dockerService) getIP(podSandboxID string, sandbox *dockertypes.Contain // If all else fails, warn but don't return an error, as pod status // should generally not return anything except fatal errors // FIXME: handle network errors by restarting the pod somehow? - glog.Warningf("failed to read pod IP from plugin/docker: %v", err) + klog.Warningf("failed to read pod IP from plugin/docker: %v", err) return "" } @@ -498,7 +498,7 @@ func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPod if filter == nil { checkpoints, err = ds.checkpointManager.ListCheckpoints() if err != nil { - glog.Errorf("Failed to list checkpoints: %v", err) + klog.Errorf("Failed to list checkpoints: %v", err) } } @@ -515,7 +515,7 @@ func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPod c := containers[i] converted, err := containerToRuntimeAPISandbox(&c) if err != nil { - glog.V(4).Infof("Unable to convert docker to runtime API sandbox %+v: %v", c, err) + klog.V(4).Infof("Unable to convert docker to runtime API sandbox %+v: %v", c, err) continue } if filterOutReadySandboxes && converted.State == runtimeapi.PodSandboxState_SANDBOX_READY { @@ -535,11 +535,11 @@ func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPod checkpoint := NewPodSandboxCheckpoint("", "", &CheckpointData{}) err := ds.checkpointManager.GetCheckpoint(id, checkpoint) if err != nil { - glog.Errorf("Failed to retrieve checkpoint for sandbox %q: %v", id, err) + klog.Errorf("Failed to retrieve checkpoint for sandbox %q: %v", id, err) if err == errors.ErrCorruptCheckpoint { err = ds.checkpointManager.RemoveCheckpoint(id) if err != nil { - glog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", id, err) + klog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", id, err) } } continue @@ -687,14 +687,14 @@ func toCheckpointProtocol(protocol runtimeapi.Protocol) Protocol { case runtimeapi.Protocol_SCTP: return protocolSCTP } - glog.Warningf("Unknown protocol %q: defaulting 
to TCP", protocol) + klog.Warningf("Unknown protocol %q: defaulting to TCP", protocol) return protocolTCP } // rewriteResolvFile rewrites resolv.conf file generated by docker. func rewriteResolvFile(resolvFilePath string, dns []string, dnsSearch []string, dnsOptions []string) error { if len(resolvFilePath) == 0 { - glog.Errorf("ResolvConfPath is empty.") + klog.Errorf("ResolvConfPath is empty.") return nil } @@ -719,9 +719,9 @@ func rewriteResolvFile(resolvFilePath string, dns []string, dnsSearch []string, resolvFileContentStr := strings.Join(resolvFileContent, "\n") resolvFileContentStr += "\n" - glog.V(4).Infof("Will attempt to re-write config file %s with: \n%s", resolvFilePath, resolvFileContent) + klog.V(4).Infof("Will attempt to re-write config file %s with: \n%s", resolvFilePath, resolvFileContent) if err := rewriteFile(resolvFilePath, resolvFileContentStr); err != nil { - glog.Errorf("resolv.conf could not be updated: %v", err) + klog.Errorf("resolv.conf could not be updated: %v", err) return err } } diff --git a/pkg/kubelet/dockershim/docker_service.go b/pkg/kubelet/dockershim/docker_service.go index ae1f70f5218d4..97f6543c4bb81 100644 --- a/pkg/kubelet/dockershim/docker_service.go +++ b/pkg/kubelet/dockershim/docker_service.go @@ -27,7 +27,7 @@ import ( "github.com/blang/semver" dockertypes "github.com/docker/docker/api/types" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" @@ -233,7 +233,7 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon // lead to retries of the same failure, so just fail hard. return nil, err } - glog.Infof("Hairpin mode set to %q", pluginSettings.HairpinMode) + klog.Infof("Hairpin mode set to %q", pluginSettings.HairpinMode) // dockershim currently only supports CNI plugins. 
pluginSettings.PluginBinDirs = cni.SplitDirs(pluginSettings.PluginBinDirString) @@ -248,25 +248,25 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon return nil, fmt.Errorf("didn't find compatible CNI plugin with given settings %+v: %v", pluginSettings, err) } ds.network = network.NewPluginManager(plug) - glog.Infof("Docker cri networking managed by %v", plug.Name()) + klog.Infof("Docker cri networking managed by %v", plug.Name()) // NOTE: cgroup driver is only detectable in docker 1.11+ cgroupDriver := defaultCgroupDriver dockerInfo, err := ds.client.Info() - glog.Infof("Docker Info: %+v", dockerInfo) + klog.Infof("Docker Info: %+v", dockerInfo) if err != nil { - glog.Errorf("Failed to execute Info() call to the Docker client: %v", err) - glog.Warningf("Falling back to use the default driver: %q", cgroupDriver) + klog.Errorf("Failed to execute Info() call to the Docker client: %v", err) + klog.Warningf("Falling back to use the default driver: %q", cgroupDriver) } else if len(dockerInfo.CgroupDriver) == 0 { - glog.Warningf("No cgroup driver is set in Docker") - glog.Warningf("Falling back to use the default driver: %q", cgroupDriver) + klog.Warningf("No cgroup driver is set in Docker") + klog.Warningf("Falling back to use the default driver: %q", cgroupDriver) } else { cgroupDriver = dockerInfo.CgroupDriver } if len(kubeCgroupDriver) != 0 && kubeCgroupDriver != cgroupDriver { return nil, fmt.Errorf("misconfiguration: kubelet cgroup driver: %q is different from docker cgroup driver: %q", kubeCgroupDriver, cgroupDriver) } - glog.Infof("Setting cgroupDriver to %s", cgroupDriver) + klog.Infof("Setting cgroupDriver to %s", cgroupDriver) ds.cgroupDriver = cgroupDriver ds.versionCache = cache.NewObjectCache( func() (interface{}, error) { @@ -342,7 +342,7 @@ func (ds *dockerService) UpdateRuntimeConfig(_ context.Context, r *runtimeapi.Up return &runtimeapi.UpdateRuntimeConfigResponse{}, nil } - glog.Infof("docker cri received runtime config %+v", runtimeConfig) + klog.Infof("docker cri received runtime config %+v", runtimeConfig) if ds.network != nil && runtimeConfig.NetworkConfig.PodCidr != "" { event := make(map[string]interface{}) event[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = runtimeConfig.NetworkConfig.PodCidr @@ -375,7 +375,7 @@ func (ds *dockerService) GetPodPortMappings(podSandboxID string) ([]*hostport.Po } errRem := ds.checkpointManager.RemoveCheckpoint(podSandboxID) if errRem != nil { - glog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, errRem) + klog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, errRem) } return nil, err } @@ -398,7 +398,7 @@ func (ds *dockerService) Start() error { if ds.startLocalStreamingServer { go func() { if err := ds.streamingServer.Start(true); err != nil { - glog.Fatalf("Streaming server stopped unexpectedly: %v", err) + klog.Fatalf("Streaming server stopped unexpectedly: %v", err) } }() } @@ -450,7 +450,7 @@ func (ds *dockerService) GenerateExpectedCgroupParent(cgroupParent string) (stri cgroupParent = path.Base(cgroupParent) } } - glog.V(3).Infof("Setting cgroup parent to: %q", cgroupParent) + klog.V(3).Infof("Setting cgroup parent to: %q", cgroupParent) return cgroupParent, nil } @@ -518,7 +518,7 @@ func toAPIProtocol(protocol Protocol) v1.Protocol { case protocolSCTP: return v1.ProtocolSCTP } - glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol) + klog.Warningf("Unknown protocol %q: defaulting to TCP", protocol) return 
v1.ProtocolTCP } @@ -537,7 +537,7 @@ func effectiveHairpinMode(s *NetworkPluginSettings) error { // This is not a valid combination, since promiscuous-bridge only works on kubenet. Users might be using the // default values (from before the hairpin-mode flag existed) and we // should keep the old behavior. - glog.Warningf("Hairpin mode set to %q but kubenet is not enabled, falling back to %q", s.HairpinMode, kubeletconfig.HairpinVeth) + klog.Warningf("Hairpin mode set to %q but kubenet is not enabled, falling back to %q", s.HairpinMode, kubeletconfig.HairpinVeth) s.HairpinMode = kubeletconfig.HairpinVeth return nil } diff --git a/pkg/kubelet/dockershim/docker_streaming.go b/pkg/kubelet/dockershim/docker_streaming.go index d65b970041ac0..1c4dc813b0823 100644 --- a/pkg/kubelet/dockershim/docker_streaming.go +++ b/pkg/kubelet/dockershim/docker_streaming.go @@ -27,7 +27,7 @@ import ( "time" dockertypes "github.com/docker/docker/api/types" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/client-go/tools/remotecommand" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" @@ -199,7 +199,7 @@ func portForward(client libdocker.Interface, podSandboxID string, port int32, st } commandString := fmt.Sprintf("%s %s", nsenterPath, strings.Join(args, " ")) - glog.V(4).Infof("executing port forwarding command: %s", commandString) + klog.V(4).Infof("executing port forwarding command: %s", commandString) command := exec.Command(nsenterPath, args...) command.Stdout = stream diff --git a/pkg/kubelet/dockershim/exec.go b/pkg/kubelet/dockershim/exec.go index aaaff8487d344..4b0d085b5a179 100644 --- a/pkg/kubelet/dockershim/exec.go +++ b/pkg/kubelet/dockershim/exec.go @@ -22,7 +22,7 @@ import ( "time" dockertypes "github.com/docker/docker/api/types" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/client-go/tools/remotecommand" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -124,7 +124,7 @@ func (*NativeExecHandler) ExecInContainer(client libdocker.Interface, container count++ if count == 5 { - glog.Errorf("Exec session %s in container %s terminated but process still running!", execObj.ID, container.ID) + klog.Errorf("Exec session %s in container %s terminated but process still running!", execObj.ID, container.ID) break } diff --git a/pkg/kubelet/dockershim/helpers.go b/pkg/kubelet/dockershim/helpers.go index 6719422d0d913..21166b82d1f94 100644 --- a/pkg/kubelet/dockershim/helpers.go +++ b/pkg/kubelet/dockershim/helpers.go @@ -26,7 +26,7 @@ import ( dockercontainer "github.com/docker/docker/api/types/container" dockerfilters "github.com/docker/docker/api/types/filters" dockernat "github.com/docker/go-connections/nat" - "github.com/golang/glog" + "k8s.io/klog" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/pkg/credentialprovider" @@ -142,7 +142,7 @@ func generateMountBindings(mounts []*runtimeapi.Mount) []string { case runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER: attrs = append(attrs, "rslave") default: - glog.Warningf("unknown propagation mode for hostPath %q", m.HostPath) + klog.Warningf("unknown propagation mode for hostPath %q", m.HostPath) // Falls back to "private" } @@ -175,7 +175,7 @@ func makePortsAndBindings(pm []*runtimeapi.PortMapping) (dockernat.PortSet, map[ case runtimeapi.Protocol_SCTP: protocol = "/sctp" default: - glog.Warningf("Unknown protocol %q: defaulting to TCP", port.Protocol) + klog.Warningf("Unknown protocol %q: defaulting to TCP", port.Protocol) protocol = "/tcp" } @@ -283,12 +283,12 @@ func 
recoverFromCreationConflictIfNeeded(client libdocker.Interface, createConfi } id := matches[1] - glog.Warningf("Unable to create pod sandbox due to conflict. Attempting to remove sandbox %q", id) + klog.Warningf("Unable to create pod sandbox due to conflict. Attempting to remove sandbox %q", id) if rmErr := client.RemoveContainer(id, dockertypes.ContainerRemoveOptions{RemoveVolumes: true}); rmErr == nil { - glog.V(2).Infof("Successfully removed conflicting container %q", id) + klog.V(2).Infof("Successfully removed conflicting container %q", id) return nil, err } else { - glog.Errorf("Failed to remove the conflicting container %q: %v", id, rmErr) + klog.Errorf("Failed to remove the conflicting container %q: %v", id, rmErr) // Return if the error is not container not found error. if !libdocker.IsContainerNotFoundError(rmErr) { return nil, err @@ -297,7 +297,7 @@ func recoverFromCreationConflictIfNeeded(client libdocker.Interface, createConfi // randomize the name to avoid conflict. createConfig.Name = randomizeName(createConfig.Name) - glog.V(2).Infof("Create the container with randomized name %s", createConfig.Name) + klog.V(2).Infof("Create the container with randomized name %s", createConfig.Name) return client.CreateContainer(createConfig) } @@ -332,7 +332,7 @@ func ensureSandboxImageExists(client libdocker.Interface, image string) error { keyring := credentialprovider.NewDockerKeyring() creds, withCredentials := keyring.Lookup(repoToPull) if !withCredentials { - glog.V(3).Infof("Pulling image %q without credentials", image) + klog.V(3).Infof("Pulling image %q without credentials", image) err := client.PullImage(image, dockertypes.AuthConfig{}, dockertypes.ImagePullOptions{}) if err != nil { diff --git a/pkg/kubelet/dockershim/helpers_unsupported.go b/pkg/kubelet/dockershim/helpers_unsupported.go index 2867898f301f7..d78a7eb7b5392 100644 --- a/pkg/kubelet/dockershim/helpers_unsupported.go +++ b/pkg/kubelet/dockershim/helpers_unsupported.go @@ -23,7 +23,7 @@ import ( "github.com/blang/semver" dockertypes "github.com/docker/docker/api/types" - "github.com/golang/glog" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" ) @@ -32,7 +32,7 @@ func DefaultMemorySwap() int64 { } func (ds *dockerService) getSecurityOpts(seccompProfile string, separator rune) ([]string, error) { - glog.Warningf("getSecurityOpts is unsupported in this build") + klog.Warningf("getSecurityOpts is unsupported in this build") return nil, nil } @@ -41,12 +41,12 @@ func (ds *dockerService) updateCreateConfig( config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig, podSandboxID string, securityOptSep rune, apiVersion *semver.Version) error { - glog.Warningf("updateCreateConfig is unsupported in this build") + klog.Warningf("updateCreateConfig is unsupported in this build") return nil } func (ds *dockerService) determinePodIPBySandboxID(uid string) string { - glog.Warningf("determinePodIPBySandboxID is unsupported in this build") + klog.Warningf("determinePodIPBySandboxID is unsupported in this build") return "" } diff --git a/pkg/kubelet/dockershim/helpers_windows.go b/pkg/kubelet/dockershim/helpers_windows.go index 436701546c583..d6c8ebabb96de 100644 --- a/pkg/kubelet/dockershim/helpers_windows.go +++ b/pkg/kubelet/dockershim/helpers_windows.go @@ -25,7 +25,7 @@ import ( dockertypes "github.com/docker/docker/api/types" dockercontainer "github.com/docker/docker/api/types/container" dockerfilters "github.com/docker/docker/api/types/filters" - "github.com/golang/glog" 
+ "k8s.io/klog" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" @@ -37,7 +37,7 @@ func DefaultMemorySwap() int64 { func (ds *dockerService) getSecurityOpts(seccompProfile string, separator rune) ([]string, error) { if seccompProfile != "" { - glog.Warningf("seccomp annotations are not supported on windows") + klog.Warningf("seccomp annotations are not supported on windows") } return nil, nil } diff --git a/pkg/kubelet/dockershim/libdocker/BUILD b/pkg/kubelet/dockershim/libdocker/BUILD index 25c1a0e2d3819..f706d66e3e881 100644 --- a/pkg/kubelet/dockershim/libdocker/BUILD +++ b/pkg/kubelet/dockershim/libdocker/BUILD @@ -40,8 +40,8 @@ go_library( "//vendor/github.com/docker/docker/client:go_default_library", "//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library", "//vendor/github.com/docker/docker/pkg/stdcopy:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/opencontainers/go-digest:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/dockershim/libdocker/client.go b/pkg/kubelet/dockershim/libdocker/client.go index 73f0a4dc7d734..d43da1bd22e35 100644 --- a/pkg/kubelet/dockershim/libdocker/client.go +++ b/pkg/kubelet/dockershim/libdocker/client.go @@ -23,7 +23,7 @@ import ( dockercontainer "github.com/docker/docker/api/types/container" dockerimagetypes "github.com/docker/docker/api/types/image" dockerapi "github.com/docker/docker/client" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -72,7 +72,7 @@ type Interface interface { // DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT path per their spec func getDockerClient(dockerEndpoint string) (*dockerapi.Client, error) { if len(dockerEndpoint) > 0 { - glog.Infof("Connecting to docker on %s", dockerEndpoint) + klog.Infof("Connecting to docker on %s", dockerEndpoint) return dockerapi.NewClient(dockerEndpoint, "", nil, nil) } return dockerapi.NewEnvClient() @@ -99,8 +99,8 @@ func ConnectToDockerOrDie(dockerEndpoint string, requestTimeout, imagePullProgre } client, err := getDockerClient(dockerEndpoint) if err != nil { - glog.Fatalf("Couldn't connect to docker: %v", err) + klog.Fatalf("Couldn't connect to docker: %v", err) } - glog.Infof("Start docker client with request timeout=%v", requestTimeout) + klog.Infof("Start docker client with request timeout=%v", requestTimeout) return newKubeDockerClient(client, requestTimeout, imagePullProgressDeadline) } diff --git a/pkg/kubelet/dockershim/libdocker/helpers.go b/pkg/kubelet/dockershim/libdocker/helpers.go index f8a785cbd0a4d..a26ca48a36e25 100644 --- a/pkg/kubelet/dockershim/libdocker/helpers.go +++ b/pkg/kubelet/dockershim/libdocker/helpers.go @@ -22,8 +22,8 @@ import ( dockerref "github.com/docker/distribution/reference" dockertypes "github.com/docker/docker/api/types" - "github.com/golang/glog" godigest "github.com/opencontainers/go-digest" + "k8s.io/klog" ) // ParseDockerTimestamp parses the timestamp returned by Interface from string to time.Time @@ -42,7 +42,7 @@ func matchImageTagOrSHA(inspected dockertypes.ImageInspect, image string) bool { // https://github.com/docker/distribution/blob/master/reference/reference.go#L4 named, err := dockerref.ParseNormalizedNamed(image) if err != nil { - glog.V(4).Infof("couldn't parse image reference %q: %v", image, err) + klog.V(4).Infof("couldn't parse image reference %q: %v", image, err) return false } _, isTagged := named.(dockerref.Tagged) @@ -100,7 +100,7 @@ func 
matchImageTagOrSHA(inspected dockertypes.ImageInspect, image string) bool { for _, repoDigest := range inspected.RepoDigests { named, err := dockerref.ParseNormalizedNamed(repoDigest) if err != nil { - glog.V(4).Infof("couldn't parse image RepoDigest reference %q: %v", repoDigest, err) + klog.V(4).Infof("couldn't parse image RepoDigest reference %q: %v", repoDigest, err) continue } if d, isDigested := named.(dockerref.Digested); isDigested { @@ -114,14 +114,14 @@ func matchImageTagOrSHA(inspected dockertypes.ImageInspect, image string) bool { // process the ID as a digest id, err := godigest.Parse(inspected.ID) if err != nil { - glog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err) + klog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err) return false } if digest.Digest().Algorithm().String() == id.Algorithm().String() && digest.Digest().Hex() == id.Hex() { return true } } - glog.V(4).Infof("Inspected image (%q) does not match %s", inspected.ID, image) + klog.V(4).Infof("Inspected image (%q) does not match %s", inspected.ID, image) return false } @@ -138,19 +138,19 @@ func matchImageIDOnly(inspected dockertypes.ImageInspect, image string) bool { // Otherwise, we should try actual parsing to be more correct ref, err := dockerref.Parse(image) if err != nil { - glog.V(4).Infof("couldn't parse image reference %q: %v", image, err) + klog.V(4).Infof("couldn't parse image reference %q: %v", image, err) return false } digest, isDigested := ref.(dockerref.Digested) if !isDigested { - glog.V(4).Infof("the image reference %q was not a digest reference", image) + klog.V(4).Infof("the image reference %q was not a digest reference", image) return false } id, err := godigest.Parse(inspected.ID) if err != nil { - glog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err) + klog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err) return false } @@ -158,7 +158,7 @@ func matchImageIDOnly(inspected dockertypes.ImageInspect, image string) bool { return true } - glog.V(4).Infof("The reference %s does not directly refer to the given image's ID (%q)", image, inspected.ID) + klog.V(4).Infof("The reference %s does not directly refer to the given image's ID (%q)", image, inspected.ID) return false } diff --git a/pkg/kubelet/dockershim/libdocker/kube_docker_client.go b/pkg/kubelet/dockershim/libdocker/kube_docker_client.go index 786713754102d..0cdb9955e3d7e 100644 --- a/pkg/kubelet/dockershim/libdocker/kube_docker_client.go +++ b/pkg/kubelet/dockershim/libdocker/kube_docker_client.go @@ -28,7 +28,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" dockertypes "github.com/docker/docker/api/types" dockercontainer "github.com/docker/docker/api/types/container" @@ -88,8 +88,8 @@ func newKubeDockerClient(dockerClient *dockerapi.Client, requestTimeout, imagePu // Notice that this assumes that docker is running before kubelet is started. v, err := k.Version() if err != nil { - glog.Errorf("failed to retrieve docker version: %v", err) - glog.Warningf("Using empty version for docker client, this may sometimes cause compatibility issue.") + klog.Errorf("failed to retrieve docker version: %v", err) + klog.Warningf("Using empty version for docker client, this may sometimes cause compatibility issue.") } else { // Update client version with real api version. 
dockerClient.NegotiateAPIVersionPing(dockertypes.Ping{APIVersion: v.APIVersion}) @@ -338,14 +338,14 @@ func (p *progressReporter) start() { progress, timestamp := p.progress.get() // If there is no progress for p.imagePullProgressDeadline, cancel the operation. if time.Since(timestamp) > p.imagePullProgressDeadline { - glog.Errorf("Cancel pulling image %q because of no progress for %v, latest progress: %q", p.image, p.imagePullProgressDeadline, progress) + klog.Errorf("Cancel pulling image %q because of no progress for %v, latest progress: %q", p.image, p.imagePullProgressDeadline, progress) p.cancel() return } - glog.V(2).Infof("Pulling image %q: %q", p.image, progress) + klog.V(2).Infof("Pulling image %q: %q", p.image, progress) case <-p.stopCh: progress, _ := p.progress.get() - glog.V(2).Infof("Stop pulling image %q: %q", p.image, progress) + klog.V(2).Infof("Stop pulling image %q: %q", p.image, progress) return } } diff --git a/pkg/kubelet/dockershim/network/BUILD b/pkg/kubelet/dockershim/network/BUILD index 13ab4c3d8530a..330764a3c087f 100644 --- a/pkg/kubelet/dockershim/network/BUILD +++ b/pkg/kubelet/dockershim/network/BUILD @@ -18,7 +18,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/kubelet/dockershim/network/cni/BUILD b/pkg/kubelet/dockershim/network/cni/BUILD index 6f9b6cfee9140..8691c7344f2a4 100644 --- a/pkg/kubelet/dockershim/network/cni/BUILD +++ b/pkg/kubelet/dockershim/network/cni/BUILD @@ -22,7 +22,7 @@ go_library( "//pkg/util/bandwidth:go_default_library", "//vendor/github.com/containernetworking/cni/libcni:go_default_library", "//vendor/github.com/containernetworking/cni/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:windows": [ diff --git a/pkg/kubelet/dockershim/network/cni/cni.go b/pkg/kubelet/dockershim/network/cni/cni.go index 86df39dd87b75..77546a0974c69 100644 --- a/pkg/kubelet/dockershim/network/cni/cni.go +++ b/pkg/kubelet/dockershim/network/cni/cni.go @@ -27,7 +27,7 @@ import ( "github.com/containernetworking/cni/libcni" cnitypes "github.com/containernetworking/cni/pkg/types" - "github.com/golang/glog" + "k8s.io/klog" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -146,34 +146,34 @@ func getDefaultCNINetwork(confDir string, binDirs []string) (*cniNetwork, error) if strings.HasSuffix(confFile, ".conflist") { confList, err = libcni.ConfListFromFile(confFile) if err != nil { - glog.Warningf("Error loading CNI config list file %s: %v", confFile, err) + klog.Warningf("Error loading CNI config list file %s: %v", confFile, err) continue } } else { conf, err := libcni.ConfFromFile(confFile) if err != nil { - glog.Warningf("Error loading CNI config file %s: %v", confFile, err) + klog.Warningf("Error loading CNI config file %s: %v", confFile, err) continue } // Ensure the config has a "type" so we know what plugin to run. // Also catches the case where somebody put a conflist into a conf file. 
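// Illustrative sketch, not from this patch: the klog.Warningf calls in this
// loop implement a warn-and-skip scan, where one malformed CNI config file
// is logged and ignored rather than failing the whole network sync. The
// same shape standalone; the file list and parse function are placeholders:

package main

import (
	"fmt"

	"k8s.io/klog"
)

func firstValidNetwork(confFiles []string, parse func(string) (string, error)) (string, error) {
	for _, confFile := range confFiles {
		name, err := parse(confFile)
		if err != nil {
			klog.Warningf("Error loading CNI config file %s: %v", confFile, err)
			continue // skip the bad file, keep scanning
		}
		klog.V(4).Infof("Using CNI configuration file %s", confFile)
		return name, nil
	}
	return "", fmt.Errorf("no valid networks found in %d file(s)", len(confFiles))
}

func main() {
	klog.InitFlags(nil)
	parse := func(string) (string, error) { return "", fmt.Errorf("unparseable") }
	if _, err := firstValidNetwork([]string{"/etc/cni/net.d/10-bad.conf"}, parse); err != nil {
		klog.Errorf("CNI sync failed: %v", err)
	}
	klog.Flush()
}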
if conf.Network.Type == "" { - glog.Warningf("Error loading CNI config file %s: no 'type'; perhaps this is a .conflist?", confFile) + klog.Warningf("Error loading CNI config file %s: no 'type'; perhaps this is a .conflist?", confFile) continue } confList, err = libcni.ConfListFromConf(conf) if err != nil { - glog.Warningf("Error converting CNI config file %s to list: %v", confFile, err) + klog.Warningf("Error converting CNI config file %s to list: %v", confFile, err) continue } } if len(confList.Plugins) == 0 { - glog.Warningf("CNI config list %s has no networks, skipping", confFile) + klog.Warningf("CNI config list %s has no networks, skipping", confFile) continue } - glog.V(4).Infof("Using CNI configuration file %s", confFile) + klog.V(4).Infof("Using CNI configuration file %s", confFile) network := &cniNetwork{ name: confList.Name, @@ -200,7 +200,7 @@ func (plugin *cniNetworkPlugin) Init(host network.Host, hairpinMode kubeletconfi func (plugin *cniNetworkPlugin) syncNetworkConfig() { network, err := getDefaultCNINetwork(plugin.confDir, plugin.binDirs) if err != nil { - glog.Warningf("Unable to update cni config: %s", err) + klog.Warningf("Unable to update cni config: %s", err) return } plugin.setDefaultNetwork(network) @@ -247,12 +247,12 @@ func (plugin *cniNetworkPlugin) Event(name string, details map[string]interface{ podCIDR, ok := details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR].(string) if !ok { - glog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE) + klog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE) return } if plugin.podCidr != "" { - glog.Warningf("Ignoring subsequent pod CIDR update to %s", podCIDR) + klog.Warningf("Ignoring subsequent pod CIDR update to %s", podCIDR) return } @@ -299,7 +299,7 @@ func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id ku // Lack of namespace should not be fatal on teardown netnsPath, err := plugin.host.GetNetNS(id.ID) if err != nil { - glog.Warningf("CNI failed to retrieve network namespace path: %v", err) + klog.Warningf("CNI failed to retrieve network namespace path: %v", err) } return plugin.deleteFromNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath, nil) @@ -312,40 +312,40 @@ func podDesc(namespace, name string, id kubecontainer.ContainerID) string { func (plugin *cniNetworkPlugin) addToNetwork(network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations, options map[string]string) (cnitypes.Result, error) { rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath, annotations, options) if err != nil { - glog.Errorf("Error adding network when building cni runtime conf: %v", err) + klog.Errorf("Error adding network when building cni runtime conf: %v", err) return nil, err } pdesc := podDesc(podNamespace, podName, podSandboxID) netConf, cniNet := network.NetworkConfig, network.CNIConfig - glog.V(4).Infof("Adding %s to network %s/%s netns %q", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, podNetnsPath) + klog.V(4).Infof("Adding %s to network %s/%s netns %q", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, podNetnsPath) res, err := cniNet.AddNetworkList(netConf, rt) if err != nil { - glog.Errorf("Error adding %s to network %s/%s: %v", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, err) + klog.Errorf("Error adding %s to network %s/%s: %v", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, 
err) return nil, err } - glog.V(4).Infof("Added %s to network %s: %v", pdesc, netConf.Name, res) + klog.V(4).Infof("Added %s to network %s: %v", pdesc, netConf.Name, res) return res, nil } func (plugin *cniNetworkPlugin) deleteFromNetwork(network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string, annotations map[string]string) error { rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath, annotations, nil) if err != nil { - glog.Errorf("Error deleting network when building cni runtime conf: %v", err) + klog.Errorf("Error deleting network when building cni runtime conf: %v", err) return err } pdesc := podDesc(podNamespace, podName, podSandboxID) netConf, cniNet := network.NetworkConfig, network.CNIConfig - glog.V(4).Infof("Deleting %s from network %s/%s netns %q", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, podNetnsPath) + klog.V(4).Infof("Deleting %s from network %s/%s netns %q", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, podNetnsPath) err = cniNet.DelNetworkList(netConf, rt) // The pod may not get deleted successfully at the first time. // Ignore "no such file or directory" error in case the network has already been deleted in previous attempts. if err != nil && !strings.Contains(err.Error(), "no such file or directory") { - glog.Errorf("Error deleting %s from network %s/%s: %v", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, err) + klog.Errorf("Error deleting %s from network %s/%s: %v", pdesc, netConf.Plugins[0].Network.Type, netConf.Name, err) return err } - glog.V(4).Infof("Deleted %s from network %s/%s", pdesc, netConf.Plugins[0].Network.Type, netConf.Name) + klog.V(4).Infof("Deleted %s from network %s/%s", pdesc, netConf.Plugins[0].Network.Type, netConf.Name) return nil } diff --git a/pkg/kubelet/dockershim/network/cni/cni_windows.go b/pkg/kubelet/dockershim/network/cni/cni_windows.go index 29f8f31ee5f86..76b78e143cbad 100644 --- a/pkg/kubelet/dockershim/network/cni/cni_windows.go +++ b/pkg/kubelet/dockershim/network/cni/cni_windows.go @@ -22,7 +22,7 @@ import ( "fmt" cniTypes020 "github.com/containernetworking/cni/pkg/types/020" - "github.com/golang/glog" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim/network" @@ -45,9 +45,9 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name strin result, err := plugin.addToNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath, nil, nil) - glog.V(5).Infof("GetPodNetworkStatus result %+v", result) + klog.V(5).Infof("GetPodNetworkStatus result %+v", result) if err != nil { - glog.Errorf("error while adding to cni network: %s", err) + klog.Errorf("error while adding to cni network: %s", err) return nil, err } @@ -55,7 +55,7 @@ func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name strin var result020 *cniTypes020.Result result020, err = cniTypes020.GetResult(result) if err != nil { - glog.Errorf("error while cni parsing result: %s", err) + klog.Errorf("error while cni parsing result: %s", err) return nil, err } return &network.PodNetworkStatus{IP: result020.IP4.IP.IP}, nil diff --git a/pkg/kubelet/dockershim/network/hairpin/BUILD b/pkg/kubelet/dockershim/network/hairpin/BUILD index 94401b4692223..570c027ee74e3 100644 --- a/pkg/kubelet/dockershim/network/hairpin/BUILD +++ b/pkg/kubelet/dockershim/network/hairpin/BUILD @@ -11,7 +11,7 @@ 
go_library( srcs = ["hairpin.go"], importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hairpin", deps = [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/kubelet/dockershim/network/hairpin/hairpin.go b/pkg/kubelet/dockershim/network/hairpin/hairpin.go index d933131ed31fb..262e78460dfbf 100644 --- a/pkg/kubelet/dockershim/network/hairpin/hairpin.go +++ b/pkg/kubelet/dockershim/network/hairpin/hairpin.go @@ -25,7 +25,7 @@ import ( "regexp" "strconv" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/utils/exec" ) @@ -72,7 +72,7 @@ func findPairInterfaceOfContainerInterface(e exec.Interface, containerInterfaceN } func setUpInterface(ifName string) error { - glog.V(3).Infof("Enabling hairpin on interface %s", ifName) + klog.V(3).Infof("Enabling hairpin on interface %s", ifName) ifPath := path.Join(sysfsNetPath, ifName) if _, err := os.Stat(ifPath); err != nil { return err diff --git a/pkg/kubelet/dockershim/network/hostport/BUILD b/pkg/kubelet/dockershim/network/hostport/BUILD index d1f438e0ff8ab..cceadb121aed1 100644 --- a/pkg/kubelet/dockershim/network/hostport/BUILD +++ b/pkg/kubelet/dockershim/network/hostport/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/kubelet/dockershim/network/hostport/hostport.go b/pkg/kubelet/dockershim/network/hostport/hostport.go index e04107fa3899b..4f9f7751b3f03 100644 --- a/pkg/kubelet/dockershim/network/hostport/hostport.go +++ b/pkg/kubelet/dockershim/network/hostport/hostport.go @@ -21,7 +21,7 @@ import ( "net" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" utiliptables "k8s.io/kubernetes/pkg/util/iptables" @@ -97,7 +97,7 @@ func openLocalPort(hp *hostport) (closeable, error) { default: return nil, fmt.Errorf("unknown protocol %q", hp.protocol) } - glog.V(3).Infof("Opened local port %s", hp.String()) + klog.V(3).Infof("Opened local port %s", hp.String()) return socket, nil } @@ -111,7 +111,7 @@ func portMappingToHostport(portMapping *PortMapping) hostport { // ensureKubeHostportChains ensures the KUBE-HOSTPORTS chain is setup correctly func ensureKubeHostportChains(iptables utiliptables.Interface, natInterfaceName string) error { - glog.V(4).Info("Ensuring kubelet hostport chains") + klog.V(4).Info("Ensuring kubelet hostport chains") // Ensure kubeHostportChain if _, err := iptables.EnsureChain(utiliptables.TableNAT, kubeHostportsChain); err != nil { return fmt.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubeHostportsChain, err) diff --git a/pkg/kubelet/dockershim/network/hostport/hostport_manager.go b/pkg/kubelet/dockershim/network/hostport/hostport_manager.go index 70bfd16dab7ba..ce5108fd318a0 100644 --- a/pkg/kubelet/dockershim/network/hostport/hostport_manager.go +++ b/pkg/kubelet/dockershim/network/hostport/hostport_manager.go @@ -25,9 +25,9 @@ import ( "strings" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables" "k8s.io/kubernetes/pkg/util/conntrack" utiliptables 
"k8s.io/kubernetes/pkg/util/iptables" @@ -65,7 +65,7 @@ func NewHostportManager(iptables utiliptables.Interface) HostPortManager { } h.conntrackFound = conntrack.Exists(h.execer) if !h.conntrackFound { - glog.Warningf("The binary conntrack is not installed, this can cause failures in network connection cleanup.") + klog.Warningf("The binary conntrack is not installed, this can cause failures in network connection cleanup.") } return h } @@ -173,11 +173,11 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt // create a new conntrack entry without any DNAT. That will result in blackhole of the traffic even after correct // iptables rules have been added back. if hm.execer != nil && hm.conntrackFound { - glog.Infof("Starting to delete udp conntrack entries: %v, isIPv6 - %v", conntrackPortsToRemove, isIpv6) + klog.Infof("Starting to delete udp conntrack entries: %v, isIPv6 - %v", conntrackPortsToRemove, isIpv6) for _, port := range conntrackPortsToRemove { err = conntrack.ClearEntriesForPort(hm.execer, port, isIpv6, v1.ProtocolUDP) if err != nil { - glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", port, err) + klog.Errorf("Failed to clear udp conntrack for port %d, error: %v", port, err) } } } @@ -246,7 +246,7 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er // syncIPTables executes iptables-restore with given lines func (hm *hostportManager) syncIPTables(lines []byte) error { - glog.V(3).Infof("Restoring iptables rules: %s", lines) + klog.V(3).Infof("Restoring iptables rules: %s", lines) err := hm.iptables.RestoreAll(lines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { return fmt.Errorf("Failed to execute iptables-restore: %v", err) @@ -283,7 +283,7 @@ func (hm *hostportManager) openHostports(podPortMapping *PodPortMapping) (map[ho if retErr != nil { for hp, socket := range ports { if err := socket.Close(); err != nil { - glog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podPortMapping), err) + klog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podPortMapping), err) } } return nil, retErr @@ -297,7 +297,7 @@ func (hm *hostportManager) closeHostports(hostportMappings []*PortMapping) error for _, pm := range hostportMappings { hp := portMappingToHostport(pm) if socket, ok := hm.hostPortMap[hp]; ok { - glog.V(2).Infof("Closing host port %s", hp.String()) + klog.V(2).Infof("Closing host port %s", hp.String()) if err := socket.Close(); err != nil { errList = append(errList, fmt.Errorf("failed to close host port %s: %v", hp.String(), err)) continue diff --git a/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go b/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go index 1f9df7e9b9887..5274f890dff12 100644 --- a/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go +++ b/pkg/kubelet/dockershim/network/hostport/hostport_syncer.go @@ -25,7 +25,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables" @@ -97,7 +97,7 @@ func (h *hostportSyncer) openHostports(podHostportMapping *PodPortMapping) error if retErr != nil { for hp, socket := range ports { if err := socket.Close(); err != nil { - glog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podHostportMapping), err) + klog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podHostportMapping), err) } } return retErr 
@@ -188,7 +188,7 @@ func (h *hostportSyncer) OpenPodHostportsAndSync(newPortMapping *PodPortMapping, func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMappings []*PodPortMapping) error { start := time.Now() defer func() { - glog.V(4).Infof("syncHostportsRules took %v", time.Since(start)) + klog.V(4).Infof("syncHostportsRules took %v", time.Since(start)) }() hostportPodMap, err := gatherAllHostports(activePodPortMappings) @@ -205,7 +205,7 @@ func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMap iptablesData := bytes.NewBuffer(nil) err = h.iptables.SaveInto(utiliptables.TableNAT, iptablesData) if err != nil { // if we failed to get any rules - glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) + klog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) } else { // otherwise parse the output existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, iptablesData.Bytes()) } @@ -283,7 +283,7 @@ func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMap writeLine(natRules, "COMMIT") natLines := append(natChains.Bytes(), natRules.Bytes()...) - glog.V(3).Infof("Restoring iptables rules: %s", natLines) + klog.V(3).Infof("Restoring iptables rules: %s", natLines) err = h.iptables.RestoreAll(natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { return fmt.Errorf("Failed to execute iptables-restore: %v", err) @@ -309,7 +309,7 @@ func (h *hostportSyncer) cleanupHostportMap(containerPortMap map[*PortMapping]ta for hp, socket := range h.hostPortMap { if _, ok := currentHostports[hp]; !ok { socket.Close() - glog.V(3).Infof("Closed local port %s", hp.String()) + klog.V(3).Infof("Closed local port %s", hp.String()) delete(h.hostPortMap, hp) } } diff --git a/pkg/kubelet/dockershim/network/kubenet/BUILD b/pkg/kubelet/dockershim/network/kubenet/BUILD index 92613205c5bba..0b4c606aebe7d 100644 --- a/pkg/kubelet/dockershim/network/kubenet/BUILD +++ b/pkg/kubelet/dockershim/network/kubenet/BUILD @@ -51,9 +51,9 @@ go_library( "//vendor/github.com/containernetworking/cni/libcni:go_default_library", "//vendor/github.com/containernetworking/cni/pkg/types:go_default_library", "//vendor/github.com/containernetworking/cni/pkg/types/020:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/vishvananda/netlink:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ diff --git a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go index 3611a84aec374..c3909b1c7f41a 100644 --- a/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go +++ b/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go @@ -29,12 +29,12 @@ import ( "github.com/containernetworking/cni/libcni" cnitypes "github.com/containernetworking/cni/pkg/types" cnitypes020 "github.com/containernetworking/cni/pkg/types/020" - "github.com/golang/glog" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilnet "k8s.io/apimachinery/pkg/util/net" utilsets "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim/network" @@ -124,10 +124,10 @@ func (plugin 
*kubenetNetworkPlugin) Init(host network.Host, hairpinMode kubeletc if mtu == network.UseDefaultMTU { if link, err := findMinMTU(); err == nil { plugin.mtu = link.MTU - glog.V(5).Infof("Using interface %s MTU %d as bridge MTU", link.Name, link.MTU) + klog.V(5).Infof("Using interface %s MTU %d as bridge MTU", link.Name, link.MTU) } else { plugin.mtu = fallbackMTU - glog.Warningf("Failed to find default bridge MTU, using %d: %v", fallbackMTU, err) + klog.Warningf("Failed to find default bridge MTU, using %d: %v", fallbackMTU, err) } } else { plugin.mtu = mtu @@ -142,7 +142,7 @@ func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode kubeletc plugin.execer.Command("modprobe", "br-netfilter").CombinedOutput() err := plugin.sysctl.SetSysctl(sysctlBridgeCallIPTables, 1) if err != nil { - glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err) + klog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err) } plugin.loConfig, err = libcni.ConfFromBytes([]byte(`{ @@ -234,16 +234,16 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf podCIDR, ok := details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR].(string) if !ok { - glog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE) + klog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE) return } if plugin.netConfig != nil { - glog.Warningf("Ignoring subsequent pod CIDR update to %s", podCIDR) + klog.Warningf("Ignoring subsequent pod CIDR update to %s", podCIDR) return } - glog.V(5).Infof("PodCIDR is set to %q", podCIDR) + klog.V(5).Infof("PodCIDR is set to %q", podCIDR) _, cidr, err := net.ParseCIDR(podCIDR) if err == nil { setHairpin := plugin.hairpinMode == kubeletconfig.HairpinVeth @@ -251,10 +251,10 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf cidr.IP[len(cidr.IP)-1] += 1 json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, podCIDR, cidr.IP.String()) - glog.V(2).Infof("CNI network config set to %v", json) + klog.V(2).Infof("CNI network config set to %v", json) plugin.netConfig, err = libcni.ConfFromBytes([]byte(json)) if err == nil { - glog.V(5).Infof("CNI network config:\n%s", json) + klog.V(5).Infof("CNI network config:\n%s", json) // Ensure cbr0 has no conflicting addresses; CNI's 'bridge' // plugin will bail out if the bridge has an unexpected one @@ -265,7 +265,7 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf } if err != nil { - glog.Warningf("Failed to generate CNI network config: %v", err) + klog.Warningf("Failed to generate CNI network config: %v", err) } } @@ -282,7 +282,7 @@ func (plugin *kubenetNetworkPlugin) clearBridgeAddressesExcept(keep *net.IPNet) for _, addr := range addrs { if !utilnet.IPNetEqual(addr.IPNet, keep) { - glog.V(2).Infof("Removing old address %s from %s", addr.IPNet.String(), BridgeName) + klog.V(2).Infof("Removing old address %s from %s", addr.IPNet.String(), BridgeName) netlink.AddrDel(bridge, &addr) } } @@ -300,7 +300,7 @@ func (plugin *kubenetNetworkPlugin) Capabilities() utilsets.Int { func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error { // Disable DAD so we skip the kernel delay on bringing up new interfaces. 
if err := plugin.disableContainerDAD(id); err != nil { - glog.V(3).Infof("Failed to disable DAD in container: %v", err) + klog.V(3).Infof("Failed to disable DAD in container: %v", err) } // Bring up container loopback interface @@ -385,7 +385,7 @@ func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id k start := time.Now() defer func() { - glog.V(4).Infof("SetUpPod took %v for %s/%s", time.Since(start), namespace, name) + klog.V(4).Infof("SetUpPod took %v for %s/%s", time.Since(start), namespace, name) }() if err := plugin.Status(); err != nil { @@ -397,14 +397,14 @@ func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id k podIP, _ := plugin.podIPs[id] if err := plugin.teardown(namespace, name, id, podIP); err != nil { // Not a hard error or warning - glog.V(4).Infof("Failed to clean up %s/%s after SetUpPod failure: %v", namespace, name, err) + klog.V(4).Infof("Failed to clean up %s/%s after SetUpPod failure: %v", namespace, name, err) } return err } // Need to SNAT outbound traffic from cluster if err := plugin.ensureMasqRule(); err != nil { - glog.Errorf("Failed to ensure MASQ rule: %v", err) + klog.Errorf("Failed to ensure MASQ rule: %v", err) } return nil @@ -416,11 +416,11 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k errList := []error{} if podIP != "" { - glog.V(5).Infof("Removing pod IP %s from shaper", podIP) + klog.V(5).Infof("Removing pod IP %s from shaper", podIP) // shaper wants /32 if err := plugin.shaper().Reset(fmt.Sprintf("%s/32", podIP)); err != nil { // Possible bandwidth shaping wasn't enabled for this pod anyways - glog.V(4).Infof("Failed to remove pod IP %s from shaper: %v", podIP, err) + klog.V(4).Infof("Failed to remove pod IP %s from shaper: %v", podIP, err) } delete(plugin.podIPs, id) @@ -429,7 +429,7 @@ func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id k if err := plugin.delContainerFromNetwork(plugin.netConfig, network.DefaultInterfaceName, namespace, name, id); err != nil { // This is to prevent returning error when TearDownPod is called twice on the same pod. This helps to reduce event pollution. 
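// Illustrative sketch, not from this patch: in the branch below a delete
// failure is logged as a klog.Warningf when a pod IP is still tracked, and
// aggregated into the returned error otherwise (per the in-tree comment,
// this keeps repeated TearDownPod calls from polluting events). The same
// split standalone; deleteFromNetwork is a placeholder:

package main

import (
	"errors"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/klog"
)

func teardown(podIP string, deleteFromNetwork func() error) error {
	errList := []error{}
	if err := deleteFromNetwork(); err != nil {
		if podIP != "" {
			klog.Warningf("Failed to delete container from kubenet: %v", err)
		} else {
			errList = append(errList, err)
		}
	}
	return utilerrors.NewAggregate(errList) // nil when errList is empty
}

func main() {
	del := func() error { return errors.New("no such network") }
	// An IP was tracked, so the failure is warned about and nil is returned.
	if err := teardown("10.0.0.5", del); err != nil {
		klog.Errorf("teardown: %v", err)
	}
}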
if podIP != "" { - glog.Warningf("Failed to delete container from kubenet: %v", err) + klog.Warningf("Failed to delete container from kubenet: %v", err) } else { errList = append(errList, err) } @@ -457,7 +457,7 @@ func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, i start := time.Now() defer func() { - glog.V(4).Infof("TearDownPod took %v for %s/%s", time.Since(start), namespace, name) + klog.V(4).Infof("TearDownPod took %v for %s/%s", time.Since(start), namespace, name) }() if plugin.netConfig == nil { @@ -472,7 +472,7 @@ func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, i // Need to SNAT outbound traffic from cluster if err := plugin.ensureMasqRule(); err != nil { - glog.Errorf("Failed to ensure MASQ rule: %v", err) + klog.Errorf("Failed to ensure MASQ rule: %v", err) } return nil @@ -550,7 +550,7 @@ func (plugin *kubenetNetworkPlugin) checkRequiredCNIPluginsInOneDir(dir string) func (plugin *kubenetNetworkPlugin) buildCNIRuntimeConf(ifName string, id kubecontainer.ContainerID, needNetNs bool) (*libcni.RuntimeConf, error) { netnsPath, err := plugin.host.GetNetNS(id.ID) if needNetNs && err != nil { - glog.Errorf("Kubenet failed to retrieve network namespace path: %v", err) + klog.Errorf("Kubenet failed to retrieve network namespace path: %v", err) } return &libcni.RuntimeConf{ @@ -566,7 +566,7 @@ func (plugin *kubenetNetworkPlugin) addContainerToNetwork(config *libcni.Network return nil, fmt.Errorf("Error building CNI config: %v", err) } - glog.V(3).Infof("Adding %s/%s to '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) + klog.V(3).Infof("Adding %s/%s to '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) // The network plugin can take up to 3 seconds to execute, // so yield the lock while it runs. plugin.mu.Unlock() @@ -584,7 +584,7 @@ func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.Netwo return fmt.Errorf("Error building CNI config: %v", err) } - glog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) + klog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt) err = plugin.cniConfig.DelNetwork(config, rt) // The pod may not get deleted successfully at the first time. // Ignore "no such file or directory" error in case the network has already been deleted in previous attempts. @@ -609,40 +609,40 @@ func (plugin *kubenetNetworkPlugin) shaper() bandwidth.BandwidthShaper { func (plugin *kubenetNetworkPlugin) syncEbtablesDedupRules(macAddr net.HardwareAddr) { if plugin.ebtables == nil { plugin.ebtables = utilebtables.New(plugin.execer) - glog.V(3).Infof("Flushing dedup chain") + klog.V(3).Infof("Flushing dedup chain") if err := plugin.ebtables.FlushChain(utilebtables.TableFilter, dedupChain); err != nil { - glog.Errorf("Failed to flush dedup chain: %v", err) + klog.Errorf("Failed to flush dedup chain: %v", err) } } _, err := plugin.ebtables.GetVersion() if err != nil { - glog.Warningf("Failed to get ebtables version. Skip syncing ebtables dedup rules: %v", err) + klog.Warningf("Failed to get ebtables version. 
Skip syncing ebtables dedup rules: %v", err) return } - glog.V(3).Infof("Filtering packets with ebtables on mac address: %v, gateway: %v, pod CIDR: %v", macAddr.String(), plugin.gateway.String(), plugin.podCidr) + klog.V(3).Infof("Filtering packets with ebtables on mac address: %v, gateway: %v, pod CIDR: %v", macAddr.String(), plugin.gateway.String(), plugin.podCidr) _, err = plugin.ebtables.EnsureChain(utilebtables.TableFilter, dedupChain) if err != nil { - glog.Errorf("Failed to ensure %v chain %v", utilebtables.TableFilter, dedupChain) + klog.Errorf("Failed to ensure %v chain %v", utilebtables.TableFilter, dedupChain) return } _, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, utilebtables.ChainOutput, "-j", string(dedupChain)) if err != nil { - glog.Errorf("Failed to ensure %v chain %v jump to %v chain: %v", utilebtables.TableFilter, utilebtables.ChainOutput, dedupChain, err) + klog.Errorf("Failed to ensure %v chain %v jump to %v chain: %v", utilebtables.TableFilter, utilebtables.ChainOutput, dedupChain, err) return } commonArgs := []string{"-p", "IPv4", "-s", macAddr.String(), "-o", "veth+"} _, err = plugin.ebtables.EnsureRule(utilebtables.Prepend, utilebtables.TableFilter, dedupChain, append(commonArgs, "--ip-src", plugin.gateway.String(), "-j", "ACCEPT")...) if err != nil { - glog.Errorf("Failed to ensure packets from cbr0 gateway to be accepted") + klog.Errorf("Failed to ensure packets from cbr0 gateway to be accepted") return } _, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, dedupChain, append(commonArgs, "--ip-src", plugin.podCidr, "-j", "DROP")...) if err != nil { - glog.Errorf("Failed to ensure packets from podCidr but has mac address of cbr0 to get dropped.") + klog.Errorf("Failed to ensure packets from podCidr but has mac address of cbr0 to get dropped.") return } } diff --git a/pkg/kubelet/dockershim/network/plugins.go b/pkg/kubelet/dockershim/network/plugins.go index 139d237e652a5..c67c1a355b6fb 100644 --- a/pkg/kubelet/dockershim/network/plugins.go +++ b/pkg/kubelet/dockershim/network/plugins.go @@ -23,11 +23,11 @@ import ( "sync" "time" - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilsets "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport" @@ -156,7 +156,7 @@ func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host H if err != nil { allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err)) } else { - glog.V(1).Infof("Loaded network plugin %q", networkPluginName) + klog.V(1).Infof("Loaded network plugin %q", networkPluginName) } } else { allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName)) @@ -183,12 +183,12 @@ func (plugin *NoopNetworkPlugin) Init(host Host, hairpinMode kubeletconfig.Hairp // it was built-in. 
utilexec.New().Command("modprobe", "br-netfilter").CombinedOutput() if err := plugin.Sysctl.SetSysctl(sysctlBridgeCallIPTables, 1); err != nil { - glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err) + klog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err) } if val, err := plugin.Sysctl.GetSysctl(sysctlBridgeCallIP6Tables); err == nil { if val != 1 { if err = plugin.Sysctl.SetSysctl(sysctlBridgeCallIP6Tables, 1); err != nil { - glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIP6Tables, err) + klog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIP6Tables, err) } } } @@ -334,12 +334,12 @@ func (pm *PluginManager) podUnlock(fullPodName string) { lock, ok := pm.pods[fullPodName] if !ok { - glog.Warningf("Unbalanced pod lock unref for %s", fullPodName) + klog.Warningf("Unbalanced pod lock unref for %s", fullPodName) return } else if lock.refcount == 0 { // This should never ever happen, but handle it anyway delete(pm.pods, fullPodName) - glog.Warningf("Pod lock for %s still in map with zero refcount", fullPodName) + klog.Warningf("Pod lock for %s still in map with zero refcount", fullPodName) return } lock.refcount-- @@ -374,7 +374,7 @@ func (pm *PluginManager) SetUpPod(podNamespace, podName string, id kubecontainer pm.podLock(fullPodName).Lock() defer pm.podUnlock(fullPodName) - glog.V(3).Infof("Calling network plugin %s to set up pod %q", pm.plugin.Name(), fullPodName) + klog.V(3).Infof("Calling network plugin %s to set up pod %q", pm.plugin.Name(), fullPodName) if err := pm.plugin.SetUpPod(podNamespace, podName, id, annotations, options); err != nil { return fmt.Errorf("NetworkPlugin %s failed to set up pod %q network: %v", pm.plugin.Name(), fullPodName, err) } @@ -388,7 +388,7 @@ func (pm *PluginManager) TearDownPod(podNamespace, podName string, id kubecontai pm.podLock(fullPodName).Lock() defer pm.podUnlock(fullPodName) - glog.V(3).Infof("Calling network plugin %s to tear down pod %q", pm.plugin.Name(), fullPodName) + klog.V(3).Infof("Calling network plugin %s to tear down pod %q", pm.plugin.Name(), fullPodName) if err := pm.plugin.TearDownPod(podNamespace, podName, id); err != nil { return fmt.Errorf("NetworkPlugin %s failed to teardown pod %q network: %v", pm.plugin.Name(), fullPodName, err) } diff --git a/pkg/kubelet/dockershim/remote/BUILD b/pkg/kubelet/dockershim/remote/BUILD index a40ce09eaaeaf..06740b1f1603c 100644 --- a/pkg/kubelet/dockershim/remote/BUILD +++ b/pkg/kubelet/dockershim/remote/BUILD @@ -13,8 +13,8 @@ go_library( "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library", "//pkg/kubelet/dockershim:go_default_library", "//pkg/kubelet/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/dockershim/remote/docker_server.go b/pkg/kubelet/dockershim/remote/docker_server.go index 546c3b6c4abb0..734f61cca04bb 100644 --- a/pkg/kubelet/dockershim/remote/docker_server.go +++ b/pkg/kubelet/dockershim/remote/docker_server.go @@ -19,8 +19,8 @@ package remote import ( "fmt" - "github.com/golang/glog" "google.golang.org/grpc" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" "k8s.io/kubernetes/pkg/kubelet/dockershim" "k8s.io/kubernetes/pkg/kubelet/util" @@ -52,11 +52,11 @@ func NewDockerServer(endpoint string, s dockershim.CRIService) *DockerServer { func (s *DockerServer) Start() error { // Start the internal service. 
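// Illustrative sketch, not from this patch: Start below keeps glog's
// severity semantics under the new import. Errorf logs and lets the caller
// handle the returned error, while the Fatalf in the Serve goroutine (next
// hunk) logs, flushes, and exits the process, since dockershim cannot keep
// running without its gRPC server. The distinction standalone; serve is a
// stand-in for grpc.Server.Serve:

package main

import (
	"errors"

	"k8s.io/klog"
)

func serve() error { return errors.New("listener closed") }

func main() {
	klog.InitFlags(nil)
	done := make(chan struct{})
	go func() {
		defer close(done)
		if err := serve(); err != nil {
			// Fatalf logs at FATAL severity, flushes, then calls os.Exit.
			klog.Fatalf("Failed to serve connections: %v", err)
		}
	}()
	<-done // reached only if serve returns nil; Fatalf exits the process otherwise
}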
if err := s.service.Start(); err != nil { - glog.Errorf("Unable to start docker service") + klog.Errorf("Unable to start docker service") return err } - glog.V(2).Infof("Start dockershim grpc server") + klog.V(2).Infof("Start dockershim grpc server") l, err := util.CreateListener(s.endpoint) if err != nil { return fmt.Errorf("failed to listen on %q: %v", s.endpoint, err) @@ -70,7 +70,7 @@ func (s *DockerServer) Start() error { runtimeapi.RegisterImageServiceServer(s.server, s.service) go func() { if err := s.server.Serve(l); err != nil { - glog.Fatalf("Failed to serve connections: %v", err) + klog.Fatalf("Failed to serve connections: %v", err) } }() return nil diff --git a/pkg/kubelet/eviction/BUILD b/pkg/kubelet/eviction/BUILD index a3c139bcd60fb..320f4f64bf2db 100644 --- a/pkg/kubelet/eviction/BUILD +++ b/pkg/kubelet/eviction/BUILD @@ -67,7 +67,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index c1bf8837c787c..7e1ede9eeb753 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -153,7 +153,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd } // reject pods when under memory pressure (if pod is best effort), or if under disk pressure. - glog.Warningf("Failed to admit pod %s - node has conditions: %v", format.Pod(attrs.Pod), m.nodeConditions) + klog.Warningf("Failed to admit pod %s - node has conditions: %v", format.Pod(attrs.Pod), m.nodeConditions) return lifecycle.PodAdmitResult{ Admit: false, Reason: Reason, @@ -164,7 +164,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd // Start starts the control loop to observe and response to low compute resources. 
func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, podCleanedUpFunc PodCleanedUpFunc, monitoringInterval time.Duration) { thresholdHandler := func(message string) { - glog.Infof(message) + klog.Infof(message) m.synchronize(diskInfoProvider, podFunc) } if m.config.KernelMemcgNotification { @@ -172,7 +172,7 @@ func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePod if threshold.Signal == evictionapi.SignalMemoryAvailable || threshold.Signal == evictionapi.SignalAllocatableMemoryAvailable { notifier, err := NewMemoryThresholdNotifier(threshold, m.config.PodCgroupRoot, &CgroupNotifierFactory{}, thresholdHandler) if err != nil { - glog.Warningf("eviction manager: failed to create memory threshold notifier: %v", err) + klog.Warningf("eviction manager: failed to create memory threshold notifier: %v", err) } else { go notifier.Start() m.thresholdNotifiers = append(m.thresholdNotifiers, notifier) @@ -184,7 +184,7 @@ func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePod go func() { for { if evictedPods := m.synchronize(diskInfoProvider, podFunc); evictedPods != nil { - glog.Infof("eviction manager: pods %s evicted, waiting for pod to be cleaned up", format.Pods(evictedPods)) + klog.Infof("eviction manager: pods %s evicted, waiting for pod to be cleaned up", format.Pods(evictedPods)) m.waitForPodsCleanup(podCleanedUpFunc, evictedPods) } else { time.Sleep(monitoringInterval) @@ -223,7 +223,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act return nil } - glog.V(3).Infof("eviction manager: synchronize housekeeping") + klog.V(3).Infof("eviction manager: synchronize housekeeping") // build the ranking functions (if not yet known) // TODO: have a function in cadvisor that lets us know if global housekeeping has completed if m.dedicatedImageFs == nil { @@ -240,7 +240,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act updateStats := true summary, err := m.summaryProvider.Get(updateStats) if err != nil { - glog.Errorf("eviction manager: failed to get summary stats: %v", err) + klog.Errorf("eviction manager: failed to get summary stats: %v", err) return nil } @@ -248,7 +248,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act m.thresholdsLastUpdated = m.clock.Now() for _, notifier := range m.thresholdNotifiers { if err := notifier.UpdateThreshold(summary); err != nil { - glog.Warningf("eviction manager: failed to update %s: %v", notifier.Description(), err) + klog.Warningf("eviction manager: failed to update %s: %v", notifier.Description(), err) } } } @@ -275,7 +275,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act // the set of node conditions that are triggered by currently observed thresholds nodeConditions := nodeConditions(thresholds) if len(nodeConditions) > 0 { - glog.V(3).Infof("eviction manager: node conditions - observed: %v", nodeConditions) + klog.V(3).Infof("eviction manager: node conditions - observed: %v", nodeConditions) } // track when a node condition was last observed @@ -284,7 +284,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act // node conditions report true if it has been observed within the transition period window nodeConditions = nodeConditionsObservedSince(nodeConditionsLastObservedAt, m.config.PressureTransitionPeriod, now) if len(nodeConditions) > 0 { - glog.V(3).Infof("eviction manager: node conditions - transition 
period not met: %v", nodeConditions) + klog.V(3).Infof("eviction manager: node conditions - transition period not met: %v", nodeConditions) } // determine the set of thresholds we need to drive eviction behavior (i.e. all grace periods are met) @@ -314,7 +314,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act } if len(thresholds) == 0 { - glog.V(3).Infof("eviction manager: no resources are starved") + klog.V(3).Infof("eviction manager: no resources are starved") return nil } @@ -323,39 +323,39 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act thresholdToReclaim := thresholds[0] resourceToReclaim, found := signalToResource[thresholdToReclaim.Signal] if !found { - glog.V(3).Infof("eviction manager: threshold %s was crossed, but reclaim is not implemented for this threshold.", thresholdToReclaim.Signal) + klog.V(3).Infof("eviction manager: threshold %s was crossed, but reclaim is not implemented for this threshold.", thresholdToReclaim.Signal) return nil } - glog.Warningf("eviction manager: attempting to reclaim %v", resourceToReclaim) + klog.Warningf("eviction manager: attempting to reclaim %v", resourceToReclaim) // record an event about the resources we are now attempting to reclaim via eviction m.recorder.Eventf(m.nodeRef, v1.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim) // check if there are node-level resources we can reclaim to reduce pressure before evicting end-user pods. if m.reclaimNodeLevelResources(thresholdToReclaim.Signal, resourceToReclaim) { - glog.Infof("eviction manager: able to reduce %v pressure without evicting pods.", resourceToReclaim) + klog.Infof("eviction manager: able to reduce %v pressure without evicting pods.", resourceToReclaim) return nil } - glog.Infof("eviction manager: must evict pod(s) to reclaim %v", resourceToReclaim) + klog.Infof("eviction manager: must evict pod(s) to reclaim %v", resourceToReclaim) // rank the pods for eviction rank, ok := m.signalToRankFunc[thresholdToReclaim.Signal] if !ok { - glog.Errorf("eviction manager: no ranking function for signal %s", thresholdToReclaim.Signal) + klog.Errorf("eviction manager: no ranking function for signal %s", thresholdToReclaim.Signal) return nil } // the only candidates viable for eviction are those pods that had anything running. if len(activePods) == 0 { - glog.Errorf("eviction manager: eviction thresholds have been met, but no pods are active to evict") + klog.Errorf("eviction manager: eviction thresholds have been met, but no pods are active to evict") return nil } // rank the running pods for eviction for the specified resource rank(activePods, statsFunc) - glog.Infof("eviction manager: pods ranked for eviction: %s", format.Pods(activePods)) + klog.Infof("eviction manager: pods ranked for eviction: %s", format.Pods(activePods)) //record age of metrics for met thresholds that we are using for evictions. 
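// Illustrative sketch, not from this patch: the eviction debug helpers in
// helpers.go (later in this patch) rely on `if !klog.V(3) { return }`
// still compiling after the rename; in klog, as in glog, V(level) returns
// a boolean Verbose value, so it can guard a whole block of
// otherwise-expensive logging. Standalone, with assumed types:

package main

import "k8s.io/klog"

type observation struct{ available, capacity int64 }

func debugLogObservations(logPrefix string, observations map[string]observation) {
	if !klog.V(3) {
		return // skip the whole loop unless running with -v >= 3
	}
	for signal, o := range observations {
		klog.Infof("%v: signal=%v, available: %v, capacity: %v", logPrefix, signal, o.available, o.capacity)
	}
}

func main() {
	klog.InitFlags(nil)
	debugLogObservations("eviction manager:", map[string]observation{
		"memory.available": {available: 1 << 30, capacity: 4 << 30},
	})
	klog.Flush()
}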
for _, t := range thresholds { @@ -377,7 +377,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act return []*v1.Pod{pod} } } - glog.Infof("eviction manager: unable to evict any pods from the node") + klog.Infof("eviction manager: unable to evict any pods from the node") return nil } @@ -389,7 +389,7 @@ func (m *managerImpl) waitForPodsCleanup(podCleanedUpFunc PodCleanedUpFunc, pods for { select { case <-timeout.C(): - glog.Warningf("eviction manager: timed out waiting for pods %s to be cleaned up", format.Pods(pods)) + klog.Warningf("eviction manager: timed out waiting for pods %s to be cleaned up", format.Pods(pods)) return case <-ticker.C(): for i, pod := range pods { @@ -397,7 +397,7 @@ func (m *managerImpl) waitForPodsCleanup(podCleanedUpFunc PodCleanedUpFunc, pods break } if i == len(pods)-1 { - glog.Infof("eviction manager: pods %s successfully cleaned up", format.Pods(pods)) + klog.Infof("eviction manager: pods %s successfully cleaned up", format.Pods(pods)) return } } @@ -411,14 +411,14 @@ func (m *managerImpl) reclaimNodeLevelResources(signalToReclaim evictionapi.Sign for _, nodeReclaimFunc := range nodeReclaimFuncs { // attempt to reclaim the pressured resource. if err := nodeReclaimFunc(); err != nil { - glog.Warningf("eviction manager: unexpected error when attempting to reduce %v pressure: %v", resourceToReclaim, err) + klog.Warningf("eviction manager: unexpected error when attempting to reduce %v pressure: %v", resourceToReclaim, err) } } if len(nodeReclaimFuncs) > 0 { summary, err := m.summaryProvider.Get(true) if err != nil { - glog.Errorf("eviction manager: failed to get summary stats after resource reclaim: %v", err) + klog.Errorf("eviction manager: failed to get summary stats after resource reclaim: %v", err) return false } @@ -502,7 +502,7 @@ func (m *managerImpl) podEphemeralStorageLimitEviction(podStats statsapi.PodStat } podEphemeralUsage, err := podLocalEphemeralStorageUsage(podStats, pod, fsStatsSet) if err != nil { - glog.Errorf("eviction manager: error getting pod disk usage %v", err) + klog.Errorf("eviction manager: error getting pod disk usage %v", err) return false } @@ -545,7 +545,7 @@ func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg // do not evict such pods. Static pods are not re-admitted after evictions. // https://github.com/kubernetes/kubernetes/issues/40573 has more details. if kubelettypes.IsCriticalPod(pod) && kubepod.IsStaticPod(pod) { - glog.Errorf("eviction manager: cannot evict a critical static pod %s", format.Pod(pod)) + klog.Errorf("eviction manager: cannot evict a critical static pod %s", format.Pod(pod)) return false } status := v1.PodStatus{ @@ -558,9 +558,9 @@ func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg // this is a blocking call and should only return when the pod and its containers are killed. 
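
waitForPodsCleanup above is a timer-plus-ticker select loop over an injected clock. A simplified standalone sketch of the same shape, using the standard time package instead of the kubelet's clock interface (names and durations invented):

package main

import (
	"fmt"
	"time"
)

// waitForCleanup mirrors the structure of waitForPodsCleanup: give up after
// timeout, poll on every tick, and return early once every pod reports clean.
func waitForCleanup(cleanedUp func(pod string) bool, pods []string, timeout, poll time.Duration) {
	deadline := time.NewTimer(timeout)
	defer deadline.Stop()
	ticker := time.NewTicker(poll)
	defer ticker.Stop()
	for {
		select {
		case <-deadline.C:
			fmt.Printf("timed out waiting for pods %v to be cleaned up\n", pods)
			return
		case <-ticker.C:
			clean := true
			for _, pod := range pods {
				if !cleanedUp(pod) {
					clean = false
					break
				}
			}
			if clean {
				fmt.Printf("pods %v successfully cleaned up\n", pods)
				return
			}
		}
	}
}

func main() {
	start := time.Now()
	// Pretend cleanup finishes after 250ms, well inside the 1s deadline.
	done := func(string) bool { return time.Since(start) > 250*time.Millisecond }
	waitForCleanup(done, []string{"ns/pod-a", "ns/pod-b"}, time.Second, 50*time.Millisecond)
}
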
err := m.killPodFunc(pod, status, &gracePeriodOverride) if err != nil { - glog.Errorf("eviction manager: pod %s failed to evict %v", format.Pod(pod), err) + klog.Errorf("eviction manager: pod %s failed to evict %v", format.Pod(pod), err) } else { - glog.Infof("eviction manager: pod %s is evicted successfully", format.Pod(pod)) + klog.Infof("eviction manager: pod %s is evicted successfully", format.Pod(pod)) } return true } diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go index c078ce93d00d8..ef34b97a0ba4c 100644 --- a/pkg/kubelet/eviction/helpers.go +++ b/pkg/kubelet/eviction/helpers.go @@ -23,10 +23,10 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" @@ -732,7 +732,7 @@ func makeSignalObservations(summary *statsapi.Summary) (signalObservations, stat } } if allocatableContainer, err := getSysContainer(summary.Node.SystemContainers, statsapi.SystemContainerPods); err != nil { - glog.Errorf("eviction manager: failed to construct signal: %q error: %v", evictionapi.SignalAllocatableMemoryAvailable, err) + klog.Errorf("eviction manager: failed to construct signal: %q error: %v", evictionapi.SignalAllocatableMemoryAvailable, err) } else { if memory := allocatableContainer.Memory; memory != nil && memory.AvailableBytes != nil && memory.WorkingSetBytes != nil { result[evictionapi.SignalAllocatableMemoryAvailable] = signalObservation{ @@ -805,7 +805,7 @@ func thresholdsMet(thresholds []evictionapi.Threshold, observations signalObserv threshold := thresholds[i] observed, found := observations[threshold.Signal] if !found { - glog.Warningf("eviction manager: no observation found for eviction signal %v", threshold.Signal) + klog.Warningf("eviction manager: no observation found for eviction signal %v", threshold.Signal) continue } // determine if we have met the specified threshold @@ -828,20 +828,20 @@ func thresholdsMet(thresholds []evictionapi.Threshold, observations signalObserv } func debugLogObservations(logPrefix string, observations signalObservations) { - if !glog.V(3) { + if !klog.V(3) { return } for k, v := range observations { if !v.time.IsZero() { - glog.Infof("eviction manager: %v: signal=%v, available: %v, capacity: %v, time: %v", logPrefix, k, v.available, v.capacity, v.time) + klog.Infof("eviction manager: %v: signal=%v, available: %v, capacity: %v, time: %v", logPrefix, k, v.available, v.capacity, v.time) } else { - glog.Infof("eviction manager: %v: signal=%v, available: %v, capacity: %v", logPrefix, k, v.available, v.capacity) + klog.Infof("eviction manager: %v: signal=%v, available: %v, capacity: %v", logPrefix, k, v.available, v.capacity) } } } func debugLogThresholdsWithObservation(logPrefix string, thresholds []evictionapi.Threshold, observations signalObservations) { - if !glog.V(3) { + if !klog.V(3) { return } for i := range thresholds { @@ -849,9 +849,9 @@ func debugLogThresholdsWithObservation(logPrefix string, thresholds []evictionap observed, found := observations[threshold.Signal] if found { quantity := evictionapi.GetThresholdQuantity(threshold.Value, observed.capacity) - glog.Infof("eviction manager: %v: threshold [signal=%v, quantity=%v] observed %v", logPrefix, threshold.Signal, quantity, observed.available) + klog.Infof("eviction manager: %v: threshold 
[signal=%v, quantity=%v] observed %v", logPrefix, threshold.Signal, quantity, observed.available) } else { - glog.Infof("eviction manager: %v: threshold [signal=%v] had no observation", logPrefix, threshold.Signal) + klog.Infof("eviction manager: %v: threshold [signal=%v] had no observation", logPrefix, threshold.Signal) } } } @@ -862,7 +862,7 @@ func thresholdsUpdatedStats(thresholds []evictionapi.Threshold, observations, la threshold := thresholds[i] observed, found := observations[threshold.Signal] if !found { - glog.Warningf("eviction manager: no observation found for eviction signal %v", threshold.Signal) + klog.Warningf("eviction manager: no observation found for eviction signal %v", threshold.Signal) continue } last, found := lastObservations[threshold.Signal] @@ -892,7 +892,7 @@ func thresholdsMetGracePeriod(observedAt thresholdsObservedAt, now time.Time) [] for threshold, at := range observedAt { duration := now.Sub(at) if duration < threshold.GracePeriod { - glog.V(2).Infof("eviction manager: eviction criteria not yet met for %v, duration: %v", formatThreshold(threshold), duration) + klog.V(2).Infof("eviction manager: eviction criteria not yet met for %v, duration: %v", formatThreshold(threshold), duration) continue } results = append(results, threshold) diff --git a/pkg/kubelet/eviction/memory_threshold_notifier.go b/pkg/kubelet/eviction/memory_threshold_notifier.go index 8d86944f39d00..b60393e8c64e9 100644 --- a/pkg/kubelet/eviction/memory_threshold_notifier.go +++ b/pkg/kubelet/eviction/memory_threshold_notifier.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/resource" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" @@ -71,7 +71,7 @@ func NewMemoryThresholdNotifier(threshold evictionapi.Threshold, cgroupRoot stri } func (m *memoryThresholdNotifier) Start() { - glog.Infof("eviction manager: created %s", m.Description()) + klog.Infof("eviction manager: created %s", m.Description()) for range m.events { m.handler(fmt.Sprintf("eviction manager: %s crossed", m.Description())) } @@ -98,7 +98,7 @@ func (m *memoryThresholdNotifier) UpdateThreshold(summary *statsapi.Summary) err memcgThreshold.Sub(*evictionThresholdQuantity) memcgThreshold.Add(*inactiveFile) - glog.V(3).Infof("eviction manager: setting %s to %s\n", m.Description(), memcgThreshold.String()) + klog.V(3).Infof("eviction manager: setting %s to %s\n", m.Description(), memcgThreshold.String()) if m.notifier != nil { m.notifier.Stop() } diff --git a/pkg/kubelet/eviction/threshold_notifier_linux.go b/pkg/kubelet/eviction/threshold_notifier_linux.go index 4fabd7345a5c8..1d097fd293fde 100644 --- a/pkg/kubelet/eviction/threshold_notifier_linux.go +++ b/pkg/kubelet/eviction/threshold_notifier_linux.go @@ -21,8 +21,8 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/sys/unix" + "k8s.io/klog" ) const ( @@ -104,7 +104,7 @@ func (n *linuxCgroupNotifier) Start(eventCh chan<- struct{}) { Events: unix.EPOLLIN, }) if err != nil { - glog.Warningf("eviction manager: error adding epoll eventfd: %v", err) + klog.Warningf("eviction manager: error adding epoll eventfd: %v", err) return } for { @@ -115,7 +115,7 @@ func (n *linuxCgroupNotifier) Start(eventCh chan<- struct{}) { } event, err := wait(n.epfd, n.eventfd, notifierRefreshInterval) if err != nil { - glog.Warningf("eviction manager: error while waiting for memcg events: %v", err) + klog.Warningf("eviction manager: error while waiting for memcg events: %v", err) return } else if !event { // 
Timeout on wait. This is expected if the threshold was not crossed @@ -125,7 +125,7 @@ func (n *linuxCgroupNotifier) Start(eventCh chan<- struct{}) { buf := make([]byte, eventSize) _, err = unix.Read(n.eventfd, buf) if err != nil { - glog.Warningf("eviction manager: error reading memcg events: %v", err) + klog.Warningf("eviction manager: error reading memcg events: %v", err) return } eventCh <- struct{}{} diff --git a/pkg/kubelet/eviction/threshold_notifier_unsupported.go b/pkg/kubelet/eviction/threshold_notifier_unsupported.go index 7078c7865a91e..afa92fe5fceac 100644 --- a/pkg/kubelet/eviction/threshold_notifier_unsupported.go +++ b/pkg/kubelet/eviction/threshold_notifier_unsupported.go @@ -18,11 +18,11 @@ limitations under the License. package eviction -import "github.com/golang/glog" +import "k8s.io/klog" // NewCgroupNotifier creates a cgroup notifier that does nothing because cgroups do not exist on non-linux systems. func NewCgroupNotifier(path, attribute string, threshold int64) (CgroupNotifier, error) { - glog.V(5).Infof("cgroup notifications not supported") + klog.V(5).Infof("cgroup notifications not supported") return &unsupportedThresholdNotifier{}, nil } diff --git a/pkg/kubelet/images/BUILD b/pkg/kubelet/images/BUILD index 5f5594b3f3c82..cbb23f945e1d2 100644 --- a/pkg/kubelet/images/BUILD +++ b/pkg/kubelet/images/BUILD @@ -30,7 +30,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//vendor/github.com/docker/distribution/reference:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/images/image_gc_manager.go b/pkg/kubelet/images/image_gc_manager.go index a25769a90e3ed..4c6feabb17b4f 100644 --- a/pkg/kubelet/images/image_gc_manager.go +++ b/pkg/kubelet/images/image_gc_manager.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/errors" @@ -178,7 +178,7 @@ func (im *realImageGCManager) Start() { } _, err := im.detectImages(ts) if err != nil { - glog.Warningf("[imageGCManager] Failed to monitor images: %v", err) + klog.Warningf("[imageGCManager] Failed to monitor images: %v", err) } else { im.initialized = true } @@ -189,7 +189,7 @@ func (im *realImageGCManager) Start() { go wait.Until(func() { images, err := im.runtime.ListImages() if err != nil { - glog.Warningf("[imageGCManager] Failed to update image list: %v", err) + klog.Warningf("[imageGCManager] Failed to update image list: %v", err) } else { im.imageCache.set(images) } @@ -223,7 +223,7 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, e // Make a set of images in use by containers. 
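
The debugLogObservations and debugLogThresholdsWithObservation hunks above keep glog's guard idiom: in klog v1, V() returns a bool-typed Verbose, so the negated check still compiles and skips the per-entry formatting work entirely when verbosity is low. A minimal standalone sketch of that guard (types and values invented):

package main

import (
	"flag"

	"k8s.io/klog"
)

type observation struct{ available, capacity int64 }

func debugLog(prefix string, obs map[string]observation) {
	// klog v1's V() returns a Verbose (a bool), so this is the same cheap
	// early exit the glog version had.
	if !klog.V(3) {
		return
	}
	for signal, o := range obs {
		klog.Infof("%v: signal=%v, available: %v, capacity: %v", prefix, signal, o.available, o.capacity)
	}
}

func main() {
	klog.InitFlags(nil)
	flag.Set("logtostderr", "true")
	flag.Set("v", "3")
	flag.Parse()
	debugLog("observations", map[string]observation{
		"memory.available": {available: 1 << 30, capacity: 4 << 30},
	})
	klog.Flush()
}
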
for _, pod := range pods { for _, container := range pod.Containers { - glog.V(5).Infof("Pod %s/%s, container %s uses image %s(%s)", pod.Namespace, pod.Name, container.Name, container.Image, container.ImageID) + klog.V(5).Infof("Pod %s/%s, container %s uses image %s(%s)", pod.Namespace, pod.Name, container.Name, container.Image, container.ImageID) imagesInUse.Insert(container.ImageID) } } @@ -234,12 +234,12 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, e im.imageRecordsLock.Lock() defer im.imageRecordsLock.Unlock() for _, image := range images { - glog.V(5).Infof("Adding image ID %s to currentImages", image.ID) + klog.V(5).Infof("Adding image ID %s to currentImages", image.ID) currentImages.Insert(image.ID) // New image, set it as detected now. if _, ok := im.imageRecords[image.ID]; !ok { - glog.V(5).Infof("Image ID %s is new", image.ID) + klog.V(5).Infof("Image ID %s is new", image.ID) im.imageRecords[image.ID] = &imageRecord{ firstDetected: detectTime, } @@ -247,18 +247,18 @@ func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, e // Set last used time to now if the image is being used. if isImageUsed(image.ID, imagesInUse) { - glog.V(5).Infof("Setting Image ID %s lastUsed to %v", image.ID, now) + klog.V(5).Infof("Setting Image ID %s lastUsed to %v", image.ID, now) im.imageRecords[image.ID].lastUsed = now } - glog.V(5).Infof("Image ID %s has size %d", image.ID, image.Size) + klog.V(5).Infof("Image ID %s has size %d", image.ID, image.Size) im.imageRecords[image.ID].size = image.Size } // Remove old images from our records. for image := range im.imageRecords { if !currentImages.Has(image) { - glog.V(5).Infof("Image ID %s is no longer present; removing from imageRecords", image) + klog.V(5).Infof("Image ID %s is no longer present; removing from imageRecords", image) delete(im.imageRecords, image) } } @@ -282,7 +282,7 @@ func (im *realImageGCManager) GarbageCollect() error { } if available > capacity { - glog.Warningf("available %d is larger than capacity %d", available, capacity) + klog.Warningf("available %d is larger than capacity %d", available, capacity) available = capacity } @@ -297,7 +297,7 @@ func (im *realImageGCManager) GarbageCollect() error { usagePercent := 100 - int(available*100/capacity) if usagePercent >= im.policy.HighThresholdPercent { amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available - glog.Infof("[imageGCManager]: Disk usage on image filesystem is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes down to the low threshold (%d%%).", usagePercent, im.policy.HighThresholdPercent, amountToFree, im.policy.LowThresholdPercent) + klog.Infof("[imageGCManager]: Disk usage on image filesystem is at %d%% which is over the high threshold (%d%%). 
Trying to free %d bytes down to the low threshold (%d%%).", usagePercent, im.policy.HighThresholdPercent, amountToFree, im.policy.LowThresholdPercent) freed, err := im.freeSpace(amountToFree, time.Now()) if err != nil { return err @@ -314,7 +314,7 @@ func (im *realImageGCManager) GarbageCollect() error { } func (im *realImageGCManager) DeleteUnusedImages() error { - glog.Infof("attempting to delete unused images") + klog.Infof("attempting to delete unused images") _, err := im.freeSpace(math.MaxInt64, time.Now()) return err } @@ -338,7 +338,7 @@ func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) ( images := make([]evictionInfo, 0, len(im.imageRecords)) for image, record := range im.imageRecords { if isImageUsed(image, imagesInUse) { - glog.V(5).Infof("Image ID %s is being used", image) + klog.V(5).Infof("Image ID %s is being used", image) continue } images = append(images, evictionInfo{ @@ -352,10 +352,10 @@ func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) ( var deletionErrors []error spaceFreed := int64(0) for _, image := range images { - glog.V(5).Infof("Evaluating image ID %s for possible garbage collection", image.id) + klog.V(5).Infof("Evaluating image ID %s for possible garbage collection", image.id) // Images that are currently in used were given a newer lastUsed. if image.lastUsed.Equal(freeTime) || image.lastUsed.After(freeTime) { - glog.V(5).Infof("Image ID %s has lastUsed=%v which is >= freeTime=%v, not eligible for garbage collection", image.id, image.lastUsed, freeTime) + klog.V(5).Infof("Image ID %s has lastUsed=%v which is >= freeTime=%v, not eligible for garbage collection", image.id, image.lastUsed, freeTime) continue } @@ -363,12 +363,12 @@ func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) ( // In such a case, the image may have just been pulled down, and will be used by a container right away. if freeTime.Sub(image.firstDetected) < im.policy.MinAge { - glog.V(5).Infof("Image ID %s has age %v which is less than the policy's minAge of %v, not eligible for garbage collection", image.id, freeTime.Sub(image.firstDetected), im.policy.MinAge) + klog.V(5).Infof("Image ID %s has age %v which is less than the policy's minAge of %v, not eligible for garbage collection", image.id, freeTime.Sub(image.firstDetected), im.policy.MinAge) continue } // Remove image. Continue despite errors. 
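
The GarbageCollect hunk above turns two integer formulas into an eviction decision; with concrete numbers the logic is easy to verify. A worked standalone sketch, assuming the stock 85/80 high/low thresholds and invented disk numbers:

package main

import "fmt"

func main() {
	// Invented numbers: a 100 GiB image filesystem with 12 GiB free, and
	// thresholds of high=85, low=80 (the kubelet defaults).
	var capacity, available int64 = 100 << 30, 12 << 30
	high, low := 85, 80

	usagePercent := 100 - int(available*100/capacity) // 88
	if usagePercent >= high {
		// Reclaim down to the low-water mark: everything above 20% free space.
		amountToFree := capacity*int64(100-low)/100 - available // 8 GiB
		fmt.Printf("usage %d%% >= high threshold %d%%, freeing %d bytes\n",
			usagePercent, high, amountToFree)
	}
}
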
- glog.Infof("[imageGCManager]: Removing image %q to free %d bytes", image.id, image.size) + klog.Infof("[imageGCManager]: Removing image %q to free %d bytes", image.id, image.size) err := im.runtime.RemoveImage(container.ImageSpec{Image: image.id}) if err != nil { deletionErrors = append(deletionErrors, err) diff --git a/pkg/kubelet/images/image_manager.go b/pkg/kubelet/images/image_manager.go index 381c55e40844b..36b36a9e99169 100644 --- a/pkg/kubelet/images/image_manager.go +++ b/pkg/kubelet/images/image_manager.go @@ -20,10 +20,10 @@ import ( "fmt" dockerref "github.com/docker/distribution/reference" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/util/parsers" @@ -88,14 +88,14 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p logPrefix := fmt.Sprintf("%s/%s", pod.Name, container.Image) ref, err := kubecontainer.GenerateContainerRef(pod, container) if err != nil { - glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err) + klog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err) } // If the image contains no tag or digest, a default tag should be applied. image, err := applyDefaultImageTag(container.Image) if err != nil { msg := fmt.Sprintf("Failed to apply default image tag %q: %v", container.Image, err) - m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning) + m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning) return "", msg, ErrInvalidImageName } @@ -103,7 +103,7 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p imageRef, err := m.imageService.GetImageRef(spec) if err != nil { msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err) - m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning) + m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning) return "", msg, ErrImageInspect } @@ -111,26 +111,26 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p if !shouldPullImage(container, present) { if present { msg := fmt.Sprintf("Container image %q already present on machine", container.Image) - m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, glog.Info) + m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, klog.Info) return imageRef, "", nil } msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image) - m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning) + m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, klog.Warning) return "", msg, ErrImageNeverPull } backOffKey := fmt.Sprintf("%s_%s", pod.UID, container.Image) if m.backOff.IsInBackOffSinceUpdate(backOffKey, m.backOff.Clock.Now()) { msg := fmt.Sprintf("Back-off pulling image %q", container.Image) - m.logIt(ref, v1.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, glog.Info) + m.logIt(ref, v1.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, klog.Info) return "", msg, ErrImagePullBackOff } - m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info) + 
m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), klog.Info) pullChan := make(chan pullResult) m.puller.pullImage(spec, pullSecrets, pullChan) imagePullResult := <-pullChan if imagePullResult.err != nil { - m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), glog.Warning) + m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), klog.Warning) m.backOff.Next(backOffKey, m.backOff.Clock.Now()) if imagePullResult.err == ErrRegistryUnavailable { msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image) @@ -139,7 +139,7 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p return "", imagePullResult.err.Error(), ErrImagePull } - m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info) + m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), klog.Info) m.backOff.GC() return imagePullResult.imageRef, "", nil } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 8fe90c2da717d..18dc512c94f0a 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -31,7 +31,6 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapiv2 "github.com/google/cadvisor/info/v2" "k8s.io/api/core/v1" @@ -55,6 +54,7 @@ import ( "k8s.io/client-go/util/integer" cloudprovider "k8s.io/cloud-provider" csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" + "k8s.io/klog" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/features" kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config" @@ -275,13 +275,13 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku // define file config source if kubeCfg.StaticPodPath != "" { - glog.Infof("Adding pod path: %v", kubeCfg.StaticPodPath) + klog.Infof("Adding pod path: %v", kubeCfg.StaticPodPath) config.NewSourceFile(kubeCfg.StaticPodPath, nodeName, kubeCfg.FileCheckFrequency.Duration, cfg.Channel(kubetypes.FileSource)) } // define url config source if kubeCfg.StaticPodURL != "" { - glog.Infof("Adding pod url %q with HTTP header %v", kubeCfg.StaticPodURL, manifestURLHeader) + klog.Infof("Adding pod url %q with HTTP header %v", kubeCfg.StaticPodURL, manifestURLHeader) config.NewSourceURL(kubeCfg.StaticPodURL, manifestURLHeader, nodeName, kubeCfg.HTTPCheckFrequency.Duration, cfg.Channel(kubetypes.HTTPSource)) } @@ -291,7 +291,7 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku var updatechannel chan<- interface{} if bootstrapCheckpointPath != "" { - glog.Infof("Adding checkpoint path: %v", bootstrapCheckpointPath) + klog.Infof("Adding checkpoint path: %v", bootstrapCheckpointPath) updatechannel = cfg.Channel(kubetypes.ApiserverSource) err := cfg.Restore(bootstrapCheckpointPath, updatechannel) if err != nil { @@ -300,7 +300,7 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku } if kubeDeps.KubeClient != nil { - glog.Infof("Watching apiserver") + klog.Infof("Watching apiserver") if updatechannel == nil { updatechannel = cfg.Channel(kubetypes.ApiserverSource) } @@ -391,7 +391,7 @@ func NewMainKubelet(kubeCfg 
*kubeletconfiginternal.KubeletConfiguration, return nil, fmt.Errorf("error fetching current instance name from cloud provider: %v", err) } - glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName) + klog.V(2).Infof("cloud provider determined current node name to be %s", nodeName) } if kubeDeps.PodConfig == nil { @@ -470,7 +470,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, for _, ipEntry := range kubeCfg.ClusterDNS { ip := net.ParseIP(ipEntry) if ip == nil { - glog.Warningf("Invalid clusterDNS ip '%q'", ipEntry) + klog.Warningf("Invalid clusterDNS ip '%q'", ipEntry) } else { clusterDNS = append(clusterDNS, ip) } @@ -479,7 +479,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, parsedNodeIP := net.ParseIP(nodeIP) protocol := utilipt.ProtocolIpv4 if parsedNodeIP != nil && parsedNodeIP.To4() == nil { - glog.V(0).Infof("IPv6 node IP (%s), assume IPv6 operation", nodeIP) + klog.V(0).Infof("IPv6 node IP (%s), assume IPv6 operation", nodeIP) protocol = utilipt.ProtocolIpv6 } @@ -563,7 +563,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.configMapManager = configMapManager if klet.experimentalHostUserNamespaceDefaulting { - glog.Infof("Experimental host user namespace defaulting is enabled.") + klog.Infof("Experimental host user namespace defaulting is enabled.") } machineInfo, err := klet.cadvisor.MachineInfo() @@ -607,7 +607,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.resourceAnalyzer = serverstats.NewResourceAnalyzer(klet, kubeCfg.VolumeStatsAggPeriod.Duration) if containerRuntime == "rkt" { - glog.Fatalln("rktnetes has been deprecated in favor of rktlet. Please see https://github.com/kubernetes-incubator/rktlet for more information.") + klog.Fatalln("rktnetes has been deprecated in favor of rktlet. Please see https://github.com/kubernetes-incubator/rktlet for more information.") } // if left at nil, that means it is unneeded @@ -627,10 +627,10 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, } // The unix socket for kubelet <-> dockershim communication. 
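
The NewMainKubelet hunks above validate each clusterDNS entry and switch to IPv6 iptables handling when the node IP parses but has no IPv4 form. The detection rests on net.IP.To4 returning nil for a pure IPv6 address; a standalone sketch with invented sample inputs:

package main

import (
	"fmt"
	"net"
)

func main() {
	// The same three cases the kubelet distinguishes: bad entry, IPv6, IPv4.
	for _, entry := range []string{"not-an-ip", "2001:db8::a", "10.0.0.10"} {
		ip := net.ParseIP(entry)
		switch {
		case ip == nil:
			fmt.Printf("%q: invalid, skipped\n", entry)
		case ip.To4() == nil:
			fmt.Printf("%q: IPv6 node IP, assume IPv6 operation\n", entry)
		default:
			fmt.Printf("%q: IPv4\n", entry)
		}
	}
}
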
- glog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q", + klog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q", remoteRuntimeEndpoint, remoteImageEndpoint) - glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") + klog.V(2).Infof("Starting the GRPC server for the docker CRI shim.") server := dockerremote.NewDockerServer(remoteRuntimeEndpoint, ds) if err := server.Start(); err != nil { return nil, err @@ -718,7 +718,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime) klet.runtimeState.addHealthCheck("PLEG", klet.pleg.Healthy) if _, err := klet.updatePodCIDR(kubeCfg.PodCIDR); err != nil { - glog.Errorf("Pod CIDR update failed %v", err) + klog.Errorf("Pod CIDR update failed %v", err) } // setup containerGC @@ -1264,23 +1264,23 @@ func (kl *Kubelet) StartGarbageCollection() { loggedContainerGCFailure := false go wait.Until(func() { if err := kl.containerGC.GarbageCollect(); err != nil { - glog.Errorf("Container garbage collection failed: %v", err) + klog.Errorf("Container garbage collection failed: %v", err) kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ContainerGCFailed, err.Error()) loggedContainerGCFailure = true } else { - var vLevel glog.Level = 4 + var vLevel klog.Level = 4 if loggedContainerGCFailure { vLevel = 1 loggedContainerGCFailure = false } - glog.V(vLevel).Infof("Container garbage collection succeeded") + klog.V(vLevel).Infof("Container garbage collection succeeded") } }, ContainerGCPeriod, wait.NeverStop) // when the high threshold is set to 100, stub the image GC manager if kl.kubeletConfiguration.ImageGCHighThresholdPercent == 100 { - glog.V(2).Infof("ImageGCHighThresholdPercent is set 100, Disable image GC") + klog.V(2).Infof("ImageGCHighThresholdPercent is set 100, Disable image GC") return } @@ -1288,21 +1288,21 @@ func (kl *Kubelet) StartGarbageCollection() { go wait.Until(func() { if err := kl.imageManager.GarbageCollect(); err != nil { if prevImageGCFailed { - glog.Errorf("Image garbage collection failed multiple times in a row: %v", err) + klog.Errorf("Image garbage collection failed multiple times in a row: %v", err) // Only create an event for repeated failures kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ImageGCFailed, err.Error()) } else { - glog.Errorf("Image garbage collection failed once. Stats initialization may not have completed yet: %v", err) + klog.Errorf("Image garbage collection failed once. Stats initialization may not have completed yet: %v", err) } prevImageGCFailed = true } else { - var vLevel glog.Level = 4 + var vLevel klog.Level = 4 if prevImageGCFailed { vLevel = 1 prevImageGCFailed = false } - glog.V(vLevel).Infof("Image garbage collection succeeded") + klog.V(vLevel).Infof("Image garbage collection succeeded") } }, ImageGCPeriod, wait.NeverStop) } @@ -1321,7 +1321,7 @@ func (kl *Kubelet) initializeModules() error { // If the container logs directory does not exist, create it. if _, err := os.Stat(ContainerLogsDir); err != nil { if err := kl.os.MkdirAll(ContainerLogsDir, 0755); err != nil { - glog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err) + klog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err) } } @@ -1349,7 +1349,7 @@ func (kl *Kubelet) initializeRuntimeDependentModules() { if err := kl.cadvisor.Start(); err != nil { // Fail kubelet and rely on the babysitter to retry starting kubelet. 
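
StartGarbageCollection above uses the same verbosity trick in both loops: success is normally logged at V(4), but promoted to V(1) right after a failure so the recovery is visible without raising verbosity. Because klog.Level is an ordinary integer type, the level can live in a variable. A standalone sketch (function and message names invented):

package main

import (
	"errors"
	"flag"

	"k8s.io/klog"
)

// collectOnce runs one GC pass and logs success at V(4) normally, or at V(1)
// when it is the first success after a failure.
func collectOnce(gc func() error, prevFailed *bool) {
	if err := gc(); err != nil {
		klog.Errorf("garbage collection failed: %v", err)
		*prevFailed = true
		return
	}
	var vLevel klog.Level = 4
	if *prevFailed {
		vLevel = 1
		*prevFailed = false
	}
	klog.V(vLevel).Infof("garbage collection succeeded")
}

func main() {
	klog.InitFlags(nil)
	flag.Set("logtostderr", "true")
	flag.Set("v", "1")
	flag.Parse()
	failed := false
	collectOnce(func() error { return errors.New("disk busy") }, &failed)
	collectOnce(func() error { return nil }, &failed) // visible at -v=1 because of the prior failure
	klog.Flush()
}
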
// TODO(random-liu): Add backoff logic in the babysitter - glog.Fatalf("Failed to start cAdvisor %v", err) + klog.Fatalf("Failed to start cAdvisor %v", err) } // trigger on-demand stats collection once so that we have capacity information for ephemeral storage. @@ -1359,12 +1359,12 @@ func (kl *Kubelet) initializeRuntimeDependentModules() { node, err := kl.getNodeAnyWay() if err != nil { // Fail kubelet and rely on the babysitter to retry starting kubelet. - glog.Fatalf("Kubelet failed to get node info: %v", err) + klog.Fatalf("Kubelet failed to get node info: %v", err) } // containerManager must start after cAdvisor because it needs filesystem capacity information if err := kl.containerManager.Start(node, kl.GetActivePods, kl.sourcesReady, kl.statusManager, kl.runtimeService); err != nil { // Fail kubelet and rely on the babysitter to retry starting kubelet. - glog.Fatalf("Failed to start ContainerManager %v", err) + klog.Fatalf("Failed to start ContainerManager %v", err) } // eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs kl.evictionManager.Start(kl.StatsProvider, kl.GetActivePods, kl.podResourcesAreReclaimed, evictionMonitoringPeriod) @@ -1378,10 +1378,10 @@ func (kl *Kubelet) initializeRuntimeDependentModules() { // Adding Registration Callback function for Device Manager kl.pluginWatcher.AddHandler(pluginwatcherapi.DevicePlugin, kl.containerManager.GetPluginRegistrationHandler()) // Start the plugin watcher - glog.V(4).Infof("starting watcher") + klog.V(4).Infof("starting watcher") if err := kl.pluginWatcher.Start(); err != nil { kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error()) - glog.Fatalf("failed to start Plugin Watcher. err: %v", err) + klog.Fatalf("failed to start Plugin Watcher. err: %v", err) } } } @@ -1392,7 +1392,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { kl.logServer = http.StripPrefix("/logs/", http.FileServer(http.Dir("/var/log/"))) } if kl.kubeClient == nil { - glog.Warning("No api server defined - no node status update will be sent.") + klog.Warning("No api server defined - no node status update will be sent.") } // Start the cloud provider sync manager @@ -1402,7 +1402,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { if err := kl.initializeModules(); err != nil { kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error()) - glog.Fatal(err) + klog.Fatal(err) } // Start volume manager @@ -1510,7 +1510,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // since kubelet first saw the pod if firstSeenTime is set. 
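
initializeRuntimeDependentModules above reserves Fatalf for errors the kubelet cannot run past (cAdvisor, node info, the container manager), counting on an external supervisor to restart the process; as with glog, klog's Fatal variants log the message with goroutine stack traces and then exit. A standalone sketch of that convention (names invented):

package main

import (
	"flag"

	"k8s.io/klog"
)

// mustStart wraps a component start the process cannot survive without.
func mustStart(name string, start func() error) {
	if err := start(); err != nil {
		// Fatalf logs the message (plus goroutine stacks) and exits the
		// process; a babysitter/supervisor is expected to restart it.
		klog.Fatalf("Failed to start %s: %v", name, err)
	}
}

func main() {
	klog.InitFlags(nil)
	flag.Set("logtostderr", "true")
	flag.Parse()
	mustStart("cAdvisor", func() error { return nil }) // succeeds, so we continue
	klog.Infof("runtime-dependent modules started")
	klog.Flush()
}
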
metrics.PodWorkerStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime)) } else { - glog.V(3).Infof("First seen time not recorded for pod %q", pod.UID) + klog.V(3).Infof("First seen time not recorded for pod %q", pod.UID) } } @@ -1609,7 +1609,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { if !(podKilled && pod.Spec.RestartPolicy == v1.RestartPolicyNever) { if !pcm.Exists(pod) { if err := kl.containerManager.UpdateQOSCgroups(); err != nil { - glog.V(2).Infof("Failed to update QoS cgroups while syncing pod: %v", err) + klog.V(2).Infof("Failed to update QoS cgroups while syncing pod: %v", err) } if err := pcm.EnsureExists(pod); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToCreatePodContainer, "unable to ensure pod container exists: %v", err) @@ -1627,9 +1627,9 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { if mirrorPod.DeletionTimestamp != nil || !kl.podManager.IsMirrorPodOf(mirrorPod, pod) { // The mirror pod is semantically different from the static pod. Remove // it. The mirror pod will get recreated later. - glog.Warningf("Deleting mirror pod %q because it is outdated", format.Pod(mirrorPod)) + klog.Warningf("Deleting mirror pod %q because it is outdated", format.Pod(mirrorPod)) if err := kl.podManager.DeleteMirrorPod(podFullName); err != nil { - glog.Errorf("Failed deleting mirror pod %q: %v", format.Pod(mirrorPod), err) + klog.Errorf("Failed deleting mirror pod %q: %v", format.Pod(mirrorPod), err) } else { deleted = true } @@ -1638,11 +1638,11 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { if mirrorPod == nil || deleted { node, err := kl.GetNode() if err != nil || node.DeletionTimestamp != nil { - glog.V(4).Infof("No need to create a mirror pod, since node %q has been removed from the cluster", kl.nodeName) + klog.V(4).Infof("No need to create a mirror pod, since node %q has been removed from the cluster", kl.nodeName) } else { - glog.V(4).Infof("Creating a mirror pod for static pod %q", format.Pod(pod)) + klog.V(4).Infof("Creating a mirror pod for static pod %q", format.Pod(pod)) if err := kl.podManager.CreateMirrorPod(pod); err != nil { - glog.Errorf("Failed creating a mirror pod for %q: %v", format.Pod(pod), err) + klog.Errorf("Failed creating a mirror pod for %q: %v", format.Pod(pod), err) } } } @@ -1651,7 +1651,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // Make data directories for the pod if err := kl.makePodDataDirs(pod); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToMakePodDataDirectories, "error making pod data directories: %v", err) - glog.Errorf("Unable to make pod data directories for pod %q: %v", format.Pod(pod), err) + klog.Errorf("Unable to make pod data directories for pod %q: %v", format.Pod(pod), err) return err } @@ -1660,7 +1660,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // Wait for volumes to attach/mount if err := kl.volumeManager.WaitForAttachAndMount(pod); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err) - glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err) + klog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err) return err } } @@ -1809,7 +1809,7 @@ func (kl *Kubelet) canRunPod(pod *v1.Pod) lifecycle.PodAdmitResult { // no changes are seen to the configuration, will synchronize the last known desired // state every sync-frequency seconds. Never returns. 
func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHandler) { - glog.Info("Starting kubelet main sync loop.") + klog.Info("Starting kubelet main sync loop.") // The syncTicker wakes up kubelet to checks if there are any pod workers // that need to be sync'd. A one-second period is sufficient because the // sync interval is defaulted to 10s. @@ -1826,7 +1826,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand duration := base for { if rs := kl.runtimeState.runtimeErrors(); len(rs) != 0 { - glog.Infof("skipping pod synchronization - %v", rs) + klog.Infof("skipping pod synchronization - %v", rs) // exponential backoff time.Sleep(duration) duration = time.Duration(math.Min(float64(max), factor*float64(duration))) @@ -1882,39 +1882,39 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle // Update from a config source; dispatch it to the right handler // callback. if !open { - glog.Errorf("Update channel is closed. Exiting the sync loop.") + klog.Errorf("Update channel is closed. Exiting the sync loop.") return false } switch u.Op { case kubetypes.ADD: - glog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, format.Pods(u.Pods)) + klog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, format.Pods(u.Pods)) // After restarting, kubelet will get all existing pods through // ADD as if they are new pods. These pods will then go through the // admission process and *may* be rejected. This can be resolved // once we have checkpointing. handler.HandlePodAdditions(u.Pods) case kubetypes.UPDATE: - glog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, format.PodsWithDeletionTimestamps(u.Pods)) + klog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, format.PodsWithDeletionTimestamps(u.Pods)) handler.HandlePodUpdates(u.Pods) case kubetypes.REMOVE: - glog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, format.Pods(u.Pods)) + klog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, format.Pods(u.Pods)) handler.HandlePodRemoves(u.Pods) case kubetypes.RECONCILE: - glog.V(4).Infof("SyncLoop (RECONCILE, %q): %q", u.Source, format.Pods(u.Pods)) + klog.V(4).Infof("SyncLoop (RECONCILE, %q): %q", u.Source, format.Pods(u.Pods)) handler.HandlePodReconcile(u.Pods) case kubetypes.DELETE: - glog.V(2).Infof("SyncLoop (DELETE, %q): %q", u.Source, format.Pods(u.Pods)) + klog.V(2).Infof("SyncLoop (DELETE, %q): %q", u.Source, format.Pods(u.Pods)) // DELETE is treated as a UPDATE because of graceful deletion. handler.HandlePodUpdates(u.Pods) case kubetypes.RESTORE: - glog.V(2).Infof("SyncLoop (RESTORE, %q): %q", u.Source, format.Pods(u.Pods)) + klog.V(2).Infof("SyncLoop (RESTORE, %q): %q", u.Source, format.Pods(u.Pods)) // These are pods restored from the checkpoint. Treat them as new // pods. handler.HandlePodAdditions(u.Pods) case kubetypes.SET: // TODO: Do we want to support this? - glog.Errorf("Kubelet does not support snapshot update") + klog.Errorf("Kubelet does not support snapshot update") } if u.Op != kubetypes.RESTORE { @@ -1933,11 +1933,11 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle if isSyncPodWorthy(e) { // PLEG event for a pod; sync it. if pod, ok := kl.podManager.GetPodByUID(e.ID); ok { - glog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e) + klog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e) handler.HandlePodSyncs([]*v1.Pod{pod}) } else { // If the pod no longer exists, ignore the event. 
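
The syncLoop hunk above sleeps with exponential backoff while runtime errors persist, doubling the interval up to a cap via math.Min. The arithmetic in isolation, with illustrative base/factor/max values:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// Illustrative values; the shape matches the hunk: double each time,
	// clamp at max, and reset to base once a sync iteration succeeds.
	base := 100 * time.Millisecond
	max := 5 * time.Second
	factor := 2.0

	duration := base
	for attempt := 0; attempt < 8; attempt++ {
		fmt.Printf("runtime still unhealthy, attempt %d: sleeping %v\n", attempt, duration)
		// time.Sleep(duration) would sit here in the real loop.
		duration = time.Duration(math.Min(float64(max), factor*float64(duration)))
	}
}
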
- glog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e) + klog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e) } } @@ -1952,7 +1952,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle if len(podsToSync) == 0 { break } - glog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), format.Pods(podsToSync)) + klog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), format.Pods(podsToSync)) handler.HandlePodSyncs(podsToSync) case update := <-kl.livenessManager.Updates(): if update.Result == proberesults.Failure { @@ -1963,21 +1963,21 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle pod, ok := kl.podManager.GetPodByUID(update.PodUID) if !ok { // If the pod no longer exists, ignore the update. - glog.V(4).Infof("SyncLoop (container unhealthy): ignore irrelevant update: %#v", update) + klog.V(4).Infof("SyncLoop (container unhealthy): ignore irrelevant update: %#v", update) break } - glog.V(1).Infof("SyncLoop (container unhealthy): %q", format.Pod(pod)) + klog.V(1).Infof("SyncLoop (container unhealthy): %q", format.Pod(pod)) handler.HandlePodSyncs([]*v1.Pod{pod}) } case <-housekeepingCh: if !kl.sourcesReady.AllReady() { // If the sources aren't ready or volume manager has not yet synced the states, // skip housekeeping, as we may accidentally delete pods from unready sources. - glog.V(4).Infof("SyncLoop (housekeeping, skipped): sources aren't ready yet.") + klog.V(4).Infof("SyncLoop (housekeeping, skipped): sources aren't ready yet.") } else { - glog.V(4).Infof("SyncLoop (housekeeping)") + klog.V(4).Infof("SyncLoop (housekeeping)") if err := handler.HandlePodCleanups(); err != nil { - glog.Errorf("Failed cleaning pods: %v", err) + klog.Errorf("Failed cleaning pods: %v", err) } } } @@ -2100,7 +2100,7 @@ func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) { // Deletion is allowed to fail because the periodic cleanup routine // will trigger deletion again. if err := kl.deletePod(pod); err != nil { - glog.V(2).Infof("Failed to delete pod %q, err: %v", format.Pod(pod), err) + klog.V(2).Infof("Failed to delete pod %q, err: %v", format.Pod(pod), err) } kl.probeManager.RemovePod(pod) } @@ -2159,20 +2159,20 @@ func (kl *Kubelet) updateRuntimeUp() { s, err := kl.containerRuntime.Status() if err != nil { - glog.Errorf("Container runtime sanity check failed: %v", err) + klog.Errorf("Container runtime sanity check failed: %v", err) return } if s == nil { - glog.Errorf("Container runtime status is nil") + klog.Errorf("Container runtime status is nil") return } // Periodically log the whole runtime status for debugging. // TODO(random-liu): Consider to send node event when optional // condition is unmet. - glog.V(4).Infof("Container runtime status: %v", s) + klog.V(4).Infof("Container runtime status: %v", s) networkReady := s.GetRuntimeCondition(kubecontainer.NetworkReady) if networkReady == nil || !networkReady.Status { - glog.Errorf("Container runtime network not ready: %v", networkReady) + klog.Errorf("Container runtime network not ready: %v", networkReady) kl.runtimeState.setNetworkState(fmt.Errorf("runtime network not ready: %v", networkReady)) } else { // Set nil if the container runtime network is ready. @@ -2184,7 +2184,7 @@ func (kl *Kubelet) updateRuntimeUp() { runtimeReady := s.GetRuntimeCondition(kubecontainer.RuntimeReady) // If RuntimeReady is not set or is false, report an error. 
if runtimeReady == nil || !runtimeReady.Status { - glog.Errorf("Container runtime not ready: %v", runtimeReady) + klog.Errorf("Container runtime not ready: %v", runtimeReady) return } kl.oneTimeInitializer.Do(kl.initializeRuntimeDependentModules) @@ -2241,12 +2241,12 @@ func (kl *Kubelet) fastStatusUpdateOnce() { time.Sleep(100 * time.Millisecond) node, err := kl.GetNode() if err != nil { - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) continue } if node.Spec.PodCIDR != "" { if _, err := kl.updatePodCIDR(node.Spec.PodCIDR); err != nil { - glog.Errorf("Pod CIDR update failed %v", err) + klog.Errorf("Pod CIDR update failed %v", err) continue } kl.updateRuntimeUp() diff --git a/pkg/kubelet/kubelet_getters.go b/pkg/kubelet/kubelet_getters.go index 89077eb7f73cc..7256adb1e162e 100644 --- a/pkg/kubelet/kubelet_getters.go +++ b/pkg/kubelet/kubelet_getters.go @@ -22,8 +22,8 @@ import ( "net" "path/filepath" - "github.com/golang/glog" cadvisorapiv1 "github.com/google/cadvisor/info/v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -261,13 +261,13 @@ func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, err if pathExists, pathErr := volumeutil.PathExists(podVolDir); pathErr != nil { return volumes, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr) } else if !pathExists { - glog.Warningf("Path %q does not exist", podVolDir) + klog.Warningf("Path %q does not exist", podVolDir) return volumes, nil } volumePluginDirs, err := ioutil.ReadDir(podVolDir) if err != nil { - glog.Errorf("Could not read directory %s: %v", podVolDir, err) + klog.Errorf("Could not read directory %s: %v", podVolDir, err) return volumes, err } for _, volumePluginDir := range volumePluginDirs { diff --git a/pkg/kubelet/kubelet_network.go b/pkg/kubelet/kubelet_network.go index aa791280cb146..c604eddb4c108 100644 --- a/pkg/kubelet/kubelet_network.go +++ b/pkg/kubelet/kubelet_network.go @@ -19,8 +19,8 @@ package kubelet import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ -74,7 +74,7 @@ func (kl *Kubelet) updatePodCIDR(cidr string) (bool, error) { return true, fmt.Errorf("failed to update pod CIDR: %v", err) } - glog.Infof("Setting Pod CIDR: %v -> %v", podCIDR, cidr) + klog.Infof("Setting Pod CIDR: %v -> %v", podCIDR, cidr) kl.runtimeState.setPodCIDR(cidr) return true, nil } diff --git a/pkg/kubelet/kubelet_network_linux.go b/pkg/kubelet/kubelet_network_linux.go index 002b226b19073..ec7d41d9557e2 100644 --- a/pkg/kubelet/kubelet_network_linux.go +++ b/pkg/kubelet/kubelet_network_linux.go @@ -21,7 +21,7 @@ package kubelet import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ -33,73 +33,73 @@ import ( // Marked connection will get SNAT on POSTROUTING Chain in nat table func (kl *Kubelet) syncNetworkUtil() { if kl.iptablesMasqueradeBit < 0 || kl.iptablesMasqueradeBit > 31 { - glog.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", kl.iptablesMasqueradeBit) + klog.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", kl.iptablesMasqueradeBit) return } if kl.iptablesDropBit < 0 || kl.iptablesDropBit > 31 { - glog.Errorf("invalid iptables-drop-bit %v not in [0, 31]", kl.iptablesDropBit) + klog.Errorf("invalid iptables-drop-bit %v not in [0, 31]", kl.iptablesDropBit) return } if kl.iptablesDropBit == kl.iptablesMasqueradeBit { - 
glog.Errorf("iptables-masquerade-bit %v and iptables-drop-bit %v must be different", kl.iptablesMasqueradeBit, kl.iptablesDropBit) + klog.Errorf("iptables-masquerade-bit %v and iptables-drop-bit %v must be different", kl.iptablesMasqueradeBit, kl.iptablesDropBit) return } // Setup KUBE-MARK-DROP rules dropMark := getIPTablesMark(kl.iptablesDropBit) if _, err := kl.iptClient.EnsureChain(utiliptables.TableNAT, KubeMarkDropChain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkDropChain, err) + klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkDropChain, err) return } if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkDropChain, "-j", "MARK", "--set-xmark", dropMark); err != nil { - glog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkDropChain, err) + klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkDropChain, err) return } if _, err := kl.iptClient.EnsureChain(utiliptables.TableFilter, KubeFirewallChain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, KubeFirewallChain, err) + klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, KubeFirewallChain, err) return } if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableFilter, KubeFirewallChain, "-m", "comment", "--comment", "kubernetes firewall for dropping marked packets", "-m", "mark", "--mark", dropMark, "-j", "DROP"); err != nil { - glog.Errorf("Failed to ensure rule to drop packet marked by %v in %v chain %v: %v", KubeMarkDropChain, utiliptables.TableFilter, KubeFirewallChain, err) + klog.Errorf("Failed to ensure rule to drop packet marked by %v in %v chain %v: %v", KubeMarkDropChain, utiliptables.TableFilter, KubeFirewallChain, err) return } if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainOutput, "-j", string(KubeFirewallChain)); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainOutput, KubeFirewallChain, err) + klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainOutput, KubeFirewallChain, err) return } if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainInput, "-j", string(KubeFirewallChain)); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainInput, KubeFirewallChain, err) + klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainInput, KubeFirewallChain, err) return } // Setup KUBE-MARK-MASQ rules masqueradeMark := getIPTablesMark(kl.iptablesMasqueradeBit) if _, err := kl.iptClient.EnsureChain(utiliptables.TableNAT, KubeMarkMasqChain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkMasqChain, err) + klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkMasqChain, err) return } if _, err := kl.iptClient.EnsureChain(utiliptables.TableNAT, KubePostroutingChain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubePostroutingChain, err) + klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubePostroutingChain, err) return } if _, err := 
kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkMasqChain, "-j", "MARK", "--set-xmark", masqueradeMark); err != nil { - glog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkMasqChain, err) + klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkMasqChain, err) return } if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableNAT, utiliptables.ChainPostrouting, "-m", "comment", "--comment", "kubernetes postrouting rules", "-j", string(KubePostroutingChain)); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, KubePostroutingChain, err) + klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, KubePostroutingChain, err) return } if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubePostroutingChain, "-m", "comment", "--comment", "kubernetes service traffic requiring SNAT", "-m", "mark", "--mark", masqueradeMark, "-j", "MASQUERADE"); err != nil { - glog.Errorf("Failed to ensure SNAT rule for packets marked by %v in %v chain %v: %v", KubeMarkMasqChain, utiliptables.TableNAT, KubePostroutingChain, err) + klog.Errorf("Failed to ensure SNAT rule for packets marked by %v in %v chain %v: %v", KubeMarkMasqChain, utiliptables.TableNAT, KubePostroutingChain, err) return } } diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 5dbbed729d7a6..d15e4167e38d2 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -24,7 +24,7 @@ import ( "sort" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" @@ -65,14 +65,14 @@ func (kl *Kubelet) registerWithAPIServer() { node, err := kl.initialNode() if err != nil { - glog.Errorf("Unable to construct v1.Node object for kubelet: %v", err) + klog.Errorf("Unable to construct v1.Node object for kubelet: %v", err) continue } - glog.Infof("Attempting to register node %s", node.Name) + klog.Infof("Attempting to register node %s", node.Name) registered := kl.tryRegisterWithAPIServer(node) if registered { - glog.Infof("Successfully registered node %s", node.Name) + klog.Infof("Successfully registered node %s", node.Name) kl.registrationCompleted = true return } @@ -91,27 +91,27 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { } if !apierrors.IsAlreadyExists(err) { - glog.Errorf("Unable to register node %q with API server: %v", kl.nodeName, err) + klog.Errorf("Unable to register node %q with API server: %v", kl.nodeName, err) return false } existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(string(kl.nodeName), metav1.GetOptions{}) if err != nil { - glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err) + klog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err) return false } if existingNode == nil { - glog.Errorf("Unable to register node %q with API server: no node instance returned", kl.nodeName) + klog.Errorf("Unable to register node %q with API server: no node instance returned", kl.nodeName) return false } originalNode := existingNode.DeepCopy() if originalNode == nil { - glog.Errorf("Nil %q node object", kl.nodeName) + klog.Errorf("Nil %q node object", kl.nodeName) return false } - glog.Infof("Node %s was previously registered", kl.nodeName) + klog.Infof("Node 
%s was previously registered", kl.nodeName) // Edge case: the node was previously registered; reconcile // the value of the controller-managed attach-detach @@ -121,7 +121,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool { requiresUpdate = kl.reconcileExtendedResource(node, existingNode) || requiresUpdate if requiresUpdate { if _, _, err := nodeutil.PatchNodeStatus(kl.kubeClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, existingNode); err != nil { - glog.Errorf("Unable to reconcile node %q with API server: error updating node: %v", kl.nodeName, err) + klog.Errorf("Unable to reconcile node %q with API server: error updating node: %v", kl.nodeName, err) return false } } @@ -193,10 +193,10 @@ func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v // not have the same value, update the existing node with // the correct value of the annotation. if !newSet { - glog.Info("Controller attach-detach setting changed to false; updating existing Node") + klog.Info("Controller attach-detach setting changed to false; updating existing Node") delete(existingNode.Annotations, volutil.ControllerManagedAttachAnnotation) } else { - glog.Info("Controller attach-detach setting changed to true; updating existing Node") + klog.Info("Controller attach-detach setting changed to true; updating existing Node") if existingNode.Annotations == nil { existingNode.Annotations = make(map[string]string) } @@ -275,24 +275,24 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { node.Annotations = make(map[string]string) } - glog.Infof("Setting node annotation to enable volume controller attach/detach") + klog.Infof("Setting node annotation to enable volume controller attach/detach") node.Annotations[volutil.ControllerManagedAttachAnnotation] = "true" } else { - glog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes") + klog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes") } if kl.keepTerminatedPodVolumes { if node.Annotations == nil { node.Annotations = make(map[string]string) } - glog.Infof("Setting node annotation to keep pod volumes of terminated pods attached to the node") + klog.Infof("Setting node annotation to keep pod volumes of terminated pods attached to the node") node.Annotations[volutil.KeepTerminatedPodVolumesAnnotation] = "true" } // @question: should this be place after the call to the cloud provider? 
which also applies labels for k, v := range kl.nodeLabels { if cv, found := node.ObjectMeta.Labels[k]; found { - glog.Warningf("the node label %s=%s will overwrite default setting %s", k, v, cv) + klog.Warningf("the node label %s=%s will overwrite default setting %s", k, v, cv) } node.ObjectMeta.Labels[k] = v } @@ -323,7 +323,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { return nil, err } if instanceType != "" { - glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType) + klog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType) node.ObjectMeta.Labels[kubeletapis.LabelInstanceType] = instanceType } // If the cloud has zone information, label the node with the zone information @@ -334,11 +334,11 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) { return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err) } if zone.FailureDomain != "" { - glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain) + klog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain) node.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = zone.FailureDomain } if zone.Region != "" { - glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region) + klog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region) node.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] = zone.Region } } @@ -364,20 +364,20 @@ func (kl *Kubelet) syncNodeStatus() { kl.registerWithAPIServer() } if err := kl.updateNodeStatus(); err != nil { - glog.Errorf("Unable to update node status: %v", err) + klog.Errorf("Unable to update node status: %v", err) } } // updateNodeStatus updates node status to master with retries if there is any // change or enough time passed from the last sync. func (kl *Kubelet) updateNodeStatus() error { - glog.V(5).Infof("Updating node status") + klog.V(5).Infof("Updating node status") for i := 0; i < nodeStatusUpdateRetry; i++ { if err := kl.tryUpdateNodeStatus(i); err != nil { if i > 0 && kl.onRepeatedHeartbeatFailure != nil { kl.onRepeatedHeartbeatFailure() } - glog.Errorf("Error updating node status, will retry: %v", err) + klog.Errorf("Error updating node status, will retry: %v", err) } else { return nil } @@ -414,7 +414,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error { // node.Spec.PodCIDR being non-empty. We also need to know if pod CIDR is // actually changed. if podCIDRChanged, err = kl.updatePodCIDR(node.Spec.PodCIDR); err != nil { - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) } } @@ -443,7 +443,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error { // recordNodeStatusEvent records an event of the given type with the given // message for the node. func (kl *Kubelet) recordNodeStatusEvent(eventType, event string) { - glog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName) + klog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName) // TODO: This requires a transaction, either both node status is updated // and event is recorded or neither should happen, see issue #6055. kl.recorder.Eventf(kl.nodeRef, eventType, event, "Node %s status is now: %s", kl.nodeName, event) @@ -475,9 +475,9 @@ func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) error { // refactor the node status condition code out to a different file. 
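klog keeps glog's -v verbosity semantics, so the V(5) trace lines and V(2) state-change lines in the hunks above behave exactly as before. One subtlety worth noting: klog.V(n).Infof skips formatting and output when the verbosity is below n, but its arguments are still evaluated at the call site, which is why genuinely expensive messages are worth guarding explicitly. A minimal sketch, with buildNodeStatusDump as a hypothetical expensive helper:

    package main

    import "k8s.io/klog"

    // buildNodeStatusDump is a hypothetical, expensive diagnostic helper.
    func buildNodeStatusDump() string { return "..." }

    func main() {
        // Output appears only when the process runs with -v=5 or higher,
        // but the argument expressions are evaluated unconditionally.
        klog.V(5).Infof("Updating node status")

        // Guard explicitly when building the message itself is costly.
        if klog.V(5) {
            klog.Infof("node status: %s", buildNodeStatusDump())
        }
        klog.Flush()
    }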
func (kl *Kubelet) setNodeStatus(node *v1.Node) { for i, f := range kl.setNodeStatusFuncs { - glog.V(5).Infof("Setting node status at position %v", i) + klog.V(5).Infof("Setting node status at position %v", i) if err := f(node); err != nil { - glog.Warningf("Failed to set some node status fields: %s", err) + klog.Warningf("Failed to set some node status fields: %s", err) } } } diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 5216a7b2f42f2..ca7f633660838 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -32,7 +32,6 @@ import ( "strings" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -41,6 +40,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilvalidation "k8s.io/apimachinery/pkg/util/validation" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/api/v1/resource" podshelper "k8s.io/kubernetes/pkg/apis/core/pods" @@ -104,7 +104,7 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol } vol, ok := podVolumes[device.Name] if !ok || vol.BlockVolumeMapper == nil { - glog.Errorf("Block volume cannot be satisfied for container %q, because the volume is missing or the volume mapper is nil: %+v", container.Name, device) + klog.Errorf("Block volume cannot be satisfied for container %q, because the volume is missing or the volume mapper is nil: %+v", container.Name, device) return nil, fmt.Errorf("cannot find volume %q to pass into container %q", device.Name, container.Name) } // Get a symbolic link associated to a block device under pod device path @@ -118,7 +118,7 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol if vol.ReadOnly { permission = "r" } - glog.V(4).Infof("Device will be attached to container %q. Path on host: %v", container.Name, symlinkPath) + klog.V(4).Infof("Device will be attached to container %q. Path on host: %v", container.Name, symlinkPath) devices = append(devices, kubecontainer.DeviceInfo{PathOnHost: symlinkPath, PathInContainer: device.DevicePath, Permissions: permission}) } } @@ -135,7 +135,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h // Kubernetes will not mount /etc/hosts if: // - when the Pod sandbox is being created, its IP is still unknown. Hence, PodIP will not have been set. 
mountEtcHostsFile := len(podIP) > 0 && runtime.GOOS != "windows" - glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile) + klog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile) mounts := []kubecontainer.Mount{} var cleanupAction func() for i, mount := range container.VolumeMounts { @@ -143,7 +143,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath) vol, ok := podVolumes[mount.Name] if !ok || vol.Mounter == nil { - glog.Errorf("Mount cannot be satisfied for container %q, because the volume is missing or the volume mounter is nil: %+v", container.Name, mount) + klog.Errorf("Mount cannot be satisfied for container %q, because the volume is missing or the volume mounter is nil: %+v", container.Name, mount) return nil, cleanupAction, fmt.Errorf("cannot find volume %q to mount into container %q", mount.Name, container.Name) } @@ -182,7 +182,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h hostPath = filepath.Join(volumePath, mount.SubPath) if subPathExists, err := mounter.ExistsPath(hostPath); err != nil { - glog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath) + klog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath) } else if !subPathExists { // Create the sub path now because if it's auto-created later when referenced, it may have an // incorrect ownership and mode. For example, the sub path directory must have at least g+rwx @@ -195,7 +195,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h } if err := mounter.SafeMakeDir(mount.SubPath, volumePath, perm); err != nil { // Don't pass detailed error back to the user because it could give information about host filesystem - glog.Errorf("failed to create subPath directory for volumeMount %q of container %q: %v", mount.Name, container.Name, err) + klog.Errorf("failed to create subPath directory for volumeMount %q of container %q: %v", mount.Name, container.Name, err) return nil, cleanupAction, fmt.Errorf("failed to create subPath directory for volumeMount %q of container %q", mount.Name, container.Name) } } @@ -209,7 +209,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h }) if err != nil { // Don't pass detailed error back to the user because it could give information about host filesystem - glog.Errorf("failed to prepare subPath for volumeMount %q of container %q: %v", mount.Name, container.Name, err) + klog.Errorf("failed to prepare subPath for volumeMount %q of container %q: %v", mount.Name, container.Name, err) return nil, cleanupAction, fmt.Errorf("failed to prepare subPath for volumeMount %q of container %q", mount.Name, container.Name) } } @@ -229,7 +229,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h if err != nil { return nil, cleanupAction, err } - glog.V(5).Infof("Pod %q container %q mount %q has propagation %q", format.Pod(pod), container.Name, mount.Name, propagation) + klog.V(5).Infof("Pod %q container %q mount %q has propagation %q", format.Pod(pod), container.Name, mount.Name, propagation) mustMountRO := vol.Mounter.GetAttributes().ReadOnly @@ -373,7 +373,7 @@ func truncatePodHostnameIfNeeded(podName, 
hostname string) (string, error) { return hostname, nil } truncated := hostname[:hostnameMaxLen] - glog.Errorf("hostname for pod:%q was longer than %d. Truncated hostname to :%q", podName, hostnameMaxLen, truncated) + klog.Errorf("hostname for pod:%q was longer than %d. Truncated hostname to :%q", podName, hostnameMaxLen, truncated) // hostname should not end with '-' or '.' truncated = strings.TrimRight(truncated, "-.") if len(truncated) == 0 { @@ -465,7 +465,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai if len(container.TerminationMessagePath) != 0 && runtime.GOOS != "windows" { p := kl.getPodContainerDir(pod.UID, container.Name) if err := os.MkdirAll(p, 0750); err != nil { - glog.Errorf("Error on creating %q: %v", p, err) + klog.Errorf("Error on creating %q: %v", p, err) } else { opts.PodContainerDir = p } @@ -803,7 +803,7 @@ func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *k return err } if err := kl.containerManager.UpdateQOSCgroups(); err != nil { - glog.V(2).Infof("Failed to update QoS cgroups while killing pod: %v", err) + klog.V(2).Infof("Failed to update QoS cgroups while killing pod: %v", err) } return nil } @@ -831,7 +831,7 @@ func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) []v1.Secret { for _, secretRef := range pod.Spec.ImagePullSecrets { secret, err := kl.secretManager.GetSecret(pod.Namespace, secretRef.Name) if err != nil { - glog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err) + klog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err) continue } @@ -885,13 +885,13 @@ func (kl *Kubelet) IsPodDeleted(uid types.UID) bool { func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool { if !notRunning(status.ContainerStatuses) { // We shouldnt delete pods that still have running containers - glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod)) + klog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod)) return false } // pod's containers should be deleted runtimeStatus, err := kl.podCache.Get(pod.UID) if err != nil { - glog.V(3).Infof("Pod %q is terminated, Error getting runtimeStatus from the podCache: %s", format.Pod(pod), err) + klog.V(3).Infof("Pod %q is terminated, Error getting runtimeStatus from the podCache: %s", format.Pod(pod), err) return false } if len(runtimeStatus.ContainerStatuses) > 0 { @@ -899,18 +899,18 @@ func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bo for _, status := range runtimeStatus.ContainerStatuses { statusStr += fmt.Sprintf("%+v ", *status) } - glog.V(3).Infof("Pod %q is terminated, but some containers have not been cleaned up: %s", format.Pod(pod), statusStr) + klog.V(3).Infof("Pod %q is terminated, but some containers have not been cleaned up: %s", format.Pod(pod), statusStr) return false } if kl.podVolumesExist(pod.UID) && !kl.keepTerminatedPodVolumes { // We shouldnt delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes - glog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod)) + klog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod)) return false } if kl.kubeletConfiguration.CgroupsPerQOS { 
pcm := kl.containerManager.NewPodContainerManager() if pcm.Exists(pod) { - glog.V(3).Infof("Pod %q is terminated, but pod cgroup sandbox has not been cleaned up", format.Pod(pod)) + klog.V(3).Infof("Pod %q is terminated, but pod cgroup sandbox has not been cleaned up", format.Pod(pod)) return false } } @@ -1009,7 +1009,7 @@ func (kl *Kubelet) HandlePodCleanups() error { runningPods, err := kl.runtimeCache.GetPods() if err != nil { - glog.Errorf("Error listing containers: %#v", err) + klog.Errorf("Error listing containers: %#v", err) return err } for _, pod := range runningPods { @@ -1025,7 +1025,7 @@ func (kl *Kubelet) HandlePodCleanups() error { // TODO: Evaluate the performance impact of bypassing the runtime cache. runningPods, err = kl.containerRuntime.GetPods(false) if err != nil { - glog.Errorf("Error listing containers: %#v", err) + klog.Errorf("Error listing containers: %#v", err) return err } @@ -1038,7 +1038,7 @@ func (kl *Kubelet) HandlePodCleanups() error { // We want all cleanup tasks to be run even if one of them failed. So // we just log an error here and continue other cleanup tasks. // This also applies to the other clean up tasks. - glog.Errorf("Failed cleaning up orphaned pod directories: %v", err) + klog.Errorf("Failed cleaning up orphaned pod directories: %v", err) } // Remove any orphaned mirror pods. @@ -1072,10 +1072,10 @@ func (kl *Kubelet) podKiller() { if !exists { go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod) { - glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name) + klog.V(2).Infof("Killing unwanted pod %q", runningPod.Name) err := kl.killPod(apiPod, runningPod, nil, nil) if err != nil { - glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err) + klog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err) } lock.Lock() killing.Delete(string(runningPod.ID)) @@ -1281,7 +1281,7 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { case pendingInitialization > 0: fallthrough case waiting > 0: - glog.V(5).Infof("pod waiting > 0, pending") + klog.V(5).Infof("pod waiting > 0, pending") // One or more containers has not been started return v1.PodPending case running > 0 && unknown == 0: @@ -1308,7 +1308,7 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { // and in the process of restarting return v1.PodRunning default: - glog.V(5).Infof("pod default case, pending") + klog.V(5).Infof("pod default case, pending") return v1.PodPending } } @@ -1316,7 +1316,7 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase { // generateAPIPodStatus creates the final API pod status for a pod, given the // internal pod status. 
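The cleanup hunks above follow a log-and-continue policy: HandlePodCleanups runs every housekeeping task even when an earlier one fails, logging errors through klog instead of aborting the pass. A hedged sketch of that policy in isolation (cleanupTask and the sample tasks are hypothetical):

    package main

    import (
        "errors"

        "k8s.io/klog"
    )

    // cleanupTask is a hypothetical stand-in for one housekeeping step.
    type cleanupTask func() error

    // runCleanups runs all tasks even if some fail, so one broken step
    // cannot starve the rest of the loop; failures are only logged.
    func runCleanups(tasks []cleanupTask) {
        for i, task := range tasks {
            if err := task(); err != nil {
                klog.Errorf("cleanup task %d failed: %v", i, err)
            }
        }
    }

    func main() {
        runCleanups([]cleanupTask{
            func() error { return errors.New("orphaned pod directory still has mounts") },
            func() error { return nil },
        })
        klog.Flush()
    }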
func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus { - glog.V(3).Infof("Generating status for %q", format.Pod(pod)) + klog.V(3).Infof("Generating status for %q", format.Pod(pod)) s := kl.convertStatusToAPIStatus(pod, podStatus) @@ -1338,7 +1338,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded { // API server shows terminal phase; transitions are not allowed if s.Phase != pod.Status.Phase { - glog.Errorf("Pod attempted illegal phase transition from %s to %s: %v", pod.Status.Phase, s.Phase, s) + klog.Errorf("Pod attempted illegal phase transition from %s to %s: %v", pod.Status.Phase, s.Phase, s) // Force back to phase from the API server s.Phase = pod.Status.Phase } @@ -1358,7 +1358,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po if kl.kubeClient != nil { hostIP, err := kl.getHostIPAnyWay() if err != nil { - glog.V(4).Infof("Cannot get host IP: %v", err) + klog.V(4).Infof("Cannot get host IP: %v", err) } else { s.HostIP = hostIP.String() if kubecontainer.IsHostNetworkPod(pod) && s.PodIP == "" { @@ -1665,13 +1665,13 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups(cgroupPods map[types.UID]cm.CgroupN // process in the cgroup to the minimum value while we wait. if the kubelet // is configured to keep terminated volumes, we will delete the cgroup and not block. if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist && !kl.keepTerminatedPodVolumes { - glog.V(3).Infof("Orphaned pod %q found, but volumes not yet removed. Reducing cpu to minimum", uid) + klog.V(3).Infof("Orphaned pod %q found, but volumes not yet removed. Reducing cpu to minimum", uid) if err := pcm.ReduceCPULimits(val); err != nil { - glog.Warningf("Failed to reduce cpu time for pod %q pending volume cleanup due to %v", uid, err) + klog.Warningf("Failed to reduce cpu time for pod %q pending volume cleanup due to %v", uid, err) } continue } - glog.V(3).Infof("Orphaned pod %q found, removing pod cgroups", uid) + klog.V(3).Infof("Orphaned pod %q found, removing pod cgroups", uid) // Destroy all cgroups of pod that should not be running, // by first killing all the attached processes to these cgroups. 
// We ignore errors thrown by the method, as the housekeeping loop would @@ -1734,13 +1734,13 @@ func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool { if volume.PersistentVolumeClaim != nil { pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) if err != nil { - glog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err) + klog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err) continue } if pvc != nil { referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) if err != nil { - glog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err) + klog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err) continue } if referencedVolume != nil && referencedVolume.Spec.HostPath != nil { diff --git a/pkg/kubelet/kubelet_resources.go b/pkg/kubelet/kubelet_resources.go index dc47a85880e33..be6d29738bac0 100644 --- a/pkg/kubelet/kubelet_resources.go +++ b/pkg/kubelet/kubelet_resources.go @@ -19,7 +19,7 @@ package kubelet import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/api/v1/resource" @@ -42,7 +42,7 @@ func (kl *Kubelet) defaultPodLimitsForDownwardAPI(pod *v1.Pod, container *v1.Con return nil, nil, fmt.Errorf("failed to find node object, expected a node") } allocatable := node.Status.Allocatable - glog.Infof("allocatable: %v", allocatable) + klog.Infof("allocatable: %v", allocatable) outputPod := pod.DeepCopy() for idx := range outputPod.Spec.Containers { resource.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable) diff --git a/pkg/kubelet/kubelet_volumes.go b/pkg/kubelet/kubelet_volumes.go index 09179fec80419..7681ee6529e9e 100644 --- a/pkg/kubelet/kubelet_volumes.go +++ b/pkg/kubelet/kubelet_volumes.go @@ -19,11 +19,11 @@ package kubelet import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/util/removeall" "k8s.io/kubernetes/pkg/volume" @@ -62,11 +62,11 @@ func (kl *Kubelet) podVolumesExist(podUID types.UID) bool { // There are some volume plugins such as flexvolume might not have mounts. 
See issue #61229 volumePaths, err := kl.getMountedVolumePathListFromDisk(podUID) if err != nil { - glog.Errorf("pod %q found, but error %v occurred during checking mounted volumes from disk", podUID, err) + klog.Errorf("pod %q found, but error %v occurred during checking mounted volumes from disk", podUID, err) return true } if len(volumePaths) > 0 { - glog.V(4).Infof("pod %q found, but volumes are still mounted on disk %v", podUID, volumePaths) + klog.V(4).Infof("pod %q found, but volumes are still mounted on disk %v", podUID, volumePaths) return true } @@ -85,7 +85,7 @@ func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod, o if err != nil { return nil, fmt.Errorf("failed to instantiate mounter for volume: %s using plugin: %s with a root cause: %v", spec.Name(), plugin.GetPluginName(), err) } - glog.V(10).Infof("Using volume plugin %q to mount %s", plugin.GetPluginName(), spec.Name()) + klog.V(10).Infof("Using volume plugin %q to mount %s", plugin.GetPluginName(), spec.Name()) return physicalMounter, nil } @@ -115,7 +115,7 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon // If volumes have not been unmounted/detached, do not delete directory. // Doing so may result in corruption of data. if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist { - glog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid) + klog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid) continue } // If there are still volume directories, do not delete directory @@ -128,18 +128,18 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk", uid)) continue } - glog.V(3).Infof("Orphaned pod %q found, removing", uid) + klog.V(3).Infof("Orphaned pod %q found, removing", uid) if err := removeall.RemoveAllOneFilesystem(kl.mounter, kl.getPodDir(uid)); err != nil { - glog.Errorf("Failed to remove orphaned pod %q dir; err: %v", uid, err) + klog.Errorf("Failed to remove orphaned pod %q dir; err: %v", uid, err) orphanRemovalErrors = append(orphanRemovalErrors, err) } } logSpew := func(errs []error) { if len(errs) > 0 { - glog.Errorf("%v : There were a total of %v errors similar to this. Turn up verbosity to see them.", errs[0], len(errs)) + klog.Errorf("%v : There were a total of %v errors similar to this. 
Turn up verbosity to see them.", errs[0], len(errs)) for _, err := range errs { - glog.V(5).Infof("Orphan pod: %v", err) + klog.V(5).Infof("Orphan pod: %v", err) } } } diff --git a/pkg/kubelet/kubeletconfig/BUILD b/pkg/kubelet/kubeletconfig/BUILD index fd98a1cea8634..55383942ff0d7 100644 --- a/pkg/kubelet/kubeletconfig/BUILD +++ b/pkg/kubelet/kubeletconfig/BUILD @@ -30,7 +30,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/kubeletconfig/configsync.go b/pkg/kubelet/kubeletconfig/configsync.go index cb92fb7e6545c..7a897f3badf0f 100644 --- a/pkg/kubelet/kubeletconfig/configsync.go +++ b/pkg/kubelet/kubeletconfig/configsync.go @@ -21,7 +21,7 @@ import ( "os" "time" - "github.com/golang/glog" + "k8s.io/klog" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -197,7 +197,7 @@ func restartForNewConfig(eventClient v1core.EventsGetter, nodeName string, sourc // we directly log and send the event, instead of using the event recorder, // because the event recorder won't flush its queue before we exit (we'd lose the event) event := makeEvent(nodeName, apiv1.EventTypeNormal, KubeletConfigChangedEventReason, message) - glog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) + klog.V(3).Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(event); err != nil { utillog.Errorf("failed to send event, error: %v", err) } diff --git a/pkg/kubelet/kubeletconfig/util/log/BUILD b/pkg/kubelet/kubeletconfig/util/log/BUILD index 34b20a27e3455..f47e2eead47eb 100644 --- a/pkg/kubelet/kubeletconfig/util/log/BUILD +++ b/pkg/kubelet/kubeletconfig/util/log/BUILD @@ -9,7 +9,7 @@ go_library( name = "go_default_library", srcs = ["log.go"], importpath = "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log", - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) filegroup( diff --git a/pkg/kubelet/kubeletconfig/util/log/log.go b/pkg/kubelet/kubeletconfig/util/log/log.go index b4ecfe4dc996c..6e68b46a0fe6f 100644 --- a/pkg/kubelet/kubeletconfig/util/log/log.go +++ b/pkg/kubelet/kubeletconfig/util/log/log.go @@ -19,7 +19,7 @@ package log import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" ) const logFmt = "kubelet config controller: %s" @@ -33,7 +33,7 @@ func Errorf(format string, args ...interface{}) { } else { s = format } - glog.ErrorDepth(1, fmt.Sprintf(logFmt, s)) + klog.ErrorDepth(1, fmt.Sprintf(logFmt, s)) } // Infof shim that inserts "kubelet config controller" at the beginning of the log message, @@ -45,5 +45,5 @@ func Infof(format string, args ...interface{}) { } else { s = format } - glog.InfoDepth(1, fmt.Sprintf(logFmt, s)) + klog.InfoDepth(1, fmt.Sprintf(logFmt, s)) } diff --git a/pkg/kubelet/kuberuntime/BUILD b/pkg/kubelet/kuberuntime/BUILD index 67c04164816c1..12c16e7e548a4 100644 --- a/pkg/kubelet/kuberuntime/BUILD +++ b/pkg/kubelet/kuberuntime/BUILD @@ -66,9 +66,9 @@ go_library( "//staging/src/k8s.io/client-go/tools/reference:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", 
"//vendor/github.com/armon/circbuf:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/qos:go_default_library", diff --git a/pkg/kubelet/kuberuntime/helpers.go b/pkg/kubelet/kuberuntime/helpers.go index fd977d5a16e21..056e4d0ae75f6 100644 --- a/pkg/kubelet/kuberuntime/helpers.go +++ b/pkg/kubelet/kuberuntime/helpers.go @@ -22,10 +22,10 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -83,7 +83,7 @@ func toRuntimeProtocol(protocol v1.Protocol) runtimeapi.Protocol { return runtimeapi.Protocol_SCTP } - glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol) + klog.Warningf("Unknown protocol %q: defaulting to TCP", protocol) return runtimeapi.Protocol_TCP } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index 6e0b0ce045c91..9407a03c63a17 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -33,7 +33,7 @@ import ( "google.golang.org/grpc" "github.com/armon/circbuf" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -68,7 +68,7 @@ var ( func (m *kubeGenericRuntimeManager) recordContainerEvent(pod *v1.Pod, container *v1.Container, containerID, eventType, reason, message string, args ...interface{}) { ref, err := kubecontainer.GenerateContainerRef(pod, container) if err != nil { - glog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err) + klog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err) return } eventMessage := message @@ -101,9 +101,9 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb // Step 2: create the container. ref, err := kubecontainer.GenerateContainerRef(pod, container) if err != nil { - glog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err) + klog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err) } - glog.V(4).Infof("Generating ref for container %s: %#v", container.Name, ref) + klog.V(4).Infof("Generating ref for container %s: %#v", container.Name, ref) // For a new container, the RestartCount should be 0 restartCount := 0 @@ -162,7 +162,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb // to create it in the first place. it happens when journald logging driver is used with docker. 
if _, err := m.osInterface.Stat(containerLog); !os.IsNotExist(err) { if err := m.osInterface.Symlink(containerLog, legacySymlink); err != nil { - glog.Errorf("Failed to create legacy symbolic link %q to container %q log %q: %v", + klog.Errorf("Failed to create legacy symbolic link %q to container %q log %q: %v", legacySymlink, containerID, containerLog, err) } } @@ -177,7 +177,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb if handlerErr != nil { m.recordContainerEvent(pod, container, kubeContainerID.ID, v1.EventTypeWarning, events.FailedPostStartHook, msg) if err := m.killContainer(pod, kubeContainerID, container.Name, "FailedPostStartHook", nil); err != nil { - glog.Errorf("Failed to kill container %q(id=%q) in pod %q: %v, %v", + klog.Errorf("Failed to kill container %q(id=%q) in pod %q: %v, %v", container.Name, kubeContainerID.String(), format.Pod(pod), ErrPostStartHook, err) } return msg, fmt.Errorf("%s: %v", ErrPostStartHook, handlerErr) @@ -332,7 +332,7 @@ func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([] containers, err := m.runtimeService.ListContainers(filter) if err != nil { - glog.Errorf("getKubeletContainers failed: %v", err) + klog.Errorf("getKubeletContainers failed: %v", err) return nil, err } @@ -385,7 +385,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)}, }) if err != nil { - glog.Errorf("ListContainers error: %v", err) + klog.Errorf("ListContainers error: %v", err) return nil, err } @@ -394,7 +394,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n for i, c := range containers { status, err := m.runtimeService.ContainerStatus(c.Id) if err != nil { - glog.Errorf("ContainerStatus for %s error: %v", c.Id, err) + klog.Errorf("ContainerStatus for %s error: %v", c.Id, err) return nil, err } cStatus := toKubeContainerStatus(status, m.runtimeName) @@ -461,7 +461,7 @@ func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName strin // executePreStopHook runs the pre-stop lifecycle hooks if applicable and returns the duration it takes. 
func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 { - glog.V(3).Infof("Running preStop hook for container %q", containerID.String()) + klog.V(3).Infof("Running preStop hook for container %q", containerID.String()) start := metav1.Now() done := make(chan struct{}) @@ -469,16 +469,16 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID defer close(done) defer utilruntime.HandleCrash() if msg, err := m.runner.Run(containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil { - glog.Errorf("preStop hook for container %q failed: %v", containerSpec.Name, err) + klog.Errorf("preStop hook for container %q failed: %v", containerSpec.Name, err) m.recordContainerEvent(pod, containerSpec, containerID.ID, v1.EventTypeWarning, events.FailedPreStopHook, msg) } }() select { case <-time.After(time.Duration(gracePeriod) * time.Second): - glog.V(2).Infof("preStop hook for container %q did not complete in %d seconds", containerID, gracePeriod) + klog.V(2).Infof("preStop hook for container %q did not complete in %d seconds", containerID, gracePeriod) case <-done: - glog.V(3).Infof("preStop hook for container %q completed", containerID) + klog.V(3).Infof("preStop hook for container %q completed", containerID) } return int64(metav1.Now().Sub(start.Time).Seconds()) @@ -556,7 +556,7 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec gracePeriod = *pod.Spec.TerminationGracePeriodSeconds } - glog.V(2).Infof("Killing container %q with %d second grace period", containerID.String(), gracePeriod) + klog.V(2).Infof("Killing container %q with %d second grace period", containerID.String(), gracePeriod) // Run internal pre-stop lifecycle hook if err := m.internalLifecycle.PreStopContainer(containerID.ID); err != nil { @@ -573,14 +573,14 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubec } if gracePeriodOverride != nil { gracePeriod = *gracePeriodOverride - glog.V(3).Infof("Killing container %q, but using %d second grace period override", containerID, gracePeriod) + klog.V(3).Infof("Killing container %q, but using %d second grace period override", containerID, gracePeriod) } err := m.runtimeService.StopContainer(containerID.ID, gracePeriod) if err != nil { - glog.Errorf("Container %q termination failed with gracePeriod %d: %v", containerID.String(), gracePeriod, err) + klog.Errorf("Container %q termination failed with gracePeriod %d: %v", containerID.String(), gracePeriod, err) } else { - glog.V(3).Infof("Container %q exited normally", containerID.String()) + klog.V(3).Infof("Container %q exited normally", containerID.String()) } message := fmt.Sprintf("Killing container with id %s", containerID.String()) @@ -643,7 +643,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod, continue } // prune all other init containers that match this container name - glog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count) + klog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count) if err := m.removeContainer(status.ID.ID); err != nil { utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod))) continue @@ -653,7 +653,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod, if _, ok := 
m.containerRefManager.GetRef(status.ID); ok { m.containerRefManager.ClearRef(status.ID) } else { - glog.Warningf("No ref for container %q", status.ID) + klog.Warningf("No ref for container %q", status.ID) } } } @@ -675,7 +675,7 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus * } count++ // Purge all init containers that match this container name - glog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count) + klog.V(4).Infof("Removing init container %q instance %q %d", status.Name, status.ID.ID, count) if err := m.removeContainer(status.ID.ID); err != nil { utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod))) continue @@ -684,7 +684,7 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus * if _, ok := m.containerRefManager.GetRef(status.ID); ok { m.containerRefManager.ClearRef(status.ID) } else { - glog.Warningf("No ref for container %q", status.ID) + klog.Warningf("No ref for container %q", status.ID) } } } @@ -739,7 +739,7 @@ func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus) func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) { status, err := m.runtimeService.ContainerStatus(containerID.ID) if err != nil { - glog.V(4).Infof("failed to get container status for %v: %v", containerID.String(), err) + klog.V(4).Infof("failed to get container status for %v: %v", containerID.String(), err) return fmt.Errorf("Unable to retrieve container logs for %v", containerID.String()) } return m.ReadLogs(ctx, status.GetLogPath(), containerID.ID, logOptions, stdout, stderr) @@ -795,7 +795,7 @@ func (m *kubeGenericRuntimeManager) RunInContainer(id kubecontainer.ContainerID, // Notice that we assume that the container should only be removed in non-running state, and // it will not write container logs anymore in that state. func (m *kubeGenericRuntimeManager) removeContainer(containerID string) error { - glog.V(4).Infof("Removing container %q", containerID) + klog.V(4).Infof("Removing container %q", containerID) // Call internal container post-stop lifecycle hook. if err := m.internalLifecycle.PostStopContainer(containerID); err != nil { return err diff --git a/pkg/kubelet/kuberuntime/kuberuntime_gc.go b/pkg/kubelet/kuberuntime/kuberuntime_gc.go index 3e0afbc45b6b6..e2ba00d0645a6 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_gc.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_gc.go @@ -23,9 +23,9 @@ import ( "sort" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -123,7 +123,7 @@ func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int numToKeep := len(containers) - toRemove for i := len(containers) - 1; i >= numToKeep; i-- { if err := cgc.manager.removeContainer(containers[i].id); err != nil { - glog.Errorf("Failed to remove container %q: %v", containers[i].id, err) + klog.Errorf("Failed to remove container %q: %v", containers[i].id, err) } } @@ -145,16 +145,16 @@ func (cgc *containerGC) removeOldestNSandboxes(sandboxes []sandboxGCInfo, toRemo // removeSandbox removes the sandbox by sandboxID. 
func (cgc *containerGC) removeSandbox(sandboxID string) { - glog.V(4).Infof("Removing sandbox %q", sandboxID) + klog.V(4).Infof("Removing sandbox %q", sandboxID) // In normal cases, kubelet should've already called StopPodSandbox before // GC kicks in. To guard against the rare cases where this is not true, try // stopping the sandbox before removing it. if err := cgc.client.StopPodSandbox(sandboxID); err != nil { - glog.Errorf("Failed to stop sandbox %q before removing: %v", sandboxID, err) + klog.Errorf("Failed to stop sandbox %q before removing: %v", sandboxID, err) return } if err := cgc.client.RemovePodSandbox(sandboxID); err != nil { - glog.Errorf("Failed to remove sandbox %q: %v", sandboxID, err) + klog.Errorf("Failed to remove sandbox %q: %v", sandboxID, err) } } @@ -328,7 +328,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error { } err := osInterface.RemoveAll(filepath.Join(podLogsRootDirectory, name)) if err != nil { - glog.Errorf("Failed to remove pod logs directory %q: %v", name, err) + klog.Errorf("Failed to remove pod logs directory %q: %v", name, err) } } } @@ -340,7 +340,7 @@ func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error { if _, err := osInterface.Stat(logSymlink); os.IsNotExist(err) { err := osInterface.Remove(logSymlink) if err != nil { - glog.Errorf("Failed to remove container log dead symlink %q: %v", logSymlink, err) + klog.Errorf("Failed to remove container log dead symlink %q: %v", logSymlink, err) } } } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_image.go b/pkg/kubelet/kuberuntime/kuberuntime_image.go index a8f2c1c060bef..60fc6201c0180 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_image.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_image.go @@ -17,9 +17,9 @@ limitations under the License. 
package kuberuntime import ( - "github.com/golang/glog" "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" "k8s.io/kubernetes/pkg/credentialprovider" credentialprovidersecrets "k8s.io/kubernetes/pkg/credentialprovider/secrets" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" @@ -44,11 +44,11 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul imgSpec := &runtimeapi.ImageSpec{Image: img} creds, withCredentials := keyring.Lookup(repoToPull) if !withCredentials { - glog.V(3).Infof("Pulling image %q without credentials", img) + klog.V(3).Infof("Pulling image %q without credentials", img) imageRef, err := m.imageService.PullImage(imgSpec, nil) if err != nil { - glog.Errorf("Pull image %q failed: %v", img, err) + klog.Errorf("Pull image %q failed: %v", img, err) return "", err } @@ -84,7 +84,7 @@ func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pul func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (string, error) { status, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image.Image}) if err != nil { - glog.Errorf("ImageStatus for image %q failed: %v", image, err) + klog.Errorf("ImageStatus for image %q failed: %v", image, err) return "", err } if status == nil { @@ -99,7 +99,7 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error) allImages, err := m.imageService.ListImages(nil) if err != nil { - glog.Errorf("ListImages failed: %v", err) + klog.Errorf("ListImages failed: %v", err) return nil, err } @@ -119,7 +119,7 @@ func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error) func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error { err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: image.Image}) if err != nil { - glog.Errorf("Remove image %q failed: %v", image.Image, err) + klog.Errorf("Remove image %q failed: %v", image.Image, err) return err } @@ -133,7 +133,7 @@ func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) e func (m *kubeGenericRuntimeManager) ImageStats() (*kubecontainer.ImageStats, error) { allImages, err := m.imageService.ListImages(nil) if err != nil { - glog.Errorf("ListImages failed: %v", err) + klog.Errorf("ListImages failed: %v", err) return nil, err } stats := &kubecontainer.ImageStats{} diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 42b5ff9f86b18..90039beda0942 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -22,8 +22,8 @@ import ( "os" "time" - "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -181,21 +181,21 @@ func NewKubeGenericRuntimeManager( typedVersion, err := kubeRuntimeManager.runtimeService.Version(kubeRuntimeAPIVersion) if err != nil { - glog.Errorf("Get runtime version failed: %v", err) + klog.Errorf("Get runtime version failed: %v", err) return nil, err } // Only matching kubeRuntimeAPIVersion is supported now // TODO: Runtime API machinery is under discussion at https://github.com/kubernetes/kubernetes/issues/28642 if typedVersion.Version != kubeRuntimeAPIVersion { - glog.Errorf("Runtime api version %s is not supported, only %s is supported now", + klog.Errorf("Runtime api version %s is not supported, only %s is supported now", 
typedVersion.Version, kubeRuntimeAPIVersion) return nil, ErrVersionNotSupported } kubeRuntimeManager.runtimeName = typedVersion.RuntimeName - glog.Infof("Container runtime %s initialized, version: %s, apiVersion: %s", + klog.Infof("Container runtime %s initialized, version: %s, apiVersion: %s", typedVersion.RuntimeName, typedVersion.RuntimeVersion, typedVersion.RuntimeApiVersion) @@ -205,7 +205,7 @@ func NewKubeGenericRuntimeManager( // new runtime interface if _, err := osInterface.Stat(podLogsRootDirectory); os.IsNotExist(err) { if err := osInterface.MkdirAll(podLogsRootDirectory, 0755); err != nil { - glog.Errorf("Failed to create directory %q: %v", podLogsRootDirectory, err) + klog.Errorf("Failed to create directory %q: %v", podLogsRootDirectory, err) } } @@ -244,7 +244,7 @@ func newRuntimeVersion(version string) (*utilversion.Version, error) { func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionResponse, error) { typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion) if err != nil { - glog.Errorf("Get remote runtime typed version failed: %v", err) + klog.Errorf("Get remote runtime typed version failed: %v", err) return nil, err } return typedVersion, nil @@ -254,7 +254,7 @@ func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionRespon func (m *kubeGenericRuntimeManager) Version() (kubecontainer.Version, error) { typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion) if err != nil { - glog.Errorf("Get remote runtime version failed: %v", err) + klog.Errorf("Get remote runtime version failed: %v", err) return nil, err } @@ -296,7 +296,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err for i := range sandboxes { s := sandboxes[i] if s.Metadata == nil { - glog.V(4).Infof("Sandbox does not have metadata: %+v", s) + klog.V(4).Infof("Sandbox does not have metadata: %+v", s) continue } podUID := kubetypes.UID(s.Metadata.Uid) @@ -310,7 +310,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err p := pods[podUID] converted, err := m.sandboxToKubeContainer(s) if err != nil { - glog.V(4).Infof("Convert %q sandbox %v of pod %q failed: %v", m.runtimeName, s, podUID, err) + klog.V(4).Infof("Convert %q sandbox %v of pod %q failed: %v", m.runtimeName, s, podUID, err) continue } p.Sandboxes = append(p.Sandboxes, converted) @@ -323,7 +323,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err for i := range containers { c := containers[i] if c.Metadata == nil { - glog.V(4).Infof("Container does not have metadata: %+v", c) + klog.V(4).Infof("Container does not have metadata: %+v", c) continue } @@ -340,7 +340,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err converted, err := m.toKubeContainer(c) if err != nil { - glog.V(4).Infof("Convert %s container %v of pod %q failed: %v", m.runtimeName, c, labelledInfo.PodUID, err) + klog.V(4).Infof("Convert %s container %v of pod %q failed: %v", m.runtimeName, c, labelledInfo.PodUID, err) continue } @@ -394,7 +394,7 @@ type podActions struct { // (changed, new attempt, original sandboxID if exist). func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, uint32, string) { if len(podStatus.SandboxStatuses) == 0 { - glog.V(2).Infof("No sandbox for pod %q can be found. Need to start a new one", format.Pod(pod)) + klog.V(2).Infof("No sandbox for pod %q can be found. 
Need to start a new one", format.Pod(pod)) return true, 0, "" } @@ -408,23 +408,23 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *ku // Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one. sandboxStatus := podStatus.SandboxStatuses[0] if readySandboxCount > 1 { - glog.V(2).Infof("More than 1 sandboxes for pod %q are ready. Need to reconcile them", format.Pod(pod)) + klog.V(2).Infof("More than 1 sandboxes for pod %q are ready. Need to reconcile them", format.Pod(pod)) return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id } if sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY { - glog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod)) + klog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod)) return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id } // Needs to create a new sandbox when network namespace changed. if sandboxStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() != networkNamespaceForPod(pod) { - glog.V(2).Infof("Sandbox for pod %q has changed. Need to start a new one", format.Pod(pod)) + klog.V(2).Infof("Sandbox for pod %q has changed. Need to start a new one", format.Pod(pod)) return true, sandboxStatus.Metadata.Attempt + 1, "" } // Needs to create a new sandbox when the sandbox does not have an IP address. if !kubecontainer.IsHostNetworkPod(pod) && sandboxStatus.Network.Ip == "" { - glog.V(2).Infof("Sandbox for pod %q has no IP address. Need to start a new one", format.Pod(pod)) + klog.V(2).Infof("Sandbox for pod %q has no IP address. Need to start a new one", format.Pod(pod)) return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id } @@ -450,7 +450,7 @@ func containerSucceeded(c *v1.Container, podStatus *kubecontainer.PodStatus) boo // computePodActions checks whether the pod spec has changed and returns the changes if true. func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *kubecontainer.PodStatus) podActions { - glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod) + klog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod) createPodSandbox, attempt, sandboxID := m.podSandboxChanged(pod, podStatus) changes := podActions{ @@ -516,7 +516,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku // to it. 
if containerStatus != nil && containerStatus.State != kubecontainer.ContainerStateRunning { if err := m.internalLifecycle.PostStopContainer(containerStatus.ID.ID); err != nil { - glog.Errorf("internal container post-stop lifecycle hook failed for container %v in pod %v with error %v", + klog.Errorf("internal container post-stop lifecycle hook failed for container %v in pod %v with error %v", container.Name, pod.Name, err) } } @@ -526,7 +526,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning { if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) { message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container) - glog.V(3).Infof(message) + klog.V(3).Infof(message) changes.ContainersToStart = append(changes.ContainersToStart, idx) } continue @@ -562,7 +562,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku container: &pod.Spec.Containers[idx], message: message, } - glog.V(2).Infof("Container %q (%q) of pod %s: %s", container.Name, containerStatus.ID, format.Pod(pod), message) + klog.V(2).Infof("Container %q (%q) of pod %s: %s", container.Name, containerStatus.ID, format.Pod(pod), message) } if keepCount == 0 && len(changes.ContainersToStart) == 0 { @@ -583,31 +583,31 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { // Step 1: Compute sandbox and container changes. podContainerChanges := m.computePodActions(pod, podStatus) - glog.V(3).Infof("computePodActions got %+v for pod %q", podContainerChanges, format.Pod(pod)) + klog.V(3).Infof("computePodActions got %+v for pod %q", podContainerChanges, format.Pod(pod)) if podContainerChanges.CreateSandbox { ref, err := ref.GetReference(legacyscheme.Scheme, pod) if err != nil { - glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err) + klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err) } if podContainerChanges.SandboxID != "" { m.recorder.Eventf(ref, v1.EventTypeNormal, events.SandboxChanged, "Pod sandbox changed, it will be killed and re-created.") } else { - glog.V(4).Infof("SyncPod received new pod %q, will create a sandbox for it", format.Pod(pod)) + klog.V(4).Infof("SyncPod received new pod %q, will create a sandbox for it", format.Pod(pod)) } } // Step 2: Kill the pod if the sandbox has changed. 
if podContainerChanges.KillPod { if !podContainerChanges.CreateSandbox { - glog.V(4).Infof("Stopping PodSandbox for %q because all other containers are dead.", format.Pod(pod)) + klog.V(4).Infof("Stopping PodSandbox for %q because all other containers are dead.", format.Pod(pod)) } else { - glog.V(4).Infof("Stopping PodSandbox for %q, will start new one", format.Pod(pod)) + klog.V(4).Infof("Stopping PodSandbox for %q, will start new one", format.Pod(pod)) } killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil) result.AddPodSyncResult(killResult) if killResult.Error() != nil { - glog.Errorf("killPodWithSyncResult failed: %v", killResult.Error()) + klog.Errorf("killPodWithSyncResult failed: %v", killResult.Error()) return } @@ -617,12 +617,12 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat } else { // Step 3: kill any running containers in this pod which are not to keep. for containerID, containerInfo := range podContainerChanges.ContainersToKill { - glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerInfo.name, containerID, format.Pod(pod)) + klog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerInfo.name, containerID, format.Pod(pod)) killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name) result.AddSyncResult(killContainerResult) if err := m.killContainer(pod, containerID, containerInfo.name, containerInfo.message, nil); err != nil { killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error()) - glog.Errorf("killContainer %q(id=%q) for pod %q failed: %v", containerInfo.name, containerID, format.Pod(pod), err) + klog.Errorf("killContainer %q(id=%q) for pod %q failed: %v", containerInfo.name, containerID, format.Pod(pod), err) return } } @@ -653,30 +653,30 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat var msg string var err error - glog.V(4).Infof("Creating sandbox for pod %q", format.Pod(pod)) + klog.V(4).Infof("Creating sandbox for pod %q", format.Pod(pod)) createSandboxResult := kubecontainer.NewSyncResult(kubecontainer.CreatePodSandbox, format.Pod(pod)) result.AddSyncResult(createSandboxResult) podSandboxID, msg, err = m.createPodSandbox(pod, podContainerChanges.Attempt) if err != nil { createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg) - glog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err) + klog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err) ref, referr := ref.GetReference(legacyscheme.Scheme, pod) if referr != nil { - glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr) + klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr) } m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedCreatePodSandBox, "Failed create pod sandbox: %v", err) return } - glog.V(4).Infof("Created PodSandbox %q for pod %q", podSandboxID, format.Pod(pod)) + klog.V(4).Infof("Created PodSandbox %q for pod %q", podSandboxID, format.Pod(pod)) podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID) if err != nil { ref, referr := ref.GetReference(legacyscheme.Scheme, pod) if referr != nil { - glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr) + klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr) } m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedStatusPodSandBox, "Unable to get pod sandbox status: %v", 
err) - glog.Errorf("Failed to get pod sandbox status: %v; Skipping pod %q", err, format.Pod(pod)) + klog.Errorf("Failed to get pod sandbox status: %v; Skipping pod %q", err, format.Pod(pod)) result.Fail(err) return } @@ -686,7 +686,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat if !kubecontainer.IsHostNetworkPod(pod) { // Overwrite the podIP passed in the pod status, since we just started the pod sandbox. podIP = m.determinePodSandboxIP(pod.Namespace, pod.Name, podSandboxStatus) - glog.V(4).Infof("Determined the ip %q for pod %q after sandbox changed", podIP, format.Pod(pod)) + klog.V(4).Infof("Determined the ip %q for pod %q after sandbox changed", podIP, format.Pod(pod)) } } @@ -696,7 +696,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat podSandboxConfig, err := m.generatePodSandboxConfig(pod, podContainerChanges.Attempt) if err != nil { message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err) - glog.Error(message) + klog.Error(message) configPodSandboxResult.Fail(kubecontainer.ErrConfigPodSandbox, message) return } @@ -709,11 +709,11 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff) if isInBackOff { startContainerResult.Fail(err, msg) - glog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod)) + klog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod)) return } - glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod)) + klog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod)) if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, kubecontainer.ContainerTypeInit); err != nil { startContainerResult.Fail(err, msg) utilruntime.HandleError(fmt.Errorf("init container start failed: %v: %s", err, msg)) @@ -721,7 +721,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat } // Successfully started the container; clear the entry in the failure - glog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod)) + klog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod)) } // Step 6: start containers in podContainerChanges.ContainersToStart. 
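The doBackOff gate exercised in these SyncPod hunks decides, per container, whether a restart attempt is allowed yet, and logs a refusal at V(4) rather than treating it as a hard error. A hedged sketch of such a gate, assuming client-go's flowcontrol.Backoff API and a made-up backoff key:

    package main

    import (
        "time"

        "k8s.io/client-go/util/flowcontrol"
        "k8s.io/klog"
    )

    func main() {
        backOff := flowcontrol.NewBackOff(10*time.Second, 5*time.Minute)
        key := "pod-uid_container-name" // hypothetical key, unique per container
        finishedAt := time.Now()        // when the last attempt exited

        if backOff.IsInBackOffSince(key, finishedAt) {
            klog.V(4).Infof("Backing Off restarting container %q", key)
            return
        }
        backOff.Next(key, finishedAt) // record this attempt for the next check
        klog.V(4).Infof("Creating container %q", key)
        klog.Flush()
    }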
@@ -733,18 +733,18 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStat
 		isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff)
 		if isInBackOff {
 			startContainerResult.Fail(err, msg)
-			glog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod))
+			klog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod))
 			continue
 		}
 
-		glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
+		klog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
 		if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP, kubecontainer.ContainerTypeRegular); err != nil {
 			startContainerResult.Fail(err, msg)
 			// known errors that are logged in other places are logged at higher levels here to avoid
 			// repetitive log spam
 			switch {
 			case err == images.ErrImagePullBackOff:
-				glog.V(3).Infof("container start failed: %v: %s", err, msg)
+				klog.V(3).Infof("container start failed: %v: %s", err, msg)
 			default:
 				utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
 			}
@@ -770,7 +770,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
 		return false, "", nil
 	}
 
-	glog.V(3).Infof("checking backoff for container %q in pod %q", container.Name, format.Pod(pod))
+	klog.V(3).Infof("checking backoff for container %q in pod %q", container.Name, format.Pod(pod))
 	// Use the finished time of the latest exited container as the start point to calculate whether to do back-off.
 	ts := cStatus.FinishedAt
 	// backOff requires a unique key to identify the container.
@@ -780,7 +780,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain
 			m.recorder.Eventf(ref, v1.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed container")
 		}
 		err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod))
-		glog.V(3).Infof("%s", err.Error())
+		klog.V(3).Infof("%s", err.Error())
 		return true, err.Error(), kubecontainer.ErrCrashLoopBackOff
 	}
 
@@ -812,7 +812,7 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPo
 	for _, podSandbox := range runningPod.Sandboxes {
 		if err := m.runtimeService.StopPodSandbox(podSandbox.ID.ID); err != nil {
 			killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error())
-			glog.Errorf("Failed to stop sandbox %q", podSandbox.ID)
+			klog.Errorf("Failed to stop sandbox %q", podSandbox.ID)
 		}
 	}
 
@@ -847,14 +847,14 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
 			UID: uid,
 		},
 	})
-	glog.V(4).Infof("getSandboxIDByPodUID got sandbox IDs %q for pod %q", podSandboxIDs, podFullName)
+	klog.V(4).Infof("getSandboxIDByPodUID got sandbox IDs %q for pod %q", podSandboxIDs, podFullName)
 
 	sandboxStatuses := make([]*runtimeapi.PodSandboxStatus, len(podSandboxIDs))
 	podIP := ""
 	for idx, podSandboxID := range podSandboxIDs {
 		podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
 		if err != nil {
-			glog.Errorf("PodSandboxStatus of sandbox %q for pod %q error: %v", podSandboxID, podFullName, err)
+			klog.Errorf("PodSandboxStatus of sandbox %q for pod %q error: %v", podSandboxID, podFullName, err)
 			return nil, err
 		}
 		sandboxStatuses[idx] = podSandboxStatus
@@ -868,7 +868,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
 	// Get statuses of all containers visible in the pod.
 	containerStatuses, err := m.getPodContainerStatuses(uid, name, namespace)
 	if err != nil {
-		glog.Errorf("getPodContainerStatuses for pod %q failed: %v", podFullName, err)
+		klog.Errorf("getPodContainerStatuses for pod %q failed: %v", podFullName, err)
 		return nil, err
 	}
 
@@ -899,7 +899,7 @@ func (m *kubeGenericRuntimeManager) GarbageCollect(gcPolicy kubecontainer.Contai
 func (m *kubeGenericRuntimeManager) GetPodContainerID(pod *kubecontainer.Pod) (kubecontainer.ContainerID, error) {
 	formattedPod := kubecontainer.FormatPod(pod)
 	if len(pod.Sandboxes) == 0 {
-		glog.Errorf("No sandboxes are found for pod %q", formattedPod)
+		klog.Errorf("No sandboxes are found for pod %q", formattedPod)
 		return kubecontainer.ContainerID{}, fmt.Errorf("sandboxes for pod %q not found", formattedPod)
 	}
 
@@ -912,7 +912,7 @@ func (m *kubeGenericRuntimeManager) GetPodContainerID(pod *kubecontainer.Pod) (k
 func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error {
 	// TODO(#35531): do we really want to write a method on this manager for each
 	// field of the config?
-	glog.Infof("updating runtime config through cri with podcidr %v", podCIDR)
+	klog.Infof("updating runtime config through cri with podcidr %v", podCIDR)
 	return m.runtimeService.UpdateRuntimeConfig(
 		&runtimeapi.RuntimeConfig{
 			NetworkConfig: &runtimeapi.NetworkConfig{
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go
index c9c8fd2897033..dd51bda32ca91 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go
@@ -22,10 +22,10 @@ import (
 	"net/url"
 	"sort"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	kubetypes "k8s.io/apimachinery/pkg/types"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/features"
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -38,7 +38,7 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
 	podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
 	if err != nil {
 		message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err)
-		glog.Error(message)
+		klog.Error(message)
 		return "", message, err
 	}
 
@@ -46,7 +46,7 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
 	err = m.osInterface.MkdirAll(podSandboxConfig.LogDirectory, 0755)
 	if err != nil {
 		message := fmt.Sprintf("Create pod log directory for pod %q failed: %v", format.Pod(pod), err)
-		glog.Errorf(message)
+		klog.Errorf(message)
 		return "", message, err
 	}
 
@@ -62,7 +62,7 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32
 	podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig, runtimeHandler)
 	if err != nil {
 		message := fmt.Sprintf("CreatePodSandbox for pod %q failed: %v", format.Pod(pod), err)
-		glog.Error(message)
+		klog.Error(message)
 		return "", message, err
 	}
 
@@ -204,7 +204,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi
 	resp, err := m.runtimeService.ListPodSandbox(filter)
 	if err != nil {
-		glog.Errorf("ListPodSandbox failed: %v", err)
+		klog.Errorf("ListPodSandbox failed: %v", err)
 		return nil, err
 	}
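The next hunk touches determinePodSandboxIP, which accepts a runtime-reported IP only if net.ParseIP can parse it. A standalone sketch of that validation pattern (the helper name and messages here are illustrative, not code from this patch):

package main

import (
	"net"

	"k8s.io/klog"
)

// sandboxIP mirrors determinePodSandboxIP's checks: an empty address is
// legitimate (e.g. host networking), an unparseable one is logged and dropped.
func sandboxIP(reported string) string {
	if reported == "" {
		return ""
	}
	if net.ParseIP(reported) == nil {
		klog.Warningf("runtime reported an unparseable IP %q", reported)
		return ""
	}
	return reported
}

func main() {
	println(sandboxIP("10.0.0.7"))  // "10.0.0.7"
	println(sandboxIP("not-an-ip")) // "" (with a warning logged)
}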
@@ -214,14 +214,14 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi
 // determinePodSandboxIP determines the IP address of the given pod sandbox.
 func (m *kubeGenericRuntimeManager) determinePodSandboxIP(podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) string {
 	if podSandbox.Network == nil {
-		glog.Warningf("Pod Sandbox status doesn't have network information, cannot report IP")
+		klog.Warningf("Pod Sandbox status doesn't have network information, cannot report IP")
 		return ""
 	}
 	ip := podSandbox.Network.Ip
 	if len(ip) != 0 && net.ParseIP(ip) == nil {
 		// ip could be an empty string if runtime is not responsible for the
 		// IP (e.g., host networking).
-		glog.Warningf("Pod Sandbox reported an unparseable IP %v", ip)
+		klog.Warningf("Pod Sandbox reported an unparseable IP %v", ip)
 		return ""
 	}
 	return ip
@@ -240,7 +240,7 @@ func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, s
 	}
 	sandboxes, err := m.runtimeService.ListPodSandbox(filter)
 	if err != nil {
-		glog.Errorf("ListPodSandbox with pod UID %q failed: %v", podUID, err)
+		klog.Errorf("ListPodSandbox with pod UID %q failed: %v", podUID, err)
 		return nil, err
 	}
 
diff --git a/pkg/kubelet/kuberuntime/labels.go b/pkg/kubelet/kuberuntime/labels.go
index 9adc0205a26dc..5f36e0b5d984f 100644
--- a/pkg/kubelet/kuberuntime/labels.go
+++ b/pkg/kubelet/kuberuntime/labels.go
@@ -20,10 +20,10 @@ import (
 	"encoding/json"
 	"strconv"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	kubetypes "k8s.io/apimachinery/pkg/types"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/features"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/types"
@@ -135,7 +135,7 @@ func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount
 	// Using json encoding so that the PreStop handler object is readable after writing as a label
 	rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
 	if err != nil {
-		glog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err)
+		klog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err)
 	} else {
 		annotations[containerPreStopHandlerLabel] = string(rawPreStop)
 	}
@@ -144,7 +144,7 @@ func newContainerAnnotations(container *v1.Container, pod *v1.Pod, restartCount
 	if len(container.Ports) > 0 {
 		rawContainerPorts, err := json.Marshal(container.Ports)
 		if err != nil {
-			glog.Errorf("Unable to marshal container ports for container %q for pod %q: %v", container.Name, format.Pod(pod), err)
+			klog.Errorf("Unable to marshal container ports for container %q for pod %q: %v", container.Name, format.Pod(pod), err)
 		} else {
 			annotations[containerPortsLabel] = string(rawContainerPorts)
 		}
@@ -203,28 +203,28 @@ func getContainerInfoFromAnnotations(annotations map[string]string) *annotatedCo
 	}
 
 	if containerInfo.Hash, err = getUint64ValueFromLabel(annotations, containerHashLabel); err != nil {
-		glog.Errorf("Unable to get %q from annotations %q: %v", containerHashLabel, annotations, err)
+		klog.Errorf("Unable to get %q from annotations %q: %v", containerHashLabel, annotations, err)
 	}
 	if containerInfo.RestartCount, err = getIntValueFromLabel(annotations, containerRestartCountLabel); err != nil {
-		glog.Errorf("Unable to get %q from annotations %q: %v", containerRestartCountLabel, annotations, err)
+		klog.Errorf("Unable to get %q from annotations %q: %v", containerRestartCountLabel, annotations, err)
 	}
 	if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(annotations, podDeletionGracePeriodLabel); err != nil {
-		glog.Errorf("Unable to get %q from annotations %q: %v", podDeletionGracePeriodLabel, annotations, err)
+		klog.Errorf("Unable to get %q from annotations %q: %v", podDeletionGracePeriodLabel, annotations, err)
 	}
 	if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(annotations, podTerminationGracePeriodLabel); err != nil {
-		glog.Errorf("Unable to get %q from annotations %q: %v", podTerminationGracePeriodLabel, annotations, err)
+		klog.Errorf("Unable to get %q from annotations %q: %v", podTerminationGracePeriodLabel, annotations, err)
 	}
 
 	preStopHandler := &v1.Handler{}
 	if found, err := getJSONObjectFromLabel(annotations, containerPreStopHandlerLabel, preStopHandler); err != nil {
-		glog.Errorf("Unable to get %q from annotations %q: %v", containerPreStopHandlerLabel, annotations, err)
+		klog.Errorf("Unable to get %q from annotations %q: %v", containerPreStopHandlerLabel, annotations, err)
 	} else if found {
 		containerInfo.PreStopHandler = preStopHandler
 	}
 
 	containerPorts := []v1.ContainerPort{}
 	if found, err := getJSONObjectFromLabel(annotations, containerPortsLabel, &containerPorts); err != nil {
-		glog.Errorf("Unable to get %q from annotations %q: %v", containerPortsLabel, annotations, err)
+		klog.Errorf("Unable to get %q from annotations %q: %v", containerPortsLabel, annotations, err)
 	} else if found {
 		containerInfo.ContainerPorts = containerPorts
 	}
@@ -237,7 +237,7 @@ func getStringValueFromLabel(labels map[string]string, label string) string {
 		return value
 	}
 	// Do not report error, because there should be many old containers without label now.
-	glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
+	klog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
 	// Return empty string "" for these containers, the caller will get value by other ways.
 	return ""
 }
@@ -252,7 +252,7 @@ func getIntValueFromLabel(labels map[string]string, label string) (int, error) {
 		return intValue, nil
 	}
 	// Do not report error, because there should be many old containers without label now.
-	glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
+	klog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
 	// Just set the value to 0
 	return 0, nil
 }
- glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label) + klog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label) // Just set the value to 0 return 0, nil } diff --git a/pkg/kubelet/kuberuntime/logs/BUILD b/pkg/kubelet/kuberuntime/logs/BUILD index ba0dc3f49e9a7..61eeead77f3bc 100644 --- a/pkg/kubelet/kuberuntime/logs/BUILD +++ b/pkg/kubelet/kuberuntime/logs/BUILD @@ -12,7 +12,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog:go_default_library", "//vendor/github.com/fsnotify/fsnotify:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/kuberuntime/logs/logs.go b/pkg/kubelet/kuberuntime/logs/logs.go index c81194d1e638a..e679cee84b6ca 100644 --- a/pkg/kubelet/kuberuntime/logs/logs.go +++ b/pkg/kubelet/kuberuntime/logs/logs.go @@ -30,7 +30,7 @@ import ( "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" "github.com/fsnotify/fsnotify" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" @@ -293,7 +293,7 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r msg := &logMessage{} for { if stop { - glog.V(2).Infof("Finish parsing log file %q", path) + klog.V(2).Infof("Finish parsing log file %q", path) return nil } l, err := r.ReadBytes(eol[0]) @@ -328,7 +328,7 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r if len(l) == 0 { continue } - glog.Warningf("Incomplete line in log file %q: %q", path, l) + klog.Warningf("Incomplete line in log file %q: %q", path, l) } if parse == nil { // Initialize the log parsing function. @@ -340,16 +340,16 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r // Parse the log line. msg.reset() if err := parse(l, msg); err != nil { - glog.Errorf("Failed with err %v when parsing log for log file %q: %q", err, path, l) + klog.Errorf("Failed with err %v when parsing log for log file %q: %q", err, path, l) continue } // Write the log line into the stream. if err := writer.write(msg); err != nil { if err == errMaximumWrite { - glog.V(2).Infof("Finish parsing log file %q, hit bytes limit %d(bytes)", path, opts.bytes) + klog.V(2).Infof("Finish parsing log file %q, hit bytes limit %d(bytes)", path, opts.bytes) return nil } - glog.Errorf("Failed with err %v when writing log for log file %q: %+v", err, path, msg) + klog.Errorf("Failed with err %v when writing log for log file %q: %+v", err, path, msg) return err } } @@ -362,7 +362,7 @@ func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) { } // Only keep following container log when it is running. if s.State != runtimeapi.ContainerState_CONTAINER_RUNNING { - glog.V(5).Infof("Container %q is not running (state=%q)", id, s.State) + klog.V(5).Infof("Container %q is not running (state=%q)", id, s.State) // Do not return error because it's normal that the container stops // during waiting. 
return false, nil @@ -387,10 +387,10 @@ func waitLogs(ctx context.Context, id string, w *fsnotify.Watcher, runtimeServic case fsnotify.Write: return true, nil default: - glog.Errorf("Unexpected fsnotify event: %v, retrying...", e) + klog.Errorf("Unexpected fsnotify event: %v, retrying...", e) } case err := <-w.Errors: - glog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry) + klog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, errRetry) if errRetry == 0 { return false, err } diff --git a/pkg/kubelet/lifecycle/BUILD b/pkg/kubelet/lifecycle/BUILD index e46f993837b54..ff518ddfdf1f4 100644 --- a/pkg/kubelet/lifecycle/BUILD +++ b/pkg/kubelet/lifecycle/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/lifecycle/handlers.go b/pkg/kubelet/lifecycle/handlers.go index c0630ebb2aa1e..8b5a8b8d4067a 100644 --- a/pkg/kubelet/lifecycle/handlers.go +++ b/pkg/kubelet/lifecycle/handlers.go @@ -23,10 +23,10 @@ import ( "net/http" "strconv" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -59,20 +59,20 @@ func (hr *HandlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod, output, err := hr.commandRunner.RunInContainer(containerID, handler.Exec.Command, 0) if err != nil { msg = fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output)) - glog.V(1).Infof(msg) + klog.V(1).Infof(msg) } return msg, err case handler.HTTPGet != nil: msg, err := hr.runHTTPHandler(pod, container, handler) if err != nil { msg = fmt.Sprintf("Http lifecycle hook (%s) for Container %q in Pod %q failed - error: %v, message: %q", handler.HTTPGet.Path, container.Name, format.Pod(pod), err, msg) - glog.V(1).Infof(msg) + klog.V(1).Infof(msg) } return msg, err default: err := fmt.Errorf("Invalid handler: %v", handler) msg := fmt.Sprintf("Cannot run handler: %v", err) - glog.Errorf(msg) + klog.Errorf(msg) return msg, err } } @@ -105,7 +105,7 @@ func (hr *HandlerRunner) runHTTPHandler(pod *v1.Pod, container *v1.Container, ha if len(host) == 0 { status, err := hr.containerManager.GetPodStatus(pod.UID, pod.Name, pod.Namespace) if err != nil { - glog.Errorf("Unable to get pod info, event handlers may be invalid.") + klog.Errorf("Unable to get pod info, event handlers may be invalid.") return "", err } if status.IP == "" { diff --git a/pkg/kubelet/lifecycle/predicate.go b/pkg/kubelet/lifecycle/predicate.go index ba6d25b584c97..df4a32d1addd9 100644 --- a/pkg/kubelet/lifecycle/predicate.go +++ b/pkg/kubelet/lifecycle/predicate.go @@ -19,7 +19,7 @@ package lifecycle import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" @@ -58,7 +58,7 @@ func NewPredicateAdmitHandler(getNodeAnyWayFunc getNodeAnyWayFuncType, admission func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult { node, err := w.getNodeAnyWayFunc() if err != nil { - 
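waitLogs, patched above, follows a container log file by blocking on an fsnotify watcher and tolerating a bounded number of watcher errors. A self-contained sketch of the same wait-for-write loop (the file path and retry budget are illustrative):

package main

import (
	"github.com/fsnotify/fsnotify"
	"k8s.io/klog"
)

// waitForWrite blocks until the watched file receives a Write event,
// tolerating up to maxRetries watcher errors before giving up.
func waitForWrite(w *fsnotify.Watcher, maxRetries int) error {
	retries := maxRetries
	for {
		select {
		case e := <-w.Events:
			if e.Op&fsnotify.Write == fsnotify.Write {
				return nil
			}
			klog.Errorf("Unexpected fsnotify event: %v, retrying...", e)
		case err := <-w.Errors:
			retries--
			klog.Errorf("Fsnotify watch error: %v, %d error retries remaining", err, retries)
			if retries <= 0 {
				return err
			}
		}
	}
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		klog.Fatalf("NewWatcher: %v", err)
	}
	defer w.Close()
	if err := w.Add("/tmp/example.log"); err != nil {
		klog.Fatalf("Add: %v", err)
	}
	if err := waitForWrite(w, 5); err != nil {
		klog.Errorf("stopped following: %v", err)
	}
}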
glog.Errorf("Cannot get Node info: %v", err) + klog.Errorf("Cannot get Node info: %v", err) return PodAdmitResult{ Admit: false, Reason: "InvalidNodeInfo", @@ -72,7 +72,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult // ensure the node has enough plugin resources for that required in pods if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil { message := fmt.Sprintf("Update plugin resources failed due to %v, which is unexpected.", err) - glog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) + klog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) return PodAdmitResult{ Admit: false, Reason: "UnexpectedAdmissionError", @@ -93,7 +93,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult fit, reasons, err := predicates.GeneralPredicates(podWithoutMissingExtendedResources, nil, nodeInfo) if err != nil { message := fmt.Sprintf("GeneralPredicates failed due to %v, which is unexpected.", err) - glog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) + klog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) return PodAdmitResult{ Admit: fit, Reason: "UnexpectedAdmissionError", @@ -104,7 +104,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult fit, reasons, err = w.admissionFailureHandler.HandleAdmissionFailure(admitPod, reasons) if err != nil { message := fmt.Sprintf("Unexpected error while attempting to recover from admission failure: %v", err) - glog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) + klog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) return PodAdmitResult{ Admit: fit, Reason: "UnexpectedAdmissionError", @@ -117,7 +117,7 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult var message string if len(reasons) == 0 { message = fmt.Sprint("GeneralPredicates failed due to unknown reason, which is unexpected.") - glog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) + klog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) return PodAdmitResult{ Admit: fit, Reason: "UnknownReason", @@ -130,19 +130,19 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult case *predicates.PredicateFailureError: reason = re.PredicateName message = re.Error() - glog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(admitPod), message) + klog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(admitPod), message) case *predicates.InsufficientResourceError: reason = fmt.Sprintf("OutOf%s", re.ResourceName) message = re.Error() - glog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(admitPod), message) + klog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(admitPod), message) case *predicates.FailureReason: reason = re.GetReason() message = fmt.Sprintf("Failure: %s", re.GetReason()) - glog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(admitPod), message) + klog.V(2).Infof("Predicate failed on Pod: %v, for reason: %v", format.Pod(admitPod), message) default: reason = "UnexpectedPredicateFailureType" message = fmt.Sprintf("GeneralPredicates failed due to %v, which is unexpected.", r) - glog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) + klog.Warningf("Failed to admit pod %v - %s", format.Pod(admitPod), message) } return PodAdmitResult{ Admit: fit, diff 
diff --git a/pkg/kubelet/logs/BUILD b/pkg/kubelet/logs/BUILD
index 41e4281dbaa23..5881068143495 100644
--- a/pkg/kubelet/logs/BUILD
+++ b/pkg/kubelet/logs/BUILD
@@ -14,7 +14,7 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-       "//vendor/github.com/golang/glog:go_default_library",
+       "//vendor/k8s.io/klog:go_default_library",
    ],
 )
 
diff --git a/pkg/kubelet/logs/container_log_manager.go b/pkg/kubelet/logs/container_log_manager.go
index cae78993d1b05..50cfcf495347f 100644
--- a/pkg/kubelet/logs/container_log_manager.go
+++ b/pkg/kubelet/logs/container_log_manager.go
@@ -26,7 +26,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/util/clock"
@@ -171,7 +171,7 @@ func (c *containerLogManager) Start() {
 	// Start a goroutine periodically does container log rotation.
 	go wait.Forever(func() {
 		if err := c.rotateLogs(); err != nil {
-			glog.Errorf("Failed to rotate container logs: %v", err)
+			klog.Errorf("Failed to rotate container logs: %v", err)
 		}
 	}, logMonitorPeriod)
 }
@@ -193,27 +193,27 @@ func (c *containerLogManager) rotateLogs() error {
 		// Note that we should not block log rotate for an error of a single container.
 		status, err := c.runtimeService.ContainerStatus(id)
 		if err != nil {
-			glog.Errorf("Failed to get container status for %q: %v", id, err)
+			klog.Errorf("Failed to get container status for %q: %v", id, err)
 			continue
 		}
 		path := status.GetLogPath()
 		info, err := os.Stat(path)
 		if err != nil {
 			if !os.IsNotExist(err) {
-				glog.Errorf("Failed to stat container log %q: %v", path, err)
+				klog.Errorf("Failed to stat container log %q: %v", path, err)
 				continue
 			}
 			// In rotateLatestLog, there are several cases that we may
 			// lose original container log after ReopenContainerLog fails.
 			// We try to recover it by reopening container log.
 			if err := c.runtimeService.ReopenContainerLog(id); err != nil {
-				glog.Errorf("Container %q log %q doesn't exist, reopen container log failed: %v", id, path, err)
+				klog.Errorf("Container %q log %q doesn't exist, reopen container log failed: %v", id, path, err)
 				continue
 			}
 			// The container log should be recovered.
 			info, err = os.Stat(path)
 			if err != nil {
-				glog.Errorf("Failed to stat container log %q after reopen: %v", path, err)
+				klog.Errorf("Failed to stat container log %q after reopen: %v", path, err)
 				continue
 			}
 		}
@@ -222,7 +222,7 @@ func (c *containerLogManager) rotateLogs() error {
 		}
 		// Perform log rotation.
 		if err := c.rotateLog(id, path); err != nil {
-			glog.Errorf("Failed to rotate log %q for container %q: %v", path, id, err)
+			klog.Errorf("Failed to rotate log %q for container %q: %v", path, id, err)
 			continue
 		}
 	}
@@ -379,7 +379,7 @@ func (c *containerLogManager) rotateLatestLog(id, log string) error {
 			// This shouldn't happen.
 			// Report an error if this happens, because we will lose original
 			// log.
-			glog.Errorf("Failed to rename rotated log %q back to %q: %v, reopen container log error: %v", rotated, log, renameErr, err)
+			klog.Errorf("Failed to rename rotated log %q back to %q: %v, reopen container log error: %v", rotated, log, renameErr, err)
 		}
 		return fmt.Errorf("failed to reopen container log %q: %v", id, err)
 	}
 
diff --git a/pkg/kubelet/metrics/BUILD b/pkg/kubelet/metrics/BUILD
index 4587b6b790026..e104574eba833 100644
--- a/pkg/kubelet/metrics/BUILD
+++ b/pkg/kubelet/metrics/BUILD
@@ -14,8 +14,8 @@ go_library(
        "//pkg/kubelet/container:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
-       "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
+       "//vendor/k8s.io/klog:go_default_library",
    ],
 )
 
diff --git a/pkg/kubelet/metrics/collectors/BUILD b/pkg/kubelet/metrics/collectors/BUILD
index 4b41a1a32272c..db4e373ab9bf6 100644
--- a/pkg/kubelet/metrics/collectors/BUILD
+++ b/pkg/kubelet/metrics/collectors/BUILD
@@ -10,8 +10,8 @@ go_library(
        "//pkg/kubelet/metrics:go_default_library",
        "//pkg/kubelet/server/stats:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-       "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
+       "//vendor/k8s.io/klog:go_default_library",
    ],
 )
diff --git a/pkg/kubelet/metrics/collectors/volume_stats.go b/pkg/kubelet/metrics/collectors/volume_stats.go
index e6f1cf36da860..c281aaea57fa1 100644
--- a/pkg/kubelet/metrics/collectors/volume_stats.go
+++ b/pkg/kubelet/metrics/collectors/volume_stats.go
@@ -17,9 +17,9 @@ limitations under the License.
 package collectors
 
 import (
-	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/prometheus"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog"
 	stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 	serverstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
@@ -87,7 +87,7 @@ func (collector *volumeStatsCollector) Collect(ch chan<- prometheus.Metric) {
 		lv = append([]string{pvcRef.Namespace, pvcRef.Name}, lv...)
 		metric, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, v, lv...)
 		if err != nil {
-			glog.Warningf("Failed to generate metric: %v", err)
+			klog.Warningf("Failed to generate metric: %v", err)
 			return
 		}
 		ch <- metric
diff --git a/pkg/kubelet/metrics/metrics.go b/pkg/kubelet/metrics/metrics.go
index 8a2e027a28515..ec1a7166c3061 100644
--- a/pkg/kubelet/metrics/metrics.go
+++ b/pkg/kubelet/metrics/metrics.go
@@ -21,10 +21,10 @@ import (
 	"sync"
 	"time"
 
-	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/prometheus"
 	corev1 "k8s.io/api/core/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/features"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 )
@@ -270,7 +270,7 @@ func (pc *podAndContainerCollector) Describe(ch chan<- *prometheus.Desc) {
 func (pc *podAndContainerCollector) Collect(ch chan<- prometheus.Metric) {
 	runningPods, err := pc.containerCache.GetPods()
 	if err != nil {
-		glog.Warningf("Failed to get running container information while collecting metrics: %v", err)
+		klog.Warningf("Failed to get running container information while collecting metrics: %v", err)
 		return
 	}
 
diff --git a/pkg/kubelet/mountpod/BUILD b/pkg/kubelet/mountpod/BUILD
index f7d56e3b1f725..411da640f203b 100644
--- a/pkg/kubelet/mountpod/BUILD
+++ b/pkg/kubelet/mountpod/BUILD
@@ -25,7 +25,7 @@ go_test(
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/client-go/util/testing:go_default_library",
-       "//vendor/github.com/golang/glog:go_default_library",
+       "//vendor/k8s.io/klog:go_default_library",
    ],
 )
 
diff --git a/pkg/kubelet/mountpod/mount_pod_test.go b/pkg/kubelet/mountpod/mount_pod_test.go
index bf7a0ec56027e..e248b5e3df58f 100644
--- a/pkg/kubelet/mountpod/mount_pod_test.go
+++ b/pkg/kubelet/mountpod/mount_pod_test.go
@@ -22,7 +22,7 @@ import (
 	"path"
 	"testing"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -137,7 +137,7 @@ func TestGetVolumeExec(t *testing.T) {
 		}
 		pod, container, err := mgr.GetMountPod("kubernetes.io/glusterfs")
 		if err != nil {
-			glog.V(5).Infof("test %q returned error %s", test.name, err)
+			klog.V(5).Infof("test %q returned error %s", test.name, err)
 		}
 		if err == nil && test.expectError {
 			t.Errorf("test %q: expected error, got none", test.name)
diff --git a/pkg/kubelet/network/dns/BUILD b/pkg/kubelet/network/dns/BUILD
index ae7bba5cf9ec4..19be94381cc1f 100644
--- a/pkg/kubelet/network/dns/BUILD
+++ b/pkg/kubelet/network/dns/BUILD
@@ -15,7 +15,7 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
-       "//vendor/github.com/golang/glog:go_default_library",
+       "//vendor/k8s.io/klog:go_default_library",
    ],
 )
 
diff --git a/pkg/kubelet/network/dns/dns.go b/pkg/kubelet/network/dns/dns.go
index b77f349fc3586..aa3a8e01a5f49 100644
--- a/pkg/kubelet/network/dns/dns.go
+++ b/pkg/kubelet/network/dns/dns.go
@@ -35,7 +35,7 @@ import (
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 var (
"DNSConfigForming", log) - glog.Error(log) + klog.Error(log) } return composedSearch } @@ -129,7 +129,7 @@ func (c *Configurer) formDNSNameserversFitsLimits(nameservers []string, pod *v1. nameservers = nameservers[0:validation.MaxDNSNameservers] log := fmt.Sprintf("Nameserver limits were exceeded, some nameservers have been omitted, the applied nameserver line is: %s", strings.Join(nameservers, " ")) c.recorder.Event(pod, v1.EventTypeWarning, "DNSConfigForming", log) - glog.Error(log) + klog.Error(log) } return nameservers } @@ -157,7 +157,7 @@ func (c *Configurer) CheckLimitsForResolvConf() { f, err := os.Open(c.ResolverConfig) if err != nil { c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error()) - glog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error()) + klog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error()) return } defer f.Close() @@ -165,7 +165,7 @@ func (c *Configurer) CheckLimitsForResolvConf() { _, hostSearch, _, err := parseResolvConf(f) if err != nil { c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error()) - glog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error()) + klog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error()) return } @@ -178,14 +178,14 @@ func (c *Configurer) CheckLimitsForResolvConf() { if len(hostSearch) > domainCountLimit { log := fmt.Sprintf("Resolv.conf file '%s' contains search line consisting of more than %d domains!", c.ResolverConfig, domainCountLimit) c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log) - glog.V(4).Infof("CheckLimitsForResolvConf: " + log) + klog.V(4).Infof("CheckLimitsForResolvConf: " + log) return } if len(strings.Join(hostSearch, " ")) > validation.MaxDNSSearchListChars { log := fmt.Sprintf("Resolv.conf file '%s' contains search line which length is more than allowed %d chars!", c.ResolverConfig, validation.MaxDNSSearchListChars) c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log) - glog.V(4).Infof("CheckLimitsForResolvConf: " + log) + klog.V(4).Infof("CheckLimitsForResolvConf: " + log) return } @@ -336,7 +336,7 @@ func (c *Configurer) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) { dnsType, err := getPodDNSType(pod) if err != nil { - glog.Errorf("Failed to get DNS type for pod %q: %v. Falling back to DNSClusterFirst policy.", format.Pod(pod), err) + klog.Errorf("Failed to get DNS type for pod %q: %v. 
Falling back to DNSClusterFirst policy.", format.Pod(pod), err) dnsType = podDNSCluster } switch dnsType { @@ -400,11 +400,11 @@ func (c *Configurer) SetupDNSinContainerizedMounter(mounterPath string) { f, err := os.Open(c.ResolverConfig) defer f.Close() if err != nil { - glog.Error("Could not open resolverConf file") + klog.Error("Could not open resolverConf file") } else { _, hostSearch, _, err := parseResolvConf(f) if err != nil { - glog.Errorf("Error for parsing the reslov.conf file: %v", err) + klog.Errorf("Error for parsing the reslov.conf file: %v", err) } else { dnsString = dnsString + "search" for _, search := range hostSearch { @@ -415,6 +415,6 @@ func (c *Configurer) SetupDNSinContainerizedMounter(mounterPath string) { } } if err := ioutil.WriteFile(resolvePath, []byte(dnsString), 0600); err != nil { - glog.Errorf("Could not write dns nameserver in file %s, with error %v", resolvePath, err) + klog.Errorf("Could not write dns nameserver in file %s, with error %v", resolvePath, err) } } diff --git a/pkg/kubelet/nodelease/BUILD b/pkg/kubelet/nodelease/BUILD index 923c33e77f771..579a2ddbf6097 100644 --- a/pkg/kubelet/nodelease/BUILD +++ b/pkg/kubelet/nodelease/BUILD @@ -14,7 +14,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/coordination/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ], ) diff --git a/pkg/kubelet/nodelease/controller.go b/pkg/kubelet/nodelease/controller.go index fa0d833854d67..c614368517b34 100644 --- a/pkg/kubelet/nodelease/controller.go +++ b/pkg/kubelet/nodelease/controller.go @@ -29,7 +29,7 @@ import ( coordclientset "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" "k8s.io/utils/pointer" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -81,7 +81,7 @@ func NewController(clock clock.Clock, client clientset.Interface, holderIdentity // Run runs the controller func (c *controller) Run(stopCh <-chan struct{}) { if c.leaseClient == nil { - glog.Infof("node lease controller has nil lease client, will not claim or renew leases") + klog.Infof("node lease controller has nil lease client, will not claim or renew leases") return } wait.Until(c.sync, c.renewInterval, stopCh) @@ -112,7 +112,7 @@ func (c *controller) backoffEnsureLease() (*coordv1beta1.Lease, bool) { break } sleep = minDuration(2*sleep, maxBackoff) - glog.Errorf("failed to ensure node lease exists, will retry in %v, error: %v", sleep, err) + klog.Errorf("failed to ensure node lease exists, will retry in %v, error: %v", sleep, err) // backoff wait c.clock.Sleep(sleep) } @@ -146,12 +146,12 @@ func (c *controller) retryUpdateLease(base *coordv1beta1.Lease) { if err == nil { return } - glog.Errorf("failed to update node lease, error: %v", err) + klog.Errorf("failed to update node lease, error: %v", err) if i > 0 && c.onRepeatedHeartbeatFailure != nil { c.onRepeatedHeartbeatFailure() } } - glog.Errorf("failed %d attempts to update node lease, will retry after %v", maxUpdateRetries, c.renewInterval) + klog.Errorf("failed %d attempts to update node lease, will retry after %v", maxUpdateRetries, c.renewInterval) } // newLease constructs a new lease if base is nil, or returns a copy of base @@ -191,7 +191,7 @@ func (c *controller) newLease(base *coordv1beta1.Lease) *coordv1beta1.Lease { }, } } else { - 
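backoffEnsureLease, patched above, doubles its sleep after every failure up to a fixed cap before retrying. A minimal sketch of that capped exponential backoff (the intervals and operation are illustrative):

package main

import (
	"errors"
	"time"

	"k8s.io/klog"
)

// ensureWithBackoff retries op, doubling the sleep after each failure
// until it reaches maxBackoff, the same shape as backoffEnsureLease.
func ensureWithBackoff(op func() error, base, maxBackoff time.Duration) {
	sleep := base
	for {
		err := op()
		if err == nil {
			return
		}
		if 2*sleep < maxBackoff {
			sleep = 2 * sleep
		} else {
			sleep = maxBackoff
		}
		klog.Errorf("operation failed, will retry in %v: %v", sleep, err)
		time.Sleep(sleep)
	}
}

func main() {
	attempts := 0
	ensureWithBackoff(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}, 100*time.Millisecond, 2*time.Second)
	klog.Infof("succeeded after %d attempts", attempts)
}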
glog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", c.holderIdentity, err) + klog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", c.holderIdentity, err) } } diff --git a/pkg/kubelet/nodestatus/BUILD b/pkg/kubelet/nodestatus/BUILD index 9e38273fc5432..a626ce19866ae 100644 --- a/pkg/kubelet/nodestatus/BUILD +++ b/pkg/kubelet/nodestatus/BUILD @@ -21,8 +21,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/nodestatus/setters.go b/pkg/kubelet/nodestatus/setters.go index bf049c4824b05..d16bcec294990 100644 --- a/pkg/kubelet/nodestatus/setters.go +++ b/pkg/kubelet/nodestatus/setters.go @@ -42,7 +42,7 @@ import ( "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/volume" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -69,7 +69,7 @@ func NodeAddress(nodeIP net.IP, // typically Kubelet.nodeIP if err := validateNodeIPFunc(nodeIP); err != nil { return fmt.Errorf("failed to validate nodeIP: %v", err) } - glog.V(2).Infof("Using node IP: %q", nodeIP.String()) + klog.V(2).Infof("Using node IP: %q", nodeIP.String()) } if externalCloudProvider { @@ -137,11 +137,11 @@ func NodeAddress(nodeIP net.IP, // typically Kubelet.nodeIP if existingHostnameAddress == nil { // no existing Hostname address found, add it - glog.Warningf("adding overridden hostname of %v to cloudprovider-reported addresses", hostname) + klog.Warningf("adding overridden hostname of %v to cloudprovider-reported addresses", hostname) nodeAddresses = append(nodeAddresses, v1.NodeAddress{Type: v1.NodeHostName, Address: hostname}) } else { // override the Hostname address reported by the cloud provider - glog.Warningf("replacing cloudprovider-reported hostname of %v with overridden hostname of %v", existingHostnameAddress.Address, hostname) + klog.Warningf("replacing cloudprovider-reported hostname of %v with overridden hostname of %v", existingHostnameAddress.Address, hostname) existingHostnameAddress.Address = hostname } } @@ -239,7 +239,7 @@ func MachineInfo(nodeName string, node.Status.Capacity[v1.ResourceCPU] = *resource.NewMilliQuantity(0, resource.DecimalSI) node.Status.Capacity[v1.ResourceMemory] = resource.MustParse("0Gi") node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(int64(maxPods), resource.DecimalSI) - glog.Errorf("Error getting machine info: %v", err) + klog.Errorf("Error getting machine info: %v", err) } else { node.Status.NodeInfo.MachineID = info.MachineID node.Status.NodeInfo.SystemUUID = info.SystemUUID @@ -278,14 +278,14 @@ func MachineInfo(nodeName string, if devicePluginCapacity != nil { for k, v := range devicePluginCapacity { if old, ok := node.Status.Capacity[k]; !ok || old.Value() != v.Value() { - glog.V(2).Infof("Update capacity for %s to %d", k, v.Value()) + klog.V(2).Infof("Update capacity for %s to %d", k, v.Value()) } node.Status.Capacity[k] = v } } for _, removedResource := range removedDevicePlugins { - glog.V(2).Infof("Set capacity for %s to 0 on device removal", removedResource) + klog.V(2).Infof("Set capacity for %s to 0 on device removal", removedResource) // Set the capacity of the removed resource to 0 instead of // removing the resource from the node 
status. This is to indicate // that the resource is managed by device plugin and had been @@ -326,7 +326,7 @@ func MachineInfo(nodeName string, if devicePluginAllocatable != nil { for k, v := range devicePluginAllocatable { if old, ok := node.Status.Allocatable[k]; !ok || old.Value() != v.Value() { - glog.V(2).Infof("Update allocatable for %s to %d", k, v.Value()) + klog.V(2).Infof("Update allocatable for %s to %d", k, v.Value()) } node.Status.Allocatable[k] = v } @@ -357,7 +357,7 @@ func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), // verinfo, err := versionInfoFunc() if err != nil { // TODO(mtaufen): consider removing this log line, since returned error will be logged - glog.Errorf("Error getting version info: %v", err) + klog.Errorf("Error getting version info: %v", err) return fmt.Errorf("error getting version info: %v", err) } @@ -397,7 +397,7 @@ func Images(nodeStatusMaxImages int32, containerImages, err := imageListFunc() if err != nil { // TODO(mtaufen): consider removing this log line, since returned error will be logged - glog.Errorf("Error getting image list: %v", err) + klog.Errorf("Error getting image list: %v", err) node.Status.Images = imagesOnNode return fmt.Errorf("error getting image list: %v", err) } @@ -515,7 +515,7 @@ func ReadyCondition( recordEventFunc(v1.EventTypeNormal, events.NodeReady) } else { recordEventFunc(v1.EventTypeNormal, events.NodeNotReady) - glog.Infof("Node became not ready: %+v", newNodeReadyCondition) + klog.Infof("Node became not ready: %+v", newNodeReadyCondition) } } return nil @@ -733,7 +733,7 @@ func VolumeLimits(volumePluginListFunc func() []volume.VolumePluginWithAttachLim for _, volumePlugin := range pluginWithLimits { attachLimits, err := volumePlugin.GetVolumeLimits() if err != nil { - glog.V(4).Infof("Error getting volume limit for plugin %s", volumePlugin.GetPluginName()) + klog.V(4).Infof("Error getting volume limit for plugin %s", volumePlugin.GetPluginName()) continue } for limitKey, value := range attachLimits { diff --git a/pkg/kubelet/oom_watcher.go b/pkg/kubelet/oom_watcher.go index 448082b05f1d1..1ca014b4babf3 100644 --- a/pkg/kubelet/oom_watcher.go +++ b/pkg/kubelet/oom_watcher.go @@ -17,13 +17,13 @@ limitations under the License. 
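The MachineInfo hunks above emit a V(2) line only when a capacity value actually changes, keeping steady-state status syncs quiet. A tiny sketch of that log-on-change pattern over a plain map (the resource name is invented):

package main

import "k8s.io/klog"

// setCapacity records the new value and logs only on change,
// mirroring the device-plugin capacity loop above.
func setCapacity(capacity map[string]int64, name string, value int64) {
	if old, ok := capacity[name]; !ok || old != value {
		klog.V(2).Infof("Update capacity for %s to %d", name, value)
	}
	capacity[name] = value
}

func main() {
	capacity := map[string]int64{}
	setCapacity(capacity, "example.com/gpu", 2) // logs: value is new
	setCapacity(capacity, "example.com/gpu", 2) // silent: unchanged
	setCapacity(capacity, "example.com/gpu", 4) // logs: value changed
}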
diff --git a/pkg/kubelet/oom_watcher.go b/pkg/kubelet/oom_watcher.go
index 448082b05f1d1..1ca014b4babf3 100644
--- a/pkg/kubelet/oom_watcher.go
+++ b/pkg/kubelet/oom_watcher.go
@@ -17,13 +17,13 @@ limitations under the License.
 package kubelet
 
 import (
-	"github.com/golang/glog"
 	"github.com/google/cadvisor/events"
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/client-go/tools/record"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
 )
 
@@ -65,10 +65,10 @@ func (ow *realOOMWatcher) Start(ref *v1.ObjectReference) error {
 		defer runtime.HandleCrash()
 
 		for event := range eventChannel.GetChannel() {
-			glog.V(2).Infof("Got sys oom event from cadvisor: %v", event)
+			klog.V(2).Infof("Got sys oom event from cadvisor: %v", event)
 			ow.recorder.PastEventf(ref, metav1.Time{Time: event.Timestamp}, v1.EventTypeWarning, systemOOMEvent, "System OOM encountered")
 		}
-		glog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor")
+		klog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor")
 	}()
 	return nil
 }
diff --git a/pkg/kubelet/pleg/BUILD b/pkg/kubelet/pleg/BUILD
index f3d4073e340a4..2d54aee08868f 100644
--- a/pkg/kubelet/pleg/BUILD
+++ b/pkg/kubelet/pleg/BUILD
@@ -22,7 +22,7 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-       "//vendor/github.com/golang/glog:go_default_library",
+       "//vendor/k8s.io/klog:go_default_library",
    ],
 )
 
diff --git a/pkg/kubelet/pleg/generic.go b/pkg/kubelet/pleg/generic.go
index bbe9a91365ecd..4de9c721130f4 100644
--- a/pkg/kubelet/pleg/generic.go
+++ b/pkg/kubelet/pleg/generic.go
@@ -21,11 +21,11 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/golang/glog"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog"
 	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
@@ -150,7 +150,7 @@ func generateEvents(podID types.UID, cid string, oldState, newState plegContaine
 		return nil
 	}
 
-	glog.V(4).Infof("GenericPLEG: %v/%v: %v -> %v", podID, cid, oldState, newState)
+	klog.V(4).Infof("GenericPLEG: %v/%v: %v -> %v", podID, cid, oldState, newState)
 	switch newState {
 	case plegContainerRunning:
 		return []*PodLifecycleEvent{{ID: podID, Type: ContainerStarted, Data: cid}}
@@ -186,7 +186,7 @@ func (g *GenericPLEG) updateRelistTime(timestamp time.Time) {
 // relist queries the container runtime for list of pods/containers, compare
 // with the internal pods/containers, and generates events accordingly.
 func (g *GenericPLEG) relist() {
-	glog.V(5).Infof("GenericPLEG: Relisting")
+	klog.V(5).Infof("GenericPLEG: Relisting")
 
 	if lastRelistTime := g.getRelistTime(); !lastRelistTime.IsZero() {
 		metrics.PLEGRelistInterval.Observe(metrics.SinceInMicroseconds(lastRelistTime))
@@ -200,7 +200,7 @@ func (g *GenericPLEG) relist() {
 	// Get all the pods.
 	podList, err := g.runtime.GetPods(true)
 	if err != nil {
-		glog.Errorf("GenericPLEG: Unable to retrieve pods: %v", err)
+		klog.Errorf("GenericPLEG: Unable to retrieve pods: %v", err)
 		return
 	}
 
@@ -244,7 +244,7 @@ func (g *GenericPLEG) relist() {
 			// serially may take a while. We should be aware of this and
 			// parallelize if needed.
 			if err := g.updateCache(pod, pid); err != nil {
-				glog.Errorf("PLEG: Ignoring events for pod %s/%s: %v", pod.Name, pod.Namespace, err)
+				klog.Errorf("PLEG: Ignoring events for pod %s/%s: %v", pod.Name, pod.Namespace, err)
 
 				// make sure we try to reinspect the pod during the next relisting
 				needsReinspection[pid] = pod
@@ -271,10 +271,10 @@ func (g *GenericPLEG) relist() {
 	if g.cacheEnabled() {
 		// reinspect any pods that failed inspection during the previous relist
 		if len(g.podsToReinspect) > 0 {
-			glog.V(5).Infof("GenericPLEG: Reinspecting pods that previously failed inspection")
+			klog.V(5).Infof("GenericPLEG: Reinspecting pods that previously failed inspection")
 			for pid, pod := range g.podsToReinspect {
 				if err := g.updateCache(pod, pid); err != nil {
-					glog.Errorf("PLEG: pod %s/%s failed reinspection: %v", pod.Name, pod.Namespace, err)
+					klog.Errorf("PLEG: pod %s/%s failed reinspection: %v", pod.Name, pod.Namespace, err)
 					needsReinspection[pid] = pod
 				}
 			}
@@ -374,7 +374,7 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error {
 	if pod == nil {
 		// The pod is missing in the current relist. This means that
 		// the pod has no visible (active or inactive) containers.
-		glog.V(4).Infof("PLEG: Delete status for pod %q", string(pid))
+		klog.V(4).Infof("PLEG: Delete status for pod %q", string(pid))
 		g.cache.Delete(pid)
 		return nil
 	}
@@ -383,7 +383,7 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error {
 	// GetPodStatus(pod *kubecontainer.Pod) so that Docker can avoid listing
 	// all containers again.
 	status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace)
-	glog.V(4).Infof("PLEG: Write status for %s/%s: %#v (err: %v)", pod.Name, pod.Namespace, status, err)
+	klog.V(4).Infof("PLEG: Write status for %s/%s: %#v (err: %v)", pod.Name, pod.Namespace, status, err)
 	if err == nil {
 		// Preserve the pod IP across cache updates if the new IP is empty.
 		// When a pod is torn down, kubelet may race with PLEG and retrieve
diff --git a/pkg/kubelet/pod/BUILD b/pkg/kubelet/pod/BUILD
index 4096c025864ed..8d4811ab45995 100644
--- a/pkg/kubelet/pod/BUILD
+++ b/pkg/kubelet/pod/BUILD
@@ -25,7 +25,7 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-       "//vendor/github.com/golang/glog:go_default_library",
+       "//vendor/k8s.io/klog:go_default_library",
    ],
 )
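generateEvents in the pleg/generic.go hunks above turns a container state transition into lifecycle events, emitting nothing when the state is unchanged. A trimmed sketch with stand-in types (these are not the kubelet's types):

package main

import "k8s.io/klog"

type containerState string

const (
	stateRunning containerState = "running"
	stateExited  containerState = "exited"
)

type event struct {
	PodID string
	Type  string
	Data  string
}

// generateEvents mirrors the PLEG pattern: no event when the state is
// unchanged, otherwise one event describing the transition.
func generateEvents(podID, cid string, oldState, newState containerState) []*event {
	if newState == oldState {
		return nil
	}
	klog.V(4).Infof("PLEG sketch: %v/%v: %v -> %v", podID, cid, oldState, newState)
	switch newState {
	case stateRunning:
		return []*event{{PodID: podID, Type: "ContainerStarted", Data: cid}}
	case stateExited:
		return []*event{{PodID: podID, Type: "ContainerDied", Data: cid}}
	default:
		return nil
	}
}

func main() {
	for _, e := range generateEvents("pod-1", "c-1", stateRunning, stateExited) {
		klog.Infof("event: %+v", e)
	}
}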
diff --git a/pkg/kubelet/pod/mirror_client.go b/pkg/kubelet/pod/mirror_client.go
index 9b8add5bc5a0d..b4b6abe61ce83 100644
--- a/pkg/kubelet/pod/mirror_client.go
+++ b/pkg/kubelet/pod/mirror_client.go
@@ -17,11 +17,11 @@ limitations under the License.
 package pod
 
 import (
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/klog"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
@@ -79,13 +79,13 @@ func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error {
 	}
 	name, namespace, err := kubecontainer.ParsePodFullName(podFullName)
 	if err != nil {
-		glog.Errorf("Failed to parse a pod full name %q", podFullName)
+		klog.Errorf("Failed to parse a pod full name %q", podFullName)
 		return err
 	}
-	glog.V(2).Infof("Deleting a mirror pod %q", podFullName)
+	klog.V(2).Infof("Deleting a mirror pod %q", podFullName)
 	// TODO(random-liu): Delete the mirror pod with uid precondition in mirror pod manager
 	if err := mc.apiserverClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)); err != nil && !errors.IsNotFound(err) {
-		glog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err)
+		klog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err)
 	}
 	return nil
 }
diff --git a/pkg/kubelet/pod/pod_manager.go b/pkg/kubelet/pod/pod_manager.go
index 6033ae8d50a86..ce5c1c30c62d3 100644
--- a/pkg/kubelet/pod/pod_manager.go
+++ b/pkg/kubelet/pod/pod_manager.go
@@ -19,7 +19,7 @@ package pod
 import (
 	"sync"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -163,7 +163,7 @@ func (pm *basicManager) UpdatePod(pod *v1.Pod) {
 	pm.updatePodsInternal(pod)
 	if pm.checkpointManager != nil {
 		if err := checkpoint.WritePod(pm.checkpointManager, pod); err != nil {
-			glog.Errorf("Error writing checkpoint for pod: %v", pod.GetName())
+			klog.Errorf("Error writing checkpoint for pod: %v", pod.GetName())
 		}
 	}
 }
@@ -226,7 +226,7 @@ func (pm *basicManager) DeletePod(pod *v1.Pod) {
 	}
 	if pm.checkpointManager != nil {
 		if err := checkpoint.DeletePod(pm.checkpointManager, pod); err != nil {
-			glog.Errorf("Error deleting checkpoint for pod: %v", pod.GetName())
+			klog.Errorf("Error deleting checkpoint for pod: %v", pod.GetName())
 		}
 	}
 }
diff --git a/pkg/kubelet/pod_container_deletor.go b/pkg/kubelet/pod_container_deletor.go
index 48a85958db34b..0a00ac90698c5 100644
--- a/pkg/kubelet/pod_container_deletor.go
+++ b/pkg/kubelet/pod_container_deletor.go
@@ -19,8 +19,8 @@ package kubelet
 import (
 	"sort"
 
-	"github.com/golang/glog"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 )
 
@@ -72,7 +72,7 @@ func getContainersToDeleteInPod(filterContainerID string, podStatus *kubecontain
 	}(filterContainerID, podStatus)
 
 	if filterContainerID != "" && matchedContainer == nil {
-		glog.Warningf("Container %q not found in pod's containers", filterContainerID)
+		klog.Warningf("Container %q not found in pod's containers", filterContainerID)
 		return containerStatusbyCreatedList{}
 	}
 
@@ -106,7 +106,7 @@ func (p *podContainerDeletor) deleteContainersInPod(filterContainerID string, po
 		select {
 		case p.worker <- candidate.ID:
 		default:
-			glog.Warningf("Failed to issue the request to remove container %v", candidate.ID)
+			klog.Warningf("Failed to issue the request to remove container %v", candidate.ID)
 		}
 	}
 }
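deleteContainersInPod, patched above, hands container IDs to a bounded worker channel and logs instead of blocking when the channel is full. A minimal sketch of that non-blocking send:

package main

import "k8s.io/klog"

// trySubmit mirrors the deletor's select/default: enqueue if there is room,
// otherwise log and drop rather than stalling the caller.
func trySubmit(worker chan<- string, id string) bool {
	select {
	case worker <- id:
		return true
	default:
		klog.Warningf("Failed to issue the request to remove container %v", id)
		return false
	}
}

func main() {
	worker := make(chan string, 1)
	klog.Infof("first submit ok: %v", trySubmit(worker, "c-1"))  // buffered: accepted
	klog.Infof("second submit ok: %v", trySubmit(worker, "c-2")) // full: dropped with a warning
}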
"k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/eviction" @@ -187,7 +187,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan UpdatePodOptions) { } if err != nil { // IMPORTANT: we do not log errors here, the syncPodFn is responsible for logging errors - glog.Errorf("Error syncing pod %s (%q), skipping: %v", update.Pod.UID, format.Pod(update.Pod), err) + klog.Errorf("Error syncing pod %s (%q), skipping: %v", update.Pod.UID, format.Pod(update.Pod), err) } p.wrapUp(update.Pod.UID, err) } diff --git a/pkg/kubelet/preemption/BUILD b/pkg/kubelet/preemption/BUILD index 9e8e44331b8cd..fa8f7ed8132bd 100644 --- a/pkg/kubelet/preemption/BUILD +++ b/pkg/kubelet/preemption/BUILD @@ -22,7 +22,7 @@ go_library( "//pkg/scheduler/algorithm/predicates:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/preemption/preemption.go b/pkg/kubelet/preemption/preemption.go index c7236e2c5f71d..94a3afad6b5c9 100644 --- a/pkg/kubelet/preemption/preemption.go +++ b/pkg/kubelet/preemption/preemption.go @@ -20,9 +20,9 @@ import ( "fmt" "math" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/client-go/tools/record" + "k8s.io/klog" "k8s.io/kubernetes/pkg/api/v1/resource" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/kubelet/events" @@ -96,7 +96,7 @@ func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(admitPod *v1.Pod, if err != nil { return fmt.Errorf("preemption: error finding a set of pods to preempt: %v", err) } - glog.Infof("preemption: attempting to evict pods %v, in order to free up resources: %s", podsToPreempt, insufficientResources.toString()) + klog.Infof("preemption: attempting to evict pods %v, in order to free up resources: %s", podsToPreempt, insufficientResources.toString()) for _, pod := range podsToPreempt { status := v1.PodStatus{ Phase: v1.PodFailed, @@ -110,7 +110,7 @@ func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(admitPod *v1.Pod, if err != nil { return fmt.Errorf("preemption: pod %s failed to evict %v", format.Pod(pod), err) } - glog.Infof("preemption: pod %s evicted successfully", format.Pod(pod)) + klog.Infof("preemption: pod %s evicted successfully", format.Pod(pod)) } return nil } diff --git a/pkg/kubelet/prober/BUILD b/pkg/kubelet/prober/BUILD index 78394bf728949..18922e59e333a 100644 --- a/pkg/kubelet/prober/BUILD +++ b/pkg/kubelet/prober/BUILD @@ -32,8 +32,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -64,7 +64,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + 
"//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/kubelet/prober/prober.go b/pkg/kubelet/prober/prober.go index 77cea64603149..efec60c98d2a1 100644 --- a/pkg/kubelet/prober/prober.go +++ b/pkg/kubelet/prober/prober.go @@ -39,7 +39,7 @@ import ( tcprobe "k8s.io/kubernetes/pkg/probe/tcp" "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) const maxProbeRetries = 3 @@ -91,7 +91,7 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c ctrName := fmt.Sprintf("%s:%s", format.Pod(pod), container.Name) if probeSpec == nil { - glog.Warningf("%s probe for %s is nil", probeType, ctrName) + klog.Warningf("%s probe for %s is nil", probeType, ctrName) return results.Success, nil } @@ -100,22 +100,22 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c // Probe failed in one way or another. ref, hasRef := pb.refManager.GetRef(containerID) if !hasRef { - glog.Warningf("No ref for container %q (%s)", containerID.String(), ctrName) + klog.Warningf("No ref for container %q (%s)", containerID.String(), ctrName) } if err != nil { - glog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err) + klog.V(1).Infof("%s probe for %q errored: %v", probeType, ctrName, err) if hasRef { pb.recorder.Eventf(ref, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe errored: %v", probeType, err) } } else { // result != probe.Success - glog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output) + klog.V(1).Infof("%s probe for %q failed (%v): %s", probeType, ctrName, result, output) if hasRef { pb.recorder.Eventf(ref, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe failed: %s", probeType, output) } } return results.Failure, err } - glog.V(3).Infof("%s probe for %q succeeded", probeType, ctrName) + klog.V(3).Infof("%s probe for %q succeeded", probeType, ctrName) return results.Success, nil } @@ -147,7 +147,7 @@ func buildHeader(headerList []v1.HTTPHeader) http.Header { func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { timeout := time.Duration(p.TimeoutSeconds) * time.Second if p.Exec != nil { - glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command) + klog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod, container, p.Exec.Command) command := kubecontainer.ExpandContainerCommandOnlyStatic(p.Exec.Command, container.Env) return pb.exec.Probe(pb.newExecInContainer(container, containerID, command, timeout)) } @@ -162,10 +162,10 @@ func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status return probe.Unknown, "", err } path := p.HTTPGet.Path - glog.V(4).Infof("HTTP-Probe Host: %v://%v, Port: %v, Path: %v", scheme, host, port, path) + klog.V(4).Infof("HTTP-Probe Host: %v://%v, Port: %v, Path: %v", scheme, host, port, path) url := formatURL(scheme, host, port, path) headers := buildHeader(p.HTTPGet.HTTPHeaders) - glog.V(4).Infof("HTTP-Probe Headers: %v", headers) + klog.V(4).Infof("HTTP-Probe Headers: %v", headers) if probeType == liveness { return pb.livenessHttp.Probe(url, headers, timeout) } else { // readiness @@ -181,10 +181,10 @@ func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status if host == "" { host = status.PodIP } - glog.V(4).Infof("TCP-Probe Host: %v, Port: %v, Timeout: %v", host, port, 
timeout) + klog.V(4).Infof("TCP-Probe Host: %v, Port: %v, Timeout: %v", host, port, timeout) return pb.tcp.Probe(host, port, timeout) } - glog.Warningf("Failed to find probe builder for container: %v", container) + klog.Warningf("Failed to find probe builder for container: %v", container) return probe.Unknown, "", fmt.Errorf("Missing probe handler for %s:%s", format.Pod(pod), container.Name) } diff --git a/pkg/kubelet/prober/prober_manager.go b/pkg/kubelet/prober/prober_manager.go index 0e53e094076fa..a913598ef6998 100644 --- a/pkg/kubelet/prober/prober_manager.go +++ b/pkg/kubelet/prober/prober_manager.go @@ -19,13 +19,13 @@ package prober import ( "sync" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober/results" "k8s.io/kubernetes/pkg/kubelet/status" @@ -149,7 +149,7 @@ func (m *manager) AddPod(pod *v1.Pod) { if c.ReadinessProbe != nil { key.probeType = readiness if _, ok := m.workers[key]; ok { - glog.Errorf("Readiness probe already exists! %v - %v", + klog.Errorf("Readiness probe already exists! %v - %v", format.Pod(pod), c.Name) return } @@ -161,7 +161,7 @@ func (m *manager) AddPod(pod *v1.Pod) { if c.LivenessProbe != nil { key.probeType = liveness if _, ok := m.workers[key]; ok { - glog.Errorf("Liveness probe already exists! %v - %v", + klog.Errorf("Liveness probe already exists! %v - %v", format.Pod(pod), c.Name) return } diff --git a/pkg/kubelet/prober/prober_manager_test.go b/pkg/kubelet/prober/prober_manager_test.go index f684a4ff599fd..2ea5d63fbc990 100644 --- a/pkg/kubelet/prober/prober_manager_test.go +++ b/pkg/kubelet/prober/prober_manager_test.go @@ -22,12 +22,12 @@ import ( "testing" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober/results" "k8s.io/kubernetes/pkg/probe" @@ -359,7 +359,7 @@ func waitForWorkerExit(m *manager, workerPaths []probeKey) error { if exited, _ := condition(); exited { continue // Already exited, no need to poll. 
} - glog.Infof("Polling %v", w) + klog.Infof("Polling %v", w) if err := wait.Poll(interval, wait.ForeverTestTimeout, condition); err != nil { return err } @@ -384,7 +384,7 @@ func waitForReadyStatus(m *manager, ready bool) error { } return status.ContainerStatuses[0].Ready == ready, nil } - glog.Infof("Polling for ready state %v", ready) + klog.Infof("Polling for ready state %v", ready) if err := wait.Poll(interval, wait.ForeverTestTimeout, condition); err != nil { return err } @@ -399,7 +399,7 @@ func cleanup(t *testing.T, m *manager) { condition := func() (bool, error) { workerCount := m.workerCount() if workerCount > 0 { - glog.Infof("Waiting for %d workers to exit...", workerCount) + klog.Infof("Waiting for %d workers to exit...", workerCount) } return workerCount == 0, nil } diff --git a/pkg/kubelet/prober/worker.go b/pkg/kubelet/prober/worker.go index cdefc1da2c679..0602419d7b6d4 100644 --- a/pkg/kubelet/prober/worker.go +++ b/pkg/kubelet/prober/worker.go @@ -20,10 +20,10 @@ import ( "math/rand" "time" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" podutil "k8s.io/kubernetes/pkg/api/v1/pod" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/prober/results" @@ -160,13 +160,13 @@ func (w *worker) doProbe() (keepGoing bool) { status, ok := w.probeManager.statusManager.GetPodStatus(w.pod.UID) if !ok { // Either the pod has not been created yet, or it was already deleted. - glog.V(3).Infof("No status for pod: %v", format.Pod(w.pod)) + klog.V(3).Infof("No status for pod: %v", format.Pod(w.pod)) return true } // Worker should terminate if pod is terminated. if status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded { - glog.V(3).Infof("Pod %v %v, exiting probe worker", + klog.V(3).Infof("Pod %v %v, exiting probe worker", format.Pod(w.pod), status.Phase) return false } @@ -174,7 +174,7 @@ func (w *worker) doProbe() (keepGoing bool) { c, ok := podutil.GetContainerStatus(status.ContainerStatuses, w.container.Name) if !ok || len(c.ContainerID) == 0 { // Either the container has not been created yet, or it was deleted. - glog.V(3).Infof("Probe target container not found: %v - %v", + klog.V(3).Infof("Probe target container not found: %v - %v", format.Pod(w.pod), w.container.Name) return true // Wait for more information. 
} @@ -195,7 +195,7 @@ func (w *worker) doProbe() (keepGoing bool) { } if c.State.Running == nil { - glog.V(3).Infof("Non-running container probed: %v - %v", + klog.V(3).Infof("Non-running container probed: %v - %v", format.Pod(w.pod), w.container.Name) if !w.containerID.IsEmpty() { w.resultsManager.Set(w.containerID, results.Failure, w.pod) diff --git a/pkg/kubelet/remote/BUILD b/pkg/kubelet/remote/BUILD index 05c6efd271af5..cf19206ce2bda 100644 --- a/pkg/kubelet/remote/BUILD +++ b/pkg/kubelet/remote/BUILD @@ -19,8 +19,8 @@ go_library( "//pkg/kubelet/apis/cri:go_default_library", "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library", "//pkg/kubelet/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/kubelet/remote/remote_image.go b/pkg/kubelet/remote/remote_image.go index a6bc88f7cc4c2..17b90574dc051 100644 --- a/pkg/kubelet/remote/remote_image.go +++ b/pkg/kubelet/remote/remote_image.go @@ -22,8 +22,8 @@ import ( "fmt" "time" - "github.com/golang/glog" "google.golang.org/grpc" + "k8s.io/klog" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" @@ -38,7 +38,7 @@ type RemoteImageService struct { // NewRemoteImageService creates a new internalapi.ImageManagerService. func NewRemoteImageService(endpoint string, connectionTimeout time.Duration) (internalapi.ImageManagerService, error) { - glog.V(3).Infof("Connecting to image service %s", endpoint) + klog.V(3).Infof("Connecting to image service %s", endpoint) addr, dailer, err := util.GetAddressAndDialer(endpoint) if err != nil { return nil, err @@ -49,7 +49,7 @@ func NewRemoteImageService(endpoint string, connectionTimeout time.Duration) (in conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithDialer(dailer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) if err != nil { - glog.Errorf("Connect remote image service %s failed: %v", addr, err) + klog.Errorf("Connect remote image service %s failed: %v", addr, err) return nil, err } @@ -68,7 +68,7 @@ func (r *RemoteImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runt Filter: filter, }) if err != nil { - glog.Errorf("ListImages with filter %+v from image service failed: %v", filter, err) + klog.Errorf("ListImages with filter %+v from image service failed: %v", filter, err) return nil, err } @@ -84,14 +84,14 @@ func (r *RemoteImageService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimea Image: image, }) if err != nil { - glog.Errorf("ImageStatus %q from image service failed: %v", image.Image, err) + klog.Errorf("ImageStatus %q from image service failed: %v", image.Image, err) return nil, err } if resp.Image != nil { if resp.Image.Id == "" || resp.Image.Size_ == 0 { errorMessage := fmt.Sprintf("Id or size of image %q is not set", image.Image) - glog.Errorf("ImageStatus failed: %s", errorMessage) + klog.Errorf("ImageStatus failed: %s", errorMessage) return nil, errors.New(errorMessage) } } @@ -109,13 +109,13 @@ func (r *RemoteImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtim Auth: auth, }) if err != nil { - glog.Errorf("PullImage %q from image service failed: %v", image.Image, err) + klog.Errorf("PullImage %q from image service failed: %v", image.Image, err) return "", err } if resp.ImageRef == "" { errorMessage := fmt.Sprintf("imageRef of image %q is not set", 
image.Image) - glog.Errorf("PullImage failed: %s", errorMessage) + klog.Errorf("PullImage failed: %s", errorMessage) return "", errors.New(errorMessage) } @@ -131,7 +131,7 @@ func (r *RemoteImageService) RemoveImage(image *runtimeapi.ImageSpec) error { Image: image, }) if err != nil { - glog.Errorf("RemoveImage %q from image service failed: %v", image.Image, err) + klog.Errorf("RemoveImage %q from image service failed: %v", image.Image, err) return err } @@ -147,7 +147,7 @@ func (r *RemoteImageService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error resp, err := r.imageClient.ImageFsInfo(ctx, &runtimeapi.ImageFsInfoRequest{}) if err != nil { - glog.Errorf("ImageFsInfo from image service failed: %v", err) + klog.Errorf("ImageFsInfo from image service failed: %v", err) return nil, err } return resp.GetImageFilesystems(), nil diff --git a/pkg/kubelet/remote/remote_runtime.go b/pkg/kubelet/remote/remote_runtime.go index f447b850e15ca..16e16daff84b3 100644 --- a/pkg/kubelet/remote/remote_runtime.go +++ b/pkg/kubelet/remote/remote_runtime.go @@ -23,8 +23,8 @@ import ( "strings" "time" - "github.com/golang/glog" "google.golang.org/grpc" + "k8s.io/klog" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" @@ -40,7 +40,7 @@ type RemoteRuntimeService struct { // NewRemoteRuntimeService creates a new internalapi.RuntimeService. func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration) (internalapi.RuntimeService, error) { - glog.V(3).Infof("Connecting to runtime service %s", endpoint) + klog.V(3).Infof("Connecting to runtime service %s", endpoint) addr, dailer, err := util.GetAddressAndDialer(endpoint) if err != nil { return nil, err @@ -50,7 +50,7 @@ func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration) ( conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithDialer(dailer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) if err != nil { - glog.Errorf("Connect remote runtime %s failed: %v", addr, err) + klog.Errorf("Connect remote runtime %s failed: %v", addr, err) return nil, err } @@ -69,7 +69,7 @@ func (r *RemoteRuntimeService) Version(apiVersion string) (*runtimeapi.VersionRe Version: apiVersion, }) if err != nil { - glog.Errorf("Version from runtime service failed: %v", err) + klog.Errorf("Version from runtime service failed: %v", err) return nil, err } @@ -93,13 +93,13 @@ func (r *RemoteRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig RuntimeHandler: runtimeHandler, }) if err != nil { - glog.Errorf("RunPodSandbox from runtime service failed: %v", err) + klog.Errorf("RunPodSandbox from runtime service failed: %v", err) return "", err } if resp.PodSandboxId == "" { errorMessage := fmt.Sprintf("PodSandboxId is not set for sandbox %q", config.GetMetadata()) - glog.Errorf("RunPodSandbox failed: %s", errorMessage) + klog.Errorf("RunPodSandbox failed: %s", errorMessage) return "", errors.New(errorMessage) } @@ -116,7 +116,7 @@ func (r *RemoteRuntimeService) StopPodSandbox(podSandBoxID string) error { PodSandboxId: podSandBoxID, }) if err != nil { - glog.Errorf("StopPodSandbox %q from runtime service failed: %v", podSandBoxID, err) + klog.Errorf("StopPodSandbox %q from runtime service failed: %v", podSandBoxID, err) return err } @@ -133,7 +133,7 @@ func (r *RemoteRuntimeService) RemovePodSandbox(podSandBoxID string) error { PodSandboxId: podSandBoxID, }) if err != nil { - glog.Errorf("RemovePodSandbox %q from runtime service failed: 
%v", podSandBoxID, err) + klog.Errorf("RemovePodSandbox %q from runtime service failed: %v", podSandBoxID, err) return err } @@ -170,7 +170,7 @@ func (r *RemoteRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilte Filter: filter, }) if err != nil { - glog.Errorf("ListPodSandbox with filter %+v from runtime service failed: %v", filter, err) + klog.Errorf("ListPodSandbox with filter %+v from runtime service failed: %v", filter, err) return nil, err } @@ -188,13 +188,13 @@ func (r *RemoteRuntimeService) CreateContainer(podSandBoxID string, config *runt SandboxConfig: sandboxConfig, }) if err != nil { - glog.Errorf("CreateContainer in sandbox %q from runtime service failed: %v", podSandBoxID, err) + klog.Errorf("CreateContainer in sandbox %q from runtime service failed: %v", podSandBoxID, err) return "", err } if resp.ContainerId == "" { errorMessage := fmt.Sprintf("ContainerId is not set for container %q", config.GetMetadata()) - glog.Errorf("CreateContainer failed: %s", errorMessage) + klog.Errorf("CreateContainer failed: %s", errorMessage) return "", errors.New(errorMessage) } @@ -210,7 +210,7 @@ func (r *RemoteRuntimeService) StartContainer(containerID string) error { ContainerId: containerID, }) if err != nil { - glog.Errorf("StartContainer %q from runtime service failed: %v", containerID, err) + klog.Errorf("StartContainer %q from runtime service failed: %v", containerID, err) return err } @@ -230,7 +230,7 @@ func (r *RemoteRuntimeService) StopContainer(containerID string, timeout int64) Timeout: timeout, }) if err != nil { - glog.Errorf("StopContainer %q from runtime service failed: %v", containerID, err) + klog.Errorf("StopContainer %q from runtime service failed: %v", containerID, err) return err } @@ -247,7 +247,7 @@ func (r *RemoteRuntimeService) RemoveContainer(containerID string) error { ContainerId: containerID, }) if err != nil { - glog.Errorf("RemoveContainer %q from runtime service failed: %v", containerID, err) + klog.Errorf("RemoveContainer %q from runtime service failed: %v", containerID, err) return err } @@ -263,7 +263,7 @@ func (r *RemoteRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter Filter: filter, }) if err != nil { - glog.Errorf("ListContainers with filter %+v from runtime service failed: %v", filter, err) + klog.Errorf("ListContainers with filter %+v from runtime service failed: %v", filter, err) return nil, err } @@ -279,13 +279,13 @@ func (r *RemoteRuntimeService) ContainerStatus(containerID string) (*runtimeapi. 
ContainerId: containerID, }) if err != nil { - glog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err) + klog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err) return nil, err } if resp.Status != nil { if err := verifyContainerStatus(resp.Status); err != nil { - glog.Errorf("ContainerStatus of %q failed: %v", containerID, err) + klog.Errorf("ContainerStatus of %q failed: %v", containerID, err) return nil, err } } @@ -303,7 +303,7 @@ func (r *RemoteRuntimeService) UpdateContainerResources(containerID string, reso Linux: resources, }) if err != nil { - glog.Errorf("UpdateContainerResources %q from runtime service failed: %v", containerID, err) + klog.Errorf("UpdateContainerResources %q from runtime service failed: %v", containerID, err) return err } @@ -333,7 +333,7 @@ func (r *RemoteRuntimeService) ExecSync(containerID string, cmd []string, timeou } resp, err := r.runtimeClient.ExecSync(ctx, req) if err != nil { - glog.Errorf("ExecSync %s '%s' from runtime service failed: %v", containerID, strings.Join(cmd, " "), err) + klog.Errorf("ExecSync %s '%s' from runtime service failed: %v", containerID, strings.Join(cmd, " "), err) return nil, nil, err } @@ -355,13 +355,13 @@ func (r *RemoteRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.Ex resp, err := r.runtimeClient.Exec(ctx, req) if err != nil { - glog.Errorf("Exec %s '%s' from runtime service failed: %v", req.ContainerId, strings.Join(req.Cmd, " "), err) + klog.Errorf("Exec %s '%s' from runtime service failed: %v", req.ContainerId, strings.Join(req.Cmd, " "), err) return nil, err } if resp.Url == "" { errorMessage := "URL is not set" - glog.Errorf("Exec failed: %s", errorMessage) + klog.Errorf("Exec failed: %s", errorMessage) return nil, errors.New(errorMessage) } @@ -375,13 +375,13 @@ func (r *RemoteRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeap resp, err := r.runtimeClient.Attach(ctx, req) if err != nil { - glog.Errorf("Attach %s from runtime service failed: %v", req.ContainerId, err) + klog.Errorf("Attach %s from runtime service failed: %v", req.ContainerId, err) return nil, err } if resp.Url == "" { errorMessage := "URL is not set" - glog.Errorf("Exec failed: %s", errorMessage) + klog.Errorf("Exec failed: %s", errorMessage) return nil, errors.New(errorMessage) } return resp, nil @@ -394,13 +394,13 @@ func (r *RemoteRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) ( resp, err := r.runtimeClient.PortForward(ctx, req) if err != nil { - glog.Errorf("PortForward %s from runtime service failed: %v", req.PodSandboxId, err) + klog.Errorf("PortForward %s from runtime service failed: %v", req.PodSandboxId, err) return nil, err } if resp.Url == "" { errorMessage := "URL is not set" - glog.Errorf("Exec failed: %s", errorMessage) + klog.Errorf("Exec failed: %s", errorMessage) return nil, errors.New(errorMessage) } @@ -435,13 +435,13 @@ func (r *RemoteRuntimeService) Status() (*runtimeapi.RuntimeStatus, error) { resp, err := r.runtimeClient.Status(ctx, &runtimeapi.StatusRequest{}) if err != nil { - glog.Errorf("Status from runtime service failed: %v", err) + klog.Errorf("Status from runtime service failed: %v", err) return nil, err } if resp.Status == nil || len(resp.Status.Conditions) < 2 { errorMessage := "RuntimeReady or NetworkReady condition are not set" - glog.Errorf("Status failed: %s", errorMessage) + klog.Errorf("Status failed: %s", errorMessage) return nil, errors.New(errorMessage) } @@ -457,7 +457,7 @@ func (r *RemoteRuntimeService) 
ContainerStats(containerID string) (*runtimeapi.C ContainerId: containerID, }) if err != nil { - glog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err) + klog.Errorf("ContainerStatus %q from runtime service failed: %v", containerID, err) return nil, err } @@ -474,7 +474,7 @@ func (r *RemoteRuntimeService) ListContainerStats(filter *runtimeapi.ContainerSt Filter: filter, }) if err != nil { - glog.Errorf("ListContainerStats with filter %+v from runtime service failed: %v", filter, err) + klog.Errorf("ListContainerStats with filter %+v from runtime service failed: %v", filter, err) return nil, err } @@ -487,7 +487,7 @@ func (r *RemoteRuntimeService) ReopenContainerLog(containerID string) error { _, err := r.runtimeClient.ReopenContainerLog(ctx, &runtimeapi.ReopenContainerLogRequest{ContainerId: containerID}) if err != nil { - glog.Errorf("ReopenContainerLog %q from runtime service failed: %v", containerID, err) + klog.Errorf("ReopenContainerLog %q from runtime service failed: %v", containerID, err) return err } return nil diff --git a/pkg/kubelet/runonce.go b/pkg/kubelet/runonce.go index 294580e7a0938..86c1f36f0c735 100644 --- a/pkg/kubelet/runonce.go +++ b/pkg/kubelet/runonce.go @@ -21,8 +21,8 @@ import ( "os" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -51,15 +51,15 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult, // If the container logs directory does not exist, create it. if _, err := os.Stat(ContainerLogsDir); err != nil { if err := kl.os.MkdirAll(ContainerLogsDir, 0755); err != nil { - glog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err) + klog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err) } } select { case u := <-updates: - glog.Infof("processing manifest with %d pods", len(u.Pods)) + klog.Infof("processing manifest with %d pods", len(u.Pods)) result, err := kl.runOnce(u.Pods, runOnceRetryDelay) - glog.Infof("finished processing %d pods", len(u.Pods)) + klog.Infof("finished processing %d pods", len(u.Pods)) return result, err case <-time.After(runOnceManifestDelay): return nil, fmt.Errorf("no pod manifest update after %v", runOnceManifestDelay) @@ -85,7 +85,7 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results [] }(pod) } - glog.Infof("Waiting for %d pods", len(admitted)) + klog.Infof("Waiting for %d pods", len(admitted)) failedPods := []string{} for i := 0; i < len(admitted); i++ { res := <-ch @@ -93,19 +93,19 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results [] if res.Err != nil { faliedContainerName, err := kl.getFailedContainers(res.Pod) if err != nil { - glog.Infof("unable to get failed containers' names for pod %q, error:%v", format.Pod(res.Pod), err) + klog.Infof("unable to get failed containers' names for pod %q, error:%v", format.Pod(res.Pod), err) } else { - glog.Infof("unable to start pod %q because container:%v failed", format.Pod(res.Pod), faliedContainerName) + klog.Infof("unable to start pod %q because container:%v failed", format.Pod(res.Pod), faliedContainerName) } failedPods = append(failedPods, format.Pod(res.Pod)) } else { - glog.Infof("started pod %q", format.Pod(res.Pod)) + klog.Infof("started pod %q", format.Pod(res.Pod)) } } if len(failedPods) > 0 { return results, fmt.Errorf("error running pods: %v", failedPods) } - 
glog.Infof("%d pods started", len(pods)) + klog.Infof("%d pods started", len(pods)) return results, err } @@ -120,14 +120,14 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error { } if kl.isPodRunning(pod, status) { - glog.Infof("pod %q containers running", format.Pod(pod)) + klog.Infof("pod %q containers running", format.Pod(pod)) return nil } - glog.Infof("pod %q containers not running: syncing", format.Pod(pod)) + klog.Infof("pod %q containers not running: syncing", format.Pod(pod)) - glog.Infof("Creating a mirror pod for static pod %q", format.Pod(pod)) + klog.Infof("Creating a mirror pod for static pod %q", format.Pod(pod)) if err := kl.podManager.CreateMirrorPod(pod); err != nil { - glog.Errorf("Failed creating a mirror pod %q: %v", format.Pod(pod), err) + klog.Errorf("Failed creating a mirror pod %q: %v", format.Pod(pod), err) } mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) if err = kl.syncPod(syncPodOptions{ @@ -142,7 +142,7 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error { return fmt.Errorf("timeout error: pod %q containers not running after %d retries", format.Pod(pod), runOnceMaxRetries) } // TODO(proppy): health checking would be better than waiting + checking the state at the next iteration. - glog.Infof("pod %q containers synced, waiting for %v", format.Pod(pod), delay) + klog.Infof("pod %q containers synced, waiting for %v", format.Pod(pod), delay) time.Sleep(delay) retry++ delay *= runOnceRetryDelayBackoff @@ -154,7 +154,7 @@ func (kl *Kubelet) isPodRunning(pod *v1.Pod, status *kubecontainer.PodStatus) bo for _, c := range pod.Spec.Containers { cs := status.FindContainerStatusByName(c.Name) if cs == nil || cs.State != kubecontainer.ContainerStateRunning { - glog.Infof("Container %q for pod %q not running", c.Name, format.Pod(pod)) + klog.Infof("Container %q for pod %q not running", c.Name, format.Pod(pod)) return false } } diff --git a/pkg/kubelet/server/BUILD b/pkg/kubelet/server/BUILD index 2a9fd9efee9e1..103e166d3b5ec 100644 --- a/pkg/kubelet/server/BUILD +++ b/pkg/kubelet/server/BUILD @@ -42,12 +42,12 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/flushwriter:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/container:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/metrics:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus/promhttp:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/server/auth.go b/pkg/kubelet/server/auth.go index 68e483f6ce0e2..8b8805cf5cf4c 100644 --- a/pkg/kubelet/server/auth.go +++ b/pkg/kubelet/server/auth.go @@ -20,11 +20,11 @@ import ( "net/http" "strings" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/klog" ) // KubeletAuth implements AuthInterface @@ -108,7 +108,7 @@ func (n nodeAuthorizerAttributesGetter) GetRequestAttributes(u user.Info, r *htt attrs.Subresource = "spec" } - glog.V(5).Infof("Node request attributes: user=%#v attrs=%#v", attrs.GetUser(), attrs) + klog.V(5).Infof("Node request attributes: 
user=%#v attrs=%#v", attrs.GetUser(), attrs) return attrs } diff --git a/pkg/kubelet/server/portforward/BUILD b/pkg/kubelet/server/portforward/BUILD index 07541328a3ad1..46c57de81bf68 100644 --- a/pkg/kubelet/server/portforward/BUILD +++ b/pkg/kubelet/server/portforward/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/wsstream:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/server/portforward/httpstream.go b/pkg/kubelet/server/portforward/httpstream.go index 06ed961e0e260..43393bd57ae74 100644 --- a/pkg/kubelet/server/portforward/httpstream.go +++ b/pkg/kubelet/server/portforward/httpstream.go @@ -30,7 +30,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" api "k8s.io/kubernetes/pkg/apis/core" - "github.com/golang/glog" + "k8s.io/klog" ) func handleHttpStreams(req *http.Request, w http.ResponseWriter, portForwarder PortForwarder, podName string, uid types.UID, supportedPortForwardProtocols []string, idleTimeout, streamCreationTimeout time.Duration) error { @@ -42,7 +42,7 @@ func handleHttpStreams(req *http.Request, w http.ResponseWriter, portForwarder P } streamChan := make(chan httpstream.Stream, 1) - glog.V(5).Infof("Upgrading port forward response") + klog.V(5).Infof("Upgrading port forward response") upgrader := spdy.NewResponseUpgrader() conn := upgrader.UpgradeResponse(w, req, httpStreamReceived(streamChan)) if conn == nil { @@ -50,7 +50,7 @@ func handleHttpStreams(req *http.Request, w http.ResponseWriter, portForwarder P } defer conn.Close() - glog.V(5).Infof("(conn=%p) setting port forwarding streaming connection idle timeout to %v", conn, idleTimeout) + klog.V(5).Infof("(conn=%p) setting port forwarding streaming connection idle timeout to %v", conn, idleTimeout) conn.SetIdleTimeout(idleTimeout) h := &httpStreamHandler{ @@ -121,11 +121,11 @@ func (h *httpStreamHandler) getStreamPair(requestID string) (*httpStreamPair, bo defer h.streamPairsLock.Unlock() if p, ok := h.streamPairs[requestID]; ok { - glog.V(5).Infof("(conn=%p, request=%s) found existing stream pair", h.conn, requestID) + klog.V(5).Infof("(conn=%p, request=%s) found existing stream pair", h.conn, requestID) return p, false } - glog.V(5).Infof("(conn=%p, request=%s) creating new stream pair", h.conn, requestID) + klog.V(5).Infof("(conn=%p, request=%s) creating new stream pair", h.conn, requestID) p := newPortForwardPair(requestID) h.streamPairs[requestID] = p @@ -143,7 +143,7 @@ func (h *httpStreamHandler) monitorStreamPair(p *httpStreamPair, timeout <-chan utilruntime.HandleError(err) p.printError(err.Error()) case <-p.complete: - glog.V(5).Infof("(conn=%v, request=%s) successfully received error and data streams", h.conn, p.requestID) + klog.V(5).Infof("(conn=%v, request=%s) successfully received error and data streams", h.conn, p.requestID) } h.removeStreamPair(p.requestID) } @@ -170,7 +170,7 @@ func (h *httpStreamHandler) removeStreamPair(requestID string) { func (h *httpStreamHandler) requestID(stream httpstream.Stream) string { requestID := stream.Headers().Get(api.PortForwardRequestIDHeader) if len(requestID) == 0 { - glog.V(5).Infof("(conn=%p) stream received without %s header", h.conn, api.PortForwardRequestIDHeader) + klog.V(5).Infof("(conn=%p) stream received without %s header", h.conn, api.PortForwardRequestIDHeader) // 
If we get here, it's because the connection came from an older client // that isn't generating the request id header // (https://github.com/kubernetes/kubernetes/blob/843134885e7e0b360eb5441e85b1410a8b1a7a0c/pkg/client/unversioned/portforward/portforward.go#L258-L287) @@ -197,7 +197,7 @@ func (h *httpStreamHandler) requestID(stream httpstream.Stream) string { requestID = strconv.Itoa(int(stream.Identifier()) - 2) } - glog.V(5).Infof("(conn=%p) automatically assigning request ID=%q from stream type=%s, stream ID=%d", h.conn, requestID, streamType, stream.Identifier()) + klog.V(5).Infof("(conn=%p) automatically assigning request ID=%q from stream type=%s, stream ID=%d", h.conn, requestID, streamType, stream.Identifier()) } return requestID } @@ -206,17 +206,17 @@ func (h *httpStreamHandler) requestID(stream httpstream.Stream) string { // streams, invoking portForward for each complete stream pair. The loop exits // when the httpstream.Connection is closed. func (h *httpStreamHandler) run() { - glog.V(5).Infof("(conn=%p) waiting for port forward streams", h.conn) + klog.V(5).Infof("(conn=%p) waiting for port forward streams", h.conn) Loop: for { select { case <-h.conn.CloseChan(): - glog.V(5).Infof("(conn=%p) upgraded connection closed", h.conn) + klog.V(5).Infof("(conn=%p) upgraded connection closed", h.conn) break Loop case stream := <-h.streamChan: requestID := h.requestID(stream) streamType := stream.Headers().Get(api.StreamType) - glog.V(5).Infof("(conn=%p, request=%s) received new stream of type %s", h.conn, requestID, streamType) + klog.V(5).Infof("(conn=%p, request=%s) received new stream of type %s", h.conn, requestID, streamType) p, created := h.getStreamPair(requestID) if created { @@ -242,9 +242,9 @@ func (h *httpStreamHandler) portForward(p *httpStreamPair) { portString := p.dataStream.Headers().Get(api.PortHeader) port, _ := strconv.ParseInt(portString, 10, 32) - glog.V(5).Infof("(conn=%p, request=%s) invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) + klog.V(5).Infof("(conn=%p, request=%s) invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) err := h.forwarder.PortForward(h.pod, h.uid, int32(port), p.dataStream) - glog.V(5).Infof("(conn=%p, request=%s) done invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) + klog.V(5).Infof("(conn=%p, request=%s) done invoking forwarder.PortForward for port %s", h.conn, p.requestID, portString) if err != nil { msg := fmt.Errorf("error forwarding port %d to pod %s, uid %v: %v", port, h.pod, h.uid, err) diff --git a/pkg/kubelet/server/portforward/websocket.go b/pkg/kubelet/server/portforward/websocket.go index 8bd6eb7098032..1b23d74b519b4 100644 --- a/pkg/kubelet/server/portforward/websocket.go +++ b/pkg/kubelet/server/portforward/websocket.go @@ -26,7 +26,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" @@ -185,9 +185,9 @@ func (h *websocketStreamHandler) portForward(p *websocketStreamPair) { defer p.dataStream.Close() defer p.errorStream.Close() - glog.V(5).Infof("(conn=%p) invoking forwarder.PortForward for port %d", h.conn, p.port) + klog.V(5).Infof("(conn=%p) invoking forwarder.PortForward for port %d", h.conn, p.port) err := h.forwarder.PortForward(h.pod, h.uid, p.port, p.dataStream) - glog.V(5).Infof("(conn=%p) done invoking forwarder.PortForward for port %d", h.conn, p.port) + klog.V(5).Infof("(conn=%p) done invoking forwarder.PortForward for port %d", h.conn, p.port) 
if err != nil { msg := fmt.Errorf("error forwarding port %d to pod %s, uid %v: %v", p.port, h.pod, h.uid, err) diff --git a/pkg/kubelet/server/remotecommand/BUILD b/pkg/kubelet/server/remotecommand/BUILD index 776abc764c9e7..fef6fec9c2cc1 100644 --- a/pkg/kubelet/server/remotecommand/BUILD +++ b/pkg/kubelet/server/remotecommand/BUILD @@ -27,7 +27,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/wsstream:go_default_library", "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/kubelet/server/remotecommand/httpstream.go b/pkg/kubelet/server/remotecommand/httpstream.go index e504a32c522d5..8bff323ec7a92 100644 --- a/pkg/kubelet/server/remotecommand/httpstream.go +++ b/pkg/kubelet/server/remotecommand/httpstream.go @@ -34,7 +34,7 @@ import ( "k8s.io/client-go/tools/remotecommand" api "k8s.io/kubernetes/pkg/apis/core" - "github.com/golang/glog" + "k8s.io/klog" ) // Options contains details about which streams are required for @@ -54,7 +54,7 @@ func NewOptions(req *http.Request) (*Options, error) { stderr := req.FormValue(api.ExecStderrParam) == "1" if tty && stderr { // TODO: make this an error before we reach this method - glog.V(4).Infof("Access to exec with tty and stderr is not supported, bypassing stderr") + klog.V(4).Infof("Access to exec with tty and stderr is not supported, bypassing stderr") stderr = false } @@ -155,7 +155,7 @@ func createHttpStreamStreams(req *http.Request, w http.ResponseWriter, opts *Opt case remotecommandconsts.StreamProtocolV2Name: handler = &v2ProtocolHandler{} case "": - glog.V(4).Infof("Client did not request protocol negotiation. Falling back to %q", remotecommandconsts.StreamProtocolV1Name) + klog.V(4).Infof("Client did not request protocol negotiation. Falling back to %q", remotecommandconsts.StreamProtocolV1Name) fallthrough case remotecommandconsts.StreamProtocolV1Name: handler = &v1ProtocolHandler{} diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index 4c8a3c3cdf7ed..141ed74cbfac8 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -31,13 +31,13 @@ import ( "strings" "time" - restful "github.com/emicklei/go-restful" - "github.com/golang/glog" + "github.com/emicklei/go-restful" cadvisormetrics "github.com/google/cadvisor/container" cadvisorapi "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/metrics" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -130,7 +130,7 @@ func ListenAndServeKubeletServer( enableContentionProfiling, redirectContainerStreaming bool, criHandler http.Handler) { - glog.Infof("Starting to listen on %s:%d", address, port) + klog.Infof("Starting to listen on %s:%d", address, port) handler := NewServer(host, resourceAnalyzer, auth, enableDebuggingHandlers, enableContentionProfiling, redirectContainerStreaming, criHandler) s := &http.Server{ Addr: net.JoinHostPort(address.String(), strconv.FormatUint(uint64(port), 10)), @@ -142,15 +142,15 @@ func ListenAndServeKubeletServer( // Passing empty strings as the cert and key files means no // cert/keys are specified and GetCertificate in the TLSConfig // should be called instead. 
- glog.Fatal(s.ListenAndServeTLS(tlsOptions.CertFile, tlsOptions.KeyFile)) + klog.Fatal(s.ListenAndServeTLS(tlsOptions.CertFile, tlsOptions.KeyFile)) } else { - glog.Fatal(s.ListenAndServe()) + klog.Fatal(s.ListenAndServe()) } } // ListenAndServeKubeletReadOnlyServer initializes a server to respond to HTTP network requests on the Kubelet. func ListenAndServeKubeletReadOnlyServer(host HostInterface, resourceAnalyzer stats.ResourceAnalyzer, address net.IP, port uint) { - glog.V(1).Infof("Starting to listen read-only on %s:%d", address, port) + klog.V(1).Infof("Starting to listen read-only on %s:%d", address, port) s := NewServer(host, resourceAnalyzer, nil, false, false, false, nil) server := &http.Server{ @@ -158,7 +158,7 @@ func ListenAndServeKubeletReadOnlyServer(host HostInterface, resourceAnalyzer st Handler: &s, MaxHeaderBytes: 1 << 20, } - glog.Fatal(server.ListenAndServe()) + klog.Fatal(server.ListenAndServe()) } // AuthInterface contains all methods required by the auth filters @@ -223,7 +223,7 @@ func (s *Server) InstallAuthFilter() { // Authenticate info, ok, err := s.auth.AuthenticateRequest(req.Request) if err != nil { - glog.Errorf("Unable to authenticate the request due to an error: %v", err) + klog.Errorf("Unable to authenticate the request due to an error: %v", err) resp.WriteErrorString(http.StatusUnauthorized, "Unauthorized") return } @@ -239,13 +239,13 @@ func (s *Server) InstallAuthFilter() { decision, _, err := s.auth.Authorize(attrs) if err != nil { msg := fmt.Sprintf("Authorization error (user=%s, verb=%s, resource=%s, subresource=%s)", attrs.GetUser().GetName(), attrs.GetVerb(), attrs.GetResource(), attrs.GetSubresource()) - glog.Errorf(msg, err) + klog.Errorf(msg, err) resp.WriteErrorString(http.StatusInternalServerError, msg) return } if decision != authorizer.DecisionAllow { msg := fmt.Sprintf("Forbidden (user=%s, verb=%s, resource=%s, subresource=%s)", attrs.GetUser().GetName(), attrs.GetVerb(), attrs.GetResource(), attrs.GetSubresource()) - glog.V(2).Info(msg) + klog.V(2).Info(msg) resp.WriteErrorString(http.StatusForbidden, msg) return } @@ -315,7 +315,7 @@ const pprofBasePath = "/debug/pprof/" // InstallDebuggingHandlers registers the HTTP request patterns that serve logs or run commands/containers func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) { - glog.Infof("Adding debug handlers to kubelet server.") + klog.Infof("Adding debug handlers to kubelet server.") ws := new(restful.WebService) ws. 
@@ -648,7 +648,7 @@ type responder struct { } func (r *responder) Error(w http.ResponseWriter, req *http.Request, err error) { - glog.Errorf("Error while proxying request: %v", err) + klog.Errorf("Error while proxying request: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) } @@ -745,7 +745,7 @@ func writeJsonResponse(response *restful.Response, data []byte) { response.Header().Set(restful.HEADER_ContentType, restful.MIME_JSON) response.WriteHeader(http.StatusOK) if _, err := response.Write(data); err != nil { - glog.Errorf("Error writing response: %v", err) + klog.Errorf("Error writing response: %v", err) } } diff --git a/pkg/kubelet/server/stats/BUILD b/pkg/kubelet/server/stats/BUILD index c3623fd98d1f3..6fec6f2eb9980 100644 --- a/pkg/kubelet/server/stats/BUILD +++ b/pkg/kubelet/server/stats/BUILD @@ -25,8 +25,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/server/stats/fs_resource_analyzer.go b/pkg/kubelet/server/stats/fs_resource_analyzer.go index 5f72134c4f55d..331a0b27ba6d6 100644 --- a/pkg/kubelet/server/stats/fs_resource_analyzer.go +++ b/pkg/kubelet/server/stats/fs_resource_analyzer.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" - "github.com/golang/glog" + "k8s.io/klog" ) // Map to PodVolumeStats pointers since the addresses for map values are not constant and can cause pain @@ -60,10 +60,10 @@ func newFsResourceAnalyzer(statsProvider StatsProvider, calcVolumePeriod time.Du func (s *fsResourceAnalyzer) Start() { s.startOnce.Do(func() { if s.calcPeriod <= 0 { - glog.Info("Volume stats collection disabled.") + klog.Info("Volume stats collection disabled.") return } - glog.Info("Starting FS ResourceAnalyzer") + klog.Info("Starting FS ResourceAnalyzer") go wait.Forever(func() { s.updateCachedPodVolumeStats() }, s.calcPeriod) }) } diff --git a/pkg/kubelet/server/stats/handler.go b/pkg/kubelet/server/stats/handler.go index 683f20c32f368..f87e5c820c5c8 100644 --- a/pkg/kubelet/server/stats/handler.go +++ b/pkg/kubelet/server/stats/handler.go @@ -25,8 +25,8 @@ import ( "time" restful "github.com/emicklei/go-restful" - "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -236,7 +236,7 @@ func (h *handler) handleSystemContainer(request *restful.Request, response *rest if err != nil { if _, ok := stats[containerName]; ok { // If the failure is partial, log it and return a best-effort response. 
- glog.Errorf("Partial failure issuing GetRawContainerInfo(%v): %v", query, err) + klog.Errorf("Partial failure issuing GetRawContainerInfo(%v): %v", query, err) } else { handleError(response, fmt.Sprintf("/stats/container %v", query), err) return @@ -272,7 +272,7 @@ func (h *handler) handlePodContainer(request *restful.Request, response *restful pod, ok := h.provider.GetPodByName(params["namespace"], params["podName"]) if !ok { - glog.V(4).Infof("Container not found: %v", params) + klog.V(4).Infof("Container not found: %v", params) response.WriteError(http.StatusNotFound, kubecontainer.ErrContainerNotFound) return } @@ -291,7 +291,7 @@ func (h *handler) handlePodContainer(request *restful.Request, response *restful func writeResponse(response *restful.Response, stats interface{}) { if err := response.WriteAsJson(stats); err != nil { - glog.Errorf("Error writing response: %v", err) + klog.Errorf("Error writing response: %v", err) } } @@ -303,7 +303,7 @@ func handleError(response *restful.Response, request string, err error) { response.WriteError(http.StatusNotFound, err) default: msg := fmt.Sprintf("Internal Error: %v", err) - glog.Errorf("HTTP InternalServerError serving %s: %s", request, msg) + klog.Errorf("HTTP InternalServerError serving %s: %s", request, msg) response.WriteErrorString(http.StatusInternalServerError, msg) } } diff --git a/pkg/kubelet/server/stats/summary_sys_containers.go b/pkg/kubelet/server/stats/summary_sys_containers.go index 7179e828020f0..baaff0ab1bd65 100644 --- a/pkg/kubelet/server/stats/summary_sys_containers.go +++ b/pkg/kubelet/server/stats/summary_sys_containers.go @@ -19,7 +19,7 @@ limitations under the License. package stats import ( - "github.com/golang/glog" + "k8s.io/klog" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cm" @@ -42,7 +42,7 @@ func (sp *summaryProviderImpl) GetSystemContainersStats(nodeConfig cm.NodeConfig } s, _, err := sp.provider.GetCgroupStats(cont.name, cont.forceStatsUpdate) if err != nil { - glog.Errorf("Failed to get system container stats for %q: %v", cont.name, err) + klog.Errorf("Failed to get system container stats for %q: %v", cont.name, err) continue } // System containers don't have a filesystem associated with them. 
@@ -71,7 +71,7 @@ func (sp *summaryProviderImpl) GetSystemContainersCPUAndMemoryStats(nodeConfig c } s, err := sp.provider.GetCgroupCPUAndMemoryStats(cont.name, cont.forceStatsUpdate) if err != nil { - glog.Errorf("Failed to get system container stats for %q: %v", cont.name, err) + klog.Errorf("Failed to get system container stats for %q: %v", cont.name, err) continue } s.Name = sys diff --git a/pkg/kubelet/server/stats/volume_stat_calculator.go b/pkg/kubelet/server/stats/volume_stat_calculator.go index 2c535f56241ae..220d8785f5348 100644 --- a/pkg/kubelet/server/stats/volume_stat_calculator.go +++ b/pkg/kubelet/server/stats/volume_stat_calculator.go @@ -27,7 +27,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/volume" - "github.com/golang/glog" + "k8s.io/klog" ) // volumeStatCalculator calculates volume metrics for a given pod periodically in the background and caches the result @@ -109,7 +109,7 @@ func (s *volumeStatCalculator) calcAndStoreStats() { if err != nil { // Expected for Volumes that don't support Metrics if !volume.IsNotSupported(err) { - glog.V(4).Infof("Failed to calculate volume metrics for pod %s volume %s: %+v", format.Pod(s.pod), name, err) + klog.V(4).Infof("Failed to calculate volume metrics for pod %s volume %s: %+v", format.Pod(s.pod), name, err) } continue } diff --git a/pkg/kubelet/stats/BUILD b/pkg/kubelet/stats/BUILD index ddd6639ea26a8..07ca1b5454055 100644 --- a/pkg/kubelet/stats/BUILD +++ b/pkg/kubelet/stats/BUILD @@ -28,10 +28,10 @@ go_library( "//pkg/volume:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/fs:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/stats/cadvisor_stats_provider.go b/pkg/kubelet/stats/cadvisor_stats_provider.go index 35820f7e94a52..9549c12a1a663 100644 --- a/pkg/kubelet/stats/cadvisor_stats_provider.go +++ b/pkg/kubelet/stats/cadvisor_stats_provider.go @@ -22,8 +22,8 @@ import ( "sort" "strings" - "github.com/golang/glog" cadvisorapiv2 "github.com/google/cadvisor/info/v2" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -306,7 +306,7 @@ func isPodManagedContainer(cinfo *cadvisorapiv2.ContainerInfo) bool { podNamespace := kubetypes.GetPodNamespace(cinfo.Spec.Labels) managed := podName != "" && podNamespace != "" if !managed && podName != podNamespace { - glog.Warningf( + klog.Warningf( "Expect container to have either both podName (%s) and podNamespace (%s) labels, or neither.", podName, podNamespace) } @@ -429,7 +429,7 @@ func getCadvisorContainerInfo(ca cadvisor.Interface) (map[string]cadvisorapiv2.C if _, ok := infos["/"]; ok { // If the failure is partial, log it and return a best-effort // response. 
- glog.Errorf("Partial failure issuing cadvisor.ContainerInfoV2: %v", err) + klog.Errorf("Partial failure issuing cadvisor.ContainerInfoV2: %v", err) } else { return nil, fmt.Errorf("failed to get root cgroup stats: %v", err) } diff --git a/pkg/kubelet/stats/cri_stats_provider.go b/pkg/kubelet/stats/cri_stats_provider.go index 3ebf3a0fec2ea..bafd24d65c4fd 100644 --- a/pkg/kubelet/stats/cri_stats_provider.go +++ b/pkg/kubelet/stats/cri_stats_provider.go @@ -24,8 +24,8 @@ import ( "strings" "time" - "github.com/golang/glog" cadvisorfs "github.com/google/cadvisor/fs" + "k8s.io/klog" cadvisorapiv2 "github.com/google/cadvisor/info/v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -154,7 +154,7 @@ func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { // container stats caStats, caFound := caInfos[containerID] if !caFound { - glog.V(4).Infof("Unable to find cadvisor stats for %q", containerID) + klog.V(4).Infof("Unable to find cadvisor stats for %q", containerID) } else { p.addCadvisorContainerStats(cs, &caStats) } @@ -236,7 +236,7 @@ func (p *criStatsProvider) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, erro // container stats caStats, caFound := caInfos[containerID] if !caFound { - glog.V(4).Infof("Unable to find cadvisor stats for %q", containerID) + klog.V(4).Infof("Unable to find cadvisor stats for %q", containerID) } else { p.addCadvisorContainerStats(cs, &caStats) } @@ -307,7 +307,7 @@ func (p *criStatsProvider) ImageFsDevice() (string, error) { // nil. func (p *criStatsProvider) getFsInfo(fsID *runtimeapi.FilesystemIdentifier) *cadvisorapiv2.FsInfo { if fsID == nil { - glog.V(2).Infof("Failed to get filesystem info: fsID is nil.") + klog.V(2).Infof("Failed to get filesystem info: fsID is nil.") return nil } mountpoint := fsID.GetMountpoint() @@ -315,9 +315,9 @@ func (p *criStatsProvider) getFsInfo(fsID *runtimeapi.FilesystemIdentifier) *cad if err != nil { msg := fmt.Sprintf("Failed to get the info of the filesystem with mountpoint %q: %v.", mountpoint, err) if err == cadvisorfs.ErrNoSuchDevice { - glog.V(2).Info(msg) + klog.V(2).Info(msg) } else { - glog.Error(msg) + klog.Error(msg) } return nil } @@ -362,7 +362,7 @@ func (p *criStatsProvider) addPodNetworkStats( } // TODO: sum Pod network stats from container stats. - glog.V(4).Infof("Unable to find cadvisor stats for sandbox %q", podSandboxID) + klog.V(4).Infof("Unable to find cadvisor stats for sandbox %q", podSandboxID) } func (p *criStatsProvider) addPodCPUMemoryStats( @@ -579,7 +579,7 @@ func (p *criStatsProvider) getContainerLogStats(path string, rootFsInfo *cadviso m := p.logMetricsService.createLogMetricsProvider(path) logMetrics, err := m.GetMetrics() if err != nil { - glog.Errorf("Unable to fetch container log stats for path %s: %v ", path, err) + klog.Errorf("Unable to fetch container log stats for path %s: %v ", path, err) return nil } result := &statsapi.FsStats{ diff --git a/pkg/kubelet/stats/helper.go b/pkg/kubelet/stats/helper.go index 2bdda4314a6ae..54f3093e55301 100644 --- a/pkg/kubelet/stats/helper.go +++ b/pkg/kubelet/stats/helper.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" cadvisorapiv1 "github.com/google/cadvisor/info/v1" cadvisorapiv2 "github.com/google/cadvisor/info/v2" @@ -206,7 +206,7 @@ func cadvisorInfoToUserDefinedMetrics(info *cadvisorapiv2.ContainerInfo) []stats for name, values := range stat.CustomMetrics { specVal, ok := udmMap[name] if !ok { - glog.Warningf("spec for custom metric %q is missing from cAdvisor output. 
Spec: %+v, Metrics: %+v", name, info.Spec, stat.CustomMetrics) + klog.Warningf("spec for custom metric %q is missing from cAdvisor output. Spec: %+v, Metrics: %+v", name, info.Spec, stat.CustomMetrics) continue } for _, value := range values { diff --git a/pkg/kubelet/status/BUILD b/pkg/kubelet/status/BUILD index 6e341e9b4983c..0b590440e5850 100644 --- a/pkg/kubelet/status/BUILD +++ b/pkg/kubelet/status/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go index f184003117c82..37d5bfa03545d 100644 --- a/pkg/kubelet/status/status_manager.go +++ b/pkg/kubelet/status/status_manager.go @@ -24,7 +24,6 @@ import ( clientset "k8s.io/client-go/kubernetes" - "github.com/golang/glog" "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" @@ -32,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" podutil "k8s.io/kubernetes/pkg/api/v1/pod" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubepod "k8s.io/kubernetes/pkg/kubelet/pod" @@ -145,17 +145,17 @@ func (m *manager) Start() { // on the master, where the kubelet is responsible for bootstrapping the pods // of the master components. if m.kubeClient == nil { - glog.Infof("Kubernetes client is nil, not starting status manager.") + klog.Infof("Kubernetes client is nil, not starting status manager.") return } - glog.Info("Starting to sync pod status with apiserver") + klog.Info("Starting to sync pod status with apiserver") syncTicker := time.Tick(syncPeriod) // syncPod and syncBatch share the same go routine to avoid sync races. go wait.Forever(func() { select { case syncRequest := <-m.podStatusChannel: - glog.V(5).Infof("Status Manager: syncing pod: %q, with status: (%d, %v) from podStatusChannel", + klog.V(5).Infof("Status Manager: syncing pod: %q, with status: (%d, %v) from podStatusChannel", syncRequest.podUID, syncRequest.status.version, syncRequest.status.status) m.syncPod(syncRequest.podUID, syncRequest.status) case <-syncTicker: @@ -177,7 +177,7 @@ func (m *manager) SetPodStatus(pod *v1.Pod, status v1.PodStatus) { for _, c := range pod.Status.Conditions { if !kubetypes.PodConditionByKubelet(c.Type) { - glog.Errorf("Kubelet is trying to update pod condition %q for pod %q. "+ + klog.Errorf("Kubelet is trying to update pod condition %q for pod %q. 
"+ "But it is not owned by kubelet.", string(c.Type), format.Pod(pod)) } } @@ -196,13 +196,13 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai pod, ok := m.podManager.GetPodByUID(podUID) if !ok { - glog.V(4).Infof("Pod %q has been deleted, no need to update readiness", string(podUID)) + klog.V(4).Infof("Pod %q has been deleted, no need to update readiness", string(podUID)) return } oldStatus, found := m.podStatuses[pod.UID] if !found { - glog.Warningf("Container readiness changed before pod has synced: %q - %q", + klog.Warningf("Container readiness changed before pod has synced: %q - %q", format.Pod(pod), containerID.String()) return } @@ -210,13 +210,13 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai // Find the container to update. containerStatus, _, ok := findContainerStatus(&oldStatus.status, containerID.String()) if !ok { - glog.Warningf("Container readiness changed for unknown container: %q - %q", + klog.Warningf("Container readiness changed for unknown container: %q - %q", format.Pod(pod), containerID.String()) return } if containerStatus.Ready == ready { - glog.V(4).Infof("Container readiness unchanged (%v): %q - %q", ready, + klog.V(4).Infof("Container readiness unchanged (%v): %q - %q", ready, format.Pod(pod), containerID.String()) return } @@ -238,7 +238,7 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai if conditionIndex != -1 { status.Conditions[conditionIndex] = condition } else { - glog.Warningf("PodStatus missing %s type condition: %+v", conditionType, status) + klog.Warningf("PodStatus missing %s type condition: %+v", conditionType, status) status.Conditions = append(status.Conditions, condition) } } @@ -328,11 +328,11 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp // Check for illegal state transition in containers if err := checkContainerStateTransition(oldStatus.ContainerStatuses, status.ContainerStatuses, pod.Spec.RestartPolicy); err != nil { - glog.Errorf("Status update on pod %v/%v aborted: %v", pod.Namespace, pod.Name, err) + klog.Errorf("Status update on pod %v/%v aborted: %v", pod.Namespace, pod.Name, err) return false } if err := checkContainerStateTransition(oldStatus.InitContainerStatuses, status.InitContainerStatuses, pod.Spec.RestartPolicy); err != nil { - glog.Errorf("Status update on pod %v/%v aborted: %v", pod.Namespace, pod.Name, err) + klog.Errorf("Status update on pod %v/%v aborted: %v", pod.Namespace, pod.Name, err) return false } @@ -361,7 +361,7 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp // The intent here is to prevent concurrent updates to a pod's status from // clobbering each other so the phase of a pod progresses monotonically. if isCached && isPodStatusByKubeletEqual(&cachedStatus.status, &status) && !forceUpdate { - glog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status) + klog.V(3).Infof("Ignoring same status for pod %q, status: %+v", format.Pod(pod), status) return false // No new status. 
} @@ -375,13 +375,13 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp select { case m.podStatusChannel <- podStatusSyncRequest{pod.UID, newStatus}: - glog.V(5).Infof("Status Manager: adding pod: %q, with status: (%q, %v) to podStatusChannel", + klog.V(5).Infof("Status Manager: adding pod: %q, with status: (%q, %v) to podStatusChannel", pod.UID, newStatus.version, newStatus.status) return true default: // Let the periodic syncBatch handle the update if the channel is full. // We can't block, since we hold the mutex lock. - glog.V(4).Infof("Skipping the status update for pod %q for now because the channel is full; status: %+v", + klog.V(4).Infof("Skipping the status update for pod %q for now because the channel is full; status: %+v", format.Pod(pod), status) return false } @@ -415,7 +415,7 @@ func (m *manager) RemoveOrphanedStatuses(podUIDs map[types.UID]bool) { defer m.podStatusesLock.Unlock() for key := range m.podStatuses { if _, ok := podUIDs[key]; !ok { - glog.V(5).Infof("Removing %q from status map.", key) + klog.V(5).Infof("Removing %q from status map.", key) delete(m.podStatuses, key) } } @@ -442,7 +442,7 @@ func (m *manager) syncBatch() { syncedUID := kubetypes.MirrorPodUID(uid) if mirrorUID, ok := podToMirror[kubetypes.ResolvedPodUID(uid)]; ok { if mirrorUID == "" { - glog.V(5).Infof("Static pod %q (%s/%s) does not have a corresponding mirror pod; skipping", uid, status.podName, status.podNamespace) + klog.V(5).Infof("Static pod %q (%s/%s) does not have a corresponding mirror pod; skipping", uid, status.podName, status.podNamespace) continue } syncedUID = mirrorUID @@ -461,7 +461,7 @@ func (m *manager) syncBatch() { }() for _, update := range updatedStatuses { - glog.V(5).Infof("Status Manager: syncPod in syncbatch. pod UID: %q", update.podUID) + klog.V(5).Infof("Status Manager: syncPod in syncbatch. pod UID: %q", update.podUID) m.syncPod(update.podUID, update.status) } } @@ -469,41 +469,41 @@ func (m *manager) syncBatch() { // syncPod syncs the given status with the API server. The caller must not hold the lock. func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { if !m.needsUpdate(uid, status) { - glog.V(1).Infof("Status for pod %q is up-to-date; skipping", uid) + klog.V(1).Infof("Status for pod %q is up-to-date; skipping", uid) return } // TODO: make me easier to express from client code pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(status.podName, metav1.GetOptions{}) if errors.IsNotFound(err) { - glog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid) + klog.V(3).Infof("Pod %q (%s) does not exist on the server", status.podName, uid) // If the Pod is deleted the status will be cleared in // RemoveOrphanedStatuses, so we just ignore the update here. return } if err != nil { - glog.Warningf("Failed to get status for pod %q: %v", format.PodDesc(status.podName, status.podNamespace, uid), err) + klog.Warningf("Failed to get status for pod %q: %v", format.PodDesc(status.podName, status.podNamespace, uid), err) return } translatedUID := m.podManager.TranslatePodUID(pod.UID) // Type convert original uid just for the purpose of comparison. 
if len(translatedUID) > 0 && translatedUID != kubetypes.ResolvedPodUID(uid) { - glog.V(2).Infof("Pod %q was deleted and then recreated, skipping status update; old UID %q, new UID %q", format.Pod(pod), uid, translatedUID) + klog.V(2).Infof("Pod %q was deleted and then recreated, skipping status update; old UID %q, new UID %q", format.Pod(pod), uid, translatedUID) m.deletePodStatus(uid) return } oldStatus := pod.Status.DeepCopy() newPod, patchBytes, err := statusutil.PatchPodStatus(m.kubeClient, pod.Namespace, pod.Name, *oldStatus, mergePodStatus(*oldStatus, status.status)) - glog.V(3).Infof("Patch status for pod %q with %q", format.Pod(pod), patchBytes) + klog.V(3).Infof("Patch status for pod %q with %q", format.Pod(pod), patchBytes) if err != nil { - glog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err) + klog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err) return } pod = newPod - glog.V(3).Infof("Status for pod %q updated successfully: (%d, %+v)", format.Pod(pod), status.version, status.status) + klog.V(3).Infof("Status for pod %q updated successfully: (%d, %+v)", format.Pod(pod), status.version, status.status) m.apiStatusVersions[kubetypes.MirrorPodUID(pod.UID)] = status.version // We don't handle graceful deletion of mirror pods. @@ -513,10 +513,10 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) { deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID)) err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, deleteOptions) if err != nil { - glog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err) + klog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err) return } - glog.V(3).Infof("Pod %q fully terminated and removed from etcd", format.Pod(pod)) + klog.V(3).Infof("Pod %q fully terminated and removed from etcd", format.Pod(pod)) m.deletePodStatus(uid) } } @@ -555,14 +555,14 @@ func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool { // The pod could be a static pod, so we should translate first. pod, ok := m.podManager.GetPodByUID(uid) if !ok { - glog.V(4).Infof("Pod %q has been deleted, no need to reconcile", string(uid)) + klog.V(4).Infof("Pod %q has been deleted, no need to reconcile", string(uid)) return false } // If the pod is a static pod, we should check its mirror pod, because only status in mirror pod is meaningful to us. if kubepod.IsStaticPod(pod) { mirrorPod, ok := m.podManager.GetMirrorPodByPod(pod) if !ok { - glog.V(4).Infof("Static pod %q has no corresponding mirror pod, no need to reconcile", format.Pod(pod)) + klog.V(4).Infof("Static pod %q has no corresponding mirror pod, no need to reconcile", format.Pod(pod)) return false } pod = mirrorPod @@ -576,7 +576,7 @@ func (m *manager) needsReconcile(uid types.UID, status v1.PodStatus) bool { // reconcile is not needed. Just return. 
return false } - glog.V(3).Infof("Pod status is inconsistent with cached status for pod %q, a reconciliation should be triggered:\n %+v", format.Pod(pod), + klog.V(3).Infof("Pod status is inconsistent with cached status for pod %q, a reconciliation should be triggered:\n %+v", format.Pod(pod), diff.ObjectDiff(podStatus, status)) return true diff --git a/pkg/kubelet/token/BUILD b/pkg/kubelet/token/BUILD index 2591a4ec27522..9f7d4db7caa90 100644 --- a/pkg/kubelet/token/BUILD +++ b/pkg/kubelet/token/BUILD @@ -25,7 +25,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/token/token_manager.go b/pkg/kubelet/token/token_manager.go index 75086c86b976e..76819ac72df91 100644 --- a/pkg/kubelet/token/token_manager.go +++ b/pkg/kubelet/token/token_manager.go @@ -24,12 +24,12 @@ import ( "sync" "time" - "github.com/golang/glog" authenticationv1 "k8s.io/api/authentication/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" ) const ( @@ -90,7 +90,7 @@ func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticat case m.expired(ctr): return nil, fmt.Errorf("token %s expired and refresh failed: %v", key, err) default: - glog.Errorf("couldn't update token %s: %v", key, err) + klog.Errorf("couldn't update token %s: %v", key, err) return ctr, nil } } @@ -142,7 +142,7 @@ func (m *Manager) expired(t *authenticationv1.TokenRequest) bool { // ttl, or if the token is older than 24 hours. 
func (m *Manager) requiresRefresh(tr *authenticationv1.TokenRequest) bool { if tr.Spec.ExpirationSeconds == nil { - glog.Errorf("expiration seconds was nil for tr: %#v", tr) + klog.Errorf("expiration seconds was nil for tr: %#v", tr) return false } now := m.clock.Now() diff --git a/pkg/kubelet/util/BUILD b/pkg/kubelet/util/BUILD index 01847254a9870..4165864e0a307 100644 --- a/pkg/kubelet/util/BUILD +++ b/pkg/kubelet/util/BUILD @@ -45,16 +45,16 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:darwin": [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:freebsd": [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ "//vendor/github.com/Microsoft/go-winio:go_default_library", diff --git a/pkg/kubelet/util/pluginwatcher/BUILD b/pkg/kubelet/util/pluginwatcher/BUILD index 0301c6b95b39e..20c98231bbf91 100644 --- a/pkg/kubelet/util/pluginwatcher/BUILD +++ b/pkg/kubelet/util/pluginwatcher/BUILD @@ -16,10 +16,10 @@ go_library( "//pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2:go_default_library", "//pkg/util/filesystem:go_default_library", "//vendor/github.com/fsnotify/fsnotify:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pkg/errors:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -30,6 +30,7 @@ go_test( deps = [ "//pkg/kubelet/apis/pluginregistration/v1alpha1:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/util/pluginwatcher/example_handler.go b/pkg/kubelet/util/pluginwatcher/example_handler.go index 8f9cac5d9bde2..fc14dfe955902 100644 --- a/pkg/kubelet/util/pluginwatcher/example_handler.go +++ b/pkg/kubelet/util/pluginwatcher/example_handler.go @@ -23,8 +23,8 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/net/context" + "k8s.io/klog" v1beta1 "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1" v1beta2 "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta2" @@ -117,7 +117,7 @@ func (p *exampleHandler) EventChan(pluginName string) chan examplePluginEvent { } func (p *exampleHandler) SendEvent(pluginName string, event examplePluginEvent) { - glog.V(2).Infof("Sending %v for plugin %s over chan %v", event, pluginName, p.eventChans[pluginName]) + klog.V(2).Infof("Sending %v for plugin %s over chan %v", event, pluginName, p.eventChans[pluginName]) p.eventChans[pluginName] <- event } diff --git a/pkg/kubelet/util/pluginwatcher/example_plugin.go b/pkg/kubelet/util/pluginwatcher/example_plugin.go index 694b366120258..17a64168c4336 100644 --- a/pkg/kubelet/util/pluginwatcher/example_plugin.go +++ b/pkg/kubelet/util/pluginwatcher/example_plugin.go @@ -24,9 +24,9 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/net/context" "google.golang.org/grpc" + 
"k8s.io/klog" registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1" v1beta1 "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher/example_plugin_apis/v1beta1" @@ -49,7 +49,7 @@ type pluginServiceV1Beta1 struct { } func (s *pluginServiceV1Beta1) GetExampleInfo(ctx context.Context, rqt *v1beta1.ExampleRequest) (*v1beta1.ExampleResponse, error) { - glog.Infof("GetExampleInfo v1beta1field: %s", rqt.V1Beta1Field) + klog.Infof("GetExampleInfo v1beta1field: %s", rqt.V1Beta1Field) return &v1beta1.ExampleResponse{}, nil } @@ -62,7 +62,7 @@ type pluginServiceV1Beta2 struct { } func (s *pluginServiceV1Beta2) GetExampleInfo(ctx context.Context, rqt *v1beta2.ExampleRequest) (*v1beta2.ExampleResponse, error) { - glog.Infof("GetExampleInfo v1beta2_field: %s", rqt.V1Beta2Field) + klog.Infof("GetExampleInfo v1beta2_field: %s", rqt.V1Beta2Field) return &v1beta2.ExampleResponse{}, nil } @@ -97,7 +97,7 @@ func (e *examplePlugin) GetInfo(ctx context.Context, req *registerapi.InfoReques } func (e *examplePlugin) NotifyRegistrationStatus(ctx context.Context, status *registerapi.RegistrationStatus) (*registerapi.RegistrationStatusResponse, error) { - glog.Errorf("Registration is: %v\n", status) + klog.Errorf("Registration is: %v\n", status) if e.registrationStatus != nil { e.registrationStatus <- *status @@ -108,13 +108,13 @@ func (e *examplePlugin) NotifyRegistrationStatus(ctx context.Context, status *re // Serve starts a pluginwatcher server and one or more of the plugin services func (e *examplePlugin) Serve(services ...string) error { - glog.Infof("starting example server at: %s\n", e.endpoint) + klog.Infof("starting example server at: %s\n", e.endpoint) lis, err := net.Listen("unix", e.endpoint) if err != nil { return err } - glog.Infof("example server started at: %s\n", e.endpoint) + klog.Infof("example server started at: %s\n", e.endpoint) e.grpcServer = grpc.NewServer() // Registers kubelet plugin watcher api. @@ -141,7 +141,7 @@ func (e *examplePlugin) Serve(services ...string) error { defer e.wg.Done() // Blocking call to accept incoming connections. 
if err := e.grpcServer.Serve(lis); err != nil { - glog.Errorf("example server stopped serving: %v", err) + klog.Errorf("example server stopped serving: %v", err) } }() @@ -149,7 +149,7 @@ func (e *examplePlugin) Serve(services ...string) error { } func (e *examplePlugin) Stop() error { - glog.Infof("Stopping example server at: %s\n", e.endpoint) + klog.Infof("Stopping example server at: %s\n", e.endpoint) e.grpcServer.Stop() c := make(chan struct{}) diff --git a/pkg/kubelet/util/pluginwatcher/plugin_watcher.go b/pkg/kubelet/util/pluginwatcher/plugin_watcher.go index 88dda9b7c02d5..15f49a6acd226 100644 --- a/pkg/kubelet/util/pluginwatcher/plugin_watcher.go +++ b/pkg/kubelet/util/pluginwatcher/plugin_watcher.go @@ -25,10 +25,10 @@ import ( "time" "github.com/fsnotify/fsnotify" - "github.com/golang/glog" "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" + "k8s.io/klog" registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1" utilfs "k8s.io/kubernetes/pkg/util/filesystem" @@ -82,7 +82,7 @@ func (w *Watcher) getHandler(pluginType string) (PluginHandler, bool) { // Start watches for the creation of plugin sockets at the path func (w *Watcher) Start() error { - glog.V(2).Infof("Plugin Watcher Start at %s", w.path) + klog.V(2).Infof("Plugin Watcher Start at %s", w.path) w.stopCh = make(chan interface{}) // Creating the directory to be watched if it doesn't exist yet, @@ -112,12 +112,12 @@ func (w *Watcher) Start() error { if event.Op&fsnotify.Create == fsnotify.Create { err := w.handleCreateEvent(event) if err != nil { - glog.Errorf("error %v when handling create event: %s", err, event) + klog.Errorf("error %v when handling create event: %s", err, event) } } else if event.Op&fsnotify.Remove == fsnotify.Remove { err := w.handleDeleteEvent(event) if err != nil { - glog.Errorf("error %v when handling delete event: %s", err, event) + klog.Errorf("error %v when handling delete event: %s", err, event) } } return @@ -125,7 +125,7 @@ func (w *Watcher) Start() error { continue case err := <-fsWatcher.Errors: if err != nil { - glog.Errorf("fsWatcher received error: %v", err) + klog.Errorf("fsWatcher received error: %v", err) } continue case <-w.stopCh: @@ -165,7 +165,7 @@ func (w *Watcher) Stop() error { } func (w *Watcher) init() error { - glog.V(4).Infof("Ensuring Plugin directory at %s ", w.path) + klog.V(4).Infof("Ensuring Plugin directory at %s ", w.path) if err := w.fs.MkdirAll(w.path, 0755); err != nil { return fmt.Errorf("error (re-)creating root %s: %v", w.path, err) @@ -203,7 +203,7 @@ func (w *Watcher) traversePluginDir(dir string) error { } }() default: - glog.V(5).Infof("Ignoring file %s with mode %v", path, mode) + klog.V(5).Infof("Ignoring file %s with mode %v", path, mode) } return nil @@ -212,7 +212,7 @@ func (w *Watcher) traversePluginDir(dir string) error { // Handle filesystem notify event. 
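
// A compact, hypothetical sketch of the fsnotify loop this watcher runs:
// watch a plugin directory and react to socket creation and removal. The
// path and log messages are illustrative only.
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	fsWatcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatalf("failed to start fsnotify watcher: %v", err)
	}
	defer fsWatcher.Close()

	if err := fsWatcher.Add("/var/lib/kubelet/plugins"); err != nil {
		log.Fatalf("failed to watch plugin dir: %v", err)
	}

	for {
		select {
		case event := <-fsWatcher.Events:
			switch {
			case event.Op&fsnotify.Create == fsnotify.Create:
				log.Printf("plugin socket created: %s", event.Name)
			case event.Op&fsnotify.Remove == fsnotify.Remove:
				log.Printf("plugin socket removed: %s", event.Name)
			}
		case err := <-fsWatcher.Errors:
			log.Printf("fsWatcher received error: %v", err)
		}
	}
}
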
func (w *Watcher) handleCreateEvent(event fsnotify.Event) error { - glog.V(6).Infof("Handling create event: %v", event) + klog.V(6).Infof("Handling create event: %v", event) fi, err := os.Stat(event.Name) if err != nil { @@ -220,7 +220,7 @@ func (w *Watcher) handleCreateEvent(event fsnotify.Event) error { } if strings.HasPrefix(fi.Name(), ".") { - glog.Errorf("Ignoring file: %s", fi.Name()) + klog.Errorf("Ignoring file: %s", fi.Name()) return nil } @@ -286,7 +286,7 @@ func (w *Watcher) handlePluginRegistration(socketPath string) error { } func (w *Watcher) handleDeleteEvent(event fsnotify.Event) error { - glog.V(6).Infof("Handling delete event: %v", event) + klog.V(6).Infof("Handling delete event: %v", event) plugin, ok := w.getPlugin(event.Name) if !ok { @@ -304,7 +304,7 @@ func (w *Watcher) handleDeleteEvent(event fsnotify.Event) error { // When ReRegistering, the new plugin will have removed the current mapping (map[socketPath] = plugin) and replaced // it with it's own socketPath. if _, ok = w.getPlugin(event.Name); !ok { - glog.V(2).Infof("A newer plugin watcher has been registered for plugin %v, dropping DeRegister call", plugin) + klog.V(2).Infof("A newer plugin watcher has been registered for plugin %v, dropping DeRegister call", plugin) return nil } @@ -313,7 +313,7 @@ func (w *Watcher) handleDeleteEvent(event fsnotify.Event) error { return fmt.Errorf("could not find handler %s for plugin %s at path %s", plugin.pluginType, plugin.pluginName, event.Name) } - glog.V(2).Infof("DeRegistering plugin %v at path %s", plugin, event.Name) + klog.V(2).Infof("DeRegistering plugin %v at path %s", plugin, event.Name) w.deRegisterPlugin(event.Name, plugin.pluginType, plugin.pluginName) h.DeRegisterPlugin(plugin.pluginName) diff --git a/pkg/kubelet/util/pluginwatcher/plugin_watcher_test.go b/pkg/kubelet/util/pluginwatcher/plugin_watcher_test.go index fdcb8b705bcd1..a5e8576c3ed0c 100644 --- a/pkg/kubelet/util/pluginwatcher/plugin_watcher_test.go +++ b/pkg/kubelet/util/pluginwatcher/plugin_watcher_test.go @@ -27,6 +27,7 @@ import ( "github.com/stretchr/testify/require" + "k8s.io/klog" registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1" ) @@ -39,6 +40,7 @@ var ( func init() { var logLevel string + klog.InitFlags(flag.CommandLine) flag.Set("alsologtostderr", fmt.Sprintf("%t", true)) flag.StringVar(&logLevel, "logLevel", "6", "test") flag.Lookup("v").Value.Set(logLevel) diff --git a/pkg/kubelet/util/util_unix.go b/pkg/kubelet/util/util_unix.go index d12d51a1aefdc..fe4483095be93 100644 --- a/pkg/kubelet/util/util_unix.go +++ b/pkg/kubelet/util/util_unix.go @@ -25,8 +25,8 @@ import ( "os" "time" - "github.com/golang/glog" "golang.org/x/sys/unix" + "k8s.io/klog" ) const ( @@ -73,7 +73,7 @@ func parseEndpointWithFallbackProtocol(endpoint string, fallbackProtocol string) fallbackEndpoint := fallbackProtocol + "://" + endpoint protocol, addr, err = parseEndpoint(fallbackEndpoint) if err == nil { - glog.Warningf("Using %q as endpoint is deprecated, please consider using full url format %q.", endpoint, fallbackEndpoint) + klog.Warningf("Using %q as endpoint is deprecated, please consider using full url format %q.", endpoint, fallbackEndpoint) } } return diff --git a/pkg/kubelet/volume_host.go b/pkg/kubelet/volume_host.go index 35b7d78460109..425afebdbf226 100644 --- a/pkg/kubelet/volume_host.go +++ b/pkg/kubelet/volume_host.go @@ -21,7 +21,7 @@ import ( "net" "runtime" - "github.com/golang/glog" + "k8s.io/klog" authenticationv1 "k8s.io/api/authentication/v1" "k8s.io/api/core/v1" @@ 
-162,7 +162,7 @@ func (kvh *kubeletVolumeHost) GetCloudProvider() cloudprovider.Interface { func (kvh *kubeletVolumeHost) GetMounter(pluginName string) mount.Interface { exec, err := kvh.getMountExec(pluginName) if err != nil { - glog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error()) + klog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error()) // Use the default mounter exec = nil } @@ -223,7 +223,7 @@ func (kvh *kubeletVolumeHost) GetEventRecorder() record.EventRecorder { func (kvh *kubeletVolumeHost) GetExec(pluginName string) mount.Exec { exec, err := kvh.getMountExec(pluginName) if err != nil { - glog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error()) + klog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error()) // Use the default exec exec = nil } @@ -238,7 +238,7 @@ func (kvh *kubeletVolumeHost) GetExec(pluginName string) mount.Exec { // os.Exec should be used. func (kvh *kubeletVolumeHost) getMountExec(pluginName string) (mount.Exec, error) { if !utilfeature.DefaultFeatureGate.Enabled(features.MountContainers) { - glog.V(5).Infof("using default mounter/exec for %s", pluginName) + klog.V(5).Infof("using default mounter/exec for %s", pluginName) return nil, nil } @@ -248,10 +248,10 @@ func (kvh *kubeletVolumeHost) getMountExec(pluginName string) (mount.Exec, error } if pod == nil { // Use default mounter/exec for this plugin - glog.V(5).Infof("using default mounter/exec for %s", pluginName) + klog.V(5).Infof("using default mounter/exec for %s", pluginName) return nil, nil } - glog.V(5).Infof("using container %s/%s/%s to execute mount utilities for %s", pod.Namespace, pod.Name, container, pluginName) + klog.V(5).Infof("using container %s/%s/%s to execute mount utilities for %s", pod.Namespace, pod.Name, container, pluginName) return &containerExec{ pod: pod, containerName: container, @@ -271,6 +271,6 @@ var _ mount.Exec = &containerExec{} func (e *containerExec) Run(cmd string, args ...string) ([]byte, error) { cmdline := append([]string{cmd}, args...) 
- glog.V(5).Infof("Exec mounter running in pod %s/%s/%s: %v", e.pod.Namespace, e.pod.Name, e.containerName, cmdline) + klog.V(5).Infof("Exec mounter running in pod %s/%s/%s: %v", e.pod.Namespace, e.pod.Name, e.containerName, cmdline) return e.kl.RunInContainer(container.GetPodFullName(e.pod), e.pod.UID, e.containerName, cmdline) } diff --git a/pkg/kubelet/volumemanager/BUILD b/pkg/kubelet/volumemanager/BUILD index 855d1b65700dd..269eb189192db 100644 --- a/pkg/kubelet/volumemanager/BUILD +++ b/pkg/kubelet/volumemanager/BUILD @@ -33,7 +33,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/volumemanager/cache/BUILD b/pkg/kubelet/volumemanager/cache/BUILD index e9861638ab64f..3bc1a3886bbe3 100644 --- a/pkg/kubelet/volumemanager/cache/BUILD +++ b/pkg/kubelet/volumemanager/cache/BUILD @@ -22,7 +22,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index 82aef86c72024..629a69d7a49c6 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -24,7 +24,7 @@ import ( "fmt" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -407,7 +407,7 @@ func (asw *actualStateOfWorld) addVolume( } else { // If volume object already exists, update the fields such as device path volumeObj.devicePath = devicePath - glog.V(2).Infof("Volume %q is already added to attachedVolume list, update device path %q", + klog.V(2).Infof("Volume %q is already added to attachedVolume list, update device path %q", volumeName, devicePath) } @@ -476,7 +476,7 @@ func (asw *actualStateOfWorld) MarkVolumeAsResized( volumeName) } - glog.V(5).Infof("Volume %s(OuterVolumeSpecName %s) of pod %s has been resized", + klog.V(5).Infof("Volume %s(OuterVolumeSpecName %s) of pod %s has been resized", volumeName, podObj.outerVolumeSpecName, podName) podObj.fsResizeRequired = false asw.attachedVolumes[volumeName].mountedPods[podName] = podObj @@ -497,7 +497,7 @@ func (asw *actualStateOfWorld) MarkRemountRequired( asw.volumePluginMgr.FindPluginBySpec(podObj.volumeSpec) if err != nil || volumePlugin == nil { // Log and continue processing - glog.Errorf( + klog.Errorf( "MarkRemountRequired failed to FindPluginBySpec for pod %q (podUid %q) volume: %q (volSpecName: %q)", podObj.podName, podObj.podUID, @@ -521,13 +521,13 @@ func (asw *actualStateOfWorld) MarkFSResizeRequired( defer asw.Unlock() volumeObj, exist := asw.attachedVolumes[volumeName] if !exist { - glog.Warningf("MarkFSResizeRequired for volume %s failed as volume not exist", volumeName) + klog.Warningf("MarkFSResizeRequired for volume %s failed as volume not exist", volumeName) return } podObj, exist := volumeObj.mountedPods[podName] if !exist { - glog.Warningf("MarkFSResizeRequired for volume %s failed "+ + klog.Warningf("MarkFSResizeRequired for volume %s failed "+ "as pod(%s) not exist", 
volumeName, podName) return } @@ -536,7 +536,7 @@ func (asw *actualStateOfWorld) MarkFSResizeRequired( asw.volumePluginMgr.FindExpandablePluginBySpec(podObj.volumeSpec) if err != nil || volumePlugin == nil { // Log and continue processing - glog.Errorf( + klog.Errorf( "MarkFSResizeRequired failed to find expandable plugin for pod %q volume: %q (volSpecName: %q)", podObj.podName, volumeObj.volumeName, @@ -546,7 +546,7 @@ func (asw *actualStateOfWorld) MarkFSResizeRequired( if volumePlugin.RequiresFSResize() { if !podObj.fsResizeRequired { - glog.V(3).Infof("PVC volume %s(OuterVolumeSpecName %s) of pod %s requires file system resize", + klog.V(3).Infof("PVC volume %s(OuterVolumeSpecName %s) of pod %s requires file system resize", volumeName, podObj.outerVolumeSpecName, podName) podObj.fsResizeRequired = true } diff --git a/pkg/kubelet/volumemanager/metrics/BUILD b/pkg/kubelet/volumemanager/metrics/BUILD index 6d44fabf88b3d..4a1da39bf30fc 100644 --- a/pkg/kubelet/volumemanager/metrics/BUILD +++ b/pkg/kubelet/volumemanager/metrics/BUILD @@ -8,8 +8,8 @@ go_library( deps = [ "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/volume:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/volumemanager/metrics/metrics.go b/pkg/kubelet/volumemanager/metrics/metrics.go index c428401e08ffb..11eb1f5d69f9b 100644 --- a/pkg/kubelet/volumemanager/metrics/metrics.go +++ b/pkg/kubelet/volumemanager/metrics/metrics.go @@ -19,8 +19,8 @@ package metrics import ( "sync" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/volume" ) @@ -85,7 +85,7 @@ func (c *totalVolumesCollector) Collect(ch chan<- prometheus.Metric) { pluginName, stateName) if err != nil { - glog.Warningf("Failed to create metric : %v", err) + klog.Warningf("Failed to create metric : %v", err) } ch <- metric } diff --git a/pkg/kubelet/volumemanager/populator/BUILD b/pkg/kubelet/volumemanager/populator/BUILD index 6a370b5124fe4..1e2b7d2dfd2fc 100644 --- a/pkg/kubelet/volumemanager/populator/BUILD +++ b/pkg/kubelet/volumemanager/populator/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index 3f1b71a05ef87..81bf6e60e91a0 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -25,7 +25,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -127,7 +127,7 @@ type processedPods struct { func (dswp *desiredStateOfWorldPopulator) Run(sourcesReady config.SourcesReady, stopCh <-chan struct{}) { // Wait for the completion of a loop that started after sources are all ready, then set hasAddedPods accordingly - glog.Infof("Desired state populator starts to run") + klog.Infof("Desired state populator starts to run") 
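
// A small, hypothetical sketch of the wait.PollUntil idiom used just below:
// run the loop body every interval and resolve the call once all sources
// report ready, while still honoring stopCh. Durations here are arbitrary.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})
	ready := make(chan struct{})
	go func() { time.Sleep(300 * time.Millisecond); close(ready) }()

	_ = wait.PollUntil(100*time.Millisecond, func() (bool, error) {
		fmt.Println("populator loop iteration")
		select {
		case <-ready:
			return true, nil // all sources ready: stop polling
		default:
			return false, nil // keep polling
		}
	}, stopCh)
	fmt.Println("sources ready")
}
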
wait.PollUntil(dswp.loopSleepDuration, func() (bool, error) { done := sourcesReady.AllReady() dswp.populatorLoop() @@ -159,7 +159,7 @@ func (dswp *desiredStateOfWorldPopulator) populatorLoop() { // findAndRemoveDeletedPods() is called independently of the main // populator loop. if time.Since(dswp.timeOfLastGetPodStatus) < dswp.getPodStatusRetryDuration { - glog.V(5).Infof( + klog.V(5).Infof( "Skipping findAndRemoveDeletedPods(). Not permitted until %v (getPodStatusRetryDuration %v).", dswp.timeOfLastGetPodStatus.Add(dswp.getPodStatusRetryDuration), dswp.getPodStatusRetryDuration) @@ -230,7 +230,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { var getPodsErr error runningPods, getPodsErr = dswp.kubeContainerRuntime.GetPods(false) if getPodsErr != nil { - glog.Errorf( + klog.Errorf( "kubeContainerRuntime.findAndRemoveDeletedPods returned error %v.", getPodsErr) continue @@ -252,17 +252,17 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { } if runningContainers { - glog.V(4).Infof( + klog.V(4).Infof( "Pod %q has been removed from pod manager. However, it still has one or more containers in the non-exited state. Therefore, it will not be removed from volume manager.", format.Pod(volumeToMount.Pod)) continue } if !dswp.actualStateOfWorld.VolumeExists(volumeToMount.VolumeName) && podExists { - glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Actual state has not yet has this information skip removing volume from desired state", "")) + klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Actual state has not yet has this information skip removing volume from desired state", "")) continue } - glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Removing volume from desired state", "")) + klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Removing volume from desired state", "")) dswp.desiredStateOfWorld.DeletePodFromVolume( volumeToMount.PodName, volumeToMount.VolumeName) @@ -293,7 +293,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes( pvc, volumeSpec, volumeGidValue, err := dswp.createVolumeSpec(podVolume, pod.Name, pod.Namespace, mountsMap, devicesMap) if err != nil { - glog.Errorf( + klog.Errorf( "Error processing volume %q for pod %q: %v", podVolume.Name, format.Pod(pod), @@ -306,7 +306,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes( _, err = dswp.desiredStateOfWorld.AddPodToVolume( uniquePodName, pod, volumeSpec, podVolume.Name, volumeGidValue) if err != nil { - glog.Errorf( + klog.Errorf( "Failed to add volume %q (specName: %q) for pod %q to desiredStateOfWorld. 
err=%v", podVolume.Name, volumeSpec.Name(), @@ -315,7 +315,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes( allVolumesAdded = false } - glog.V(4).Infof( + klog.V(4).Infof( "Added volume %q (volSpec=%q) for pod %q to desired state.", podVolume.Name, volumeSpec.Name(), @@ -365,12 +365,12 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize( } fsVolume, err := util.CheckVolumeModeFilesystem(volumeSpec) if err != nil { - glog.Errorf("Check volume mode failed for volume %s(OuterVolumeSpecName %s): %v", + klog.Errorf("Check volume mode failed for volume %s(OuterVolumeSpecName %s): %v", uniqueVolumeName, podVolume.Name, err) return } if !fsVolume { - glog.V(5).Infof("Block mode volume needn't to check file system resize request") + klog.V(5).Infof("Block mode volume needn't to check file system resize request") return } if processedVolumesForFSResize.Has(string(uniqueVolumeName)) { @@ -380,7 +380,7 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize( } if mountedReadOnlyByPod(podVolume, pod) { // This volume is used as read only by this pod, we don't perform resize for read only volumes. - glog.V(5).Infof("Skip file system resize check for volume %s in pod %s/%s "+ + klog.V(5).Infof("Skip file system resize check for volume %s in pod %s/%s "+ "as the volume is mounted as readonly", podVolume.Name, pod.Namespace, pod.Name) return } @@ -474,7 +474,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( podVolume v1.Volume, podName string, podNamespace string, mountsMap map[string]bool, devicesMap map[string]bool) (*v1.PersistentVolumeClaim, *volume.Spec, string, error) { if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil { - glog.V(5).Infof( + klog.V(5).Infof( "Found PVC, ClaimName: %q/%q", podNamespace, pvcSource.ClaimName) @@ -491,7 +491,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( } pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID - glog.V(5).Infof( + klog.V(5).Infof( "Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q", podNamespace, pvcSource.ClaimName, @@ -509,7 +509,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec( err) } - glog.V(5).Infof( + klog.V(5).Infof( "Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)", volumeSpec.Name(), pvName, diff --git a/pkg/kubelet/volumemanager/reconciler/BUILD b/pkg/kubelet/volumemanager/reconciler/BUILD index 586067da32e22..2a264aa4ee3d8 100644 --- a/pkg/kubelet/volumemanager/reconciler/BUILD +++ b/pkg/kubelet/volumemanager/reconciler/BUILD @@ -29,7 +29,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index 2ffb5c99fa713..1b6bbbfbe778c 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -26,13 +26,13 @@ import ( "path" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" 
"k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" @@ -151,7 +151,7 @@ func (rc *reconciler) reconciliationLoopFunc() func() { // Otherwise, the reconstruct process may clean up pods' volumes that are still in use because // desired state of world does not contain a complete list of pods. if rc.populatorHasAddedPods() && !rc.StatesHasBeenSynced() { - glog.Infof("Reconciler: start to sync state") + klog.Infof("Reconciler: start to sync state") rc.sync() } } @@ -167,7 +167,7 @@ func (rc *reconciler) reconcile() { for _, mountedVolume := range rc.actualStateOfWorld.GetMountedVolumes() { if !rc.desiredStateOfWorld.PodExistsInVolume(mountedVolume.PodName, mountedVolume.VolumeName) { // Volume is mounted, unmount it - glog.V(5).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", "")) + klog.V(5).Infof(mountedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountVolume", "")) err := rc.operationExecutor.UnmountVolume( mountedVolume.MountedVolume, rc.actualStateOfWorld, rc.kubeletPodsDir) if err != nil && @@ -175,10 +175,10 @@ func (rc *reconciler) reconcile() { !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. - glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) + klog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } if err == nil { - glog.Infof(mountedVolume.GenerateMsgDetailed("operationExecutor.UnmountVolume started", "")) + klog.Infof(mountedVolume.GenerateMsgDetailed("operationExecutor.UnmountVolume started", "")) } } } @@ -191,7 +191,7 @@ func (rc *reconciler) reconcile() { if rc.controllerAttachDetachEnabled || !volumeToMount.PluginIsAttachable { // Volume is not attached (or doesn't implement attacher), kubelet attach is disabled, wait // for controller to finish attaching volume. - glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", "")) + klog.V(5).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", "")) err := rc.operationExecutor.VerifyControllerAttachedVolume( volumeToMount.VolumeToMount, rc.nodeName, @@ -201,10 +201,10 @@ func (rc *reconciler) reconcile() { !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. 
- glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.VerifyControllerAttachedVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) + klog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.VerifyControllerAttachedVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } if err == nil { - glog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.VerifyControllerAttachedVolume started", "")) + klog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.VerifyControllerAttachedVolume started", "")) } } else { // Volume is not attached to node, kubelet attach is enabled, volume implements an attacher, @@ -214,17 +214,17 @@ func (rc *reconciler) reconcile() { VolumeSpec: volumeToMount.VolumeSpec, NodeName: rc.nodeName, } - glog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", "")) + klog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", "")) err := rc.operationExecutor.AttachVolume(volumeToAttach, rc.actualStateOfWorld) if err != nil && !nestedpendingoperations.IsAlreadyExists(err) && !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. - glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.AttachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) + klog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.AttachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } if err == nil { - glog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.AttachVolume started", "")) + klog.Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.AttachVolume started", "")) } } } else if !volMounted || cache.IsRemountRequiredError(err) { @@ -234,7 +234,7 @@ func (rc *reconciler) reconcile() { if isRemount { remountingLogStr = "Volume is already mounted to pod, but remount was requested." } - glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr)) + klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.MountVolume", remountingLogStr)) err := rc.operationExecutor.MountVolume( rc.waitForAttachTimeout, volumeToMount.VolumeToMount, @@ -245,18 +245,18 @@ func (rc *reconciler) reconcile() { !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. 
- glog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.MountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) + klog.Errorf(volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.MountVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } if err == nil { if remountingLogStr == "" { - glog.V(1).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr)) + klog.V(1).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr)) } else { - glog.V(5).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr)) + klog.V(5).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.MountVolume started", remountingLogStr)) } } } else if cache.IsFSResizeRequiredError(err) && utilfeature.DefaultFeatureGate.Enabled(features.ExpandInUsePersistentVolumes) { - glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.ExpandVolumeFSWithoutUnmounting", "")) + klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.ExpandVolumeFSWithoutUnmounting", "")) err := rc.operationExecutor.ExpandVolumeFSWithoutUnmounting( volumeToMount.VolumeToMount, rc.actualStateOfWorld) @@ -265,10 +265,10 @@ func (rc *reconciler) reconcile() { !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. - glog.Errorf(volumeToMount.GenerateErrorDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting failed", err).Error()) + klog.Errorf(volumeToMount.GenerateErrorDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting failed", err).Error()) } if err == nil { - glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting started", "")) + klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("operationExecutor.ExpandVolumeFSWithoutUnmounting started", "")) } } } @@ -280,7 +280,7 @@ func (rc *reconciler) reconcile() { !rc.operationExecutor.IsOperationPending(attachedVolume.VolumeName, nestedpendingoperations.EmptyUniquePodName) { if attachedVolume.GloballyMounted { // Volume is globally mounted to device, unmount it - glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", "")) + klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.UnmountDevice", "")) err := rc.operationExecutor.UnmountDevice( attachedVolume.AttachedVolume, rc.actualStateOfWorld, rc.mounter) if err != nil && @@ -288,20 +288,20 @@ func (rc *reconciler) reconcile() { !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists and exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. 
- glog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountDevice failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) + klog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.UnmountDevice failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } if err == nil { - glog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.UnmountDevice started", "")) + klog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.UnmountDevice started", "")) } } else { // Volume is attached to node, detach it // Kubelet not responsible for detaching or this volume has a non-attachable volume plugin. if rc.controllerAttachDetachEnabled || !attachedVolume.PluginIsAttachable { rc.actualStateOfWorld.MarkVolumeAsDetached(attachedVolume.VolumeName, attachedVolume.NodeName) - glog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached", fmt.Sprintf("DevicePath %q", attachedVolume.DevicePath))) + klog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached", fmt.Sprintf("DevicePath %q", attachedVolume.DevicePath))) } else { // Only detach if kubelet detach is enabled - glog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", "")) + klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", "")) err := rc.operationExecutor.DetachVolume( attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld) if err != nil && @@ -309,10 +309,10 @@ func (rc *reconciler) reconcile() { !exponentialbackoff.IsExponentialBackoff(err) { // Ignore nestedpendingoperations.IsAlreadyExists && exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. - glog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.DetachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) + klog.Errorf(attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.DetachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } if err == nil { - glog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.DetachVolume started", "")) + klog.Infof(attachedVolume.GenerateMsgDetailed("operationExecutor.DetachVolume started", "")) } } } @@ -369,14 +369,14 @@ func (rc *reconciler) syncStates() { // Get volumes information by reading the pod's directory podVolumes, err := getVolumesFromPodDir(rc.kubeletPodsDir) if err != nil { - glog.Errorf("Cannot get volumes from disk %v", err) + klog.Errorf("Cannot get volumes from disk %v", err) return } volumesNeedUpdate := make(map[v1.UniqueVolumeName]*reconstructedVolume) volumeNeedReport := []v1.UniqueVolumeName{} for _, volume := range podVolumes { if rc.actualStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) { - glog.V(4).Infof("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName) + klog.V(4).Infof("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName) // There is nothing to reconstruct continue } @@ -387,11 +387,11 @@ func (rc *reconciler) syncStates() { if volumeInDSW { // Some pod needs the volume, don't clean it up and hope that // reconcile() calls SetUp and reconstructs the volume in ASW. 
- glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName) + klog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName) continue } // No pod needs the volume. - glog.Warningf("Could not construct volume information, cleanup the mounts. (pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err) + klog.Warningf("Could not construct volume information, cleanup the mounts. (pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err) rc.cleanupMounts(volume) continue } @@ -402,14 +402,14 @@ func (rc *reconciler) syncStates() { // this new kubelet so reconcile() calls SetUp and re-mounts the // volume if it's necessary. volumeNeedReport = append(volumeNeedReport, reconstructedVolume.volumeName) - glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), marking as InUse", volume.volumeSpecName, volume.podName) + klog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), marking as InUse", volume.volumeSpecName, volume.podName) continue } // There is no pod that uses the volume. if rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, nestedpendingoperations.EmptyUniquePodName) { - glog.Warning("Volume is in pending operation, skip cleaning up mounts") + klog.Warning("Volume is in pending operation, skip cleaning up mounts") } - glog.V(2).Infof( + klog.V(2).Infof( "Reconciler sync states: could not find pod information in desired state, update it in actual state: %+v", reconstructedVolume) volumesNeedUpdate[reconstructedVolume.volumeName] = reconstructedVolume @@ -417,7 +417,7 @@ func (rc *reconciler) syncStates() { if len(volumesNeedUpdate) > 0 { if err = rc.updateStates(volumesNeedUpdate); err != nil { - glog.Errorf("Error occurred during reconstruct volume from disk: %v", err) + klog.Errorf("Error occurred during reconstruct volume from disk: %v", err) } } if len(volumeNeedReport) > 0 { @@ -426,7 +426,7 @@ func (rc *reconciler) syncStates() { } func (rc *reconciler) cleanupMounts(volume podVolume) { - glog.V(2).Infof("Reconciler sync states: could not find information (PID: %s) (Volume SpecName: %s) in desired state, clean up the mount points", + klog.V(2).Infof("Reconciler sync states: could not find information (PID: %s) (Volume SpecName: %s) in desired state, clean up the mount points", volume.podName, volume.volumeSpecName) mountedVolume := operationexecutor.MountedVolume{ PodName: volume.podName, @@ -439,7 +439,7 @@ func (rc *reconciler) cleanupMounts(volume podVolume) { // to unmount both volume and device in the same routine. 
err := rc.operationExecutor.UnmountVolume(mountedVolume, rc.actualStateOfWorld, rc.kubeletPodsDir) if err != nil { - glog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("volumeHandler.UnmountVolumeHandler for UnmountVolume failed"), err).Error()) + klog.Errorf(mountedVolume.GenerateErrorDetailed(fmt.Sprintf("volumeHandler.UnmountVolumeHandler for UnmountVolume failed"), err).Error()) return } } @@ -557,13 +557,13 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume, func (rc *reconciler) updateDevicePath(volumesNeedUpdate map[v1.UniqueVolumeName]*reconstructedVolume) { node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(string(rc.nodeName), metav1.GetOptions{}) if fetchErr != nil { - glog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr) + klog.Errorf("updateStates in reconciler: could not get node status with error %v", fetchErr) } else { for _, attachedVolume := range node.Status.VolumesAttached { if volume, exists := volumesNeedUpdate[attachedVolume.Name]; exists { volume.devicePath = attachedVolume.DevicePath volumesNeedUpdate[attachedVolume.Name] = volume - glog.V(4).Infof("Update devicePath from node status for volume (%q): %q", attachedVolume.Name, volume.devicePath) + klog.V(4).Infof("Update devicePath from node status for volume (%q): %q", attachedVolume.Name, volume.devicePath) } } } @@ -599,7 +599,7 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re //TODO: the devicePath might not be correct for some volume plugins: see issue #54108 volume.volumeName, volume.volumeSpec, "" /* nodeName */, volume.devicePath) if err != nil { - glog.Errorf("Could not add volume information to actual state of world: %v", err) + klog.Errorf("Could not add volume information to actual state of world: %v", err) continue } err = rc.actualStateOfWorld.MarkVolumeAsMounted( @@ -612,22 +612,22 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*re volume.volumeGidValue, volume.volumeSpec) if err != nil { - glog.Errorf("Could not add pod to volume information to actual state of world: %v", err) + klog.Errorf("Could not add pod to volume information to actual state of world: %v", err) continue } - glog.V(4).Infof("Volume: %s (pod UID %s) is marked as mounted and added into the actual state", volume.volumeName, volume.podName) + klog.V(4).Infof("Volume: %s (pod UID %s) is marked as mounted and added into the actual state", volume.volumeName, volume.podName) if volume.attachablePlugin != nil { deviceMountPath, err := getDeviceMountPath(volume) if err != nil { - glog.Errorf("Could not find device mount path for volume %s", volume.volumeName) + klog.Errorf("Could not find device mount path for volume %s", volume.volumeName) continue } err = rc.actualStateOfWorld.MarkDeviceAsMounted(volume.volumeName, volume.devicePath, deviceMountPath) if err != nil { - glog.Errorf("Could not mark device is mounted to actual state of world: %v", err) + klog.Errorf("Could not mark device is mounted to actual state of world: %v", err) continue } - glog.V(4).Infof("Volume: %s (pod UID %s) is marked device as mounted and added into the actual state", volume.volumeName, volume.podName) + klog.V(4).Infof("Volume: %s (pod UID %s) is marked device as mounted and added into the actual state", volume.volumeName, volume.podName) } } return nil @@ -671,13 +671,13 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) { volumePluginPath := path.Join(volumesDir, pluginName) volumePluginDirs, err 
:= utilfile.ReadDirNoStat(volumePluginPath) if err != nil { - glog.Errorf("Could not read volume plugin directory %q: %v", volumePluginPath, err) + klog.Errorf("Could not read volume plugin directory %q: %v", volumePluginPath, err) continue } unescapePluginName := utilstrings.UnescapeQualifiedNameForDisk(pluginName) for _, volumeName := range volumePluginDirs { mountPath := path.Join(volumePluginPath, volumeName) - glog.V(5).Infof("podName: %v, mount path from volume plugin directory: %v, ", podName, mountPath) + klog.V(5).Infof("podName: %v, mount path from volume plugin directory: %v, ", podName, mountPath) volumes = append(volumes, podVolume{ podName: volumetypes.UniquePodName(podName), volumeSpecName: volumeName, @@ -689,6 +689,6 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) { } } } - glog.V(4).Infof("Get volumes from pod directory %q %+v", podDir, volumes) + klog.V(4).Infof("Get volumes from pod directory %q %+v", podDir, volumes) return volumes, nil } diff --git a/pkg/kubelet/volumemanager/volume_manager.go b/pkg/kubelet/volumemanager/volume_manager.go index 06bdcb884259a..ba3d99d64c8fa 100644 --- a/pkg/kubelet/volumemanager/volume_manager.go +++ b/pkg/kubelet/volumemanager/volume_manager.go @@ -22,7 +22,6 @@ import ( "strconv" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" @@ -30,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" + "k8s.io/klog" "k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/pod" @@ -243,15 +243,15 @@ func (vm *volumeManager) Run(sourcesReady config.SourcesReady, stopCh <-chan str defer runtime.HandleCrash() go vm.desiredStateOfWorldPopulator.Run(sourcesReady, stopCh) - glog.V(2).Infof("The desired_state_of_world populator starts") + klog.V(2).Infof("The desired_state_of_world populator starts") - glog.Infof("Starting Kubelet Volume Manager") + klog.Infof("Starting Kubelet Volume Manager") go vm.reconciler.Run(stopCh) metrics.Register(vm.actualStateOfWorld, vm.desiredStateOfWorld, vm.volumePluginMgr) <-stopCh - glog.Infof("Shutting down Kubelet Volume Manager") + klog.Infof("Shutting down Kubelet Volume Manager") } func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap { @@ -347,7 +347,7 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error { return nil } - glog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod)) + klog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod)) uniquePodName := util.GetUniquePodName(pod) // Some pods expect to have Setup called over and over again to update. 
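
// For reference, a minimal sketch of klog verbosity gating as used throughout
// these hunks (hypothetical main; klog.V(n) only emits when the process runs
// with -v=n or higher):
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // nil registers klog's flags on flag.CommandLine
	flag.Parse()
	defer klog.Flush()

	klog.Infof("Starting Kubelet Volume Manager")                  // always logged
	klog.V(2).Infof("The desired_state_of_world populator starts") // only at -v>=2
	if klog.V(3) {
		// Guard work that is only worth doing when verbose logging is on.
		klog.Infof("Waiting for volumes to attach and mount for pod %q", "ns/pod")
	}
}
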
@@ -380,7 +380,7 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error { unattachedVolumes) } - glog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod)) + klog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod)) return nil } diff --git a/pkg/kubelet/winstats/BUILD b/pkg/kubelet/winstats/BUILD index 731b86303c2f5..25afdd4cfc041 100644 --- a/pkg/kubelet/winstats/BUILD +++ b/pkg/kubelet/winstats/BUILD @@ -15,10 +15,10 @@ go_library( "@io_bazel_rules_go//go/platform:windows": [ "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/github.com/JeffAshton/win_pdh:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/cadvisor/info/v1:go_default_library", "//vendor/github.com/google/cadvisor/info/v2:go_default_library", "//vendor/golang.org/x/sys/windows:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], }), diff --git a/pkg/kubelet/winstats/perfcounter_nodestats.go b/pkg/kubelet/winstats/perfcounter_nodestats.go index 9777d88745a83..51fb15ae93da1 100644 --- a/pkg/kubelet/winstats/perfcounter_nodestats.go +++ b/pkg/kubelet/winstats/perfcounter_nodestats.go @@ -26,10 +26,10 @@ import ( "time" "unsafe" - "github.com/golang/glog" cadvisorapi "github.com/google/cadvisor/info/v1" "golang.org/x/sys/windows" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" ) // MemoryStatusEx is the same as Windows structure MEMORYSTATUSEX @@ -141,19 +141,19 @@ func (p *perfCounterNodeStatsClient) getNodeInfo() nodeInfo { func (p *perfCounterNodeStatsClient) collectMetricsData(cpuCounter, memWorkingSetCounter, memCommittedBytesCounter *perfCounter) { cpuValue, err := cpuCounter.getData() if err != nil { - glog.Errorf("Unable to get cpu perf counter data; err: %v", err) + klog.Errorf("Unable to get cpu perf counter data; err: %v", err) return } memWorkingSetValue, err := memWorkingSetCounter.getData() if err != nil { - glog.Errorf("Unable to get memWorkingSet perf counter data; err: %v", err) + klog.Errorf("Unable to get memWorkingSet perf counter data; err: %v", err) return } memCommittedBytesValue, err := memCommittedBytesCounter.getData() if err != nil { - glog.Errorf("Unable to get memCommittedBytes perf counter data; err: %v", err) + klog.Errorf("Unable to get memCommittedBytes perf counter data; err: %v", err) return } diff --git a/pkg/kubemark/BUILD b/pkg/kubemark/BUILD index d00990bb6ba9a..0eea6adcfe60d 100644 --- a/pkg/kubemark/BUILD +++ b/pkg/kubemark/BUILD @@ -49,7 +49,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//test/utils:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ], diff --git a/pkg/kubemark/controller.go b/pkg/kubemark/controller.go index 42438bac31576..3ae38b2f5ef70 100644 --- a/pkg/kubemark/controller.go +++ b/pkg/kubemark/controller.go @@ -33,7 +33,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/controller" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -125,7 +125,7 @@ func (kubemarkController *KubemarkController) WaitForCacheSync(stopCh chan struc func (kubemarkController *KubemarkController) Run(stopCh chan struct{}) { nodeTemplate, err := kubemarkController.getNodeTemplate() if err != nil { - 
glog.Fatalf("failed to get node template: %s", err) + klog.Fatalf("failed to get node template: %s", err) } kubemarkController.nodeTemplate = nodeTemplate @@ -239,7 +239,7 @@ func (kubemarkController *KubemarkController) addNodeToNodeGroup(nodeGroup strin func (kubemarkController *KubemarkController) RemoveNodeFromNodeGroup(nodeGroup string, node string) error { pod := kubemarkController.getPodByName(node) if pod == nil { - glog.Warningf("Can't delete node %s from nodegroup %s. Node does not exist.", node, nodeGroup) + klog.Warningf("Can't delete node %s from nodegroup %s. Node does not exist.", node, nodeGroup) return nil } if pod.ObjectMeta.Labels[nodeGroupLabel] != nodeGroup { @@ -252,7 +252,7 @@ func (kubemarkController *KubemarkController) RemoveNodeFromNodeGroup(nodeGroup pod.ObjectMeta.Labels["name"], &metav1.DeleteOptions{PropagationPolicy: &policy}) if err == nil { - glog.Infof("marking node %s for deletion", node) + klog.Infof("marking node %s for deletion", node) // Mark node for deletion from kubemark cluster. // Once it becomes unready after replication controller // deletion has been noticed, we will delete it explicitly. @@ -340,7 +340,7 @@ func (kubemarkController *KubemarkController) runNodeCreation(stop <-chan struct kubemarkController.nodeGroupQueueSizeLock.Lock() err := kubemarkController.addNodeToNodeGroup(nodeGroup) if err != nil { - glog.Errorf("failed to add node to node group %s: %v", nodeGroup, err) + klog.Errorf("failed to add node to node group %s: %v", nodeGroup, err) } else { kubemarkController.nodeGroupQueueSize[nodeGroup]-- } @@ -376,7 +376,7 @@ func (kubemarkCluster *kubemarkCluster) removeUnneededNodes(oldObj interface{}, if kubemarkCluster.nodesToDelete[node.Name] { kubemarkCluster.nodesToDelete[node.Name] = false if err := kubemarkCluster.client.CoreV1().Nodes().Delete(node.Name, &metav1.DeleteOptions{}); err != nil { - glog.Errorf("failed to delete node %s from kubemark cluster, err: %v", node.Name, err) + klog.Errorf("failed to delete node %s from kubemark cluster, err: %v", node.Name, err) } } return diff --git a/pkg/kubemark/hollow_kubelet.go b/pkg/kubemark/hollow_kubelet.go index 15bec8a9c92f8..875d84c30033b 100644 --- a/pkg/kubemark/hollow_kubelet.go +++ b/pkg/kubemark/hollow_kubelet.go @@ -37,7 +37,7 @@ import ( "k8s.io/kubernetes/pkg/volume/secret" "k8s.io/kubernetes/test/utils" - "github.com/golang/glog" + "k8s.io/klog" ) type HollowKubelet struct { @@ -93,7 +93,7 @@ func (hk *HollowKubelet) Run() { KubeletFlags: *hk.KubeletFlags, KubeletConfiguration: *hk.KubeletConfiguration, }, hk.KubeletDeps, false); err != nil { - glog.Fatalf("Failed to run HollowKubelet: %v. Exiting.", err) + klog.Fatalf("Failed to run HollowKubelet: %v. 
Exiting.", err) } select {} } @@ -109,7 +109,7 @@ func GetHollowKubeletConfig( testRootDir := utils.MakeTempDirOrDie("hollow-kubelet.", "") podFilePath := utils.MakeTempDirOrDie("static-pods", testRootDir) - glog.Infof("Using %s as root dir for hollow-kubelet", testRootDir) + klog.Infof("Using %s as root dir for hollow-kubelet", testRootDir) // Flags struct f := options.NewKubeletFlags() diff --git a/pkg/kubemark/hollow_proxy.go b/pkg/kubemark/hollow_proxy.go index dcde82c576d89..5e4a7ec8897fb 100644 --- a/pkg/kubemark/hollow_proxy.go +++ b/pkg/kubemark/hollow_proxy.go @@ -35,7 +35,7 @@ import ( utilexec "k8s.io/utils/exec" utilpointer "k8s.io/utils/pointer" - "github.com/golang/glog" + "k8s.io/klog" ) type HollowProxy struct { @@ -133,6 +133,6 @@ func NewHollowProxyOrDie( func (hp *HollowProxy) Run() { if err := hp.ProxyServer.Run(); err != nil { - glog.Fatalf("Error while running proxy: %v\n", err) + klog.Fatalf("Error while running proxy: %v\n", err) } } diff --git a/pkg/master/BUILD b/pkg/master/BUILD index ead2039445e5c..5c35db72d1277 100644 --- a/pkg/master/BUILD +++ b/pkg/master/BUILD @@ -114,8 +114,8 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/master/controller.go b/pkg/master/controller.go index e2e6f24e68638..29c9d2da752e8 100644 --- a/pkg/master/controller.go +++ b/pkg/master/controller.go @@ -21,7 +21,6 @@ import ( "net" "time" - "github.com/golang/glog" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,6 +31,7 @@ import ( genericapiserver "k8s.io/apiserver/pkg/server" utilfeature "k8s.io/apiserver/pkg/util/feature" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/master/reconcilers" "k8s.io/kubernetes/pkg/registry/core/rangeallocation" @@ -83,7 +83,7 @@ type Controller struct { func (c *completedConfig) NewBootstrapController(legacyRESTStorage corerest.LegacyRESTStorage, serviceClient corev1client.ServicesGetter, nsClient corev1client.NamespacesGetter, eventClient corev1client.EventsGetter) *Controller { _, publicServicePort, err := c.GenericConfig.SecureServing.HostPort() if err != nil { - glog.Fatalf("failed to get listener address: %v", err) + klog.Fatalf("failed to get listener address: %v", err) } systemNamespaces := []string{metav1.NamespaceSystem, metav1.NamespacePublic} @@ -144,15 +144,15 @@ func (c *Controller) Start() { // run all of the controllers once prior to returning from Start. if err := repairClusterIPs.RunOnce(); err != nil { // If we fail to repair cluster IPs apiserver is useless. We should restart and retry. - glog.Fatalf("Unable to perform initial IP allocation check: %v", err) + klog.Fatalf("Unable to perform initial IP allocation check: %v", err) } if err := repairNodePorts.RunOnce(); err != nil { // If we fail to repair node ports apiserver is useless. We should restart and retry. - glog.Fatalf("Unable to perform initial service nodePort check: %v", err) + klog.Fatalf("Unable to perform initial service nodePort check: %v", err) } // Service definition is reconciled during first run to correct port and type per expectations. 
if err := c.UpdateKubernetesService(true); err != nil { - glog.Errorf("Unable to perform initial Kubernetes service initialization: %v", err) + klog.Errorf("Unable to perform initial Kubernetes service initialization: %v", err) } c.runner = async.NewRunner(c.RunKubernetesNamespaces, c.RunKubernetesService, repairClusterIPs.RunUntil, repairNodePorts.RunUntil) @@ -167,9 +167,9 @@ func (c *Controller) Stop() { finishedReconciling := make(chan struct{}) go func() { defer close(finishedReconciling) - glog.Infof("Shutting down kubernetes service endpoint reconciler") + klog.Infof("Shutting down kubernetes service endpoint reconciler") if err := c.EndpointReconciler.StopReconciling("kubernetes", c.PublicIP, endpointPorts); err != nil { - glog.Error(err) + klog.Error(err) } }() @@ -178,7 +178,7 @@ func (c *Controller) Stop() { // done case <-time.After(2 * c.EndpointInterval): // don't block server shutdown forever if we can't reach etcd to remove ourselves - glog.Warning("StopReconciling() timed out") + klog.Warning("StopReconciling() timed out") } } @@ -266,7 +266,7 @@ func (c *Controller) CreateOrUpdateMasterServiceIfNeeded(serviceName string, ser // The service already exists. if reconcile { if svc, updated := reconcilers.GetMasterServiceUpdateIfNeeded(s, servicePorts, serviceType); updated { - glog.Warningf("Resetting master service %q to %#v", serviceName, svc) + klog.Warningf("Resetting master service %q to %#v", serviceName, svc) _, err := c.ServiceClient.Services(metav1.NamespaceDefault).Update(svc) return err } diff --git a/pkg/master/controller/crdregistration/BUILD b/pkg/master/controller/crdregistration/BUILD index 0e605162d0d95..fa592c6060519 100644 --- a/pkg/master/controller/crdregistration/BUILD +++ b/pkg/master/controller/crdregistration/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/master/controller/crdregistration/crdregistration_controller.go b/pkg/master/controller/crdregistration/crdregistration_controller.go index 49e3b1edc20df..1670a5b0780a3 100644 --- a/pkg/master/controller/crdregistration/crdregistration_controller.go +++ b/pkg/master/controller/crdregistration/crdregistration_controller.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" crdinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion/apiextensions/internalversion" @@ -88,12 +88,12 @@ func NewAutoRegistrationController(crdinformer crdinformers.CustomResourceDefini if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.V(2).Infof("Couldn't get object from tombstone %#v", obj) + klog.V(2).Infof("Couldn't get object from tombstone %#v", obj) return } cast, ok = tombstone.Obj.(*apiextensions.CustomResourceDefinition) if !ok { - glog.V(2).Infof("Tombstone contained unexpected object: %#v", obj) + klog.V(2).Infof("Tombstone contained unexpected object: %#v", obj) return } } @@ -109,8 +109,8 @@ func (c *crdRegistrationController) Run(threadiness int, stopCh <-chan struct{}) // make sure the work queue is shutdown which will trigger workers to end defer c.queue.ShutDown() - glog.Infof("Starting crd-autoregister controller") - defer glog.Infof("Shutting down 
crd-autoregister controller") + klog.Infof("Starting crd-autoregister controller") + defer klog.Infof("Shutting down crd-autoregister controller") // wait for your secondary caches to fill before starting your work if !controller.WaitForCacheSync("crd-autoregister", stopCh, c.crdSynced) { diff --git a/pkg/master/master.go b/pkg/master/master.go index 97c86986117f1..3f8447c32efd9 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -76,8 +76,8 @@ import ( "k8s.io/kubernetes/pkg/serviceaccount" nodeutil "k8s.io/kubernetes/pkg/util/node" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog" // RESTStorage installers admissionregistrationrest "k8s.io/kubernetes/pkg/registry/admissionregistration/rest" @@ -217,18 +217,18 @@ func (c *Config) createLeaseReconciler() reconcilers.EndpointReconciler { ttl := c.ExtraConfig.MasterEndpointReconcileTTL config, err := c.ExtraConfig.StorageFactory.NewConfig(api.Resource("apiServerIPInfo")) if err != nil { - glog.Fatalf("Error determining service IP ranges: %v", err) + klog.Fatalf("Error determining service IP ranges: %v", err) } leaseStorage, _, err := storagefactory.Create(*config) if err != nil { - glog.Fatalf("Error creating storage factory: %v", err) + klog.Fatalf("Error creating storage factory: %v", err) } masterLeases := reconcilers.NewLeases(leaseStorage, "/masterleases/", ttl) return reconcilers.NewLeaseEndpointReconciler(endpointClient, masterLeases) } func (c *Config) createEndpointReconciler() reconcilers.EndpointReconciler { - glog.Infof("Using reconciler: %v", c.ExtraConfig.EndpointReconcilerType) + klog.Infof("Using reconciler: %v", c.ExtraConfig.EndpointReconcilerType) switch c.ExtraConfig.EndpointReconcilerType { // there are numerous test dependencies that depend on a default controller case "", reconcilers.MasterCountReconcilerType: @@ -238,7 +238,7 @@ func (c *Config) createEndpointReconciler() reconcilers.EndpointReconciler { case reconcilers.NoneEndpointReconcilerType: return c.createNoneReconciler() default: - glog.Fatalf("Reconciler not implemented: %v", c.ExtraConfig.EndpointReconcilerType) + klog.Fatalf("Reconciler not implemented: %v", c.ExtraConfig.EndpointReconcilerType) } return nil } @@ -252,7 +252,7 @@ func (cfg *Config) Complete() CompletedConfig { serviceIPRange, apiServerServiceIP, err := DefaultServiceIPRange(c.ExtraConfig.ServiceIPRange) if err != nil { - glog.Fatalf("Error determining service IP ranges: %v", err) + klog.Fatalf("Error determining service IP ranges: %v", err) } if c.ExtraConfig.ServiceIPRange.IP == nil { c.ExtraConfig.ServiceIPRange = serviceIPRange @@ -272,7 +272,7 @@ func (cfg *Config) Complete() CompletedConfig { // but then that breaks the strict nestedness of ServiceType. // Review post-v1 c.ExtraConfig.ServiceNodePortRange = kubeoptions.DefaultServiceNodePortRange - glog.Infof("Node port range unspecified. Defaulting to %v.", c.ExtraConfig.ServiceNodePortRange) + klog.Infof("Node port range unspecified. 
Defaulting to %v.", c.ExtraConfig.ServiceNodePortRange) } if c.ExtraConfig.EndpointReconcilerConfig.Interval == 0 { @@ -371,7 +371,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) func (m *Master) InstallLegacyAPI(c *completedConfig, restOptionsGetter generic.RESTOptionsGetter, legacyRESTStorageProvider corerest.LegacyRESTStorageProvider) { legacyRESTStorage, apiGroupInfo, err := legacyRESTStorageProvider.NewLegacyRESTStorage(restOptionsGetter) if err != nil { - glog.Fatalf("Error building core storage: %v", err) + klog.Fatalf("Error building core storage: %v", err) } controllerName := "bootstrap-controller" @@ -381,7 +381,7 @@ func (m *Master) InstallLegacyAPI(c *completedConfig, restOptionsGetter generic. m.GenericAPIServer.AddPreShutdownHookOrDie(controllerName, bootstrapController.PreShutdownHook) if err := m.GenericAPIServer.InstallLegacyAPIGroup(genericapiserver.DefaultLegacyAPIPrefix, &apiGroupInfo); err != nil { - glog.Fatalf("Error in registering group versions: %v", err) + klog.Fatalf("Error in registering group versions: %v", err) } } @@ -407,20 +407,20 @@ func (m *Master) InstallAPIs(apiResourceConfigSource serverstorage.APIResourceCo for _, restStorageBuilder := range restStorageProviders { groupName := restStorageBuilder.GroupName() if !apiResourceConfigSource.AnyVersionForGroupEnabled(groupName) { - glog.V(1).Infof("Skipping disabled API group %q.", groupName) + klog.V(1).Infof("Skipping disabled API group %q.", groupName) continue } apiGroupInfo, enabled := restStorageBuilder.NewRESTStorage(apiResourceConfigSource, restOptionsGetter) if !enabled { - glog.Warningf("Problem initializing API group %q, skipping.", groupName) + klog.Warningf("Problem initializing API group %q, skipping.", groupName) continue } - glog.V(1).Infof("Enabling API group %q.", groupName) + klog.V(1).Infof("Enabling API group %q.", groupName) if postHookProvider, ok := restStorageBuilder.(genericapiserver.PostStartHookProvider); ok { name, hook, err := postHookProvider.PostStartHook() if err != nil { - glog.Fatalf("Error building PostStartHook: %v", err) + klog.Fatalf("Error building PostStartHook: %v", err) } m.GenericAPIServer.AddPostStartHookOrDie(name, hook) } @@ -430,7 +430,7 @@ func (m *Master) InstallAPIs(apiResourceConfigSource serverstorage.APIResourceCo for i := range apiGroupsInfo { if err := m.GenericAPIServer.InstallAPIGroup(&apiGroupsInfo[i]); err != nil { - glog.Fatalf("Error in registering group versions: %v", err) + klog.Fatalf("Error in registering group versions: %v", err) } } } diff --git a/pkg/master/reconcilers/BUILD b/pkg/master/reconcilers/BUILD index 97375d1453ee1..4ad16ac8b7483 100644 --- a/pkg/master/reconcilers/BUILD +++ b/pkg/master/reconcilers/BUILD @@ -21,7 +21,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/storage:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/master/reconcilers/lease.go b/pkg/master/reconcilers/lease.go index ec9f7ba63a88b..d7ea00b009663 100644 --- a/pkg/master/reconcilers/lease.go +++ b/pkg/master/reconcilers/lease.go @@ -28,7 +28,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -72,7 +72,7 @@ func (s *storageLeases) ListLeases() ([]string, error) { ipList[i] = 
ip.Subsets[0].Addresses[0].IP } - glog.V(6).Infof("Current master IPs listed in storage are %v", ipList) + klog.V(6).Infof("Current master IPs listed in storage are %v", ipList) return ipList, nil } @@ -98,7 +98,7 @@ func (s *storageLeases) UpdateLease(ip string) error { // changing a field. existing.Generation++ - glog.V(6).Infof("Resetting TTL on master IP %q listed in storage to %v", ip, leaseTime) + klog.V(6).Infof("Resetting TTL on master IP %q listed in storage to %v", ip, leaseTime) return existing, &leaseTime, nil }) @@ -219,7 +219,7 @@ func (r *leaseEndpointReconciler) doReconcile(serviceName string, endpointPorts e.Subsets[0].Ports = endpointPorts } - glog.Warningf("Resetting endpoints for master service %q to %v", serviceName, masterIPs) + klog.Warningf("Resetting endpoints for master service %q to %v", serviceName, masterIPs) if shouldCreate { if _, err = r.endpointClient.Endpoints(corev1.NamespaceDefault).Create(e); errors.IsAlreadyExists(err) { err = nil diff --git a/pkg/master/reconcilers/mastercount.go b/pkg/master/reconcilers/mastercount.go index 479883e70a266..18a635b6a7487 100644 --- a/pkg/master/reconcilers/mastercount.go +++ b/pkg/master/reconcilers/mastercount.go @@ -21,12 +21,12 @@ import ( "net" "sync" - "github.com/golang/glog" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/util/retry" + "k8s.io/klog" endpointsv1 "k8s.io/kubernetes/pkg/api/v1/endpoints" ) @@ -96,7 +96,7 @@ func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, i Addresses: []corev1.EndpointAddress{{IP: ip.String()}}, Ports: endpointPorts, }} - glog.Warningf("Resetting endpoints for master service %q to %#v", serviceName, e) + klog.Warningf("Resetting endpoints for master service %q to %#v", serviceName, e) _, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Update(e) return err } @@ -132,7 +132,7 @@ func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, i // Reset ports. e.Subsets[0].Ports = endpointPorts } - glog.Warningf("Resetting endpoints for master service %q to %v", serviceName, e) + klog.Warningf("Resetting endpoints for master service %q to %v", serviceName, e) _, err = r.endpointClient.Endpoints(metav1.NamespaceDefault).Update(e) return err } diff --git a/pkg/master/services.go b/pkg/master/services.go index 44bb15edffc00..dbd061910238e 100644 --- a/pkg/master/services.go +++ b/pkg/master/services.go @@ -20,7 +20,7 @@ import ( "fmt" "net" - "github.com/golang/glog" + "k8s.io/klog" kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" ) @@ -30,7 +30,7 @@ import ( func DefaultServiceIPRange(passedServiceClusterIPRange net.IPNet) (net.IPNet, net.IP, error) { serviceClusterIPRange := passedServiceClusterIPRange if passedServiceClusterIPRange.IP == nil { - glog.Infof("Network range for service cluster IPs is unspecified. Defaulting to %v.", kubeoptions.DefaultServiceIPCIDR) + klog.Infof("Network range for service cluster IPs is unspecified. 
Defaulting to %v.", kubeoptions.DefaultServiceIPCIDR) serviceClusterIPRange = kubeoptions.DefaultServiceIPCIDR } if size := ipallocator.RangeSize(&serviceClusterIPRange); size < 8 { @@ -42,7 +42,7 @@ func DefaultServiceIPRange(passedServiceClusterIPRange net.IPNet) (net.IPNet, ne if err != nil { return net.IPNet{}, net.IP{}, err } - glog.V(4).Infof("Setting service IP to %q (read-write).", apiServerServiceIP) + klog.V(4).Infof("Setting service IP to %q (read-write).", apiServerServiceIP) return serviceClusterIPRange, apiServerServiceIP, nil } diff --git a/pkg/master/tunneler/BUILD b/pkg/master/tunneler/BUILD index 1bcdc49768808..fd4bf629dd93f 100644 --- a/pkg/master/tunneler/BUILD +++ b/pkg/master/tunneler/BUILD @@ -25,8 +25,8 @@ go_library( "//pkg/util/file:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/master/tunneler/ssh.go b/pkg/master/tunneler/ssh.go index 6a55015d5c17b..88217d170ed34 100644 --- a/pkg/master/tunneler/ssh.go +++ b/pkg/master/tunneler/ssh.go @@ -32,8 +32,8 @@ import ( "k8s.io/kubernetes/pkg/ssh" utilfile "k8s.io/kubernetes/pkg/util/file" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog" ) type InstallSSHKey func(ctx context.Context, user string, data []byte) error @@ -115,20 +115,20 @@ func (c *SSHTunneler) Run(getAddresses AddressFunc) { // Usernames are capped @ 32 if len(c.SSHUser) > 32 { - glog.Warning("SSH User is too long, truncating to 32 chars") + klog.Warning("SSH User is too long, truncating to 32 chars") c.SSHUser = c.SSHUser[0:32] } - glog.Infof("Setting up proxy: %s %s", c.SSHUser, c.SSHKeyfile) + klog.Infof("Setting up proxy: %s %s", c.SSHUser, c.SSHKeyfile) // public keyfile is written last, so check for that. 
publicKeyFile := c.SSHKeyfile + ".pub" exists, err := utilfile.FileExists(publicKeyFile) if err != nil { - glog.Errorf("Error detecting if key exists: %v", err) + klog.Errorf("Error detecting if key exists: %v", err) } else if !exists { - glog.Infof("Key doesn't exist, attempting to create") + klog.Infof("Key doesn't exist, attempting to create") if err := generateSSHKey(c.SSHKeyfile, publicKeyFile); err != nil { - glog.Errorf("Failed to create key pair: %v", err) + klog.Errorf("Failed to create key pair: %v", err) } } @@ -168,21 +168,21 @@ func (c *SSHTunneler) SecondsSinceSSHKeySync() int64 { func (c *SSHTunneler) installSSHKeySyncLoop(user, publicKeyfile string) { go wait.Until(func() { if c.InstallSSHKey == nil { - glog.Error("Won't attempt to install ssh key: InstallSSHKey function is nil") + klog.Error("Won't attempt to install ssh key: InstallSSHKey function is nil") return } key, err := ssh.ParsePublicKeyFromFile(publicKeyfile) if err != nil { - glog.Errorf("Failed to load public key: %v", err) + klog.Errorf("Failed to load public key: %v", err) return } keyData, err := ssh.EncodeSSHKey(key) if err != nil { - glog.Errorf("Failed to encode public key: %v", err) + klog.Errorf("Failed to encode public key: %v", err) return } if err := c.InstallSSHKey(context.TODO(), user, keyData); err != nil { - glog.Errorf("Failed to install ssh key: %v", err) + klog.Errorf("Failed to install ssh key: %v", err) return } atomic.StoreInt64(&c.lastSSHKeySync, c.clock.Now().Unix()) @@ -195,9 +195,9 @@ func (c *SSHTunneler) nodesSyncLoop() { // TODO (cjcullen) make this watch. go wait.Until(func() { addrs, err := c.getAddresses() - glog.V(4).Infof("Calling update w/ addrs: %v", addrs) + klog.V(4).Infof("Calling update w/ addrs: %v", addrs) if err != nil { - glog.Errorf("Failed to getAddresses: %v", err) + klog.Errorf("Failed to getAddresses: %v", err) } c.tunnels.Update(addrs) atomic.StoreInt64(&c.lastSync, c.clock.Now().Unix()) @@ -213,11 +213,11 @@ func generateSSHKey(privateKeyfile, publicKeyfile string) error { // through last time, so delete it. 
exists, err := utilfile.FileExists(privateKeyfile) if err != nil { - glog.Errorf("Error detecting if private key exists: %v", err) + klog.Errorf("Error detecting if private key exists: %v", err) } else if exists { - glog.Infof("Private key exists, but public key does not") + klog.Infof("Private key exists, but public key does not") if err := os.Remove(privateKeyfile); err != nil { - glog.Errorf("Failed to remove stale private key: %v", err) + klog.Errorf("Failed to remove stale private key: %v", err) } } if err := ioutil.WriteFile(privateKeyfile, ssh.EncodePrivateKey(private), 0600); err != nil { diff --git a/pkg/printers/internalversion/BUILD b/pkg/printers/internalversion/BUILD index e25a7d5f22838..e8dc560b9822b 100644 --- a/pkg/printers/internalversion/BUILD +++ b/pkg/printers/internalversion/BUILD @@ -118,7 +118,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//vendor/github.com/fatih/camelcase:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/printers/internalversion/describe.go b/pkg/printers/internalversion/describe.go index 7199d9640f8b5..94515620426a4 100644 --- a/pkg/printers/internalversion/describe.go +++ b/pkg/printers/internalversion/describe.go @@ -30,7 +30,7 @@ import ( "text/tabwriter" "time" - "github.com/golang/glog" + "k8s.io/klog" "github.com/fatih/camelcase" @@ -185,7 +185,7 @@ func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]printers.Desc func DescriberFor(kind schema.GroupKind, clientConfig *rest.Config) (printers.Describer, bool) { describers, err := describerMap(clientConfig) if err != nil { - glog.V(1).Info(err) + klog.V(1).Info(err) return nil, false } @@ -323,7 +323,7 @@ func init() { describeNamespace, ) if err != nil { - glog.Fatalf("Cannot register describers: %v", err) + klog.Fatalf("Cannot register describers: %v", err) } DefaultObjectDescriber = d } @@ -626,7 +626,7 @@ func (d *PodDescriber) Describe(namespace, name string, describerSettings printe var events *api.EventList if describerSettings.ShowEvents { if ref, err := ref.GetReference(legacyscheme.Scheme, pod); err != nil { - glog.Errorf("Unable to construct reference to '%#v': %v", pod, err) + klog.Errorf("Unable to construct reference to '%#v': %v", pod, err) } else { ref.Kind = "" events, _ = d.Core().Events(namespace).Search(legacyscheme.Scheme, ref) @@ -2795,7 +2795,7 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings print var events *api.EventList if describerSettings.ShowEvents { if ref, err := ref.GetReference(legacyscheme.Scheme, node); err != nil { - glog.Errorf("Unable to construct reference to '%#v': %v", node, err) + klog.Errorf("Unable to construct reference to '%#v': %v", node, err) } else { // TODO: We haven't decided the namespace for Node object yet. 
ref.UID = types.UID(ref.Name) diff --git a/pkg/probe/exec/BUILD b/pkg/probe/exec/BUILD index 42b68b510b69a..317dc0fc0cda5 100644 --- a/pkg/probe/exec/BUILD +++ b/pkg/probe/exec/BUILD @@ -12,7 +12,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/probe/exec", deps = [ "//pkg/probe:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/probe/exec/exec.go b/pkg/probe/exec/exec.go index 5901e35a6a159..a6ae523aa629c 100644 --- a/pkg/probe/exec/exec.go +++ b/pkg/probe/exec/exec.go @@ -20,7 +20,7 @@ import ( "k8s.io/kubernetes/pkg/probe" "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) // New creates a Prober. @@ -40,7 +40,7 @@ type execProber struct{} // errors if any. func (pr execProber) Probe(e exec.Cmd) (probe.Result, string, error) { data, err := e.CombinedOutput() - glog.V(4).Infof("Exec probe response: %q", string(data)) + klog.V(4).Infof("Exec probe response: %q", string(data)) if err != nil { exit, ok := err.(exec.ExitError) if ok { diff --git a/pkg/probe/http/BUILD b/pkg/probe/http/BUILD index 5c788fc3ab9f6..98387df305b29 100644 --- a/pkg/probe/http/BUILD +++ b/pkg/probe/http/BUILD @@ -14,7 +14,7 @@ go_library( "//pkg/probe:go_default_library", "//pkg/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/probe/http/http.go b/pkg/probe/http/http.go index 11bbddfa8be03..e9bcc0f3e1bb2 100644 --- a/pkg/probe/http/http.go +++ b/pkg/probe/http/http.go @@ -28,7 +28,7 @@ import ( "k8s.io/kubernetes/pkg/probe" "k8s.io/kubernetes/pkg/version" - "github.com/golang/glog" + "k8s.io/klog" ) // New creates Prober that will skip TLS verification while probing. @@ -96,9 +96,9 @@ func DoHTTPProbe(url *url.URL, headers http.Header, client GetHTTPInterface) (pr } body := string(b) if res.StatusCode >= http.StatusOK && res.StatusCode < http.StatusBadRequest { - glog.V(4).Infof("Probe succeeded for %s, Response: %v", url.String(), *res) + klog.V(4).Infof("Probe succeeded for %s, Response: %v", url.String(), *res) return probe.Success, body, nil } - glog.V(4).Infof("Probe failed for %s with request headers %v, response body: %v", url.String(), headers, body) + klog.V(4).Infof("Probe failed for %s with request headers %v, response body: %v", url.String(), headers, body) return probe.Failure, fmt.Sprintf("HTTP probe failed with statuscode: %d", res.StatusCode), nil } diff --git a/pkg/probe/tcp/BUILD b/pkg/probe/tcp/BUILD index b14c197e2557d..929f1e4fad37c 100644 --- a/pkg/probe/tcp/BUILD +++ b/pkg/probe/tcp/BUILD @@ -12,7 +12,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/probe/tcp", deps = [ "//pkg/probe:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/probe/tcp/tcp.go b/pkg/probe/tcp/tcp.go index cf18eb1a5b988..7b183e0b45a74 100644 --- a/pkg/probe/tcp/tcp.go +++ b/pkg/probe/tcp/tcp.go @@ -23,7 +23,7 @@ import ( "k8s.io/kubernetes/pkg/probe" - "github.com/golang/glog" + "k8s.io/klog" ) // New creates Prober. 
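The probe diffs above all follow the same verbosity convention: real failures are logged unconditionally with Errorf, while per-probe chatter sits behind V(4) so a healthy cluster stays quiet at the default level. A short sketch of that convention; the helper name logProbeResult is ours, not the patch's:

package probe

import (
	"k8s.io/klog"
)

// logProbeResult mirrors the leveling used in pkg/probe: errors are always
// visible, success detail only appears when running with -v=4 or higher.
func logProbeResult(target, body string, err error) {
	if err != nil {
		klog.Errorf("Probe failed for %s: %v", target, err)
		return
	}
	klog.V(4).Infof("Probe succeeded for %s, response: %q", target, body)
}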
@@ -55,7 +55,7 @@ func DoTCPProbe(addr string, timeout time.Duration) (probe.Result, string, error } err = conn.Close() if err != nil { - glog.Errorf("Unexpected error closing TCP probe socket: %v (%#v)", err, err) + klog.Errorf("Unexpected error closing TCP probe socket: %v (%#v)", err, err) } return probe.Success, "", nil } diff --git a/pkg/proxy/BUILD b/pkg/proxy/BUILD index 9920c511765c6..bb290126fea25 100644 --- a/pkg/proxy/BUILD +++ b/pkg/proxy/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/proxy/config/BUILD b/pkg/proxy/config/BUILD index e45009ff4a8f0..b68b6f036e2a0 100644 --- a/pkg/proxy/config/BUILD +++ b/pkg/proxy/config/BUILD @@ -20,7 +20,7 @@ go_library( "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/proxy/config/config.go b/pkg/proxy/config/config.go index 03256f040d99e..2b22e643b97d1 100644 --- a/pkg/proxy/config/config.go +++ b/pkg/proxy/config/config.go @@ -20,12 +20,12 @@ import ( "fmt" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" coreinformers "k8s.io/client-go/informers/core/v1" listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/klog" "k8s.io/kubernetes/pkg/controller" ) @@ -99,15 +99,15 @@ func (c *EndpointsConfig) RegisterEventHandler(handler EndpointsHandler) { func (c *EndpointsConfig) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - glog.Info("Starting endpoints config controller") - defer glog.Info("Shutting down endpoints config controller") + klog.Info("Starting endpoints config controller") + defer klog.Info("Shutting down endpoints config controller") if !controller.WaitForCacheSync("endpoints config", stopCh, c.listerSynced) { return } for i := range c.eventHandlers { - glog.V(3).Infof("Calling handler.OnEndpointsSynced()") + klog.V(3).Infof("Calling handler.OnEndpointsSynced()") c.eventHandlers[i].OnEndpointsSynced() } @@ -121,7 +121,7 @@ func (c *EndpointsConfig) handleAddEndpoints(obj interface{}) { return } for i := range c.eventHandlers { - glog.V(4).Infof("Calling handler.OnEndpointsAdd") + klog.V(4).Infof("Calling handler.OnEndpointsAdd") c.eventHandlers[i].OnEndpointsAdd(endpoints) } } @@ -138,7 +138,7 @@ func (c *EndpointsConfig) handleUpdateEndpoints(oldObj, newObj interface{}) { return } for i := range c.eventHandlers { - glog.V(4).Infof("Calling handler.OnEndpointsUpdate") + klog.V(4).Infof("Calling handler.OnEndpointsUpdate") c.eventHandlers[i].OnEndpointsUpdate(oldEndpoints, endpoints) } } @@ -157,7 +157,7 @@ func (c *EndpointsConfig) handleDeleteEndpoints(obj interface{}) { } } for i := range c.eventHandlers { - glog.V(4).Infof("Calling handler.OnEndpointsDelete") + klog.V(4).Infof("Calling handler.OnEndpointsDelete") c.eventHandlers[i].OnEndpointsDelete(endpoints) } } @@ -199,15 +199,15 @@ func (c *ServiceConfig) RegisterEventHandler(handler ServiceHandler) { func (c *ServiceConfig) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() - 
glog.Info("Starting service config controller") - defer glog.Info("Shutting down service config controller") + klog.Info("Starting service config controller") + defer klog.Info("Shutting down service config controller") if !controller.WaitForCacheSync("service config", stopCh, c.listerSynced) { return } for i := range c.eventHandlers { - glog.V(3).Info("Calling handler.OnServiceSynced()") + klog.V(3).Info("Calling handler.OnServiceSynced()") c.eventHandlers[i].OnServiceSynced() } @@ -221,7 +221,7 @@ func (c *ServiceConfig) handleAddService(obj interface{}) { return } for i := range c.eventHandlers { - glog.V(4).Info("Calling handler.OnServiceAdd") + klog.V(4).Info("Calling handler.OnServiceAdd") c.eventHandlers[i].OnServiceAdd(service) } } @@ -238,7 +238,7 @@ func (c *ServiceConfig) handleUpdateService(oldObj, newObj interface{}) { return } for i := range c.eventHandlers { - glog.V(4).Info("Calling handler.OnServiceUpdate") + klog.V(4).Info("Calling handler.OnServiceUpdate") c.eventHandlers[i].OnServiceUpdate(oldService, service) } } @@ -257,7 +257,7 @@ func (c *ServiceConfig) handleDeleteService(obj interface{}) { } } for i := range c.eventHandlers { - glog.V(4).Info("Calling handler.OnServiceDelete") + klog.V(4).Info("Calling handler.OnServiceDelete") c.eventHandlers[i].OnServiceDelete(service) } } diff --git a/pkg/proxy/endpoints.go b/pkg/proxy/endpoints.go index 06c7d3ad5ab69..067be71d79b49 100644 --- a/pkg/proxy/endpoints.go +++ b/pkg/proxy/endpoints.go @@ -22,7 +22,7 @@ import ( "strconv" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -197,7 +197,7 @@ func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *v1.Endpoint for i := range ss.Ports { port := &ss.Ports[i] if port.Port == 0 { - glog.Warningf("ignoring invalid endpoint port %s", port.Name) + klog.Warningf("ignoring invalid endpoint port %s", port.Name) continue } svcPortName := ServicePortName{ @@ -207,7 +207,7 @@ func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *v1.Endpoint for i := range ss.Addresses { addr := &ss.Addresses[i] if addr.IP == "" { - glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name) + klog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name) continue } // Filter out the incorrect IP version case. 
@@ -226,12 +226,12 @@ func (ect *EndpointChangeTracker) endpointsToEndpointsMap(endpoints *v1.Endpoint endpointsMap[svcPortName] = append(endpointsMap[svcPortName], baseEndpointInfo) } } - if glog.V(3) { + if klog.V(3) { newEPList := []string{} for _, ep := range endpointsMap[svcPortName] { newEPList = append(newEPList, ep.String()) } - glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList) + klog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList) } } } @@ -299,7 +299,7 @@ func detectStaleConnections(oldEndpointsMap, newEndpointsMap EndpointsMap, stale } } if stale { - glog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.String()) + klog.V(4).Infof("Stale endpoint %v -> %v", svcPortName, ep.String()) *staleEndpoints = append(*staleEndpoints, ServiceEndpoint{Endpoint: ep.String(), ServicePortName: svcPortName}) } } diff --git a/pkg/proxy/healthcheck/BUILD b/pkg/proxy/healthcheck/BUILD index b711fcdb07d9f..f94de593ce05a 100644 --- a/pkg/proxy/healthcheck/BUILD +++ b/pkg/proxy/healthcheck/BUILD @@ -20,8 +20,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/renstrom/dedent:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/proxy/healthcheck/healthcheck.go b/pkg/proxy/healthcheck/healthcheck.go index 0c59a38cf79a3..5dc3f009ecb65 100644 --- a/pkg/proxy/healthcheck/healthcheck.go +++ b/pkg/proxy/healthcheck/healthcheck.go @@ -25,8 +25,8 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" "github.com/renstrom/dedent" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -133,9 +133,9 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err // Remove any that are not needed any more. for nsn, svc := range hcs.services { if port, found := newServices[nsn]; !found || port != svc.port { - glog.V(2).Infof("Closing healthcheck %q on port %d", nsn.String(), svc.port) + klog.V(2).Infof("Closing healthcheck %q on port %d", nsn.String(), svc.port) if err := svc.listener.Close(); err != nil { - glog.Errorf("Close(%v): %v", svc.listener.Addr(), err) + klog.Errorf("Close(%v): %v", svc.listener.Addr(), err) } delete(hcs.services, nsn) } @@ -144,11 +144,11 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err // Add any that are needed. for nsn, port := range newServices { if hcs.services[nsn] != nil { - glog.V(3).Infof("Existing healthcheck %q on port %d", nsn.String(), port) + klog.V(3).Infof("Existing healthcheck %q on port %d", nsn.String(), port) continue } - glog.V(2).Infof("Opening healthcheck %q on port %d", nsn.String(), port) + klog.V(2).Infof("Opening healthcheck %q on port %d", nsn.String(), port) svc := &hcInstance{port: port} addr := fmt.Sprintf(":%d", port) svc.server = hcs.httpFactory.New(addr, hcHandler{name: nsn, hcs: hcs}) @@ -166,19 +166,19 @@ func (hcs *server) SyncServices(newServices map[types.NamespacedName]uint16) err UID: types.UID(nsn.String()), }, api.EventTypeWarning, "FailedToStartServiceHealthcheck", msg) } - glog.Error(msg) + klog.Error(msg) continue } hcs.services[nsn] = svc go func(nsn types.NamespacedName, svc *hcInstance) { // Serve() will exit when the listener is closed. 
- glog.V(3).Infof("Starting goroutine for healthcheck %q on port %d", nsn.String(), svc.port) + klog.V(3).Infof("Starting goroutine for healthcheck %q on port %d", nsn.String(), svc.port) if err := svc.server.Serve(svc.listener); err != nil { - glog.V(3).Infof("Healthcheck %q closed: %v", nsn.String(), err) + klog.V(3).Infof("Healthcheck %q closed: %v", nsn.String(), err) return } - glog.V(3).Infof("Healthcheck %q closed", nsn.String()) + klog.V(3).Infof("Healthcheck %q closed", nsn.String()) }(nsn, svc) } return nil @@ -203,7 +203,7 @@ func (h hcHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { svc, ok := h.hcs.services[h.name] if !ok || svc == nil { h.hcs.lock.Unlock() - glog.Errorf("Received request for closed healthcheck %q", h.name.String()) + klog.Errorf("Received request for closed healthcheck %q", h.name.String()) return } count := svc.endpoints @@ -232,10 +232,10 @@ func (hcs *server) SyncEndpoints(newEndpoints map[types.NamespacedName]int) erro for nsn, count := range newEndpoints { if hcs.services[nsn] == nil { - glog.V(3).Infof("Not saving endpoints for unknown healthcheck %q", nsn.String()) + klog.V(3).Infof("Not saving endpoints for unknown healthcheck %q", nsn.String()) continue } - glog.V(3).Infof("Reporting %d endpoints for healthcheck %q", count, nsn.String()) + klog.V(3).Infof("Reporting %d endpoints for healthcheck %q", count, nsn.String()) hcs.services[nsn].endpoints = count } for nsn, hci := range hcs.services { @@ -306,7 +306,7 @@ func (hs *HealthzServer) Run() { server := hs.httpFactory.New(hs.addr, serveMux) go wait.Until(func() { - glog.V(3).Infof("Starting goroutine for healthz on %s", hs.addr) + klog.V(3).Infof("Starting goroutine for healthz on %s", hs.addr) listener, err := hs.listener.Listen(hs.addr) if err != nil { @@ -314,15 +314,15 @@ func (hs *HealthzServer) Run() { if hs.recorder != nil { hs.recorder.Eventf(hs.nodeRef, api.EventTypeWarning, "FailedToStartNodeHealthcheck", msg) } - glog.Error(msg) + klog.Error(msg) return } if err := server.Serve(listener); err != nil { - glog.Errorf("Healthz closed with error: %v", err) + klog.Errorf("Healthz closed with error: %v", err) return } - glog.Error("Unexpected healthz closed.") + klog.Error("Unexpected healthz closed.") }, nodeHealthzRetryInterval, wait.NeverStop) } diff --git a/pkg/proxy/iptables/BUILD b/pkg/proxy/iptables/BUILD index 84154ed565faf..2d1794529cd35 100644 --- a/pkg/proxy/iptables/BUILD +++ b/pkg/proxy/iptables/BUILD @@ -25,7 +25,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -46,7 +46,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/exec/testing:go_default_library", ], diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index db76a7770a88d..e6e34598f68a3 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -32,7 +32,7 @@ import ( "sync/atomic" "time" - 
"github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -182,7 +182,7 @@ func newEndpointInfo(baseInfo *proxy.BaseEndpointInfo) proxy.Endpoint { func (e *endpointsInfo) Equal(other proxy.Endpoint) bool { o, ok := other.(*endpointsInfo) if !ok { - glog.Error("Failed to cast endpointsInfo") + klog.Error("Failed to cast endpointsInfo") return false } return e.Endpoint == o.Endpoint && @@ -303,7 +303,7 @@ func NewProxier(ipt utiliptables.Interface, // are connected to a Linux bridge (but not SDN bridges). Until most // plugins handle this, log when config is missing if val, err := sysctl.GetSysctl(sysctlBridgeCallIPTables); err == nil && val != 1 { - glog.Warning("missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended") + klog.Warning("missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended") } // Generate the masquerade mark to use for SNAT rules. @@ -311,12 +311,12 @@ func NewProxier(ipt utiliptables.Interface, masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) if nodeIP == nil { - glog.Warning("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") + klog.Warning("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") nodeIP = net.ParseIP("127.0.0.1") } if len(clusterCIDR) == 0 { - glog.Warning("clusterCIDR not specified, unable to distinguish between internal and external traffic") + klog.Warning("clusterCIDR not specified, unable to distinguish between internal and external traffic") } else if utilnet.IsIPv6CIDR(clusterCIDR) != ipt.IsIpv6() { return nil, fmt.Errorf("clusterCIDR %s has incorrect IP version: expect isIPv6=%t", clusterCIDR, ipt.IsIpv6()) } @@ -352,7 +352,7 @@ func NewProxier(ipt utiliptables.Interface, networkInterfacer: utilproxy.RealNetwork{}, } burstSyncs := 2 - glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) + klog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs) return proxier, nil } @@ -392,7 +392,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { ) if err := ipt.DeleteRule(chain.table, chain.sourceChain, args...); err != nil { if !utiliptables.IsNotFoundError(err) { - glog.Errorf("Error removing pure-iptables proxy rule: %v", err) + klog.Errorf("Error removing pure-iptables proxy rule: %v", err) encounteredError = true } } @@ -401,7 +401,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { // Flush and remove all of our "-t nat" chains. iptablesData := bytes.NewBuffer(nil) if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil { - glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableNAT, err) + klog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableNAT, err) encounteredError = true } else { existingNATChains := utiliptables.GetChainLines(utiliptables.TableNAT, iptablesData.Bytes()) @@ -429,7 +429,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { // Write it. 
err = ipt.Restore(utiliptables.TableNAT, natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { - glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableNAT, err) + klog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableNAT, err) encounteredError = true } } @@ -437,7 +437,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { // Flush and remove all of our "-t filter" chains. iptablesData.Reset() if err := ipt.SaveInto(utiliptables.TableFilter, iptablesData); err != nil { - glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableFilter, err) + klog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableFilter, err) encounteredError = true } else { existingFilterChains := utiliptables.GetChainLines(utiliptables.TableFilter, iptablesData.Bytes()) @@ -455,7 +455,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { filterLines := append(filterChains.Bytes(), filterRules.Bytes()...) // Write it. if err := ipt.Restore(utiliptables.TableFilter, filterLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil { - glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err) + klog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err) encounteredError = true } } @@ -609,7 +609,7 @@ func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceE endpointIP := utilproxy.IPPart(epSvcPair.Endpoint) err := conntrack.ClearEntriesForNAT(proxier.exec, svcInfo.ClusterIPString(), endpointIP, v1.ProtocolUDP) if err != nil { - glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err) + klog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err) } } } @@ -638,11 +638,11 @@ func (proxier *Proxier) syncProxyRules() { start := time.Now() defer func() { metrics.SyncProxyRulesLatency.Observe(metrics.SinceInMicroseconds(start)) - glog.V(4).Infof("syncProxyRules took %v", time.Since(start)) + klog.V(4).Infof("syncProxyRules took %v", time.Since(start)) }() // don't sync rules till we've received services and endpoints if !proxier.endpointsSynced || !proxier.servicesSynced { - glog.V(2).Info("Not syncing iptables until Services and Endpoints have been received from master") + klog.V(2).Info("Not syncing iptables until Services and Endpoints have been received from master") return } @@ -656,17 +656,17 @@ func (proxier *Proxier) syncProxyRules() { // merge stale services gathered from updateEndpointsMap for _, svcPortName := range endpointUpdateResult.StaleServiceNames { if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.GetProtocol() == v1.ProtocolUDP { - glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIPString()) + klog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIPString()) staleServices.Insert(svcInfo.ClusterIPString()) } } - glog.V(3).Info("Syncing iptables rules") + klog.V(3).Info("Syncing iptables rules") // Create and link the kube chains. 
for _, chain := range iptablesJumpChains { if _, err := proxier.iptables.EnsureChain(chain.table, chain.chain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", chain.table, kubeServicesChain, err) + klog.Errorf("Failed to ensure that %s chain %s exists: %v", chain.table, chain.chain, err) return } args := append(chain.extraArgs, @@ -674,7 +674,7 @@ func (proxier *Proxier) syncProxyRules() { "-j", string(chain.chain), ) if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, chain.table, chain.sourceChain, args...); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", chain.table, chain.sourceChain, chain.chain, err) + klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", chain.table, chain.sourceChain, chain.chain, err) return } } @@ -689,7 +689,7 @@ func (proxier *Proxier) syncProxyRules() { proxier.existingFilterChainsData.Reset() err := proxier.iptables.SaveInto(utiliptables.TableFilter, proxier.existingFilterChainsData) if err != nil { // if we failed to get any rules - glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) + klog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) } else { // otherwise parse the output existingFilterChains = utiliptables.GetChainLines(utiliptables.TableFilter, proxier.existingFilterChainsData.Bytes()) } @@ -699,7 +699,7 @@ func (proxier *Proxier) syncProxyRules() { proxier.iptablesData.Reset() err = proxier.iptables.SaveInto(utiliptables.TableNAT, proxier.iptablesData) if err != nil { // if we failed to get any rules - glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) + klog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) } else { // otherwise parse the output existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, proxier.iptablesData.Bytes()) } @@ -780,7 +780,7 @@ func (proxier *Proxier) syncProxyRules() { for svcName, svc := range proxier.serviceMap { svcInfo, ok := svc.(*serviceInfo) if !ok { - glog.Errorf("Failed to cast serviceInfo %q", svcName.String()) + klog.Errorf("Failed to cast serviceInfo %q", svcName.String()) continue } isIPv6 := utilnet.IsIPv6(svcInfo.ClusterIP) @@ -848,7 +848,7 @@ func (proxier *Proxier) syncProxyRules() { // machine, hold the local port open so no other process can open it // (because the socket might open but it would never work). if local, err := utilproxy.IsLocalIP(externalIP); err != nil { - glog.Errorf("can't determine if IP is local, assuming not: %v", err) + klog.Errorf("can't determine if IP is local, assuming not: %v", err) } else if local && (svcInfo.GetProtocol() != v1.ProtocolSCTP) { lp := utilproxy.LocalPort{ Description: "externalIP for " + svcNameString, @@ -857,7 +857,7 @@ func (proxier *Proxier) syncProxyRules() { Protocol: protocol, } if proxier.portsMap[lp] != nil { - glog.V(4).Infof("Port %s was open before and is still needed", lp.String()) + klog.V(4).Infof("Port %s was open before and is still needed", lp.String()) replacementPortsMap[lp] = proxier.portsMap[lp] } else { socket, err := proxier.portMapper.OpenLocalPort(&lp) @@ -871,7 +871,7 @@ func (proxier *Proxier) syncProxyRules() { UID: types.UID(proxier.hostname), Namespace: "", }, v1.EventTypeWarning, err.Error(), msg) - glog.Error(msg) + klog.Error(msg) continue } replacementPortsMap[lp] = socket @@ -991,7 +991,7 @@ func (proxier *Proxier) syncProxyRules() { // (because the socket might open but it would never work).
addresses, err := utilproxy.GetNodeAddresses(proxier.nodePortAddresses, proxier.networkInterfacer) if err != nil { - glog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err) + klog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err) continue } @@ -1016,12 +1016,12 @@ func (proxier *Proxier) syncProxyRules() { // For ports on node IPs, open the actual port and hold it. for _, lp := range lps { if proxier.portsMap[lp] != nil { - glog.V(4).Infof("Port %s was open before and is still needed", lp.String()) + klog.V(4).Infof("Port %s was open before and is still needed", lp.String()) replacementPortsMap[lp] = proxier.portsMap[lp] } else if svcInfo.GetProtocol() != v1.ProtocolSCTP { socket, err := proxier.portMapper.OpenLocalPort(&lp) if err != nil { - glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err) + klog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err) continue } if lp.Protocol == "udp" { @@ -1031,7 +1031,7 @@ func (proxier *Proxier) syncProxyRules() { // See issue: https://github.com/kubernetes/kubernetes/issues/49881 err := conntrack.ClearEntriesForPort(proxier.exec, lp.Port, isIPv6, v1.ProtocolUDP) if err != nil { - glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", lp.Port, err) + klog.Errorf("Failed to clear udp conntrack for port %d, error: %v", lp.Port, err) } } replacementPortsMap[lp] = socket @@ -1087,7 +1087,7 @@ func (proxier *Proxier) syncProxyRules() { for _, ep := range proxier.endpointsMap[svcName] { epInfo, ok := ep.(*endpointsInfo) if !ok { - glog.Errorf("Failed to cast endpointsInfo %q", ep.String()) + klog.Errorf("Failed to cast endpointsInfo %q", ep.String()) continue } endpoints = append(endpoints, epInfo) @@ -1253,7 +1253,7 @@ func (proxier *Proxier) syncProxyRules() { // other service portal rules. addresses, err := utilproxy.GetNodeAddresses(proxier.nodePortAddresses, proxier.networkInterfacer) if err != nil { - glog.Errorf("Failed to get node ip address matching nodeport cidr") + klog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err) } else { isIPv6 := proxier.iptables.IsIpv6() for address := range addresses { @@ -1270,7 +1270,7 @@ func (proxier *Proxier) syncProxyRules() { } // Ignore IP addresses with incorrect version if isIPv6 && !utilnet.IsIPv6String(address) || !isIPv6 && utilnet.IsIPv6String(address) { - glog.Errorf("IP address %s has incorrect IP version", address) + klog.Errorf("IP address %s has incorrect IP version", address) continue } // create nodeport rules for each IP one by one @@ -1329,12 +1329,12 @@ func (proxier *Proxier) syncProxyRules() { proxier.iptablesData.Write(proxier.natChains.Bytes()) proxier.iptablesData.Write(proxier.natRules.Bytes()) - glog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes()) + klog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes()) err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { - glog.Errorf("Failed to execute iptables-restore: %v", err) // Revert new local ports.
- glog.V(2).Infof("Closing local ports after iptables-restore failure") + klog.V(2).Infof("Closing local ports after iptables-restore failure") utilproxy.RevertPorts(replacementPortsMap, proxier.portsMap) return } @@ -1356,17 +1356,17 @@ func (proxier *Proxier) syncProxyRules() { // not "OnlyLocal", but the services list will not, and the healthChecker // will just drop those endpoints. if err := proxier.healthChecker.SyncServices(serviceUpdateResult.HCServiceNodePorts); err != nil { - glog.Errorf("Error syncing healtcheck services: %v", err) + klog.Errorf("Error syncing healtcheck services: %v", err) } if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.HCEndpointsLocalIPSize); err != nil { - glog.Errorf("Error syncing healthcheck endpoints: %v", err) + klog.Errorf("Error syncing healthcheck endpoints: %v", err) } // Finish housekeeping. // TODO: these could be made more consistent. for _, svcIP := range staleServices.UnsortedList() { if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil { - glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) + klog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) } } proxier.deleteEndpointConnections(endpointUpdateResult.StaleEndpoints) @@ -1424,6 +1424,6 @@ func openLocalPort(lp *utilproxy.LocalPort) (utilproxy.Closeable, error) { default: return nil, fmt.Errorf("unknown protocol %q", lp.Protocol) } - glog.V(2).Infof("Opened local port %s", lp.String()) + klog.V(2).Infof("Opened local port %s", lp.String()) return socket, nil } diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go index b14d0f416fccb..94ae8802a227d 100644 --- a/pkg/proxy/iptables/proxier_test.go +++ b/pkg/proxy/iptables/proxier_test.go @@ -26,7 +26,7 @@ import ( "testing" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -281,7 +281,7 @@ func TestDeleteEndpointConnections(t *testing.T) { // Run the test cases for _, tc := range testCases { priorExecs := fexec.CommandCalls - priorGlogErrs := glog.Stats.Error.Lines() + priorGlogErrs := klog.Stats.Error.Lines() input := []proxy.ServiceEndpoint{tc.epSvcPair} fakeProxier.deleteEndpointConnections(input) @@ -319,7 +319,7 @@ func TestDeleteEndpointConnections(t *testing.T) { if tc.simulatedErr != "" && tc.simulatedErr != conntrack.NoConnectionToDelete { expGlogErrs = 1 } - glogErrs := glog.Stats.Error.Lines() - priorGlogErrs + glogErrs := klog.Stats.Error.Lines() - priorGlogErrs if glogErrs != expGlogErrs { t.Errorf("%s: Expected %d glogged errors, but got %d", tc.description, expGlogErrs, glogErrs) } diff --git a/pkg/proxy/ipvs/BUILD b/pkg/proxy/ipvs/BUILD index e7b13f3cac6ed..002b616f3f844 100644 --- a/pkg/proxy/ipvs/BUILD +++ b/pkg/proxy/ipvs/BUILD @@ -63,7 +63,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ diff --git a/pkg/proxy/ipvs/graceful_termination.go b/pkg/proxy/ipvs/graceful_termination.go index 0d8c4ebc567e5..d9357d2c6d8cf 100644 --- a/pkg/proxy/ipvs/graceful_termination.go +++ b/pkg/proxy/ipvs/graceful_termination.go @@ -21,8 +21,8 @@ 
import ( "time" "fmt" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" utilipvs "k8s.io/kubernetes/pkg/util/ipvs" ) @@ -63,7 +63,7 @@ func (q *graceTerminateRSList) add(rs *listItem) bool { return false } - glog.V(5).Infof("Adding rs %v to graceful delete rsList", rs) + klog.V(5).Infof("Adding rs %v to graceful delete rsList", rs) q.list[uniqueRS] = rs return true } @@ -86,11 +86,11 @@ func (q *graceTerminateRSList) flushList(handler func(rsToDelete *listItem) (boo for name, rs := range q.list { deleted, err := handler(rs) if err != nil { - glog.Errorf("Try delete rs %q err: %v", name, err) + klog.Errorf("Try delete rs %q err: %v", name, err) success = false } if deleted { - glog.Infof("lw: remote out of the list: %s", name) + klog.Infof("lw: remote out of the list: %s", name) q.remove(rs) } } @@ -141,7 +141,7 @@ func (m *GracefulTerminationManager) GracefulDeleteRS(vs *utilipvs.VirtualServer } deleted, err := m.deleteRsFunc(ele) if err != nil { - glog.Errorf("Delete rs %q err: %v", ele.String(), err) + klog.Errorf("Delete rs %q err: %v", ele.String(), err) } if deleted { return nil @@ -151,13 +151,13 @@ func (m *GracefulTerminationManager) GracefulDeleteRS(vs *utilipvs.VirtualServer if err != nil { return err } - glog.V(5).Infof("Adding an element to graceful delete rsList: %+v", ele) + klog.V(5).Infof("Adding an element to graceful delete rsList: %+v", ele) m.rsList.add(ele) return nil } func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, error) { - glog.Infof("Trying to delete rs: %s", rsToDelete.String()) + klog.Infof("Trying to delete rs: %s", rsToDelete.String()) rss, err := m.ipvs.GetRealServers(rsToDelete.VirtualServer) if err != nil { return false, err @@ -167,7 +167,7 @@ func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, e if rs.ActiveConn != 0 { return false, nil } - glog.Infof("Deleting rs: %s", rsToDelete.String()) + klog.Infof("Deleting rs: %s", rsToDelete.String()) err := m.ipvs.DeleteRealServer(rsToDelete.VirtualServer, rs) if err != nil { return false, fmt.Errorf("Delete destination %q err: %v", rs.String(), err) @@ -180,7 +180,7 @@ func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, e func (m *GracefulTerminationManager) tryDeleteRs() { if !m.rsList.flushList(m.deleteRsFunc) { - glog.Errorf("Try flush graceful termination list err") + klog.Errorf("Try flush graceful termination list err") } } @@ -203,12 +203,12 @@ func (m *GracefulTerminationManager) Run() { // before start, add leftover in delete rs to graceful delete rsList vss, err := m.ipvs.GetVirtualServers() if err != nil { - glog.Errorf("IPVS graceful delete manager failed to get IPVS virtualserver") + klog.Errorf("IPVS graceful delete manager failed to get IPVS virtualserver") } for _, vs := range vss { rss, err := m.ipvs.GetRealServers(vs) if err != nil { - glog.Errorf("IPVS graceful delete manager failed to get %v realserver", vs) + klog.Errorf("IPVS graceful delete manager failed to get %v realserver", vs) continue } for _, rs := range rss { diff --git a/pkg/proxy/ipvs/ipset.go b/pkg/proxy/ipvs/ipset.go index e25ee0098535d..e449cc1ee3ff1 100644 --- a/pkg/proxy/ipvs/ipset.go +++ b/pkg/proxy/ipvs/ipset.go @@ -22,7 +22,7 @@ import ( utilipset "k8s.io/kubernetes/pkg/util/ipset" "fmt" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -125,7 +125,7 @@ func (set *IPSet) resetEntries() { func (set *IPSet) syncIPSetEntries() { appliedEntries, err := set.handle.ListEntries(set.Name) if err != nil { - 
glog.Errorf("Failed to list ip set entries, error: %v", err) + klog.Errorf("Failed to list ip set entries, error: %v", err) return } @@ -140,18 +140,18 @@ func (set *IPSet) syncIPSetEntries() { for _, entry := range currentIPSetEntries.Difference(set.activeEntries).List() { if err := set.handle.DelEntry(entry, set.Name); err != nil { if !utilipset.IsNotFoundError(err) { - glog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err) + klog.Errorf("Failed to delete ip set entry: %s from ip set: %s, error: %v", entry, set.Name, err) } } else { - glog.V(3).Infof("Successfully delete legacy ip set entry: %s from ip set: %s", entry, set.Name) + klog.V(3).Infof("Successfully delete legacy ip set entry: %s from ip set: %s", entry, set.Name) } } // Create active entries for _, entry := range set.activeEntries.Difference(currentIPSetEntries).List() { if err := set.handle.AddEntry(entry, &set.IPSet, true); err != nil { - glog.Errorf("Failed to add entry: %v to ip set: %s, error: %v", entry, set.Name, err) + klog.Errorf("Failed to add entry: %v to ip set: %s, error: %v", entry, set.Name, err) } else { - glog.V(3).Infof("Successfully add entry: %v to ip set: %s", entry, set.Name) + klog.V(3).Infof("Successfully add entry: %v to ip set: %s", entry, set.Name) } } } @@ -159,7 +159,7 @@ func (set *IPSet) syncIPSetEntries() { func ensureIPSet(set *IPSet) error { if err := set.handle.CreateSet(&set.IPSet, true); err != nil { - glog.Errorf("Failed to make sure ip set: %v exist, error: %v", set, err) + klog.Errorf("Failed to make sure ip set: %v exist, error: %v", set, err) return err } return nil @@ -169,13 +169,13 @@ func ensureIPSet(set *IPSet) error { func checkMinVersion(vstring string) bool { version, err := utilversion.ParseGeneric(vstring) if err != nil { - glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) + klog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err) return false } minVersion, err := utilversion.ParseGeneric(MinIPSetCheckVersion) if err != nil { - glog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinIPSetCheckVersion, err) + klog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinIPSetCheckVersion, err) return false } return !version.LessThan(minVersion) diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index d9233a0801af1..accaabc3a407e 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -28,7 +28,7 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -303,7 +303,7 @@ func NewProxier(ipt utiliptables.Interface, // are connected to a Linux bridge (but not SDN bridges). 
Until most // plugins handle this, log when config is missing if val, err := sysctl.GetSysctl(sysctlBridgeCallIPTables); err == nil && val != 1 { - glog.Infof("missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended") + klog.Infof("missing br-netfilter module or unset sysctl br-nf-call-iptables; proxy may not work as intended") } // Set the conntrack sysctl we need for @@ -339,22 +339,22 @@ func NewProxier(ipt utiliptables.Interface, masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) if nodeIP == nil { - glog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") + klog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") nodeIP = net.ParseIP("127.0.0.1") } isIPv6 := utilnet.IsIPv6(nodeIP) - glog.V(2).Infof("nodeIP: %v, isIPv6: %v", nodeIP, isIPv6) + klog.V(2).Infof("nodeIP: %v, isIPv6: %v", nodeIP, isIPv6) if len(clusterCIDR) == 0 { - glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic") + klog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic") } else if utilnet.IsIPv6CIDR(clusterCIDR) != isIPv6 { return nil, fmt.Errorf("clusterCIDR %s has incorrect IP version: expect isIPv6=%t", clusterCIDR, isIPv6) } if len(scheduler) == 0 { - glog.Warningf("IPVS scheduler not specified, use %s by default", DefaultScheduler) + klog.Warningf("IPVS scheduler not specified, use %s by default", DefaultScheduler) scheduler = DefaultScheduler } @@ -401,7 +401,7 @@ func NewProxier(ipt utiliptables.Interface, proxier.ipsetList[is.name] = NewIPSet(ipset, is.name, is.setType, isIPv6, is.comment) } burstSyncs := 2 - glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) + klog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs) proxier.gracefuldeleteManager.Run() return proxier, nil @@ -453,7 +453,7 @@ func (handle *LinuxKernelHandler) GetModules() ([]string, error) { builtinModsFilePath := fmt.Sprintf("/lib/modules/%s/modules.builtin", kernelVersion) b, err := ioutil.ReadFile(builtinModsFilePath) if err != nil { - glog.Warningf("Failed to read file %s with error %v. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules", builtinModsFilePath, err) + klog.Warningf("Failed to read file %s with error %v. You can ignore this message when kube-proxy is running inside container without mounting /lib/modules", builtinModsFilePath, err) } var bmods []string for _, module := range ipvsModules { @@ -466,7 +466,7 @@ func (handle *LinuxKernelHandler) GetModules() ([]string, error) { for _, kmod := range ipvsModules { err := handle.executor.Command("modprobe", "--", kmod).Run() if err != nil { - glog.Warningf("Failed to load kernel module %v with modprobe. "+ + klog.Warningf("Failed to load kernel module %v with modprobe. 
"+ "You can ignore this message when kube-proxy is running inside container without mounting /lib/modules", kmod) } } @@ -536,7 +536,7 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool } if err := ipt.DeleteRule(jc.table, jc.from, args...); err != nil { if !utiliptables.IsNotFoundError(err) { - glog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) + klog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) encounteredError = true } } @@ -546,13 +546,13 @@ func cleanupIptablesLeftovers(ipt utiliptables.Interface) (encounteredError bool for _, ch := range iptablesChains { if err := ipt.FlushChain(ch.table, ch.chain); err != nil { if !utiliptables.IsNotFoundError(err) { - glog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) + klog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) encounteredError = true } } if err := ipt.DeleteChain(ch.table, ch.chain); err != nil { if !utiliptables.IsNotFoundError(err) { - glog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) + klog.Errorf("Error removing iptables rules in ipvs proxier: %v", err) encounteredError = true } } @@ -571,7 +571,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset encounteredError = false err := ipvs.Flush() if err != nil { - glog.Errorf("Error flushing IPVS rules: %v", err) + klog.Errorf("Error flushing IPVS rules: %v", err) encounteredError = true } } @@ -579,7 +579,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset nl := NewNetLinkHandle() err := nl.DeleteDummyDevice(DefaultDummyDevice) if err != nil { - glog.Errorf("Error deleting dummy device %s created by IPVS proxier: %v", DefaultDummyDevice, err) + klog.Errorf("Error deleting dummy device %s created by IPVS proxier: %v", DefaultDummyDevice, err) encounteredError = true } // Clear iptables created by ipvs Proxier. 
@@ -590,7 +590,7 @@ func CleanupLeftovers(ipvs utilipvs.Interface, ipt utiliptables.Interface, ipset err = ipset.DestroySet(set.name) if err != nil { if !utilipset.IsNotFoundError(err) { - glog.Errorf("Error removing ipset %s, error: %v", set.name, err) + klog.Errorf("Error removing ipset %s, error: %v", set.name, err) encounteredError = true } } @@ -692,11 +692,11 @@ func (proxier *Proxier) syncProxyRules() { start := time.Now() defer func() { metrics.SyncProxyRulesLatency.Observe(metrics.SinceInMicroseconds(start)) - glog.V(4).Infof("syncProxyRules took %v", time.Since(start)) + klog.V(4).Infof("syncProxyRules took %v", time.Since(start)) }() // don't sync rules till we've received services and endpoints if !proxier.endpointsSynced || !proxier.servicesSynced { - glog.V(2).Info("Not syncing ipvs rules until Services and Endpoints have been received from master") + klog.V(2).Info("Not syncing ipvs rules until Services and Endpoints have been received from master") return } @@ -710,12 +710,12 @@ func (proxier *Proxier) syncProxyRules() { // merge stale services gathered from updateEndpointsMap for _, svcPortName := range endpointUpdateResult.StaleServiceNames { if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.GetProtocol() == v1.ProtocolUDP { - glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIPString()) + klog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.ClusterIPString()) staleServices.Insert(svcInfo.ClusterIPString()) } } - glog.V(3).Infof("Syncing ipvs Proxier rules") + klog.V(3).Infof("Syncing ipvs Proxier rules") // Begin install iptables @@ -735,7 +735,7 @@ func (proxier *Proxier) syncProxyRules() { // make sure dummy interface exists in the system where ipvs Proxier will bind service address on it _, err := proxier.netlinkHandle.EnsureDummyDevice(DefaultDummyDevice) if err != nil { - glog.Errorf("Failed to create dummy interface: %s, error: %v", DefaultDummyDevice, err) + klog.Errorf("Failed to create dummy interface: %s, error: %v", DefaultDummyDevice, err) return } @@ -760,7 +760,7 @@ func (proxier *Proxier) syncProxyRules() { for svcName, svc := range proxier.serviceMap { svcInfo, ok := svc.(*serviceInfo) if !ok { - glog.Errorf("Failed to cast serviceInfo %q", svcName.String()) + klog.Errorf("Failed to cast serviceInfo %q", svcName.String()) continue } protocol := strings.ToLower(string(svcInfo.Protocol)) @@ -772,7 +772,7 @@ func (proxier *Proxier) syncProxyRules() { for _, e := range proxier.endpointsMap[svcName] { ep, ok := e.(*proxy.BaseEndpointInfo) if !ok { - glog.Errorf("Failed to cast BaseEndpointInfo %q", e.String()) + klog.Errorf("Failed to cast BaseEndpointInfo %q", e.String()) continue } epIP := ep.IP() @@ -789,7 +789,7 @@ func (proxier *Proxier) syncProxyRules() { SetType: utilipset.HashIPPortIP, } if valid := proxier.ipsetList[kubeLoopBackIPSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoopBackIPSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoopBackIPSet].Name)) continue } proxier.ipsetList[kubeLoopBackIPSet].activeEntries.Insert(entry.String()) @@ -806,7 +806,7 @@ func (proxier *Proxier) syncProxyRules() { // add service Cluster IP:Port to kubeServiceAccess ip set for the purpose of solving hairpin. 
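All of these ipset blocks share one reconcile shape, spelled out in syncIPSetEntries earlier in the file: validate each candidate entry, collect it in activeEntries, then diff the desired set against what the kernel currently holds and issue only the adds and deletes; the cluster-IP entries below follow the same validate-then-insert steps. The core of that diff, reduced to a standalone sketch with hypothetical names:

    // diffEntries returns the entries to add (desired but not current)
    // and the entries to delete (current but not desired).
    func diffEntries(current, desired map[string]bool) (toAdd, toDel []string) {
        for e := range desired {
            if !current[e] {
                toAdd = append(toAdd, e)
            }
        }
        for e := range current {
            if !desired[e] {
                toDel = append(toDel, e)
            }
        }
        return toAdd, toDel
    }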
// proxier.kubeServiceAccessSet.activeEntries.Insert(entry.String()) if valid := proxier.ipsetList[kubeClusterIPSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeClusterIPSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeClusterIPSet].Name)) continue } proxier.ipsetList[kubeClusterIPSet].activeEntries.Insert(entry.String()) @@ -829,16 +829,16 @@ func (proxier *Proxier) syncProxyRules() { // ExternalTrafficPolicy only works for NodePort and external LB traffic, does not affect ClusterIP // So we still need clusterIP rules in onlyNodeLocalEndpoints mode. if err := proxier.syncEndpoint(svcName, false, serv); err != nil { - glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) } } else { - glog.Errorf("Failed to sync service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync service: %v, err: %v", serv, err) } // Capture externalIPs. for _, externalIP := range svcInfo.ExternalIPs { if local, err := utilproxy.IsLocalIP(externalIP); err != nil { - glog.Errorf("can't determine if IP is local, assuming not: %v", err) + klog.Errorf("can't determine if IP is local, assuming not: %v", err) // We do not start listening on SCTP ports, according to our agreement in the // SCTP support KEP } else if local && (svcInfo.GetProtocol() != v1.ProtocolSCTP) { @@ -849,7 +849,7 @@ func (proxier *Proxier) syncProxyRules() { Protocol: protocol, } if proxier.portsMap[lp] != nil { - glog.V(4).Infof("Port %s was open before and is still needed", lp.String()) + klog.V(4).Infof("Port %s was open before and is still needed", lp.String()) replacementPortsMap[lp] = proxier.portsMap[lp] } else { socket, err := proxier.portMapper.OpenLocalPort(&lp) @@ -863,7 +863,7 @@ func (proxier *Proxier) syncProxyRules() { UID: types.UID(proxier.hostname), Namespace: "", }, v1.EventTypeWarning, err.Error(), msg) - glog.Error(msg) + klog.Error(msg) continue } replacementPortsMap[lp] = socket @@ -879,7 +879,7 @@ func (proxier *Proxier) syncProxyRules() { } // We have to SNAT packets to external IPs. if valid := proxier.ipsetList[kubeExternalIPSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeExternalIPSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeExternalIPSet].Name)) continue } proxier.ipsetList[kubeExternalIPSet].activeEntries.Insert(entry.String()) @@ -899,10 +899,10 @@ func (proxier *Proxier) syncProxyRules() { activeIPVSServices[serv.String()] = true activeBindAddrs[serv.Address.String()] = true if err := proxier.syncEndpoint(svcName, false, serv); err != nil { - glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) } } else { - glog.Errorf("Failed to sync service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync service: %v, err: %v", serv, err) } } @@ -921,14 +921,14 @@ func (proxier *Proxier) syncProxyRules() { // If we are proxying globally, we need to masquerade in case we cross nodes. // If we are proxying only locally, we can retain the source IP. 
if valid := proxier.ipsetList[kubeLoadBalancerSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerSet].Name)) continue } proxier.ipsetList[kubeLoadBalancerSet].activeEntries.Insert(entry.String()) // insert loadbalancer entry to lbIngressLocalSet if service externaltrafficpolicy=local if svcInfo.OnlyNodeLocalEndpoints { if valid := proxier.ipsetList[kubeLoadBalancerLocalSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerLocalSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerLocalSet].Name)) continue } proxier.ipsetList[kubeLoadBalancerLocalSet].activeEntries.Insert(entry.String()) @@ -938,7 +938,7 @@ func (proxier *Proxier) syncProxyRules() { // This currently works for loadbalancers that preserves source ips. // For loadbalancers which direct traffic to service NodePort, the firewall rules will not apply. if valid := proxier.ipsetList[kubeLoadbalancerFWSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadbalancerFWSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadbalancerFWSet].Name)) continue } proxier.ipsetList[kubeLoadbalancerFWSet].activeEntries.Insert(entry.String()) @@ -954,7 +954,7 @@ func (proxier *Proxier) syncProxyRules() { } // enumerate all white list source cidr if valid := proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].Name)) continue } proxier.ipsetList[kubeLoadBalancerSourceCIDRSet].activeEntries.Insert(entry.String()) @@ -978,7 +978,7 @@ func (proxier *Proxier) syncProxyRules() { } // enumerate all white list source ip if valid := proxier.ipsetList[kubeLoadBalancerSourceIPSet].validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerSourceIPSet].Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, proxier.ipsetList[kubeLoadBalancerSourceIPSet].Name)) continue } proxier.ipsetList[kubeLoadBalancerSourceIPSet].activeEntries.Insert(entry.String()) @@ -1002,10 +1002,10 @@ func (proxier *Proxier) syncProxyRules() { activeIPVSServices[serv.String()] = true activeBindAddrs[serv.Address.String()] = true if err := proxier.syncEndpoint(svcName, onlyLocal, serv); err != nil { - glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) } } else { - glog.Errorf("Failed to sync service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync service: %v, err: %v", serv, err) } } } @@ -1013,7 +1013,7 @@ func (proxier *Proxier) syncProxyRules() { if svcInfo.NodePort != 0 { addresses, err := utilproxy.GetNodeAddresses(proxier.nodePortAddresses, proxier.networkInterfacer) if err != nil { - glog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err) + klog.Errorf("Failed to get node ip address matching nodeport cidr: %v", err) continue } @@ -1038,14 +1038,14 @@ func (proxier *Proxier) syncProxyRules() { // For ports on node IPs, open the actual port and hold it. 
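"Open the actual port and hold it" means keeping a listening socket alive for as long as the rule that claims the port exists; the loop below does this per local port via OpenLocalPort. A minimal sketch for the TCP case (imports net and fmt assumed; the real code also handles UDP and the SCTP exclusion):

    // holdTCPPort binds the port and returns the live listener; keeping
    // it open reserves the port against other processes, and Close()
    // releases it, mirroring the replacementPortsMap bookkeeping above.
    func holdTCPPort(port int) (net.Listener, error) {
        l, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
        if err != nil {
            return nil, err // already bound or not permitted
        }
        return l, nil
    }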
for _, lp := range lps { if proxier.portsMap[lp] != nil { - glog.V(4).Infof("Port %s was open before and is still needed", lp.String()) + klog.V(4).Infof("Port %s was open before and is still needed", lp.String()) replacementPortsMap[lp] = proxier.portsMap[lp] // We do not start listening on SCTP ports, according to our agreement in the // SCTP support KEP } else if svcInfo.GetProtocol() != v1.ProtocolSCTP { socket, err := proxier.portMapper.OpenLocalPort(&lp) if err != nil { - glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err) + klog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err) continue } if lp.Protocol == "udp" { @@ -1074,11 +1074,11 @@ func (proxier *Proxier) syncProxyRules() { nodePortSet = proxier.ipsetList[kubeNodePortSetSCTP] default: // It should never hit - glog.Errorf("Unsupported protocol type: %s", protocol) + klog.Errorf("Unsupported protocol type: %s", protocol) } if nodePortSet != nil { if valid := nodePortSet.validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, nodePortSet.Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, nodePortSet.Name)) continue } nodePortSet.activeEntries.Insert(entry.String()) @@ -1096,11 +1096,11 @@ func (proxier *Proxier) syncProxyRules() { nodePortLocalSet = proxier.ipsetList[kubeNodePortLocalSetSCTP] default: // It should never hit - glog.Errorf("Unsupported protocol type: %s", protocol) + klog.Errorf("Unsupported protocol type: %s", protocol) } if nodePortLocalSet != nil { if valid := nodePortLocalSet.validateEntry(entry); !valid { - glog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, nodePortLocalSet.Name)) + klog.Errorf("%s", fmt.Sprintf(EntryInvalidErr, entry, nodePortLocalSet.Name)) continue } nodePortLocalSet.activeEntries.Insert(entry.String()) @@ -1117,7 +1117,7 @@ func (proxier *Proxier) syncProxyRules() { // zero cidr nodeIPs, err = proxier.ipGetter.NodeIPs() if err != nil { - glog.Errorf("Failed to list all node IPs from host, err: %v", err) + klog.Errorf("Failed to list all node IPs from host, err: %v", err) } } for _, nodeIP := range nodeIPs { @@ -1136,10 +1136,10 @@ func (proxier *Proxier) syncProxyRules() { if err := proxier.syncService(svcNameString, serv, false); err == nil { activeIPVSServices[serv.String()] = true if err := proxier.syncEndpoint(svcName, svcInfo.OnlyNodeLocalEndpoints, serv); err != nil { - glog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync endpoint for service: %v, err: %v", serv, err) } } else { - glog.Errorf("Failed to sync service: %v, err: %v", serv, err) + klog.Errorf("Failed to sync service: %v, err: %v", serv, err) } } } @@ -1162,10 +1162,10 @@ func (proxier *Proxier) syncProxyRules() { proxier.iptablesData.Write(proxier.filterChains.Bytes()) proxier.iptablesData.Write(proxier.filterRules.Bytes()) - glog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes()) + klog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes()) err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { - glog.Errorf("Failed to execute iptables-restore: %v\nRules:\n%s", err, proxier.iptablesData.Bytes()) + klog.Errorf("Failed to execute iptables-restore: %v\nRules:\n%s", err, proxier.iptablesData.Bytes()) // Revert new local ports. 
utilproxy.RevertPorts(replacementPortsMap, proxier.portsMap) return @@ -1186,7 +1186,7 @@ func (proxier *Proxier) syncProxyRules() { currentIPVSServices[appliedSvc.String()] = appliedSvc } } else { - glog.Errorf("Failed to get ipvs service, err: %v", err) + klog.Errorf("Failed to get ipvs service, err: %v", err) } proxier.cleanLegacyService(activeIPVSServices, currentIPVSServices) @@ -1194,7 +1194,7 @@ func (proxier *Proxier) syncProxyRules() { // currentBindAddrs represents ip addresses bind to DefaultDummyDevice from the system currentBindAddrs, err := proxier.netlinkHandle.ListBindAddress(DefaultDummyDevice) if err != nil { - glog.Errorf("Failed to get bind address, err: %v", err) + klog.Errorf("Failed to get bind address, err: %v", err) } proxier.cleanLegacyBindAddr(activeBindAddrs, currentBindAddrs) @@ -1207,17 +1207,17 @@ func (proxier *Proxier) syncProxyRules() { // not "OnlyLocal", but the services list will not, and the healthChecker // will just drop those endpoints. if err := proxier.healthChecker.SyncServices(serviceUpdateResult.HCServiceNodePorts); err != nil { - glog.Errorf("Error syncing healtcheck services: %v", err) + klog.Errorf("Error syncing healthcheck services: %v", err) } if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.HCEndpointsLocalIPSize); err != nil { - glog.Errorf("Error syncing healthcheck endpoints: %v", err) + klog.Errorf("Error syncing healthcheck endpoints: %v", err) } // Finish housekeeping. // TODO: these could be made more consistent. for _, svcIP := range staleServices.UnsortedList() { if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil { - glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) + klog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) } } proxier.deleteEndpointConnections(endpointUpdateResult.StaleEndpoints) @@ -1395,7 +1395,7 @@ func (proxier *Proxier) createAndLinkeKubeChain() { // Make sure we keep stats for the top-level chains for _, ch := range iptablesChains { if _, err := proxier.iptables.EnsureChain(ch.table, ch.chain); err != nil { - glog.Errorf("Failed to ensure that %s chain %s exists: %v", ch.table, ch.chain, err) + klog.Errorf("Failed to ensure that %s chain %s exists: %v", ch.table, ch.chain, err) return } if ch.table == utiliptables.TableNAT { @@ -1416,7 +1416,7 @@ func (proxier *Proxier) createAndLinkeKubeChain() { for _, jc := range iptablesJumpChain { args := []string{"-m", "comment", "--comment", jc.comment, "-j", string(jc.to)} if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, jc.table, jc.from, args...); err != nil { - glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", jc.table, jc.from, jc.to, err) + klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", jc.table, jc.from, jc.to, err) } } @@ -1446,7 +1446,7 @@ func (proxier *Proxier) getExistingChains(buffer *bytes.Buffer, table utiliptabl buffer.Reset() err := proxier.iptables.SaveInto(table, buffer) if err != nil { // if we failed to get any rules - glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) + klog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) } else { // otherwise parse the output return utiliptables.GetChainLines(table, buffer.Bytes()) } @@ -1462,7 +1462,7 @@ func (proxier *Proxier) deleteEndpointConnections(connectionMap []proxy.ServiceE endpointIP := utilproxy.IPPart(epSvcPair.Endpoint) err := conntrack.ClearEntriesForNAT(proxier.exec,
svcInfo.ClusterIPString(), endpointIP, v1.ProtocolUDP) if err != nil { - glog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err) + klog.Errorf("Failed to delete %s endpoint connections, error: %v", epSvcPair.ServicePortName.String(), err) } } } @@ -1473,17 +1473,17 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer, if appliedVirtualServer == nil || !appliedVirtualServer.Equal(vs) { if appliedVirtualServer == nil { // IPVS service is not found, create a new service - glog.V(3).Infof("Adding new service %q %s:%d/%s", svcName, vs.Address, vs.Port, vs.Protocol) + klog.V(3).Infof("Adding new service %q %s:%d/%s", svcName, vs.Address, vs.Port, vs.Protocol) if err := proxier.ipvs.AddVirtualServer(vs); err != nil { - glog.Errorf("Failed to add IPVS service %q: %v", svcName, err) + klog.Errorf("Failed to add IPVS service %q: %v", svcName, err) return err } } else { // IPVS service was changed, update the existing one // During updates, service VIP will not go down - glog.V(3).Infof("IPVS service %s was changed", svcName) + klog.V(3).Infof("IPVS service %s was changed", svcName) if err := proxier.ipvs.UpdateVirtualServer(vs); err != nil { - glog.Errorf("Failed to update IPVS service, err:%v", err) + klog.Errorf("Failed to update IPVS service, err:%v", err) return err } } @@ -1492,10 +1492,10 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer, // bind service address to dummy interface even if service not changed, // in case that service IP was removed by other processes if bindAddr { - glog.V(4).Infof("Bind addr %s", vs.Address.String()) + klog.V(4).Infof("Bind addr %s", vs.Address.String()) _, err := proxier.netlinkHandle.EnsureAddressBind(vs.Address.String(), DefaultDummyDevice) if err != nil { - glog.Errorf("Failed to bind service address to dummy device %q: %v", svcName, err) + klog.Errorf("Failed to bind service address to dummy device %q: %v", svcName, err) return err } } @@ -1505,7 +1505,7 @@ func (proxier *Proxier) syncService(svcName string, vs *utilipvs.VirtualServer, func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNodeLocalEndpoints bool, vs *utilipvs.VirtualServer) error { appliedVirtualServer, err := proxier.ipvs.GetVirtualServer(vs) if err != nil || appliedVirtualServer == nil { - glog.Errorf("Failed to get IPVS service, error: %v", err) + klog.Errorf("Failed to get IPVS service, error: %v", err) return err } @@ -1516,7 +1516,7 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode curDests, err := proxier.ipvs.GetRealServers(appliedVirtualServer) if err != nil { - glog.Errorf("Failed to list IPVS destinations, error: %v", err) + klog.Errorf("Failed to list IPVS destinations, error: %v", err) return err } for _, des := range curDests { @@ -1534,12 +1534,12 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode for _, ep := range newEndpoints.List() { ip, port, err := net.SplitHostPort(ep) if err != nil { - glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) + klog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) continue } portNum, err := strconv.Atoi(port) if err != nil { - glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err) + klog.Errorf("Failed to parse endpoint port %s, error: %v", port, err) continue } @@ -1555,16 +1555,16 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode if 
!proxier.gracefuldeleteManager.InTerminationList(uniqueRS) { continue } - glog.V(5).Infof("new ep %q is in graceful delete list", uniqueRS) + klog.V(5).Infof("new ep %q is in graceful delete list", uniqueRS) err := proxier.gracefuldeleteManager.MoveRSOutofGracefulDeleteList(uniqueRS) if err != nil { - glog.Errorf("Failed to delete endpoint: %v in gracefulDeleteQueue, error: %v", ep, err) + klog.Errorf("Failed to delete endpoint: %v in gracefulDeleteQueue, error: %v", ep, err) continue } } err = proxier.ipvs.AddRealServer(appliedVirtualServer, newDest) if err != nil { - glog.Errorf("Failed to add destination: %v, error: %v", newDest, err) + klog.Errorf("Failed to add destination: %v, error: %v", newDest, err) continue } } @@ -1577,12 +1577,12 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode } ip, port, err := net.SplitHostPort(ep) if err != nil { - glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) + klog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) continue } portNum, err := strconv.Atoi(port) if err != nil { - glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err) + klog.Errorf("Failed to parse endpoint port %s, error: %v", port, err) continue } @@ -1591,10 +1591,10 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode Port: uint16(portNum), } - glog.V(5).Infof("Using graceful delete to delete: %v", delDest) + klog.V(5).Infof("Using graceful delete to delete: %v", delDest) err = proxier.gracefuldeleteManager.GracefulDeleteRS(appliedVirtualServer, delDest) if err != nil { - glog.Errorf("Failed to delete destination: %v, error: %v", delDest, err) + klog.Errorf("Failed to delete destination: %v, error: %v", delDest, err) continue } } @@ -1627,7 +1627,7 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre } if okayToDelete { if err := proxier.ipvs.DeleteVirtualServer(svc); err != nil { - glog.Errorf("Failed to delete service, error: %v", err) + klog.Errorf("Failed to delete service, error: %v", err) } } } @@ -1638,11 +1638,11 @@ func (proxier *Proxier) cleanLegacyBindAddr(activeBindAddrs map[string]bool, cur for _, addr := range currentBindAddrs { if _, ok := activeBindAddrs[addr]; !ok { // This address was not processed in the latest sync loop - glog.V(4).Infof("Unbind addr %s", addr) + klog.V(4).Infof("Unbind addr %s", addr) err := proxier.netlinkHandle.UnbindAddress(addr, DefaultDummyDevice) // Ignore no such address error when try to unbind address if err != nil { - glog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err) + klog.Errorf("Failed to unbind service addr %s from dummy interface %s: %v", addr, DefaultDummyDevice, err) } } } @@ -1708,7 +1708,7 @@ func openLocalPort(lp *utilproxy.LocalPort) (utilproxy.Closeable, error) { default: return nil, fmt.Errorf("unknown protocol %q", lp.Protocol) } - glog.V(2).Infof("Opened local port %s", lp.String()) + klog.V(2).Infof("Opened local port %s", lp.String()) return socket, nil } diff --git a/pkg/proxy/service.go b/pkg/proxy/service.go index 5853ef9ca00c0..8386e62c0e260 100644 --- a/pkg/proxy/service.go +++ b/pkg/proxy/service.go @@ -23,7 +23,7 @@ import ( "strings" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -119,7 +119,7 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic if apiservice.NeedsHealthCheck(service) { p := service.Spec.HealthCheckNodePort if p == 0 { 
- glog.Errorf("Service %s/%s has no healthcheck nodeport", service.Namespace, service.Name) + klog.Errorf("Service %s/%s has no healthcheck nodeport", service.Namespace, service.Name) } else { info.HealthCheckNodePort = int(p) } @@ -306,9 +306,9 @@ func (sm *ServiceMap) merge(other ServiceMap) sets.String { existingPorts.Insert(svcPortName.String()) _, exists := (*sm)[svcPortName] if !exists { - glog.V(1).Infof("Adding new service port %q at %s", svcPortName, info.String()) + klog.V(1).Infof("Adding new service port %q at %s", svcPortName, info.String()) } else { - glog.V(1).Infof("Updating existing service port %q at %s", svcPortName, info.String()) + klog.V(1).Infof("Updating existing service port %q at %s", svcPortName, info.String()) } (*sm)[svcPortName] = info } @@ -331,13 +331,13 @@ func (sm *ServiceMap) unmerge(other ServiceMap, UDPStaleClusterIP sets.String) { for svcPortName := range other { info, exists := (*sm)[svcPortName] if exists { - glog.V(1).Infof("Removing service port %q", svcPortName) + klog.V(1).Infof("Removing service port %q", svcPortName) if info.GetProtocol() == v1.ProtocolUDP { UDPStaleClusterIP.Insert(info.ClusterIPString()) } delete(*sm, svcPortName) } else { - glog.Errorf("Service port %q doesn't exists", svcPortName) + klog.Errorf("Service port %q doesn't exists", svcPortName) } } } diff --git a/pkg/proxy/userspace/BUILD b/pkg/proxy/userspace/BUILD index 0d800fc66d388..b7887c85b5d0d 100644 --- a/pkg/proxy/userspace/BUILD +++ b/pkg/proxy/userspace/BUILD @@ -32,7 +32,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ diff --git a/pkg/proxy/userspace/proxier.go b/pkg/proxy/userspace/proxier.go index 38ee0c2ce6a90..661092b1b2b41 100644 --- a/pkg/proxy/userspace/proxier.go +++ b/pkg/proxy/userspace/proxier.go @@ -25,13 +25,13 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/proxy" utilproxy "k8s.io/kubernetes/pkg/proxy/util" @@ -81,7 +81,7 @@ func (info *ServiceInfo) IsAlive() bool { func logTimeout(err error) bool { if e, ok := err.(net.Error); ok { if e.Timeout() { - glog.V(3).Infof("connection to endpoint closed due to inactivity") + klog.V(3).Infof("connection to endpoint closed due to inactivity") return true } } @@ -184,7 +184,7 @@ func NewCustomProxier(loadBalancer LoadBalancer, listenIP net.IP, iptables iptab proxyPorts := newPortAllocator(pr) - glog.V(2).Infof("Setting proxy IP to %v and initializing iptables", hostIP) + klog.V(2).Infof("Setting proxy IP to %v and initializing iptables", hostIP) return createProxier(loadBalancer, listenIP, iptables, exec, hostIP, proxyPorts, syncPeriod, minSyncPeriod, udpIdleTimeout, makeProxySocket) } @@ -229,13 +229,13 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) { args := []string{"-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules"} if err := 
ipt.DeleteRule(iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostPortalChain))...); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error removing userspace rule: %v", err) + klog.Errorf("Error removing userspace rule: %v", err) encounteredError = true } } if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerPortalChain))...); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error removing userspace rule: %v", err) + klog.Errorf("Error removing userspace rule: %v", err) encounteredError = true } } @@ -243,20 +243,20 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) { args = append(args, "-m", "comment", "--comment", "handle service NodePorts; NOTE: this must be the last rule in the chain") if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainOutput, append(args, "-j", string(iptablesHostNodePortChain))...); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error removing userspace rule: %v", err) + klog.Errorf("Error removing userspace rule: %v", err) encounteredError = true } } if err := ipt.DeleteRule(iptables.TableNAT, iptables.ChainPrerouting, append(args, "-j", string(iptablesContainerNodePortChain))...); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error removing userspace rule: %v", err) + klog.Errorf("Error removing userspace rule: %v", err) encounteredError = true } } args = []string{"-m", "comment", "--comment", "Ensure that non-local NodePort traffic can flow"} if err := ipt.DeleteRule(iptables.TableFilter, iptables.ChainInput, append(args, "-j", string(iptablesNonLocalNodePortChain))...); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error removing userspace rule: %v", err) + klog.Errorf("Error removing userspace rule: %v", err) encounteredError = true } } @@ -271,13 +271,13 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) { // flush chain, then if successful delete, delete will fail if flush fails. 
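The flush-before-delete ordering in that comment is an iptables constraint rather than a style choice: chain deletion fails while the chain still holds rules or is referenced elsewhere, so the flush must succeed first, exactly as the FlushChain/DeleteChain pair below is sequenced. The same two-step teardown against the iptables CLI directly, as an illustrative sketch (imports fmt and os/exec assumed; names hypothetical):

    // removeChain flushes then deletes a chain; deletion would fail
    // outright if the flush had not emptied it first.
    func removeChain(table, chain string) error {
        if out, err := exec.Command("iptables", "-t", table, "-F", chain).CombinedOutput(); err != nil {
            return fmt.Errorf("flush %s/%s: %v: %s", table, chain, err, out)
        }
        if out, err := exec.Command("iptables", "-t", table, "-X", chain).CombinedOutput(); err != nil {
            return fmt.Errorf("delete %s/%s: %v: %s", table, chain, err, out)
        }
        return nil
    }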
if err := ipt.FlushChain(table, c); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error flushing userspace chain: %v", err) + klog.Errorf("Error flushing userspace chain: %v", err) encounteredError = true } } else { if err = ipt.DeleteChain(table, c); err != nil { if !iptables.IsNotFoundError(err) { - glog.Errorf("Error deleting userspace chain: %v", err) + klog.Errorf("Error deleting userspace chain: %v", err) encounteredError = true } } @@ -290,7 +290,7 @@ func CleanupLeftovers(ipt iptables.Interface) (encounteredError bool) { // Sync is called to immediately synchronize the proxier state to iptables func (proxier *Proxier) Sync() { if err := iptablesInit(proxier.iptables); err != nil { - glog.Errorf("Failed to ensure iptables: %v", err) + klog.Errorf("Failed to ensure iptables: %v", err) } proxier.ensurePortals() proxier.cleanupStaleStickySessions() @@ -302,7 +302,7 @@ func (proxier *Proxier) SyncLoop() { defer t.Stop() for { <-t.C - glog.V(6).Infof("Periodic sync") + klog.V(6).Infof("Periodic sync") proxier.Sync() } } @@ -315,7 +315,7 @@ func (proxier *Proxier) ensurePortals() { for name, info := range proxier.serviceMap { err := proxier.openPortal(name, info) if err != nil { - glog.Errorf("Failed to ensure portal for %q: %v", name, err) + klog.Errorf("Failed to ensure portal for %q: %v", name, err) } } } @@ -388,7 +388,7 @@ func (proxier *Proxier) addServiceOnPort(service proxy.ServicePortName, protocol } proxier.setServiceInfo(service, si) - glog.V(2).Infof("Proxying for service %q on %s port %d", service, protocol, portNum) + klog.V(2).Infof("Proxying for service %q on %s port %d", service, protocol, portNum) go func(service proxy.ServicePortName, proxier *Proxier) { defer runtime.HandleCrash() atomic.AddInt32(&proxier.numProxyLoops, 1) @@ -405,7 +405,7 @@ func (proxier *Proxier) mergeService(service *v1.Service) sets.String { } svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} if !helper.IsServiceIPSet(service) { - glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) + klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) return nil } existingPorts := sets.NewString() @@ -420,25 +420,25 @@ func (proxier *Proxier) mergeService(service *v1.Service) sets.String { continue } if exists { - glog.V(4).Infof("Something changed for service %q: stopping it", serviceName) + klog.V(4).Infof("Something changed for service %q: stopping it", serviceName) if err := proxier.closePortal(serviceName, info); err != nil { - glog.Errorf("Failed to close portal for %q: %v", serviceName, err) + klog.Errorf("Failed to close portal for %q: %v", serviceName, err) } if err := proxier.stopProxy(serviceName, info); err != nil { - glog.Errorf("Failed to stop service %q: %v", serviceName, err) + klog.Errorf("Failed to stop service %q: %v", serviceName, err) } } proxyPort, err := proxier.proxyPorts.AllocateNext() if err != nil { - glog.Errorf("failed to allocate proxy port for service %q: %v", serviceName, err) + klog.Errorf("failed to allocate proxy port for service %q: %v", serviceName, err) continue } serviceIP := net.ParseIP(service.Spec.ClusterIP) - glog.V(1).Infof("Adding new service %q at %s/%s", serviceName, net.JoinHostPort(serviceIP.String(), strconv.Itoa(int(servicePort.Port))), servicePort.Protocol) + klog.V(1).Infof("Adding new service %q at %s/%s", serviceName, net.JoinHostPort(serviceIP.String(), strconv.Itoa(int(servicePort.Port))), servicePort.Protocol) info, err = 
proxier.addServiceOnPort(serviceName, servicePort.Protocol, proxyPort, proxier.udpIdleTimeout) if err != nil { - glog.Errorf("Failed to start proxy for %q: %v", serviceName, err) + klog.Errorf("Failed to start proxy for %q: %v", serviceName, err) continue } info.portal.ip = serviceIP @@ -453,10 +453,10 @@ func (proxier *Proxier) mergeService(service *v1.Service) sets.String { info.stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds) } - glog.V(4).Infof("info: %#v", info) + klog.V(4).Infof("info: %#v", info) if err := proxier.openPortal(serviceName, info); err != nil { - glog.Errorf("Failed to open portal for %q: %v", serviceName, err) + klog.Errorf("Failed to open portal for %q: %v", serviceName, err) } proxier.loadBalancer.NewService(serviceName, info.sessionAffinityType, info.stickyMaxAgeSeconds) } @@ -470,7 +470,7 @@ func (proxier *Proxier) unmergeService(service *v1.Service, existingPorts sets.S } svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} if !helper.IsServiceIPSet(service) { - glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) + klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) return } @@ -484,10 +484,10 @@ func (proxier *Proxier) unmergeService(service *v1.Service, existingPorts sets.S } serviceName := proxy.ServicePortName{NamespacedName: svcName, Port: servicePort.Name} - glog.V(1).Infof("Stopping service %q", serviceName) + klog.V(1).Infof("Stopping service %q", serviceName) info, exists := proxier.serviceMap[serviceName] if !exists { - glog.Errorf("Service %q is being removed but doesn't exist", serviceName) + klog.Errorf("Service %q is being removed but doesn't exist", serviceName) continue } @@ -496,16 +496,16 @@ func (proxier *Proxier) unmergeService(service *v1.Service, existingPorts sets.S } if err := proxier.closePortal(serviceName, info); err != nil { - glog.Errorf("Failed to close portal for %q: %v", serviceName, err) + klog.Errorf("Failed to close portal for %q: %v", serviceName, err) } if err := proxier.stopProxyInternal(serviceName, info); err != nil { - glog.Errorf("Failed to stop service %q: %v", serviceName, err) + klog.Errorf("Failed to stop service %q: %v", serviceName, err) } proxier.loadBalancer.DeleteService(serviceName) } for _, svcIP := range staleUDPServices.UnsortedList() { if err := conntrack.ClearEntriesForIP(proxier.exec, svcIP, v1.ProtocolUDP); err != nil { - glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) + klog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) } } } @@ -600,31 +600,31 @@ func (proxier *Proxier) openOnePortal(portal portal, protocol v1.Protocol, proxy portalAddress := net.JoinHostPort(portal.ip.String(), strconv.Itoa(portal.port)) existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...) 
if err != nil { - glog.Errorf("Failed to install iptables %s rule for service %q, args:%v", iptablesContainerPortalChain, name, args) + klog.Errorf("Failed to install iptables %s rule for service %q, args:%v", iptablesContainerPortalChain, name, args) return err } if !existed { - glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s", name, protocol, portalAddress) + klog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s", name, protocol, portalAddress) } if portal.isExternal { args := proxier.iptablesContainerPortalArgs(portal.ip, false, true, portal.port, protocol, proxyIP, proxyPort, name) existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...) if err != nil { - glog.Errorf("Failed to install iptables %s rule that opens service %q for local traffic, args:%v", iptablesContainerPortalChain, name, args) + klog.Errorf("Failed to install iptables %s rule that opens service %q for local traffic, args:%v", iptablesContainerPortalChain, name, args) return err } if !existed { - glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s for local traffic", name, protocol, portalAddress) + klog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s for local traffic", name, protocol, portalAddress) } args = proxier.iptablesHostPortalArgs(portal.ip, true, portal.port, protocol, proxyIP, proxyPort, name) existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...) if err != nil { - glog.Errorf("Failed to install iptables %s rule for service %q for dst-local traffic", iptablesHostPortalChain, name) + klog.Errorf("Failed to install iptables %s rule for service %q for dst-local traffic", iptablesHostPortalChain, name) return err } if !existed { - glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s for dst-local traffic", name, protocol, portalAddress) + klog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s for dst-local traffic", name, protocol, portalAddress) } return nil } @@ -633,11 +633,11 @@ func (proxier *Proxier) openOnePortal(portal portal, protocol v1.Protocol, proxy args = proxier.iptablesHostPortalArgs(portal.ip, false, portal.port, protocol, proxyIP, proxyPort, name) existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...) 
if err != nil { - glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name) + klog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name) return err } if !existed { - glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s", name, protocol, portalAddress) + klog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s", name, protocol, portalAddress) } return nil } @@ -665,7 +665,7 @@ func (proxier *Proxier) claimNodePort(ip net.IP, port int, protocol v1.Protocol, return fmt.Errorf("can't open node port for %s: %v", key.String(), err) } proxier.portMap[key] = &portMapValue{owner: owner, socket: socket} - glog.V(2).Infof("Claimed local port %s", key.String()) + klog.V(2).Infof("Claimed local port %s", key.String()) return nil } if existing.owner == owner { @@ -685,7 +685,7 @@ func (proxier *Proxier) releaseNodePort(ip net.IP, port int, protocol v1.Protoco existing, found := proxier.portMap[key] if !found { // We tolerate this, it happens if we are cleaning up a failed allocation - glog.Infof("Ignoring release on unowned port: %v", key) + klog.Infof("Ignoring release on unowned port: %v", key) return nil } if existing.owner != owner { @@ -709,32 +709,32 @@ func (proxier *Proxier) openNodePort(nodePort int, protocol v1.Protocol, proxyIP args := proxier.iptablesContainerPortalArgs(nil, false, false, nodePort, protocol, proxyIP, proxyPort, name) existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerNodePortChain, args...) if err != nil { - glog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerNodePortChain, name) + klog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerNodePortChain, name) return err } if !existed { - glog.Infof("Opened iptables from-containers public port for service %q on %s port %d", name, protocol, nodePort) + klog.Infof("Opened iptables from-containers public port for service %q on %s port %d", name, protocol, nodePort) } // Handle traffic from the host. args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name) existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostNodePortChain, args...) if err != nil { - glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostNodePortChain, name) + klog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostNodePortChain, name) return err } if !existed { - glog.Infof("Opened iptables from-host public port for service %q on %s port %d", name, protocol, nodePort) + klog.Infof("Opened iptables from-host public port for service %q on %s port %d", name, protocol, nodePort) } args = proxier.iptablesNonLocalNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name) existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableFilter, iptablesNonLocalNodePortChain, args...) 
if err != nil { - glog.Errorf("Failed to install iptables %s rule for service %q", iptablesNonLocalNodePortChain, name) + klog.Errorf("Failed to install iptables %s rule for service %q", iptablesNonLocalNodePortChain, name) return err } if !existed { - glog.Infof("Opened iptables from-non-local public port for service %q on %s port %d", name, protocol, nodePort) + klog.Infof("Opened iptables from-non-local public port for service %q on %s port %d", name, protocol, nodePort) } return nil @@ -755,9 +755,9 @@ func (proxier *Proxier) closePortal(service proxy.ServicePortName, info *Service el = append(el, proxier.closeNodePort(info.nodePort, info.protocol, proxier.listenIP, info.proxyPort, service)...) } if len(el) == 0 { - glog.V(3).Infof("Closed iptables portals for service %q", service) + klog.V(3).Infof("Closed iptables portals for service %q", service) } else { - glog.Errorf("Some errors closing iptables portals for service %q", service) + klog.Errorf("Some errors closing iptables portals for service %q", service) } return utilerrors.NewAggregate(el) } @@ -776,20 +776,20 @@ func (proxier *Proxier) closeOnePortal(portal portal, protocol v1.Protocol, prox // Handle traffic from containers. args := proxier.iptablesContainerPortalArgs(portal.ip, portal.isExternal, false, portal.port, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name) el = append(el, err) } if portal.isExternal { args := proxier.iptablesContainerPortalArgs(portal.ip, false, true, portal.port, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name) el = append(el, err) } args = proxier.iptablesHostPortalArgs(portal.ip, true, portal.port, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name) el = append(el, err) } return el @@ -798,7 +798,7 @@ func (proxier *Proxier) closeOnePortal(portal portal, protocol v1.Protocol, prox // Handle traffic from the host (portalIP is not external). args = proxier.iptablesHostPortalArgs(portal.ip, false, portal.port, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name) el = append(el, err) } @@ -811,21 +811,21 @@ func (proxier *Proxier) closeNodePort(nodePort int, protocol v1.Protocol, proxyI // Handle traffic from containers. 
args := proxier.iptablesContainerPortalArgs(nil, false, false, nodePort, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerNodePortChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerNodePortChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerNodePortChain, name) el = append(el, err) } // Handle traffic from the host. args = proxier.iptablesHostNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostNodePortChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostNodePortChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostNodePortChain, name) el = append(el, err) } // Handle traffic not local to the host args = proxier.iptablesNonLocalNodePortArgs(nodePort, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableFilter, iptablesNonLocalNodePortChain, args...); err != nil { - glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesNonLocalNodePortChain, name) + klog.Errorf("Failed to delete iptables %s rule for service %q", iptablesNonLocalNodePortChain, name) el = append(el, err) } @@ -934,7 +934,7 @@ func iptablesFlush(ipt iptables.Interface) error { el = append(el, err) } if len(el) != 0 { - glog.Errorf("Some errors flushing old iptables portals: %v", el) + klog.Errorf("Some errors flushing old iptables portals: %v", el) } return utilerrors.NewAggregate(el) } diff --git a/pkg/proxy/userspace/proxysocket.go b/pkg/proxy/userspace/proxysocket.go index 098f68c15aa25..2ff4980546525 100644 --- a/pkg/proxy/userspace/proxysocket.go +++ b/pkg/proxy/userspace/proxysocket.go @@ -25,9 +25,9 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" "k8s.io/kubernetes/pkg/proxy" ) @@ -95,10 +95,10 @@ func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protoc for _, dialTimeout := range EndpointDialTimeouts { endpoint, err := loadBalancer.NextEndpoint(service, srcAddr, sessionAffinityReset) if err != nil { - glog.Errorf("Couldn't find an endpoint for %s: %v", service, err) + klog.Errorf("Couldn't find an endpoint for %s: %v", service, err) return nil, err } - glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint) + klog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint) // TODO: This could spin up a new goroutine to make the outbound connection, // and keep accepting inbound traffic. outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout) @@ -106,7 +106,7 @@ func TryConnectEndpoints(service proxy.ServicePortName, srcAddr net.Addr, protoc if isTooManyFDsError(err) { panic("Dial failed: " + err.Error()) } - glog.Errorf("Dial failed: %v", err) + klog.Errorf("Dial failed: %v", err) sessionAffinityReset = true continue } @@ -135,13 +135,13 @@ func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv // Then the service port was just closed so the accept failure is to be expected. 
return } - glog.Errorf("Accept failed: %v", err) + klog.Errorf("Accept failed: %v", err) continue } - glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr()) + klog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr()) outConn, err := TryConnectEndpoints(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", loadBalancer) if err != nil { - glog.Errorf("Failed to connect to balancer: %v", err) + klog.Errorf("Failed to connect to balancer: %v", err) inConn.Close() continue } @@ -154,7 +154,7 @@ func (tcp *tcpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv func ProxyTCP(in, out *net.TCPConn) { var wg sync.WaitGroup wg.Add(2) - glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v", + klog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v", in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr()) go copyBytes("from backend", in, out, &wg) go copyBytes("to backend", out, in, &wg) @@ -163,14 +163,14 @@ func ProxyTCP(in, out *net.TCPConn) { func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) { defer wg.Done() - glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr()) + klog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr()) n, err := io.Copy(dest, src) if err != nil { if !isClosedError(err) { - glog.Errorf("I/O error: %v", err) + klog.Errorf("I/O error: %v", err) } } - glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr()) + klog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr()) dest.Close() src.Close() } @@ -215,11 +215,11 @@ func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv if err != nil { if e, ok := err.(net.Error); ok { if e.Temporary() { - glog.V(1).Infof("ReadFrom had a temporary failure: %v", err) + klog.V(1).Infof("ReadFrom had a temporary failure: %v", err) continue } } - glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err) + klog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err) break } // If this is a client we know already, reuse the connection and goroutine. @@ -232,14 +232,14 @@ func (udp *udpProxySocket) ProxyLoop(service proxy.ServicePortName, myInfo *Serv _, err = svrConn.Write(buffer[0:n]) if err != nil { if !logTimeout(err) { - glog.Errorf("Write failed: %v", err) + klog.Errorf("Write failed: %v", err) // TODO: Maybe tear down the goroutine for this client/server pair? } continue } err = svrConn.SetDeadline(time.Now().Add(myInfo.Timeout)) if err != nil { - glog.Errorf("SetDeadline failed: %v", err) + klog.Errorf("SetDeadline failed: %v", err) continue } } @@ -253,14 +253,14 @@ func (udp *udpProxySocket) getBackendConn(activeClients *ClientCache, cliAddr ne if !found { // TODO: This could spin up a new goroutine to make the outbound connection, // and keep accepting inbound traffic. 
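The renames in TryConnectEndpoints and the proxy loops above are purely mechanical: k8s.io/klog (v1) keeps glog's call signatures, so Errorf, Warningf, Infof, and the V(level) guard translate one for one. What does change is flag registration, which klog exposes explicitly. A minimal, self-contained sketch of the API surface these call sites rely on; the flag wiring and messages here are illustrative, not taken from this patch:

package main

import (
	"errors"
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog registers -v, -logtostderr, etc. only when asked, so a binary
	// wires them into its own FlagSet explicitly.
	klog.InitFlags(flag.CommandLine)
	flag.Parse()
	defer klog.Flush() // flush buffered output before exit

	klog.Infof("proxy starting")
	klog.V(3).Infof("verbose detail: %d endpoints", 0) // emitted only at -v=3 or higher
	klog.Errorf("dial failed: %v", errors.New("example"))
}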
- glog.V(3).Infof("New UDP connection from %s", cliAddr) + klog.V(3).Infof("New UDP connection from %s", cliAddr) var err error svrConn, err = TryConnectEndpoints(service, cliAddr, "udp", loadBalancer) if err != nil { return nil, err } if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil { - glog.Errorf("SetDeadline failed: %v", err) + klog.Errorf("SetDeadline failed: %v", err) return nil, err } activeClients.Clients[cliAddr.String()] = svrConn @@ -281,19 +281,19 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ n, err := svrConn.Read(buffer[0:]) if err != nil { if !logTimeout(err) { - glog.Errorf("Read failed: %v", err) + klog.Errorf("Read failed: %v", err) } break } err = svrConn.SetDeadline(time.Now().Add(timeout)) if err != nil { - glog.Errorf("SetDeadline failed: %v", err) + klog.Errorf("SetDeadline failed: %v", err) break } n, err = udp.WriteTo(buffer[0:n], cliAddr) if err != nil { if !logTimeout(err) { - glog.Errorf("WriteTo failed: %v", err) + klog.Errorf("WriteTo failed: %v", err) } break } diff --git a/pkg/proxy/userspace/roundrobin.go b/pkg/proxy/userspace/roundrobin.go index 5e84e8c27c952..6bbc558eae805 100644 --- a/pkg/proxy/userspace/roundrobin.go +++ b/pkg/proxy/userspace/roundrobin.go @@ -25,9 +25,9 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/util/slice" ) @@ -82,7 +82,7 @@ func NewLoadBalancerRR() *LoadBalancerRR { } func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) error { - glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort) + klog.V(4).Infof("LoadBalancerRR NewService %q", svcPort) lb.lock.Lock() defer lb.lock.Unlock() lb.newServiceInternal(svcPort, affinityType, ttlSeconds) @@ -97,7 +97,7 @@ func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affi if _, exists := lb.services[svcPort]; !exists { lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)} - glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort) + klog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort) } else if affinityType != "" { lb.services[svcPort].affinity.affinityType = affinityType } @@ -105,7 +105,7 @@ func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affi } func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) { - glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort) + klog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort) lb.lock.Lock() defer lb.lock.Unlock() delete(lb.services, svcPort) @@ -145,7 +145,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne if len(state.endpoints) == 0 { return "", ErrMissingEndpoints } - glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints) + klog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints) sessionAffinityEnabled := isSessionAffinity(&state.affinity) @@ -163,7 +163,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne // Affinity wins. 
endpoint := sessionAffinity.endpoint sessionAffinity.lastUsed = time.Now() - glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint) + klog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint) return endpoint, nil } } @@ -182,7 +182,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne affinity.lastUsed = time.Now() affinity.endpoint = endpoint affinity.clientIP = ipaddr - glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr]) + klog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr]) } return endpoint, nil @@ -214,7 +214,7 @@ func flattenValidEndpoints(endpoints []hostPortPair) []string { func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) { for _, affinity := range state.affinity.affinityMap { if affinity.endpoint == endpoint { - glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort) + klog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort) delete(state.affinity.affinityMap, affinity.clientIP) } } @@ -237,7 +237,7 @@ func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEn } for mKey, mVal := range allEndpoints { if mVal == 1 { - glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort) + klog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort) removeSessionAffinityByEndpoint(state, svcPort, mKey) } } @@ -273,7 +273,7 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) { state, exists := lb.services[svcPort] if !exists || state == nil || len(newEndpoints) > 0 { - glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) + klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) lb.updateAffinityMap(svcPort, newEndpoints) // OnEndpointsAdd can be called without NewService being called externally. // To be safe we will call it here. A new service will only be created @@ -307,7 +307,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoint } if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) { - glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) + klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) lb.updateAffinityMap(svcPort, newEndpoints) // OnEndpointsUpdate can be called without NewService being called externally. // To be safe we will call it here. A new service will only be created @@ -335,7 +335,7 @@ func (lb *LoadBalancerRR) resetService(svcPort proxy.ServicePortName) { // If the service is still around, reset but don't delete. 
if state, ok := lb.services[svcPort]; ok { if len(state.endpoints) > 0 { - glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) + klog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) state.endpoints = []string{} } state.index = 0 @@ -379,7 +379,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa } for ip, affinity := range state.affinity.affinityMap { if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds { - glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort) + klog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort) delete(state.affinity.affinityMap, ip) } } diff --git a/pkg/proxy/util/BUILD b/pkg/proxy/util/BUILD index 999dcf88daebe..fbbb01b18e769 100644 --- a/pkg/proxy/util/BUILD +++ b/pkg/proxy/util/BUILD @@ -17,7 +17,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/proxy/util/endpoints.go b/pkg/proxy/util/endpoints.go index 7c91b9cd6a102..716491cd25c64 100644 --- a/pkg/proxy/util/endpoints.go +++ b/pkg/proxy/util/endpoints.go @@ -21,7 +21,7 @@ import ( "net" "strconv" - "github.com/golang/glog" + "k8s.io/klog" ) // IPPart returns just the IP part of an IP or IP:port or endpoint string. If the IP @@ -35,14 +35,14 @@ func IPPart(s string) string { // Must be IP:port host, _, err := net.SplitHostPort(s) if err != nil { - glog.Errorf("Error parsing '%s': %v", s, err) + klog.Errorf("Error parsing '%s': %v", s, err) return "" } // Check if host string is a valid IP address if ip := net.ParseIP(host); ip != nil { return ip.String() } else { - glog.Errorf("invalid IP part '%s'", host) + klog.Errorf("invalid IP part '%s'", host) } return "" } @@ -52,12 +52,12 @@ func PortPart(s string) (int, error) { // Must be IP:port _, port, err := net.SplitHostPort(s) if err != nil { - glog.Errorf("Error parsing '%s': %v", s, err) + klog.Errorf("Error parsing '%s': %v", s, err) return -1, err } portNumber, err := strconv.Atoi(port) if err != nil { - glog.Errorf("Error parsing '%s': %v", port, err) + klog.Errorf("Error parsing '%s': %v", port, err) return -1, err } return portNumber, nil diff --git a/pkg/proxy/util/port.go b/pkg/proxy/util/port.go index 96317b1dc820b..35924e05e8712 100644 --- a/pkg/proxy/util/port.go +++ b/pkg/proxy/util/port.go @@ -21,7 +21,7 @@ import ( "net" "strconv" - "github.com/golang/glog" + "k8s.io/klog" ) // LocalPort describes a port on specific IP address and protocol @@ -60,7 +60,7 @@ func RevertPorts(replacementPortsMap, originalPortsMap map[LocalPort]Closeable) for k, v := range replacementPortsMap { // Only close newly opened local ports - leave ones that were open before this update if originalPortsMap[k] == nil { - glog.V(2).Infof("Closing local port %s", k.String()) + klog.V(2).Infof("Closing local port %s", k.String()) v.Close() } } diff --git a/pkg/proxy/util/utils.go b/pkg/proxy/util/utils.go index ca1e6c8fc9972..f1db309a941e9 100644 --- a/pkg/proxy/util/utils.go +++ b/pkg/proxy/util/utils.go @@ -27,7 +27,7 @@ import ( helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" utilnet "k8s.io/kubernetes/pkg/util/net" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -62,12 +62,12 @@ func IsLocalIP(ip string) 
(bool, error) { func ShouldSkipService(svcName types.NamespacedName, service *v1.Service) bool { // if ClusterIP is "None" or empty, skip proxying if !helper.IsServiceIPSet(service) { - glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) + klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) return true } // Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied if service.Spec.Type == v1.ServiceTypeExternalName { - glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName) + klog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName) return true } return false @@ -134,7 +134,7 @@ func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error) // LogAndEmitIncorrectIPVersionEvent logs and emits incorrect IP version event. func LogAndEmitIncorrectIPVersionEvent(recorder record.EventRecorder, fieldName, fieldValue, svcNamespace, svcName string, svcUID types.UID) { errMsg := fmt.Sprintf("%s in %s has incorrect IP version", fieldValue, fieldName) - glog.Errorf("%s (service %s/%s).", errMsg, svcNamespace, svcName) + klog.Errorf("%s (service %s/%s).", errMsg, svcNamespace, svcName) if recorder != nil { recorder.Eventf( &v1.ObjectReference{ diff --git a/pkg/proxy/winkernel/BUILD b/pkg/proxy/winkernel/BUILD index 0e3636f1990c4..4d6b5e0f8814b 100644 --- a/pkg/proxy/winkernel/BUILD +++ b/pkg/proxy/winkernel/BUILD @@ -24,7 +24,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/github.com/Microsoft/hcsshim:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], }), diff --git a/pkg/proxy/winkernel/proxier.go b/pkg/proxy/winkernel/proxier.go index a1343d24b306f..398dd246d3cf6 100644 --- a/pkg/proxy/winkernel/proxier.go +++ b/pkg/proxy/winkernel/proxier.go @@ -30,7 +30,7 @@ import ( "github.com/Microsoft/hcsshim" "github.com/davecgh/go-spew/spew" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -107,14 +107,14 @@ type hnsNetworkInfo struct { id string } -func Log(v interface{}, message string, level glog.Level) { - glog.V(level).Infof("%s, %s", message, spew.Sdump(v)) +func Log(v interface{}, message string, level klog.Level) { + klog.V(level).Infof("%s, %s", message, spew.Sdump(v)) } -func LogJson(v interface{}, message string, level glog.Level) { +func LogJson(v interface{}, message string, level klog.Level) { jsonString, err := json.Marshal(v) if err == nil { - glog.V(level).Infof("%s, %s", message, string(jsonString)) + klog.V(level).Infof("%s, %s", message, string(jsonString)) } } @@ -159,7 +159,7 @@ func (ep *endpointsInfo) Cleanup() { // Never delete a Local Endpoint. Local Endpoints are already created by other entities. 
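Log and LogJson above now take klog.Level, the same integer type behind the -v flag, and pass it straight to klog.V. One behavior worth noting carries over from the glog version: LogJson drops json.Marshal failures silently. A hypothetical variant that reports them could look like the following sketch (logJSON is an invented name, not code from this patch):

// logJSON mirrors LogJson above but surfaces marshal failures
// instead of swallowing them.
func logJSON(v interface{}, message string, level klog.Level) {
	b, err := json.Marshal(v) // assumes "encoding/json" is imported
	if err != nil {
		klog.Errorf("failed to marshal %T for logging: %v", v, err)
		return
	}
	klog.V(level).Infof("%s, %s", message, string(b))
}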
// Remove only remote endpoints created by this service if ep.refCount <= 0 && !ep.isLocal { - glog.V(4).Infof("Removing endpoints for %v, since no one is referencing it", ep) + klog.V(4).Infof("Removing endpoints for %v, since no one is referencing it", ep) deleteHnsEndpoint(ep.hnsID) ep.hnsID = "" } @@ -206,7 +206,7 @@ func newServiceInfo(svcPortName proxy.ServicePortName, port *v1.ServicePort, ser if apiservice.NeedsHealthCheck(service) { p := service.Spec.HealthCheckNodePort if p == 0 { - glog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String()) + klog.Errorf("Service %q has no healthcheck nodeport", svcPortName.NamespacedName.String()) } else { info.healthCheckNodePort = int(p) } @@ -303,9 +303,9 @@ func (sm *proxyServiceMap) merge(other proxyServiceMap, curEndpoints proxyEndpoi existingPorts.Insert(svcPortName.Port) svcInfo, exists := (*sm)[svcPortName] if !exists { - glog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) + klog.V(1).Infof("Adding new service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) } else { - glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) + klog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) svcInfo.cleanupAllPolicies(curEndpoints[svcPortName]) delete(*sm, svcPortName) } @@ -321,14 +321,14 @@ func (sm *proxyServiceMap) unmerge(other proxyServiceMap, existingPorts, staleSe } info, exists := (*sm)[svcPortName] if exists { - glog.V(1).Infof("Removing service port %q", svcPortName) + klog.V(1).Infof("Removing service port %q", svcPortName) if info.protocol == v1.ProtocolUDP { staleServices.Insert(info.clusterIP.String()) } info.cleanupAllPolicies(curEndpoints[svcPortName]) delete(*sm, svcPortName) } else { - glog.Errorf("Service port %q removed, but doesn't exists", svcPortName) + klog.Errorf("Service port %q removed, but doesn't exist", svcPortName) } } } @@ -340,13 +340,13 @@ func (em proxyEndpointsMap) merge(other proxyEndpointsMap, curServices proxyServ if exists { // info, exists := curServices[svcPortName] - glog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) + klog.V(1).Infof("Updating existing service port %q at %s:%d/%s", svcPortName, info.clusterIP, info.port, info.protocol) if exists { - glog.V(2).Infof("Endpoints are modified. Service [%v] is stale", svcPortName) + klog.V(2).Infof("Endpoints are modified. Service [%v] is stale", svcPortName) info.cleanupAllPolicies(epInfos) } else { // If no service exists, just cleanup the remote endpoints - glog.V(2).Infof("Endpoints are orphaned. Cleaning up") + klog.V(2).Infof("Endpoints are orphaned. Cleaning up") // Cleanup Endpoints references for _, ep := range epInfos { ep.Cleanup() @@ -365,11 +365,11 @@ func (em proxyEndpointsMap) unmerge(other proxyEndpointsMap, curServices proxySe for svcPortName := range other { info, exists := curServices[svcPortName] if exists { - glog.V(2).Infof("Service [%v] is stale", info) + klog.V(2).Infof("Service [%v] is stale", info) info.cleanupAllPolicies(em[svcPortName]) } else { // If no service exists, just cleanup the remote endpoints - glog.V(2).Infof("Endpoints are orphaned. Cleaning up") + klog.V(2).Infof("Endpoints are orphaned. 
Cleaning up") // Cleanup Endpoints references epInfos, exists := em[svcPortName] if exists { @@ -470,12 +470,12 @@ func NewProxier( masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) if nodeIP == nil { - glog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") + klog.Warningf("invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP") nodeIP = net.ParseIP("127.0.0.1") } if len(clusterCIDR) == 0 { - glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic") + klog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic") } healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps @@ -487,11 +487,11 @@ func NewProxier( } hnsNetwork, err := getHnsNetworkInfo(hnsNetworkName) if err != nil { - glog.Fatalf("Unable to find Hns Network specified by %s. Please check environment variable KUBE_NETWORK", hnsNetworkName) + klog.Fatalf("Unable to find Hns Network specified by %s. Please check environment variable KUBE_NETWORK", hnsNetworkName) return nil, err } - glog.V(1).Infof("Hns Network loaded with info = %v", hnsNetwork) + klog.V(1).Infof("Hns Network loaded with info = %v", hnsNetwork) proxier := &Proxier{ portsMap: make(map[localPort]closeable), @@ -511,7 +511,7 @@ func NewProxier( } burstSyncs := 2 - glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) + klog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs) return proxier, nil @@ -568,7 +568,7 @@ func deleteAllHnsLoadBalancerPolicy() { LogJson(plist, "Remove Policy", 3) _, err = plist.Delete() if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) } } @@ -629,36 +629,36 @@ func deleteHnsLoadBalancerPolicy(hnsID string) { // Cleanup HNS policies hnsloadBalancer, err := hcsshim.GetPolicyListByID(hnsID) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) return } LogJson(hnsloadBalancer, "Removing Policy", 2) _, err = hnsloadBalancer.Delete() if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) } } func deleteHnsEndpoint(hnsID string) { hnsendpoint, err := hcsshim.GetHNSEndpointByID(hnsID) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) return } _, err = hnsendpoint.Delete() if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) } - glog.V(3).Infof("Remote endpoint resource deleted id %s", hnsID) + klog.V(3).Infof("Remote endpoint resource deleted id %s", hnsID) } func getHnsNetworkInfo(hnsNetworkName string) (*hnsNetworkInfo, error) { hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) return nil, err } @@ -671,7 +671,7 @@ func getHnsNetworkInfo(hnsNetworkName string) (*hnsNetworkInfo, error) { func getHnsEndpointByIpAddress(ip net.IP, networkName string) (*hcsshim.HNSEndpoint, error) { hnsnetwork, err := hcsshim.GetHNSNetworkByName(networkName) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) return nil, err } @@ -746,12 +746,12 @@ func (proxier *Proxier) OnServiceSynced() { func shouldSkipService(svcName types.NamespacedName, service *v1.Service) bool { // if ClusterIP is "None" or empty, skip proxying if !helper.IsServiceIPSet(service) { - 
glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) + klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) return true } // Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied if service.Spec.Type == v1.ServiceTypeExternalName { - glog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName) + klog.V(3).Infof("Skipping service %s due to Type=ExternalName", svcName) return true } return false @@ -880,7 +880,7 @@ func endpointsToEndpointsMap(endpoints *v1.Endpoints, hostname string) proxyEndp for i := range ss.Ports { port := &ss.Ports[i] if port.Port == 0 { - glog.Warningf("ignoring invalid endpoint port %s", port.Name) + klog.Warningf("ignoring invalid endpoint port %s", port.Name) continue } svcPortName := proxy.ServicePortName{ @@ -890,19 +890,19 @@ func endpointsToEndpointsMap(endpoints *v1.Endpoints, hostname string) proxyEndp for i := range ss.Addresses { addr := &ss.Addresses[i] if addr.IP == "" { - glog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name) + klog.Warningf("ignoring invalid endpoint port %s with empty host", port.Name) continue } isLocal := addr.NodeName != nil && *addr.NodeName == hostname epInfo := newEndpointInfo(addr.IP, uint16(port.Port), isLocal) endpointsMap[svcPortName] = append(endpointsMap[svcPortName], epInfo) } - if glog.V(3) { + if klog.V(3) { newEPList := []*endpointsInfo{} for _, ep := range endpointsMap[svcPortName] { newEPList = append(newEPList, ep) } - glog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList) + klog.Infof("Setting endpoints for %q to %+v", svcPortName, newEPList) } } } @@ -939,11 +939,11 @@ func (proxier *Proxier) syncProxyRules() { start := time.Now() defer func() { SyncProxyRulesLatency.Observe(sinceInMicroseconds(start)) - glog.V(4).Infof("syncProxyRules took %v", time.Since(start)) + klog.V(4).Infof("syncProxyRules took %v", time.Since(start)) }() // don't sync rules till we've received services and endpoints if !proxier.endpointsSynced || !proxier.servicesSynced { - glog.V(2).Info("Not syncing hns until Services and Endpoints have been received from master") + klog.V(2).Info("Not syncing hns until Services and Endpoints have been received from master") return } @@ -957,22 +957,22 @@ func (proxier *Proxier) syncProxyRules() { // merge stale services gathered from updateEndpointsMap for svcPortName := range endpointUpdateResult.staleServiceNames { if svcInfo, ok := proxier.serviceMap[svcPortName]; ok && svcInfo != nil && svcInfo.protocol == v1.ProtocolUDP { - glog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String()) + klog.V(2).Infof("Stale udp service %v -> %s", svcPortName, svcInfo.clusterIP.String()) staleServices.Insert(svcInfo.clusterIP.String()) } } - glog.V(3).Infof("Syncing Policies") + klog.V(3).Infof("Syncing Policies") // Program HNS by adding corresponding policies for each service. 
for svcName, svcInfo := range proxier.serviceMap { if svcInfo.policyApplied { - glog.V(4).Infof("Policy already applied for %s", spew.Sdump(svcInfo)) + klog.V(4).Infof("Policy already applied for %s", spew.Sdump(svcInfo)) continue } var hnsEndpoints []hcsshim.HNSEndpoint - glog.V(4).Infof("====Applying Policy for %s====", svcName) + klog.V(4).Infof("====Applying Policy for %s====", svcName) // Create Remote endpoints for every endpoint, corresponding to the service for _, ep := range proxier.endpointsMap[svcName] { @@ -1000,13 +1000,13 @@ func (proxier *Proxier) syncProxyRules() { if newHnsEndpoint == nil { if ep.isLocal { - glog.Errorf("Local endpoint not found for %v: err: %v on network %s", ep.ip, err, hnsNetworkName) + klog.Errorf("Local endpoint not found for %v: err: %v on network %s", ep.ip, err, hnsNetworkName) continue } // hns Endpoint resource was not found, create one hnsnetwork, err := hcsshim.GetHNSNetworkByName(hnsNetworkName) if err != nil { - glog.Errorf("%v", err) + klog.Errorf("%v", err) continue } @@ -1017,7 +1017,7 @@ func (proxier *Proxier) syncProxyRules() { newHnsEndpoint, err = hnsnetwork.CreateRemoteEndpoint(hnsEndpoint) if err != nil { - glog.Errorf("Remote endpoint creation failed: %v", err) + klog.Errorf("Remote endpoint creation failed: %v", err) continue } } @@ -1030,19 +1030,19 @@ func (proxier *Proxier) syncProxyRules() { Log(ep, "Endpoint resource found", 3) } - glog.V(3).Infof("Associated endpoints [%s] for service [%s]", spew.Sdump(hnsEndpoints), svcName) + klog.V(3).Infof("Associated endpoints [%s] for service [%s]", spew.Sdump(hnsEndpoints), svcName) if len(svcInfo.hnsID) > 0 { // This should not happen - glog.Warningf("Load Balancer already exists %s -- Debug ", svcInfo.hnsID) + klog.Warningf("Load Balancer already exists %s -- Debug ", svcInfo.hnsID) } if len(hnsEndpoints) == 0 { - glog.Errorf("Endpoint information not available for service %s. Not applying any policy", svcName) + klog.Errorf("Endpoint information not available for service %s. 
Not applying any policy", svcName) continue } - glog.V(4).Infof("Trying to Apply Policies for service %s", spew.Sdump(svcInfo)) + klog.V(4).Infof("Trying to Apply Policies for service %s", spew.Sdump(svcInfo)) var hnsLoadBalancer *hcsshim.PolicyList hnsLoadBalancer, err := getHnsLoadBalancer( @@ -1054,12 +1054,12 @@ func (proxier *Proxier) syncProxyRules() { uint16(svcInfo.port), ) if err != nil { - glog.Errorf("Policy creation failed: %v", err) + klog.Errorf("Policy creation failed: %v", err) continue } svcInfo.hnsID = hnsLoadBalancer.ID - glog.V(3).Infof("Hns LoadBalancer resource created for cluster ip resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID) + klog.V(3).Infof("Hns LoadBalancer resource created for cluster ip resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID) // If nodePort is specified, user should be able to use nodeIP:nodePort to reach the backend endpoints if svcInfo.nodePort > 0 { @@ -1072,12 +1072,12 @@ func (proxier *Proxier) syncProxyRules() { uint16(svcInfo.nodePort), ) if err != nil { - glog.Errorf("Policy creation failed: %v", err) + klog.Errorf("Policy creation failed: %v", err) continue } svcInfo.nodePorthnsID = hnsLoadBalancer.ID - glog.V(3).Infof("Hns LoadBalancer resource created for nodePort resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID) + klog.V(3).Infof("Hns LoadBalancer resource created for nodePort resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID) } // Create a Load Balancer Policy for each external IP @@ -1092,11 +1092,11 @@ func (proxier *Proxier) syncProxyRules() { uint16(svcInfo.port), ) if err != nil { - glog.Errorf("Policy creation failed: %v", err) + klog.Errorf("Policy creation failed: %v", err) continue } externalIp.hnsID = hnsLoadBalancer.ID - glog.V(3).Infof("Hns LoadBalancer resource created for externalIp resources %v, Id[%s]", externalIp, hnsLoadBalancer.ID) + klog.V(3).Infof("Hns LoadBalancer resource created for externalIp resources %v, Id[%s]", externalIp, hnsLoadBalancer.ID) } // Create a Load Balancer Policy for each loadbalancer ingress for _, lbIngressIp := range svcInfo.loadBalancerIngressIPs { @@ -1110,11 +1110,11 @@ func (proxier *Proxier) syncProxyRules() { uint16(svcInfo.port), ) if err != nil { - glog.Errorf("Policy creation failed: %v", err) + klog.Errorf("Policy creation failed: %v", err) continue } lbIngressIp.hnsID = hnsLoadBalancer.ID - glog.V(3).Infof("Hns LoadBalancer resource created for loadBalancer Ingress resources %v", lbIngressIp) + klog.V(3).Infof("Hns LoadBalancer resource created for loadBalancer Ingress resources %v", lbIngressIp) } svcInfo.policyApplied = true Log(svcInfo, "+++Policy Successfully applied for service +++", 2) @@ -1129,17 +1129,17 @@ func (proxier *Proxier) syncProxyRules() { // not "OnlyLocal", but the services list will not, and the healthChecker // will just drop those endpoints. if err := proxier.healthChecker.SyncServices(serviceUpdateResult.hcServices); err != nil { - glog.Errorf("Error syncing healtcheck services: %v", err) + klog.Errorf("Error syncing healtcheck services: %v", err) } if err := proxier.healthChecker.SyncEndpoints(endpointUpdateResult.hcEndpoints); err != nil { - glog.Errorf("Error syncing healthcheck endpoints: %v", err) + klog.Errorf("Error syncing healthcheck endpoints: %v", err) } // Finish housekeeping. // TODO: these could be made more consistent. 
for _, svcIP := range staleServices.UnsortedList() { // TODO : Check if this is required to cleanup stale services here - glog.V(5).Infof("Pending delete stale service IP %s connections", svcIP) + klog.V(5).Infof("Pending delete stale service IP %s connections", svcIP) } } diff --git a/pkg/proxy/winuserspace/BUILD b/pkg/proxy/winuserspace/BUILD index 2c2e4d4e5d977..3925dc96d72c5 100644 --- a/pkg/proxy/winuserspace/BUILD +++ b/pkg/proxy/winuserspace/BUILD @@ -26,8 +26,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/miekg/dns:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/proxy/winuserspace/proxier.go b/pkg/proxy/winuserspace/proxier.go index 9770451e7b850..4b5a218cfc0b5 100644 --- a/pkg/proxy/winuserspace/proxier.go +++ b/pkg/proxy/winuserspace/proxier.go @@ -25,7 +25,7 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -70,7 +70,7 @@ func (info *serviceInfo) isAlive() bool { func logTimeout(err error) bool { if e, ok := err.(net.Error); ok { if e.Timeout() { - glog.V(3).Infof("connection to endpoint closed due to inactivity") + klog.V(3).Infof("connection to endpoint closed due to inactivity") return true } } @@ -140,7 +140,7 @@ func NewProxier(loadBalancer LoadBalancer, listenIP net.IP, netsh netsh.Interfac return nil, fmt.Errorf("failed to select a host interface: %v", err) } - glog.V(2).Infof("Setting proxy IP to %v", hostIP) + klog.V(2).Infof("Setting proxy IP to %v", hostIP) return createProxier(loadBalancer, listenIP, netsh, hostIP, syncPeriod, udpIdleTimeout) } @@ -167,7 +167,7 @@ func (proxier *Proxier) SyncLoop() { defer t.Stop() for { <-t.C - glog.V(6).Infof("Periodic sync") + klog.V(6).Infof("Periodic sync") proxier.Sync() } } @@ -234,7 +234,7 @@ func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPo if existed, err := proxier.netsh.EnsureIPAddress(args, serviceIP); err != nil { return nil, err } else if !existed { - glog.V(3).Infof("Added ip address to fowarder interface for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol) + klog.V(3).Infof("Added ip address to forwarder interface for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol) } } @@ -259,7 +259,7 @@ func (proxier *Proxier) addServicePortPortal(servicePortPortalName ServicePortPo } proxier.setServiceInfo(servicePortPortalName, si) - glog.V(2).Infof("Proxying for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol) + klog.V(2).Infof("Proxying for service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(port)), protocol) go func(service ServicePortPortalName, proxier *Proxier) { defer runtime.HandleCrash() atomic.AddInt32(&proxier.numProxyLoops, 1) @@ -313,7 +313,7 @@ func (proxier *Proxier) mergeService(service *v1.Service) map[ServicePortPortalN } svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} if !helper.IsServiceIPSet(service) { - glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) + klog.V(3).Infof("Skipping service %s 
due to clusterIP = %q", svcName, service.Spec.ClusterIP) return nil } existingPortPortals := make(map[ServicePortPortalName]bool) @@ -337,19 +337,19 @@ func (proxier *Proxier) mergeService(service *v1.Service) map[ServicePortPortalN continue } if exists { - glog.V(4).Infof("Something changed for service %q: stopping it", servicePortPortalName) + klog.V(4).Infof("Something changed for service %q: stopping it", servicePortPortalName) if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil { - glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err) + klog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err) } } - glog.V(1).Infof("Adding new service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(listenPort)), protocol) + klog.V(1).Infof("Adding new service %q at %s/%s", servicePortPortalName, net.JoinHostPort(listenIP, strconv.Itoa(listenPort)), protocol) info, err := proxier.addServicePortPortal(servicePortPortalName, protocol, listenIP, listenPort, proxier.udpIdleTimeout) if err != nil { - glog.Errorf("Failed to start proxy for %q: %v", servicePortPortalName, err) + klog.Errorf("Failed to start proxy for %q: %v", servicePortPortalName, err) continue } info.sessionAffinityType = service.Spec.SessionAffinity - glog.V(10).Infof("info: %#v", info) + klog.V(10).Infof("info: %#v", info) } if len(listenIPPortMap) > 0 { // only one loadbalancer per service port portal @@ -377,7 +377,7 @@ func (proxier *Proxier) unmergeService(service *v1.Service, existingPortPortals } svcName := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} if !helper.IsServiceIPSet(service) { - glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) + klog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) return } @@ -409,15 +409,15 @@ func (proxier *Proxier) unmergeService(service *v1.Service, existingPortPortals continue } - glog.V(1).Infof("Stopping service %q", servicePortPortalName) + klog.V(1).Infof("Stopping service %q", servicePortPortalName) info, exists := proxier.getServiceInfo(servicePortPortalName) if !exists { - glog.Errorf("Service %q is being removed but doesn't exist", servicePortPortalName) + klog.Errorf("Service %q is being removed but doesn't exist", servicePortPortalName) continue } if err := proxier.closeServicePortPortal(servicePortPortalName, info); err != nil { - glog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err) + klog.Errorf("Failed to close service port portal %q: %v", servicePortPortalName, err) } } diff --git a/pkg/proxy/winuserspace/proxysocket.go b/pkg/proxy/winuserspace/proxysocket.go index 23782d6209b9f..c7c5691e3b35f 100644 --- a/pkg/proxy/winuserspace/proxysocket.go +++ b/pkg/proxy/winuserspace/proxysocket.go @@ -26,11 +26,11 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" "github.com/miekg/dns" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/util/ipconfig" "k8s.io/utils/exec" @@ -133,10 +133,10 @@ func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string } endpoint, err := proxier.loadBalancer.NextEndpoint(servicePortName, srcAddr, sessionAffinityReset) if err != nil { - glog.Errorf("Couldn't find an endpoint for %s: %v", service, err) + klog.Errorf("Couldn't find an endpoint for %s: %v", service, err) 
return nil, err } - glog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint) + klog.V(3).Infof("Mapped service %q to endpoint %s", service, endpoint) // TODO: This could spin up a new goroutine to make the outbound connection, // and keep accepting inbound traffic. outConn, err := net.DialTimeout(protocol, endpoint, dialTimeout) @@ -144,7 +144,7 @@ func tryConnect(service ServicePortPortalName, srcAddr net.Addr, protocol string if isTooManyFDsError(err) { panic("Dial failed: " + err.Error()) } - glog.Errorf("Dial failed: %v", err) + klog.Errorf("Dial failed: %v", err) sessionAffinityReset = true continue } @@ -173,13 +173,13 @@ func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv // Then the service port was just closed so the accept failure is to be expected. return } - glog.Errorf("Accept failed: %v", err) + klog.Errorf("Accept failed: %v", err) continue } - glog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr()) + klog.V(3).Infof("Accepted TCP connection from %v to %v", inConn.RemoteAddr(), inConn.LocalAddr()) outConn, err := tryConnect(service, inConn.(*net.TCPConn).RemoteAddr(), "tcp", proxier) if err != nil { - glog.Errorf("Failed to connect to balancer: %v", err) + klog.Errorf("Failed to connect to balancer: %v", err) inConn.Close() continue } @@ -192,7 +192,7 @@ func (tcp *tcpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv func proxyTCP(in, out *net.TCPConn) { var wg sync.WaitGroup wg.Add(2) - glog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v", + klog.V(4).Infof("Creating proxy between %v <-> %v <-> %v <-> %v", in.RemoteAddr(), in.LocalAddr(), out.LocalAddr(), out.RemoteAddr()) go copyBytes("from backend", in, out, &wg) go copyBytes("to backend", out, in, &wg) @@ -201,14 +201,14 @@ func proxyTCP(in, out *net.TCPConn) { func copyBytes(direction string, dest, src *net.TCPConn, wg *sync.WaitGroup) { defer wg.Done() - glog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr()) + klog.V(4).Infof("Copying %s: %s -> %s", direction, src.RemoteAddr(), dest.RemoteAddr()) n, err := io.Copy(dest, src) if err != nil { if !isClosedError(err) { - glog.Errorf("I/O error: %v", err) + klog.Errorf("I/O error: %v", err) } } - glog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr()) + klog.V(4).Infof("Copied %d bytes %s: %s -> %s", n, direction, src.RemoteAddr(), dest.RemoteAddr()) dest.Close() src.Close() } @@ -283,7 +283,7 @@ func appendDNSSuffix(msg *dns.Msg, buffer []byte, length int, dnsSuffix string) msg.Question[0].Name = origName if err != nil { - glog.Warningf("Unable to pack DNS packet. Error is: %v", err) + klog.Warningf("Unable to pack DNS packet. Error is: %v", err) return length, err } @@ -310,7 +310,7 @@ func recoverDNSQuestion(origName string, msg *dns.Msg, buffer []byte, length int mbuf, err := msg.PackBuffer(buffer) if err != nil { - glog.Warningf("Unable to pack DNS packet. Error is: %v", err) + klog.Warningf("Unable to pack DNS packet. 
Error is: %v", err) return length, err } @@ -330,7 +330,7 @@ func processUnpackedDNSQueryPacket( length int, dnsSearch []string) int { if dnsSearch == nil || len(dnsSearch) == 0 { - glog.V(1).Infof("DNS search list is not initialized and is empty.") + klog.V(1).Infof("DNS search list is not initialized and is empty.") return length } @@ -348,13 +348,13 @@ func processUnpackedDNSQueryPacket( state.msg.MsgHdr.Id = msg.MsgHdr.Id if index < 0 || index >= int32(len(dnsSearch)) { - glog.V(1).Infof("Search index %d is out of range.", index) + klog.V(1).Infof("Search index %d is out of range.", index) return length } length, err := appendDNSSuffix(msg, buffer, length, dnsSearch[index]) if err != nil { - glog.Errorf("Append DNS suffix failed: %v", err) + klog.Errorf("Append DNS suffix failed: %v", err) } return length @@ -373,7 +373,7 @@ func processUnpackedDNSResponsePacket( var drop bool var err error if dnsSearch == nil || len(dnsSearch) == 0 { - glog.V(1).Infof("DNS search list is not initialized and is empty.") + klog.V(1).Infof("DNS search list is not initialized and is empty.") return drop, length } @@ -389,19 +389,19 @@ func processUnpackedDNSResponsePacket( drop = true length, err = appendDNSSuffix(state.msg, buffer, length, dnsSearch[index]) if err != nil { - glog.Errorf("Append DNS suffix failed: %v", err) + klog.Errorf("Append DNS suffix failed: %v", err) } _, err = svrConn.Write(buffer[0:length]) if err != nil { if !logTimeout(err) { - glog.Errorf("Write failed: %v", err) + klog.Errorf("Write failed: %v", err) } } } else { length, err = recoverDNSQuestion(state.msg.Question[0].Name, msg, buffer, length) if err != nil { - glog.Errorf("Recover DNS question failed: %v", err) + klog.Errorf("Recover DNS question failed: %v", err) } dnsClients.mu.Lock() @@ -421,7 +421,7 @@ func processDNSQueryPacket( dnsSearch []string) (int, error) { msg := &dns.Msg{} if err := msg.Unpack(buffer[:length]); err != nil { - glog.Warningf("Unable to unpack DNS packet. Error is: %v", err) + klog.Warningf("Unable to unpack DNS packet. Error is: %v", err) return length, err } @@ -432,14 +432,14 @@ func processDNSQueryPacket( // QDCOUNT if len(msg.Question) != 1 { - glog.V(1).Infof("Number of entries in the question section of the DNS packet is: %d", len(msg.Question)) - glog.V(1).Infof("DNS suffix appending does not support more than one question.") + klog.V(1).Infof("Number of entries in the question section of the DNS packet is: %d", len(msg.Question)) + klog.V(1).Infof("DNS suffix appending does not support more than one question.") return length, nil } // ANCOUNT, NSCOUNT, ARCOUNT if len(msg.Answer) != 0 || len(msg.Ns) != 0 || len(msg.Extra) != 0 { - glog.V(1).Infof("DNS packet contains more than question section.") + klog.V(1).Infof("DNS packet contains more than question section.") return length, nil } @@ -448,7 +448,7 @@ func processDNSQueryPacket( if packetRequiresDNSSuffix(dnsQType, dnsQClass) { host, _, err := net.SplitHostPort(cliAddr.String()) if err != nil { - glog.V(1).Infof("Failed to get host from client address: %v", err) + klog.V(1).Infof("Failed to get host from client address: %v", err) host = cliAddr.String() } @@ -468,7 +468,7 @@ func processDNSResponsePacket( var drop bool msg := &dns.Msg{} if err := msg.Unpack(buffer[:length]); err != nil { - glog.Warningf("Unable to unpack DNS packet. Error is: %v", err) + klog.Warningf("Unable to unpack DNS packet. 
Error is: %v", err) return drop, length, err } @@ -479,7 +479,7 @@ func processDNSResponsePacket( // QDCOUNT if len(msg.Question) != 1 { - glog.V(1).Infof("Number of entries in the response section of the DNS packet is: %d", len(msg.Answer)) + klog.V(1).Infof("Number of entries in the response section of the DNS packet is: %d", len(msg.Answer)) return drop, length, nil } @@ -488,7 +488,7 @@ func processDNSResponsePacket( if packetRequiresDNSSuffix(dnsQType, dnsQClass) { host, _, err := net.SplitHostPort(cliAddr.String()) if err != nil { - glog.V(1).Infof("Failed to get host from client address: %v", err) + klog.V(1).Infof("Failed to get host from client address: %v", err) host = cliAddr.String() } @@ -525,11 +525,11 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv if err != nil { if e, ok := err.(net.Error); ok { if e.Temporary() { - glog.V(1).Infof("ReadFrom had a temporary failure: %v", err) + klog.V(1).Infof("ReadFrom had a temporary failure: %v", err) continue } } - glog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err) + klog.Errorf("ReadFrom failed, exiting ProxyLoop: %v", err) break } @@ -537,7 +537,7 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv if isDNSService(service.Port) { n, err = processDNSQueryPacket(myInfo.dnsClients, cliAddr, buffer[:], n, dnsSearch) if err != nil { - glog.Errorf("Process DNS query packet failed: %v", err) + klog.Errorf("Process DNS query packet failed: %v", err) } } @@ -551,14 +551,14 @@ func (udp *udpProxySocket) ProxyLoop(service ServicePortPortalName, myInfo *serv _, err = svrConn.Write(buffer[0:n]) if err != nil { if !logTimeout(err) { - glog.Errorf("Write failed: %v", err) + klog.Errorf("Write failed: %v", err) // TODO: Maybe tear down the goroutine for this client/server pair? } continue } err = svrConn.SetDeadline(time.Now().Add(myInfo.timeout)) if err != nil { - glog.Errorf("SetDeadline failed: %v", err) + klog.Errorf("SetDeadline failed: %v", err) continue } } @@ -572,14 +572,14 @@ func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, dnsClients if !found { // TODO: This could spin up a new goroutine to make the outbound connection, // and keep accepting inbound traffic. 
- glog.V(3).Infof("New UDP connection from %s", cliAddr) + klog.V(3).Infof("New UDP connection from %s", cliAddr) var err error svrConn, err = tryConnect(service, cliAddr, "udp", proxier) if err != nil { return nil, err } if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil { - glog.Errorf("SetDeadline failed: %v", err) + klog.Errorf("SetDeadline failed: %v", err) return nil, err } activeClients.clients[cliAddr.String()] = svrConn @@ -600,7 +600,7 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ n, err := svrConn.Read(buffer[0:]) if err != nil { if !logTimeout(err) { - glog.Errorf("Read failed: %v", err) + klog.Errorf("Read failed: %v", err) } break } @@ -609,20 +609,20 @@ func (udp *udpProxySocket) proxyClient(cliAddr net.Addr, svrConn net.Conn, activ if isDNSService(service.Port) { drop, n, err = processDNSResponsePacket(svrConn, dnsClients, cliAddr, buffer[:], n, dnsSearch) if err != nil { - glog.Errorf("Process DNS response packet failed: %v", err) + klog.Errorf("Process DNS response packet failed: %v", err) } } if !drop { err = svrConn.SetDeadline(time.Now().Add(timeout)) if err != nil { - glog.Errorf("SetDeadline failed: %v", err) + klog.Errorf("SetDeadline failed: %v", err) break } n, err = udp.WriteTo(buffer[0:n], cliAddr) if err != nil { if !logTimeout(err) { - glog.Errorf("WriteTo failed: %v", err) + klog.Errorf("WriteTo failed: %v", err) } break } diff --git a/pkg/proxy/winuserspace/roundrobin.go b/pkg/proxy/winuserspace/roundrobin.go index d3135a038afa0..a712ed60bd12a 100644 --- a/pkg/proxy/winuserspace/roundrobin.go +++ b/pkg/proxy/winuserspace/roundrobin.go @@ -25,9 +25,9 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/util/slice" ) @@ -82,7 +82,7 @@ func NewLoadBalancerRR() *LoadBalancerRR { } func (lb *LoadBalancerRR) NewService(svcPort proxy.ServicePortName, affinityType v1.ServiceAffinity, ttlSeconds int) error { - glog.V(4).Infof("LoadBalancerRR NewService %q", svcPort) + klog.V(4).Infof("LoadBalancerRR NewService %q", svcPort) lb.lock.Lock() defer lb.lock.Unlock() lb.newServiceInternal(svcPort, affinityType, ttlSeconds) @@ -97,7 +97,7 @@ func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affi if _, exists := lb.services[svcPort]; !exists { lb.services[svcPort] = &balancerState{affinity: *newAffinityPolicy(affinityType, ttlSeconds)} - glog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort) + klog.V(4).Infof("LoadBalancerRR service %q did not exist, created", svcPort) } else if affinityType != "" { lb.services[svcPort].affinity.affinityType = affinityType } @@ -105,7 +105,7 @@ func (lb *LoadBalancerRR) newServiceInternal(svcPort proxy.ServicePortName, affi } func (lb *LoadBalancerRR) DeleteService(svcPort proxy.ServicePortName) { - glog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort) + klog.V(4).Infof("LoadBalancerRR DeleteService %q", svcPort) lb.lock.Lock() defer lb.lock.Unlock() delete(lb.services, svcPort) @@ -135,7 +135,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne if len(state.endpoints) == 0 { return "", ErrMissingEndpoints } - glog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints) + klog.V(4).Infof("NextEndpoint for service %q, srcAddr=%v: endpoints: %+v", svcPort, srcAddr, state.endpoints) sessionAffinityEnabled := 
isSessionAffinity(&state.affinity) @@ -153,7 +153,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne // Affinity wins. endpoint := sessionAffinity.endpoint sessionAffinity.lastUsed = time.Now() - glog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint) + klog.V(4).Infof("NextEndpoint for service %q from IP %s with sessionAffinity %#v: %s", svcPort, ipaddr, sessionAffinity, endpoint) return endpoint, nil } } @@ -172,7 +172,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne affinity.lastUsed = time.Now() affinity.endpoint = endpoint affinity.clientIP = ipaddr - glog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr]) + klog.V(4).Infof("Updated affinity key %s: %#v", ipaddr, state.affinity.affinityMap[ipaddr]) } return endpoint, nil @@ -204,7 +204,7 @@ func flattenValidEndpoints(endpoints []hostPortPair) []string { func removeSessionAffinityByEndpoint(state *balancerState, svcPort proxy.ServicePortName, endpoint string) { for _, affinity := range state.affinity.affinityMap { if affinity.endpoint == endpoint { - glog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort) + klog.V(4).Infof("Removing client: %s from affinityMap for service %q", affinity.endpoint, svcPort) delete(state.affinity.affinityMap, affinity.clientIP) } } @@ -227,7 +227,7 @@ func (lb *LoadBalancerRR) updateAffinityMap(svcPort proxy.ServicePortName, newEn } for mKey, mVal := range allEndpoints { if mVal == 1 { - glog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort) + klog.V(2).Infof("Delete endpoint %s for service %q", mKey, svcPort) removeSessionAffinityByEndpoint(state, svcPort, mKey) } } @@ -263,7 +263,7 @@ func (lb *LoadBalancerRR) OnEndpointsAdd(endpoints *v1.Endpoints) { state, exists := lb.services[svcPort] if !exists || state == nil || len(newEndpoints) > 0 { - glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) + klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) lb.updateAffinityMap(svcPort, newEndpoints) // OnEndpointsAdd can be called without NewService being called externally. // To be safe we will call it here. A new service will only be created @@ -297,7 +297,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoint } if !exists || state == nil || len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) { - glog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) + klog.V(1).Infof("LoadBalancerRR: Setting endpoints for %s to %+v", svcPort, newEndpoints) lb.updateAffinityMap(svcPort, newEndpoints) // OnEndpointsUpdate can be called without NewService being called externally. // To be safe we will call it here. A new service will only be created @@ -315,7 +315,7 @@ func (lb *LoadBalancerRR) OnEndpointsUpdate(oldEndpoints, endpoints *v1.Endpoint for portname := range oldPortsToEndpoints { svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname} if _, exists := registeredEndpoints[svcPort]; !exists { - glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) + klog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) // Reset but don't delete. 
state := lb.services[svcPort] state.endpoints = []string{} @@ -333,7 +333,7 @@ func (lb *LoadBalancerRR) OnEndpointsDelete(endpoints *v1.Endpoints) { for portname := range portsToEndpoints { svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}, Port: portname} - glog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) + klog.V(2).Infof("LoadBalancerRR: Removing endpoints for %s", svcPort) // If the service is still around, reset but don't delete. if state, ok := lb.services[svcPort]; ok { state.endpoints = []string{} @@ -367,7 +367,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa } for ip, affinity := range state.affinity.affinityMap { if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds { - glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort) + klog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort) delete(state.affinity.affinityMap, ip) } } diff --git a/pkg/registry/core/rest/BUILD b/pkg/registry/core/rest/BUILD index 1931f2e91599f..f03a3434662b9 100644 --- a/pkg/registry/core/rest/BUILD +++ b/pkg/registry/core/rest/BUILD @@ -60,7 +60,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/storage/etcd/util:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/registry/core/rest/storage_core.go b/pkg/registry/core/rest/storage_core.go index 78202ee47eb5c..f188d9e49a455 100644 --- a/pkg/registry/core/rest/storage_core.go +++ b/pkg/registry/core/rest/storage_core.go @@ -25,7 +25,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime/schema" utilnet "k8s.io/apimachinery/pkg/util/net" @@ -260,7 +260,7 @@ func (s componentStatusStorage) serversToValidate() map[string]*componentstatus. for ix, machine := range s.storageFactory.Backends() { etcdUrl, err := url.Parse(machine.Server) if err != nil { - glog.Errorf("Failed to parse etcd url for validation: %v", err) + klog.Errorf("Failed to parse etcd url for validation: %v", err) continue } var port int @@ -269,7 +269,7 @@ func (s componentStatusStorage) serversToValidate() map[string]*componentstatus. 
var portString string addr, portString, err = net.SplitHostPort(etcdUrl.Host) if err != nil { - glog.Errorf("Failed to split host/port: %s (%v)", etcdUrl.Host, err) + klog.Errorf("Failed to split host/port: %s (%v)", etcdUrl.Host, err) continue } port, _ = strconv.Atoi(portString) diff --git a/pkg/registry/core/service/portallocator/BUILD b/pkg/registry/core/service/portallocator/BUILD index 6da73adec1b22..f2251bf9c838a 100644 --- a/pkg/registry/core/service/portallocator/BUILD +++ b/pkg/registry/core/service/portallocator/BUILD @@ -17,7 +17,7 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/registry/core/service/allocator:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/registry/core/service/portallocator/allocator.go b/pkg/registry/core/service/portallocator/allocator.go index a9db71cd32843..f33a2ce79e7ea 100644 --- a/pkg/registry/core/service/portallocator/allocator.go +++ b/pkg/registry/core/service/portallocator/allocator.go @@ -24,7 +24,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/service/allocator" - "github.com/golang/glog" + "k8s.io/klog" ) // Interface manages the allocation of ports out of a range. Interface @@ -152,7 +152,7 @@ func (r *PortAllocator) ForEach(fn func(int)) { func (r *PortAllocator) Release(port int) error { ok, offset := r.contains(port) if !ok { - glog.Warningf("port is not in the range when release it. port: %v", port) + klog.Warningf("port is not in the range when releasing it. port: %v", port) return nil } diff --git a/pkg/registry/core/service/storage/BUILD b/pkg/registry/core/service/storage/BUILD index 779dd91710e8f..40c4719a256d9 100644 --- a/pkg/registry/core/service/storage/BUILD +++ b/pkg/registry/core/service/storage/BUILD @@ -75,7 +75,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/dryrun:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/registry/core/service/storage/rest.go b/pkg/registry/core/service/storage/rest.go index 15862cdb99fcf..b0a54a21951cd 100644 --- a/pkg/registry/core/service/storage/rest.go +++ b/pkg/registry/core/service/storage/rest.go @@ -25,7 +25,6 @@ import ( "net/url" "strconv" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,6 +37,7 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/util/dryrun" + "k8s.io/klog" apiservice "k8s.io/kubernetes/pkg/api/service" api "k8s.io/kubernetes/pkg/apis/core" @@ -306,7 +306,7 @@ func (rs *REST) healthCheckNodePortUpdate(oldService, service *api.Service, node // Allocate a health check node port or attempt to reserve the user-specified one if provided. // Insert health check node port into the service's HealthCheckNodePort field if needed. 
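One behavioral detail preserved in PortAllocator.Release above: releasing a port outside the allocator's configured range logs a warning and returns nil rather than an error, so callers may release unconditionally. A hedged usage sketch; the allocator value and port number are hypothetical:

// Attempting to release a port the allocator never owned is not
// treated as a failure; it is logged at Warning level and ignored.
if err := portAllocator.Release(30080); err != nil {
	klog.Errorf("failed to release health check node port: %v", err)
}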
diff --git a/pkg/registry/core/service/portallocator/BUILD b/pkg/registry/core/service/portallocator/BUILD
index 6da73adec1b22..f2251bf9c838a 100644
--- a/pkg/registry/core/service/portallocator/BUILD
+++ b/pkg/registry/core/service/portallocator/BUILD
@@ -17,7 +17,7 @@ go_library(
         "//pkg/apis/core:go_default_library",
         "//pkg/registry/core/service/allocator:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/registry/core/service/portallocator/allocator.go b/pkg/registry/core/service/portallocator/allocator.go
index a9db71cd32843..f33a2ce79e7ea 100644
--- a/pkg/registry/core/service/portallocator/allocator.go
+++ b/pkg/registry/core/service/portallocator/allocator.go
@@ -24,7 +24,7 @@ import (
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/registry/core/service/allocator"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 // Interface manages the allocation of ports out of a range. Interface
@@ -152,7 +152,7 @@ func (r *PortAllocator) ForEach(fn func(int)) {
 func (r *PortAllocator) Release(port int) error {
 	ok, offset := r.contains(port)
 	if !ok {
-		glog.Warningf("port is not in the range when release it. port: %v", port)
+		klog.Warningf("port is not in the range when release it. port: %v", port)
 		return nil
 	}
diff --git a/pkg/registry/core/service/storage/BUILD b/pkg/registry/core/service/storage/BUILD
index 779dd91710e8f..40c4719a256d9 100644
--- a/pkg/registry/core/service/storage/BUILD
+++ b/pkg/registry/core/service/storage/BUILD
@@ -75,7 +75,7 @@ go_library(
         "//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/registry/rest:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/dryrun:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/registry/core/service/storage/rest.go b/pkg/registry/core/service/storage/rest.go
index 15862cdb99fcf..b0a54a21951cd 100644
--- a/pkg/registry/core/service/storage/rest.go
+++ b/pkg/registry/core/service/storage/rest.go
@@ -25,7 +25,6 @@ import (
 	"net/url"
 	"strconv"
 
-	"github.com/golang/glog"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -38,6 +37,7 @@ import (
 	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/apiserver/pkg/registry/rest"
 	"k8s.io/apiserver/pkg/util/dryrun"
+	"k8s.io/klog"
 
 	apiservice "k8s.io/kubernetes/pkg/api/service"
 	api "k8s.io/kubernetes/pkg/apis/core"
@@ -306,7 +306,7 @@ func (rs *REST) healthCheckNodePortUpdate(oldService, service *api.Service, node
 	// Allocate a health check node port or attempt to reserve the user-specified one if provided.
 	// Insert health check node port into the service's HealthCheckNodePort field if needed.
 	case !neededHealthCheckNodePort && needsHealthCheckNodePort:
-		glog.Infof("Transition to LoadBalancer type service with ExternalTrafficPolicy=Local")
+		klog.Infof("Transition to LoadBalancer type service with ExternalTrafficPolicy=Local")
 		if err := allocateHealthCheckNodePort(service, nodePortOp); err != nil {
 			return false, errors.NewInternalError(err)
 		}
@@ -314,8 +314,8 @@ func (rs *REST) healthCheckNodePortUpdate(oldService, service *api.Service, node
 	// Case 2: Transition from needs HealthCheckNodePort to don't need HealthCheckNodePort.
 	// Free the existing healthCheckNodePort and clear the HealthCheckNodePort field.
 	case neededHealthCheckNodePort && !needsHealthCheckNodePort:
-		glog.Infof("Transition to non LoadBalancer type service or LoadBalancer type service with ExternalTrafficPolicy=Global")
-		glog.V(4).Infof("Releasing healthCheckNodePort: %d", oldHealthCheckNodePort)
+		klog.Infof("Transition to non LoadBalancer type service or LoadBalancer type service with ExternalTrafficPolicy=Global")
+		klog.V(4).Infof("Releasing healthCheckNodePort: %d", oldHealthCheckNodePort)
 		nodePortOp.ReleaseDeferred(int(oldHealthCheckNodePort))
 		// Clear the HealthCheckNodePort field.
 		service.Spec.HealthCheckNodePort = 0
@@ -324,7 +324,7 @@ func (rs *REST) healthCheckNodePortUpdate(oldService, service *api.Service, node
 	// Reject changing the value of the HealthCheckNodePort field.
 	case neededHealthCheckNodePort && needsHealthCheckNodePort:
 		if oldHealthCheckNodePort != newHealthCheckNodePort {
-			glog.Warningf("Attempt to change value of health check node port DENIED")
+			klog.Warningf("Attempt to change value of health check node port DENIED")
 			fldPath := field.NewPath("spec", "healthCheckNodePort")
 			el := field.ErrorList{field.Invalid(fldPath, newHealthCheckNodePort,
 				"cannot change healthCheckNodePort on loadBalancer service with externalTraffic=Local during update")}
@@ -571,7 +571,7 @@ func allocateHealthCheckNodePort(service *api.Service, nodePortOp *portallocator
 			return fmt.Errorf("failed to allocate requested HealthCheck NodePort %v: %v",
 				healthCheckNodePort, err)
 		}
-		glog.V(4).Infof("Reserved user requested healthCheckNodePort: %d", healthCheckNodePort)
+		klog.V(4).Infof("Reserved user requested healthCheckNodePort: %d", healthCheckNodePort)
 	} else {
 		// If the request has no health check nodePort specified, allocate any.
 		healthCheckNodePort, err := nodePortOp.AllocateNext()
@@ -579,7 +579,7 @@ func allocateHealthCheckNodePort(service *api.Service, nodePortOp *portallocator
 			return fmt.Errorf("failed to allocate a HealthCheck NodePort %v: %v",
 				healthCheckNodePort, err)
 		}
 		service.Spec.HealthCheckNodePort = int32(healthCheckNodePort)
-		glog.V(4).Infof("Reserved allocated healthCheckNodePort: %d", healthCheckNodePort)
+		klog.V(4).Infof("Reserved allocated healthCheckNodePort: %d", healthCheckNodePort)
 	}
 	return nil
 }
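
allocateHealthCheckNodePort above is a reserve-or-allocate pattern: honor a user-supplied port if one is set, otherwise take the next free one. Reduced to its skeleton; the Allocator interface below is hypothetical, standing in for the node-port allocation operation:

package portsketch

import "fmt"

// Allocator stands in for the nodePortOp used above; the interface and
// method names are hypothetical, for illustration only.
type Allocator interface {
	Allocate(port int) error    // reserve a specific port
	AllocateNext() (int, error) // pick any free port
}

// reserveOrAllocate returns the requested port after reserving it, or an
// arbitrary free port when no specific one was requested (requested == 0).
func reserveOrAllocate(op Allocator, requested int) (int, error) {
	if requested != 0 {
		if err := op.Allocate(requested); err != nil {
			return 0, fmt.Errorf("failed to allocate requested port %v: %v", requested, err)
		}
		return requested, nil
	}
	port, err := op.AllocateNext()
	if err != nil {
		return 0, fmt.Errorf("failed to allocate a port: %v", err)
	}
	return port, nil
}
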
diff --git a/pkg/registry/rbac/rest/BUILD b/pkg/registry/rbac/rest/BUILD
index 7686c89b4d7e3..0a6c7efa24527 100644
--- a/pkg/registry/rbac/rest/BUILD
+++ b/pkg/registry/rbac/rest/BUILD
@@ -43,7 +43,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1:go_default_library",
         "//staging/src/k8s.io/client-go/util/retry:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/registry/rbac/rest/storage_rbac.go b/pkg/registry/rbac/rest/storage_rbac.go
index 6568688e94a16..1c0b500f9d864 100644
--- a/pkg/registry/rbac/rest/storage_rbac.go
+++ b/pkg/registry/rbac/rest/storage_rbac.go
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	rbacapiv1 "k8s.io/api/rbac/v1"
 	rbacapiv1alpha1 "k8s.io/api/rbac/v1alpha1"
@@ -180,11 +180,11 @@ func (p *PolicyData) EnsureRBACPolicy() genericapiserver.PostStartHookFunc {
 			}
 			switch {
 			case result.Protected && result.Operation != reconciliation.ReconcileNone:
-				glog.Warningf("skipped reconcile-protected clusterrole.%s/%s with missing permissions: %v", rbac.GroupName, clusterRole.Name, result.MissingRules)
+				klog.Warningf("skipped reconcile-protected clusterrole.%s/%s with missing permissions: %v", rbac.GroupName, clusterRole.Name, result.MissingRules)
 			case result.Operation == reconciliation.ReconcileUpdate:
-				glog.Infof("updated clusterrole.%s/%s with additional permissions: %v", rbac.GroupName, clusterRole.Name, result.MissingRules)
+				klog.Infof("updated clusterrole.%s/%s with additional permissions: %v", rbac.GroupName, clusterRole.Name, result.MissingRules)
 			case result.Operation == reconciliation.ReconcileCreate:
-				glog.Infof("created clusterrole.%s/%s", rbac.GroupName, clusterRole.Name)
+				klog.Infof("created clusterrole.%s/%s", rbac.GroupName, clusterRole.Name)
 			}
 			return nil
 		})
@@ -208,13 +208,13 @@ func (p *PolicyData) EnsureRBACPolicy() genericapiserver.PostStartHookFunc {
 			}
 			switch {
 			case result.Protected && result.Operation != reconciliation.ReconcileNone:
-				glog.Warningf("skipped reconcile-protected clusterrolebinding.%s/%s with missing subjects: %v", rbac.GroupName, clusterRoleBinding.Name, result.MissingSubjects)
+				klog.Warningf("skipped reconcile-protected clusterrolebinding.%s/%s with missing subjects: %v", rbac.GroupName, clusterRoleBinding.Name, result.MissingSubjects)
 			case result.Operation == reconciliation.ReconcileUpdate:
-				glog.Infof("updated clusterrolebinding.%s/%s with additional subjects: %v", rbac.GroupName, clusterRoleBinding.Name, result.MissingSubjects)
+				klog.Infof("updated clusterrolebinding.%s/%s with additional subjects: %v", rbac.GroupName, clusterRoleBinding.Name, result.MissingSubjects)
 			case result.Operation == reconciliation.ReconcileCreate:
-				glog.Infof("created clusterrolebinding.%s/%s", rbac.GroupName, clusterRoleBinding.Name)
+				klog.Infof("created clusterrolebinding.%s/%s", rbac.GroupName, clusterRoleBinding.Name)
 			case result.Operation == reconciliation.ReconcileRecreate:
-				glog.Infof("recreated clusterrolebinding.%s/%s", rbac.GroupName, clusterRoleBinding.Name)
+				klog.Infof("recreated clusterrolebinding.%s/%s", rbac.GroupName, clusterRoleBinding.Name)
 			}
 			return nil
 		})
@@ -239,11 +239,11 @@ func (p *PolicyData) EnsureRBACPolicy() genericapiserver.PostStartHookFunc {
 				}
 				switch {
 				case result.Protected && result.Operation != reconciliation.ReconcileNone:
-					glog.Warningf("skipped reconcile-protected role.%s/%s in %v with missing permissions: %v", rbac.GroupName, role.Name, namespace, result.MissingRules)
+					klog.Warningf("skipped reconcile-protected role.%s/%s in %v with missing permissions: %v", rbac.GroupName, role.Name, namespace, result.MissingRules)
 				case result.Operation == reconciliation.ReconcileUpdate:
-					glog.Infof("updated role.%s/%s in %v with additional permissions: %v", rbac.GroupName, role.Name, namespace, result.MissingRules)
+					klog.Infof("updated role.%s/%s in %v with additional permissions: %v", rbac.GroupName, role.Name, namespace, result.MissingRules)
 				case result.Operation == reconciliation.ReconcileCreate:
-					glog.Infof("created role.%s/%s in %v", rbac.GroupName, role.Name, namespace)
+					klog.Infof("created role.%s/%s in %v", rbac.GroupName, role.Name, namespace)
 				}
 				return nil
 			})
@@ -269,13 +269,13 @@ func (p *PolicyData) EnsureRBACPolicy() genericapiserver.PostStartHookFunc {
 				}
 				switch {
 				case result.Protected && result.Operation != reconciliation.ReconcileNone:
-					glog.Warningf("skipped reconcile-protected rolebinding.%s/%s in %v with missing subjects: %v", rbac.GroupName, roleBinding.Name, namespace, result.MissingSubjects)
+					klog.Warningf("skipped reconcile-protected rolebinding.%s/%s in %v with missing subjects: %v", rbac.GroupName, roleBinding.Name, namespace, result.MissingSubjects)
 				case result.Operation == reconciliation.ReconcileUpdate:
-					glog.Infof("updated rolebinding.%s/%s in %v with additional subjects: %v", rbac.GroupName, roleBinding.Name, namespace, result.MissingSubjects)
+					klog.Infof("updated rolebinding.%s/%s in %v with additional subjects: %v", rbac.GroupName, roleBinding.Name, namespace, result.MissingSubjects)
 				case result.Operation == reconciliation.ReconcileCreate:
-					glog.Infof("created rolebinding.%s/%s in %v", rbac.GroupName, roleBinding.Name, namespace)
+					klog.Infof("created rolebinding.%s/%s in %v", rbac.GroupName, roleBinding.Name, namespace)
 				case result.Operation == reconciliation.ReconcileRecreate:
-					glog.Infof("recreated rolebinding.%s/%s in %v", rbac.GroupName, roleBinding.Name, namespace)
+					klog.Infof("recreated rolebinding.%s/%s in %v", rbac.GroupName, roleBinding.Name, namespace)
 				}
 				return nil
 			})
@@ -324,7 +324,7 @@ func primeAggregatedClusterRoles(clusterRolesToAggregate map[string]string, clus
 			// the old role already moved to an aggregated role, so there are no custom rules to migrate at this point
 			return nil
 		}
-		glog.V(1).Infof("migrating %v to %v", existingRole.Name, newName)
+		klog.V(1).Infof("migrating %v to %v", existingRole.Name, newName)
 		existingRole.Name = newName
 		existingRole.ResourceVersion = "" // clear this so the object can be created.
 		if _, err := clusterRoleClient.ClusterRoles().Create(existingRole); err != nil && !apierrors.IsAlreadyExists(err) {
diff --git a/pkg/registry/rbac/validation/BUILD b/pkg/registry/rbac/validation/BUILD
index ad664075455b1..00095ac361259 100644
--- a/pkg/registry/rbac/validation/BUILD
+++ b/pkg/registry/rbac/validation/BUILD
@@ -41,7 +41,7 @@ go_library(
         "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/registry/rbac/validation/rule.go b/pkg/registry/rbac/validation/rule.go
index 833ffc1e6c1ed..6c88791f8bef7 100644
--- a/pkg/registry/rbac/validation/rule.go
+++ b/pkg/registry/rbac/validation/rule.go
@@ -22,7 +22,7 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	rbacv1 "k8s.io/api/rbac/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -61,7 +61,7 @@ func ConfirmNoEscalation(ctx context.Context, ruleResolver AuthorizationRuleReso
 	ownerRules, err := ruleResolver.RulesFor(user, namespace)
 	if err != nil {
 		// As per AuthorizationRuleResolver contract, this may return a non fatal error with an incomplete list of policies. Log the error and continue.
-		glog.V(1).Infof("non-fatal error getting local rules for %v: %v", user, err)
+		klog.V(1).Infof("non-fatal error getting local rules for %v: %v", user, err)
 		ruleResolutionErrors = append(ruleResolutionErrors, err)
 	}
diff --git a/pkg/registry/scheduling/rest/BUILD b/pkg/registry/scheduling/rest/BUILD
index bb9ed054c6306..25f8f79d4376c 100644
--- a/pkg/registry/scheduling/rest/BUILD
+++ b/pkg/registry/scheduling/rest/BUILD
@@ -24,7 +24,7 @@ go_library(
         "//staging/src/k8s.io/apiserver/pkg/registry/rest:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/server/storage:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/registry/scheduling/rest/storage_scheduling.go b/pkg/registry/scheduling/rest/storage_scheduling.go
index aad6e09ec048e..ed291b32c6506 100644
--- a/pkg/registry/scheduling/rest/storage_scheduling.go
+++ b/pkg/registry/scheduling/rest/storage_scheduling.go
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -88,16 +88,16 @@ func AddSystemPriorityClasses() genericapiserver.PostStartHookFunc {
 					if err != nil && !apierrors.IsAlreadyExists(err) {
 						return false, err
 					} else {
-						glog.Infof("created PriorityClass %s with value %v", pc.Name, pc.Value)
+						klog.Infof("created PriorityClass %s with value %v", pc.Name, pc.Value)
 					}
 				} else {
 					// Unable to get the priority class for reasons other than "not found".
-					glog.Warningf("unable to get PriorityClass %v: %v. Retrying...", pc.Name, err)
+					klog.Warningf("unable to get PriorityClass %v: %v. Retrying...", pc.Name, err)
 					return false, err
 				}
 			}
 		}
-		glog.Infof("all system priority classes are created successfully or already exist.")
+		klog.Infof("all system priority classes are created successfully or already exist.")
 		return true, nil
 	})
 	// if we're never able to make it through initialization, kill the API server.
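
AddSystemPriorityClasses above loops until every built-in class exists, treating IsAlreadyExists as success rather than failure, which makes the hook safe to re-run. The retry skeleton, with the create call abstracted away; the intervals below are illustrative, not the ones the hook uses:

package prioritysketch

import (
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
)

// ensureCreated retries createFn until it succeeds or reports that the
// object already exists; any other error is retried until the timeout.
func ensureCreated(createFn func() error) error {
	return wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
		if err := createFn(); err != nil && !apierrors.IsAlreadyExists(err) {
			return false, nil // transient failure: poll again
		}
		return true, nil // created now, or created earlier
	})
}
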
diff --git a/pkg/scheduler/BUILD b/pkg/scheduler/BUILD
index 22585cbad75c5..1158a60ad5c25 100644
--- a/pkg/scheduler/BUILD
+++ b/pkg/scheduler/BUILD
@@ -34,7 +34,7 @@ go_library(
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/scheduler/algorithm/predicates/BUILD b/pkg/scheduler/algorithm/predicates/BUILD
index fa735524e35e4..0653a92a7af87 100644
--- a/pkg/scheduler/algorithm/predicates/BUILD
+++ b/pkg/scheduler/algorithm/predicates/BUILD
@@ -41,7 +41,7 @@ go_library(
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go b/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go
index 8e155ea2f18a5..ff2215eb28daf 100644
--- a/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go
+++ b/pkg/scheduler/algorithm/predicates/csi_volume_predicate.go
@@ -19,9 +19,9 @@ package predicates
 import (
 	"fmt"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
@@ -126,26 +126,26 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
 		pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)
 		if err != nil {
-			glog.V(4).Infof("Unable to look up PVC info for %s/%s", namespace, pvcName)
+			klog.V(4).Infof("Unable to look up PVC info for %s/%s", namespace, pvcName)
 			continue
 		}
 
 		pvName := pvc.Spec.VolumeName
 		// TODO - the actual handling of unbound PVCs will be fixed by late binding design.
 		if pvName == "" {
-			glog.V(4).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName)
+			klog.V(4).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName)
 			continue
 		}
 		pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
 		if err != nil {
-			glog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
+			klog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
 			continue
 		}
 
 		csiSource := pv.Spec.PersistentVolumeSource.CSI
 		if csiSource == nil {
-			glog.V(4).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName)
+			klog.V(4).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName)
 			continue
 		}
 		driverName := csiSource.Driver
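
filterAttachableVolumes above resolves pod volume to PVC to PV to CSI source, and deliberately degrades gracefully: it logs at V(4) and skips anything it cannot resolve instead of failing the whole predicate. The same skip-and-continue shape in miniature; the types and lookup functions below are hypothetical stand-ins for the scheduler's informers:

package csisketch

import "k8s.io/klog"

// Minimal stand-ins for the lister lookups used above; hypothetical,
// trimmed down for illustration.
type pvc struct{ VolumeName string }
type pv struct{ CSIDriver string } // empty means: not a CSI volume

// countCSIVolumes walks claim names and tallies volumes per CSI driver,
// skipping anything that cannot be resolved rather than failing hard.
func countCSIVolumes(claims []string, getPVC func(string) (*pvc, error), getPV func(string) (*pv, error)) map[string]int {
	counts := map[string]int{}
	for _, name := range claims {
		c, err := getPVC(name)
		if err != nil {
			klog.V(4).Infof("Unable to look up PVC info for %s", name)
			continue
		}
		if c.VolumeName == "" {
			klog.V(4).Infof("PVC %s is not bound yet", name)
			continue
		}
		p, err := getPV(c.VolumeName)
		if err != nil || p.CSIDriver == "" {
			continue // missing PV or not a CSI volume: not counted
		}
		counts[p.CSIDriver]++
	}
	return counts
}
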
diff --git a/pkg/scheduler/algorithm/predicates/metadata.go b/pkg/scheduler/algorithm/predicates/metadata.go
index 3feb23e4937b0..9284cda23818f 100644
--- a/pkg/scheduler/algorithm/predicates/metadata.go
+++ b/pkg/scheduler/algorithm/predicates/metadata.go
@@ -21,7 +21,7 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -140,7 +140,7 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf
 	// incomingPodAntiAffinityMap will be used later for efficient check on incoming pod's anti-affinity
 	incomingPodAffinityMap, incomingPodAntiAffinityMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(pod, nodeNameToInfoMap)
 	if err != nil {
-		glog.Errorf("[predicate meta data generation] error finding pods that match affinity terms: %v", err)
+		klog.Errorf("[predicate meta data generation] error finding pods that match affinity terms: %v", err)
 		return nil
 	}
 	predicateMetadata := &predicateMetadata{
@@ -153,7 +153,7 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf
 		topologyPairsAntiAffinityPodsMap: existingPodAntiAffinityMap,
 	}
 	for predicateName, precomputeFunc := range predicateMetadataProducers {
-		glog.V(10).Infof("Precompute: %v", predicateName)
+		klog.V(10).Infof("Precompute: %v", predicateName)
 		precomputeFunc(predicateMetadata)
 	}
 	return predicateMetadata
@@ -502,7 +502,7 @@ func targetPodMatchesAffinityOfPod(pod, targetPod *v1.Pod) bool {
 	}
 	affinityProperties, err := getAffinityTermProperties(pod, GetPodAffinityTerms(affinity.PodAffinity))
 	if err != nil {
-		glog.Errorf("error in getting affinity properties of Pod %v", pod.Name)
+		klog.Errorf("error in getting affinity properties of Pod %v", pod.Name)
 		return false
 	}
 	return podMatchesAllAffinityTermProperties(targetPod, affinityProperties)
@@ -519,7 +519,7 @@ func targetPodMatchesAntiAffinityOfPod(pod, targetPod *v1.Pod) bool {
 	}
 	properties, err := getAffinityTermProperties(pod, GetPodAntiAffinityTerms(affinity.PodAntiAffinity))
 	if err != nil {
-		glog.Errorf("error in getting anti-affinity properties of Pod %v", pod.Name)
+		klog.Errorf("error in getting anti-affinity properties of Pod %v", pod.Name)
 		return false
 	}
 	return podMatchesAnyAffinityTermProperties(targetPod, properties)
diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go
index 03df6d57519bc..7594c76405d6c 100644
--- a/pkg/scheduler/algorithm/predicates/predicates.go
+++ b/pkg/scheduler/algorithm/predicates/predicates.go
@@ -23,7 +23,7 @@ import (
 	"regexp"
 	"strconv"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
@@ -329,7 +329,7 @@ func NewMaxPDVolumeCountPredicate(
 		filter = AzureDiskVolumeFilter
 		volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey)
 	default:
-		glog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType,
+		klog.Fatalf("Wrong filterName, Only Support %v %v %v ", EBSVolumeFilterType,
 			GCEPDVolumeFilterType, AzureDiskVolumeFilterType)
 		return nil
@@ -383,9 +383,9 @@ func getMaxEBSVolume(nodeInstanceType string) int {
 func getMaxVolLimitFromEnv() int {
 	if rawMaxVols := os.Getenv(KubeMaxPDVols); rawMaxVols != "" {
 		if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil {
-			glog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err)
+			klog.Errorf("Unable to parse maximum PD volumes value, using default: %v", err)
 		} else if parsedMaxVols <= 0 {
-			glog.Errorf("Maximum PD volumes must be a positive value, using default ")
+			klog.Errorf("Maximum PD volumes must be a positive value, using default ")
 		} else {
 			return parsedMaxVols
 		}
@@ -413,7 +413,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
 			pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)
 			if err != nil || pvc == nil {
 				// if the PVC is not found, log the error and count the PV towards the PV limit
-				glog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err)
+				klog.V(4).Infof("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err)
 				filteredVolumes[pvID] = true
 				continue
 			}
@@ -424,7 +424,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
 				// it was forcefully unbound by admin. The pod can still use the
 				// original PV where it was bound to -> log the error and count
 				// the PV towards the PV limit
-				glog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
+				klog.V(4).Infof("PVC %s/%s is not bound, assuming PVC matches predicate when counting limits", namespace, pvcName)
 				filteredVolumes[pvID] = true
 				continue
 			}
@@ -433,7 +433,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace s
 			if err != nil || pv == nil {
 				// if the PV is not found, log the error
 				// and count the PV towards the PV limit
-				glog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
+				klog.V(4).Infof("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err)
 				filteredVolumes[pvID] = true
 				continue
 			}
@@ -665,12 +665,12 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMetad
 				nodeV, _ := nodeConstraints[k]
 				volumeVSet, err := volumeutil.LabelZonesToSet(v)
 				if err != nil {
-					glog.Warningf("Failed to parse label for %q: %q. Ignoring the label. err=%v. ", k, v, err)
+					klog.Warningf("Failed to parse label for %q: %q. Ignoring the label. err=%v. ", k, v, err)
 					continue
 				}
 
 				if !volumeVSet.Has(nodeV) {
-					glog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
+					klog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
 					return false, []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}, nil
 				}
 			}
@@ -781,11 +781,11 @@ func PodFitsResources(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s
 		}
 	}
 
-	if glog.V(10) {
+	if klog.V(10) {
 		if len(predicateFails) == 0 {
-			// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
+			// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
 			// not logged. There is visible performance gain from it.
-			glog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.",
+			klog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.",
 				podName(pod), node.Name, len(nodeInfo.Pods()), allowedPodNumber)
 		}
 	}
@@ -834,14 +834,14 @@ func podMatchesNodeSelectorAndAffinityTerms(pod *v1.Pod, node *v1.Node) bool {
 		// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
 		// if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
 		// 	nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution.NodeSelectorTerms
-		// 	glog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms)
+		// 	klog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms)
 		// 	nodeAffinityMatches = nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
 		// }
 
 		// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
 		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
 			nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
-			glog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
+			klog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
 			nodeAffinityMatches = nodeAffinityMatches && nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
 		}
 
@@ -933,7 +933,7 @@ type ServiceAffinity struct {
 // only should be referenced by NewServiceAffinityPredicate.
 func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata) {
 	if pm.pod == nil {
-		glog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.")
+		klog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.")
 		return
 	}
 	pm.serviceAffinityInUse = true
@@ -945,7 +945,7 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)
 	// In the future maybe we will return them as part of the function.
 	if errSvc != nil || errList != nil {
-		glog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList)
+		klog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList)
 	}
 	// consider only the pods that belong to the same namespace
 	pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace)
@@ -1172,10 +1172,10 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm
 		return false, failedPredicates, error
 	}
 
-	if glog.V(10) {
-		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
+	if klog.V(10) {
+		// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
 		// not logged. There is visible performance gain from it.
-		glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod (anti)affinity constraints satisfied",
+		klog.Infof("Schedule Pod %+v on Node %+v is allowed, pod (anti)affinity constraints satisfied",
 			podName(pod), node.Name)
 	}
 	return true, nil, nil
@@ -1274,7 +1274,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.
 		existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
 		if err != nil {
 			if apierrors.IsNotFound(err) {
-				glog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
+				klog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
 				continue
 			}
 			return nil, err
@@ -1304,12 +1304,12 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta
 		filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything())
 		if err != nil {
 			errMessage := fmt.Sprintf("Failed to get all pods, %+v", err)
-			glog.Error(errMessage)
+			klog.Error(errMessage)
 			return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
 		}
 		if topologyMaps, err = c.getMatchingAntiAffinityTopologyPairsOfPods(pod, filteredPods); err != nil {
 			errMessage := fmt.Sprintf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err)
-			glog.Error(errMessage)
+			klog.Error(errMessage)
 			return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
 		}
 	}
@@ -1318,14 +1318,14 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta
 	// the scheduled pod anti-affinity terms
 	for topologyKey, topologyValue := range node.Labels {
 		if topologyMaps.topologyPairToPods[topologyPair{key: topologyKey, value: topologyValue}] != nil {
-			glog.V(10).Infof("Cannot schedule pod %+v onto node %v", podName(pod), node.Name)
+			klog.V(10).Infof("Cannot schedule pod %+v onto node %v", podName(pod), node.Name)
 			return ErrExistingPodsAntiAffinityRulesNotMatch, nil
 		}
 	}
-	if glog.V(10) {
-		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
+	if klog.V(10) {
+		// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
 		// not logged. There is visible performance gain from it.
-		glog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity terms satisfied.",
+		klog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity terms satisfied.",
 			podName(pod), node.Name)
 	}
 	return nil, nil
@@ -1382,7 +1382,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
 			// in the cluster matches the namespace and selector of this pod and the pod matches
 			// its own terms, then we allow the pod to pass the affinity check.
 			if !(len(topologyPairsPotentialAffinityPods.topologyPairToPods) == 0 && targetPodMatchesAffinityOfPod(pod, pod)) {
-				glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
+				klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
 					podName(pod), node.Name)
 				return ErrPodAffinityRulesNotMatch, nil
 			}
@@ -1394,7 +1394,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
 		if antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity); len(antiAffinityTerms) > 0 {
 			matchExists := c.nodeMatchesAnyTopologyTerm(pod, topologyPairsPotentialAntiAffinityPods, nodeInfo, antiAffinityTerms)
 			if matchExists {
-				glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity",
+				klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity",
 					podName(pod), node.Name)
 				return ErrPodAntiAffinityRulesNotMatch, nil
 			}
@@ -1414,7 +1414,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
 				affTermsMatch, termsSelectorMatch, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, affinityTerms)
 				if err != nil {
 					errMessage := fmt.Sprintf("Cannot schedule pod %+v onto node %v, because of PodAffinity, err: %v", podName(pod), node.Name, err)
-					glog.Error(errMessage)
+					klog.Error(errMessage)
 					return ErrPodAffinityRulesNotMatch, errors.New(errMessage)
 				}
 				if termsSelectorMatch {
@@ -1429,7 +1429,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
 				if len(antiAffinityTerms) > 0 {
 					antiAffTermsMatch, _, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, antiAffinityTerms)
 					if err != nil || antiAffTermsMatch {
-						glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm, err: %v",
+						klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinityTerm, err: %v",
 							podName(pod), node.Name, err)
 						return ErrPodAntiAffinityRulesNotMatch, nil
 					}
@@ -1443,23 +1443,23 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
 			// in the cluster matches the namespace and selector of this pod and the pod matches
 			// its own terms, then we allow the pod to pass the affinity check.
 			if termsSelectorMatchFound {
-				glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
+				klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
 					podName(pod), node.Name)
 				return ErrPodAffinityRulesNotMatch, nil
 			}
 			// Check if pod matches its own affinity properties (namespace and label selector).
 			if !targetPodMatchesAffinityOfPod(pod, pod) {
-				glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
+				klog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAffinity",
 					podName(pod), node.Name)
 				return ErrPodAffinityRulesNotMatch, nil
 			}
 		}
 	}
 
-	if glog.V(10) {
-		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
+	if klog.V(10) {
+		// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
 		// not logged. There is visible performance gain from it.
-		glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod affinity/anti-affinity constraints satisfied.",
+		klog.Infof("Schedule Pod %+v on Node %+v is allowed, pod affinity/anti-affinity constraints satisfied.",
 			podName(pod), node.Name)
 	}
 	return nil, nil
@@ -1634,12 +1634,12 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
 	failReasons := []algorithm.PredicateFailureReason{}
 	if !boundSatisfied {
-		glog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
+		klog.V(5).Infof("Bound PVs not satisfied for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
 		failReasons = append(failReasons, ErrVolumeNodeConflict)
 	}
 
 	if !unboundSatisfied {
-		glog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
+		klog.V(5).Infof("Couldn't find matching PVs for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
 		failReasons = append(failReasons, ErrVolumeBindConflict)
 	}
 
@@ -1648,6 +1648,6 @@ func (c *VolumeBindingChecker) predicate(pod *v1.Pod, meta algorithm.PredicateMe
 	}
 
 	// All volumes bound or matching PVs found for all unbound PVCs
-	glog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
+	klog.V(5).Infof("All PVCs found matches for pod %v/%v, node %q", pod.Namespace, pod.Name, node.Name)
 	return true, nil, nil
 }
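
The comment repeated through these hunks ("We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is not logged") is worth unpacking: klog.V(10) returns a boolean-like Verbose value, so it can guard a block, and the guard keeps expensive format arguments from being computed when verbosity is below 10. A side-by-side sketch, where expensive stands in for any costly string construction:

package logsketch

import "k8s.io/klog"

func logPlacement(podName, nodeName string, expensive func() string) {
	// Unguarded: expensive() runs on every call, even when -v < 10 and
	// the message is ultimately discarded.
	klog.V(10).Infof("placement detail: %s", expensive())

	// Guarded, as in the hunks above: the cheap verbosity check runs
	// first, so the arguments are only built when they will be logged.
	if klog.V(10) {
		klog.Infof("pod %s -> node %s: %s", podName, nodeName, expensive())
	}
}
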
diff --git a/pkg/scheduler/algorithm/priorities/BUILD b/pkg/scheduler/algorithm/priorities/BUILD
index 2dfe92abdcee9..1eb3d94a69200 100644
--- a/pkg/scheduler/algorithm/priorities/BUILD
+++ b/pkg/scheduler/algorithm/priorities/BUILD
@@ -44,7 +44,7 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/pkg/scheduler/algorithm/priorities/interpod_affinity.go
index 7e640566c82a6..32cf27c83bfcc 100644
--- a/pkg/scheduler/algorithm/priorities/interpod_affinity.go
+++ b/pkg/scheduler/algorithm/priorities/interpod_affinity.go
@@ -30,7 +30,7 @@ import (
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 // InterPodAffinity contains information to calculate inter pod affinity.
@@ -137,7 +137,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 		existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName)
 		if err != nil {
 			if apierrors.IsNotFound(err) {
-				glog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
+				klog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
 				return nil
 			}
 			return err
@@ -233,8 +233,8 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 			fScore = float64(schedulerapi.MaxPriority) * ((pm.counts[node.Name] - minCount) / (maxCount - minCount))
 		}
 		result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
-		if glog.V(10) {
-			glog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
+		if klog.V(10) {
+			klog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
 		}
 	}
 	return result, nil
diff --git a/pkg/scheduler/algorithm/priorities/resource_allocation.go b/pkg/scheduler/algorithm/priorities/resource_allocation.go
index 8d709dcb44e1b..027eabae5ff99 100644
--- a/pkg/scheduler/algorithm/priorities/resource_allocation.go
+++ b/pkg/scheduler/algorithm/priorities/resource_allocation.go
@@ -19,9 +19,9 @@ package priorities
 import (
 	"fmt"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/features"
 	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
@@ -64,9 +64,9 @@ func (r *ResourceAllocationPriority) PriorityMap(
 		score = r.scorer(&requested, &allocatable, false, 0, 0)
 	}
 
-	if glog.V(10) {
+	if klog.V(10) {
 		if len(pod.Spec.Volumes) >= 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil {
-			glog.Infof(
+			klog.Infof(
 				"%v -> %v: %v, capacity %d millicores %d memory bytes, %d volumes, total request %d millicores %d memory bytes %d volumes, score %d",
 				pod.Name, node.Name, r.Name,
 				allocatable.MilliCPU, allocatable.Memory, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount,
@@ -75,7 +75,7 @@ func (r *ResourceAllocationPriority) PriorityMap(
 				score,
 			)
 		} else {
-			glog.Infof(
+			klog.Infof(
 				"%v -> %v: %v, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
 				pod.Name, node.Name, r.Name,
 				allocatable.MilliCPU, allocatable.Memory,
diff --git a/pkg/scheduler/algorithm/priorities/resource_limits.go b/pkg/scheduler/algorithm/priorities/resource_limits.go
index 816b423f588c2..82b803cbf7215 100644
--- a/pkg/scheduler/algorithm/priorities/resource_limits.go
+++ b/pkg/scheduler/algorithm/priorities/resource_limits.go
@@ -23,7 +23,7 @@ import (
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 // ResourceLimitsPriorityMap is a priority function that increases score of input node by 1 if the node satisfies
@@ -52,10 +52,10 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
 		score = 1
 	}
 
-	if glog.V(10) {
-		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
+	if klog.V(10) {
+		// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
 		// not logged. There is visible performance gain from it.
-		glog.Infof(
+		klog.Infof(
 			"%v -> %v: Resource Limits Priority, allocatable %d millicores %d memory bytes, pod limits %d millicores %d memory bytes, score %d",
 			pod.Name, node.Name,
 			allocatableResources.MilliCPU, allocatableResources.Memory,
diff --git a/pkg/scheduler/algorithm/priorities/selector_spreading.go b/pkg/scheduler/algorithm/priorities/selector_spreading.go
index 52bb980441270..1371d765a53ab 100644
--- a/pkg/scheduler/algorithm/priorities/selector_spreading.go
+++ b/pkg/scheduler/algorithm/priorities/selector_spreading.go
@@ -26,7 +26,7 @@ import (
 	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
 	utilnode "k8s.io/kubernetes/pkg/util/node"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 // When zone information is present, give 2/3 of the weighting to zone spreading, 1/3 to node spreading
@@ -94,7 +94,7 @@ func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{
 		// Ignore the previous deleted version for spreading purposes
 		// (it can still be considered for resource restrictions etc.)
 		if nodePod.DeletionTimestamp != nil {
-			glog.V(4).Infof("skipping pending-deleted pod: %s/%s", nodePod.Namespace, nodePod.Name)
+			klog.V(4).Infof("skipping pending-deleted pod: %s/%s", nodePod.Namespace, nodePod.Name)
 			continue
 		}
 		for _, selector := range selectors {
@@ -160,8 +160,8 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 			}
 		}
 		result[i].Score = int(fScore)
-		if glog.V(10) {
-			glog.Infof(
+		if klog.V(10) {
+			klog.Infof(
 				"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore),
 			)
 		}
diff --git a/pkg/scheduler/algorithmprovider/defaults/BUILD b/pkg/scheduler/algorithmprovider/defaults/BUILD
index 7e624f3737f5a..ae77ea8609c41 100644
--- a/pkg/scheduler/algorithmprovider/defaults/BUILD
+++ b/pkg/scheduler/algorithmprovider/defaults/BUILD
@@ -19,7 +19,7 @@ go_library(
        "//pkg/scheduler/factory:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/scheduler/algorithmprovider/defaults/defaults.go b/pkg/scheduler/algorithmprovider/defaults/defaults.go
index 6976e659d5a92..74467fe17a536 100644
--- a/pkg/scheduler/algorithmprovider/defaults/defaults.go
+++ b/pkg/scheduler/algorithmprovider/defaults/defaults.go
@@ -17,7 +17,7 @@ limitations under the License.
 package defaults
 
 import (
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -207,12 +207,12 @@ func ApplyFeatureGates() {
 		factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.PodToleratesNodeTaintsPred)
 		factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.CheckNodeUnschedulablePred)
 
-		glog.Infof("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
+		klog.Infof("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
 	}
 
 	// Prioritizes nodes that satisfy pod's resource limits
 	if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
-		glog.Infof("Registering resourcelimits priority function")
+		klog.Infof("Registering resourcelimits priority function")
 		factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1)
 		// Register the priority function to specific provider too.
 		factory.InsertPriorityKeyToAlgorithmProviderMap(factory.RegisterPriorityFunction2("ResourceLimitsPriority", priorities.ResourceLimitsPriorityMap, nil, 1))
diff --git a/pkg/scheduler/cache/BUILD b/pkg/scheduler/cache/BUILD
index 861cfbdbbe02c..ba0ac4da9a7bf 100644
--- a/pkg/scheduler/cache/BUILD
+++ b/pkg/scheduler/cache/BUILD
@@ -15,7 +15,7 @@ go_library(
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/scheduler/cache/node_info.go b/pkg/scheduler/cache/node_info.go
index 1af4f647b1f85..8b623c72ca3ae 100644
--- a/pkg/scheduler/cache/node_info.go
+++ b/pkg/scheduler/cache/node_info.go
@@ -22,7 +22,7 @@ import (
 	"sync"
 	"sync/atomic"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -529,7 +529,7 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
 	for i := range n.podsWithAffinity {
 		k2, err := GetPodKey(n.podsWithAffinity[i])
 		if err != nil {
-			glog.Errorf("Cannot get pod key, err: %v", err)
+			klog.Errorf("Cannot get pod key, err: %v", err)
 			continue
 		}
 		if k1 == k2 {
@@ -542,7 +542,7 @@ func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
 	for i := range n.pods {
 		k2, err := GetPodKey(n.pods[i])
 		if err != nil {
-			glog.Errorf("Cannot get pod key, err: %v", err)
+			klog.Errorf("Cannot get pod key, err: %v", err)
 			continue
 		}
 		if k1 == k2 {
diff --git a/pkg/scheduler/core/BUILD b/pkg/scheduler/core/BUILD
index 1415c43027401..7df943d3f7e31 100644
--- a/pkg/scheduler/core/BUILD
+++ b/pkg/scheduler/core/BUILD
@@ -30,7 +30,7 @@ go_library(
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/scheduler/core/equivalence/BUILD b/pkg/scheduler/core/equivalence/BUILD
index b4595519b80c6..a6ad12ff724b3 100644
--- a/pkg/scheduler/core/equivalence/BUILD
+++ b/pkg/scheduler/core/equivalence/BUILD
@@ -15,7 +15,7 @@ go_library(
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/scheduler/core/equivalence/eqivalence.go b/pkg/scheduler/core/equivalence/eqivalence.go
index db94c738580fa..d776981999bd4 100644
--- a/pkg/scheduler/core/equivalence/eqivalence.go
+++ b/pkg/scheduler/core/equivalence/eqivalence.go
@@ -23,10 +23,10 @@ import (
 	"hash/fnv"
 	"sync"
 
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
@@ -143,7 +143,7 @@ func (c *Cache) predicateKeysToIDs(predicateKeys sets.String) []int {
 		if id, ok := c.predicateIDMap[predicateKey]; ok {
 			predicateIDs = append(predicateIDs, id)
 		} else {
-			glog.Errorf("predicate key %q not found", predicateKey)
+			klog.Errorf("predicate key %q not found", predicateKey)
 		}
 	}
 	return predicateIDs
@@ -160,7 +160,7 @@ func (c *Cache) InvalidatePredicates(predicateKeys sets.String) {
 	for _, n := range c.nodeToCache {
 		n.invalidatePreds(predicateIDs)
 	}
-	glog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys)
+	klog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys)
 }
@@ -175,7 +175,7 @@ func (c *Cache) InvalidatePredicatesOnNode(nodeName string, predicateKeys sets.S
 	if n, ok := c.nodeToCache[nodeName]; ok {
 		n.invalidatePreds(predicateIDs)
 	}
-	glog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys)
+	klog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys)
 }
 
 // InvalidateAllPredicatesOnNode clears all cached results for one node.
@@ -185,7 +185,7 @@ func (c *Cache) InvalidateAllPredicatesOnNode(nodeName string) {
 	if node, ok := c.nodeToCache[nodeName]; ok {
 		node.invalidate()
 	}
-	glog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName)
+	klog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName)
 }
 
 // InvalidateCachedPredicateItemForPodAdd is a wrapper of
@@ -344,7 +344,7 @@ func (n *NodeCache) updateResult(
 	}
 	n.predicateGenerations[predicateID]++
 
-	glog.V(5).Infof("Cache update: node=%s, predicate=%s,pod=%s,value=%v",
+	klog.V(5).Infof("Cache update: node=%s, predicate=%s,pod=%s,value=%v",
 		nodeInfo.Node().Name, predicateKey, podName, predicateItem)
 }
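
The equivalence-cache hunks above all log invalidations at V(5); the underlying operations are keyed deletions under a lock. A trimmed-down model of that shape (the names and the flat map layout are illustrative; the real cache stores per-node predicate results by integer ID):

package eqsketch

import "sync"

type resultCache struct {
	mu    sync.Mutex
	cache map[string]map[string]bool // node -> predicate -> cached result
}

// invalidatePredicatesOnNode mirrors InvalidatePredicatesOnNode above:
// drop only the named predicates' results for one node.
func (c *resultCache) invalidatePredicatesOnNode(node string, predicates ...string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if preds, ok := c.cache[node]; ok {
		for _, p := range predicates {
			delete(preds, p)
		}
	}
}

// invalidateAllPredicatesOnNode mirrors InvalidateAllPredicatesOnNode
// above: drop everything cached for the node in one shot.
func (c *resultCache) invalidateAllPredicatesOnNode(node string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.cache, node)
}
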
return nil, nil, []*v1.Pod{pod}, nil } @@ -321,7 +321,7 @@ func (g *genericScheduler) processPreemptionWithExtenders( ) if err != nil { if extender.IsIgnorable() { - glog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set", + klog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set", extender, err) continue } @@ -468,7 +468,7 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v filteredList, failedMap, err := extender.Filter(pod, filtered, g.cachedNodeInfoMap) if err != nil { if extender.IsIgnorable() { - glog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set", + klog.Warningf("Skipping extender %v as it returned error %v and has ignorable flag set", extender, err) continue } else { @@ -599,7 +599,7 @@ func podFitsOnNode( failedPredicates = append(failedPredicates, reasons...) // if alwaysCheckAllPredicates is false, short circuit all predicates when one predicate fails. if !alwaysCheckAllPredicates { - glog.V(5).Infoln("since alwaysCheckAllPredicates has not been set, the predicate " + + klog.V(5).Infoln("since alwaysCheckAllPredicates has not been set, the predicate " + "evaluation is short circuited and there are chances " + "of other predicates failing as well.") break @@ -695,9 +695,9 @@ func PrioritizeNodes( if err := config.Reduce(pod, meta, nodeNameToInfo, results[index]); err != nil { appendError(err) } - if glog.V(10) { + if klog.V(10) { for _, hostPriority := range results[index] { - glog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, config.Name, hostPriority.Score) + klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), hostPriority.Host, config.Name, hostPriority.Score) } } }(i, priorityConfig) @@ -735,8 +735,8 @@ func PrioritizeNodes( mu.Lock() for i := range *prioritizedList { host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score - if glog.V(10) { - glog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, ext.Name(), score) + if klog.V(10) { + klog.Infof("%v -> %v: %v, Score: (%d)", util.GetPodFullName(pod), host, ext.Name(), score) } combinedScores[host] += score * weight } @@ -750,9 +750,9 @@ func PrioritizeNodes( } } - if glog.V(10) { + if klog.V(10) { for i := range result { - glog.Infof("Host %s => Score %d", result[i].Host, result[i].Score) + klog.Infof("Host %s => Score %d", result[i].Host, result[i].Score) } } return result, nil @@ -881,7 +881,7 @@ func pickOneNodeForPreemption(nodesToVictims map[*v1.Node]*schedulerapi.Victims) if lenNodes2 > 0 { return minNodes2[0] } - glog.Errorf("Error in logic of node scoring for preemption. We should never reach here!") + klog.Errorf("Error in logic of node scoring for preemption. We should never reach here!") return nil } @@ -1016,7 +1016,7 @@ func selectVictimsOnNode( // TODO(bsalamat): Consider checking affinity to lower priority pods if feasible with reasonable performance. 
if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue, false, nil); !fits { if err != nil { - glog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err) + klog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err) } return nil, 0, false } @@ -1032,7 +1032,7 @@ func selectVictimsOnNode( if !fits { removePod(p) victims = append(victims, p) - glog.V(5).Infof("Pod %v is a potential preemption victim on node %v.", p.Name, nodeInfo.Node().Name) + klog.V(5).Infof("Pod %v is a potential preemption victim on node %v.", p.Name, nodeInfo.Node().Name) } return fits } @@ -1087,7 +1087,7 @@ func nodesWherePreemptionMightHelp(nodes []*v1.Node, failedPredicatesMap FailedP } } if !found || !unresolvableReasonExist { - glog.V(3).Infof("Node %v is a potential node for preemption.", node.Name) + klog.V(3).Infof("Node %v is a potential node for preemption.", node.Name) potentialNodes = append(potentialNodes, node) } } diff --git a/pkg/scheduler/factory/BUILD b/pkg/scheduler/factory/BUILD index c3e97c022c591..7be893b060674 100644 --- a/pkg/scheduler/factory/BUILD +++ b/pkg/scheduler/factory/BUILD @@ -50,7 +50,7 @@ go_library( "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go index 078ab6eb5dbde..eaa9dee3f6bd2 100644 --- a/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -25,7 +25,7 @@ import ( "reflect" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -472,7 +472,7 @@ func (c *configFactory) skipPodUpdate(pod *v1.Pod) bool { if !reflect.DeepEqual(assumedPodCopy, podCopy) { return false } - glog.V(3).Infof("Skipping pod %s/%s update", pod.Namespace, pod.Name) + klog.V(3).Infof("Skipping pod %s/%s update", pod.Namespace, pod.Name) return true } @@ -480,7 +480,7 @@ func (c *configFactory) onPvAdd(obj interface{}) { if c.enableEquivalenceClassCache { pv, ok := obj.(*v1.PersistentVolume) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolume: %v", obj) + klog.Errorf("cannot convert to *v1.PersistentVolume: %v", obj) return } c.invalidatePredicatesForPv(pv) @@ -498,12 +498,12 @@ func (c *configFactory) onPvUpdate(old, new interface{}) { if c.enableEquivalenceClassCache { newPV, ok := new.(*v1.PersistentVolume) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolume: %v", new) + klog.Errorf("cannot convert to *v1.PersistentVolume: %v", new) return } oldPV, ok := old.(*v1.PersistentVolume) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolume: %v", old) + klog.Errorf("cannot convert to *v1.PersistentVolume: %v", old) return } c.invalidatePredicatesForPvUpdate(oldPV, newPV) @@ -549,11 +549,11 @@ func (c *configFactory) onPvDelete(obj interface{}) { var ok bool pv, ok = t.Obj.(*v1.PersistentVolume) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolume: %v", t.Obj) + klog.Errorf("cannot convert to *v1.PersistentVolume: %v", t.Obj) return } default: - glog.Errorf("cannot convert to *v1.PersistentVolume: %v", t) + klog.Errorf("cannot convert to *v1.PersistentVolume: %v", t) return } c.invalidatePredicatesForPv(pv) @@ -600,7 +600,7 @@ func (c *configFactory) 
onPvcAdd(obj interface{}) { if c.enableEquivalenceClassCache { pvc, ok := obj.(*v1.PersistentVolumeClaim) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", obj) + klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", obj) return } c.invalidatePredicatesForPvc(pvc) @@ -616,12 +616,12 @@ func (c *configFactory) onPvcUpdate(old, new interface{}) { if c.enableEquivalenceClassCache { newPVC, ok := new.(*v1.PersistentVolumeClaim) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", new) + klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", new) return } oldPVC, ok := old.(*v1.PersistentVolumeClaim) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", old) + klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", old) return } c.invalidatePredicatesForPvcUpdate(oldPVC, newPVC) @@ -639,11 +639,11 @@ func (c *configFactory) onPvcDelete(obj interface{}) { var ok bool pvc, ok = t.Obj.(*v1.PersistentVolumeClaim) if !ok { - glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", t.Obj) + klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", t.Obj) return } default: - glog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", t) + klog.Errorf("cannot convert to *v1.PersistentVolumeClaim: %v", t) return } c.invalidatePredicatesForPvc(pvc) @@ -692,7 +692,7 @@ func (c *configFactory) invalidatePredicatesForPvcUpdate(old, new *v1.Persistent func (c *configFactory) onStorageClassAdd(obj interface{}) { sc, ok := obj.(*storagev1.StorageClass) if !ok { - glog.Errorf("cannot convert to *storagev1.StorageClass: %v", obj) + klog.Errorf("cannot convert to *storagev1.StorageClass: %v", obj) return } @@ -717,11 +717,11 @@ func (c *configFactory) onStorageClassDelete(obj interface{}) { var ok bool sc, ok = t.Obj.(*storagev1.StorageClass) if !ok { - glog.Errorf("cannot convert to *storagev1.StorageClass: %v", t.Obj) + klog.Errorf("cannot convert to *storagev1.StorageClass: %v", t.Obj) return } default: - glog.Errorf("cannot convert to *storagev1.StorageClass: %v", t) + klog.Errorf("cannot convert to *storagev1.StorageClass: %v", t) return } c.invalidatePredicatesForStorageClass(sc) @@ -794,12 +794,12 @@ func (c *configFactory) GetScheduledPodLister() corelisters.PodLister { func (c *configFactory) addPodToCache(obj interface{}) { pod, ok := obj.(*v1.Pod) if !ok { - glog.Errorf("cannot convert to *v1.Pod: %v", obj) + klog.Errorf("cannot convert to *v1.Pod: %v", obj) return } if err := c.schedulerCache.AddPod(pod); err != nil { - glog.Errorf("scheduler cache AddPod failed: %v", err) + klog.Errorf("scheduler cache AddPod failed: %v", err) } c.podQueue.AssignedPodAdded(pod) @@ -811,12 +811,12 @@ func (c *configFactory) addPodToCache(obj interface{}) { func (c *configFactory) updatePodInCache(oldObj, newObj interface{}) { oldPod, ok := oldObj.(*v1.Pod) if !ok { - glog.Errorf("cannot convert oldObj to *v1.Pod: %v", oldObj) + klog.Errorf("cannot convert oldObj to *v1.Pod: %v", oldObj) return } newPod, ok := newObj.(*v1.Pod) if !ok { - glog.Errorf("cannot convert newObj to *v1.Pod: %v", newObj) + klog.Errorf("cannot convert newObj to *v1.Pod: %v", newObj) return } @@ -826,7 +826,7 @@ func (c *configFactory) updatePodInCache(oldObj, newObj interface{}) { // snapshotted before updates are written, we would update equivalence // cache with stale information which is based on snapshot of old cache. 
if err := c.schedulerCache.UpdatePod(oldPod, newPod); err != nil { - glog.Errorf("scheduler cache UpdatePod failed: %v", err) + klog.Errorf("scheduler cache UpdatePod failed: %v", err) } c.invalidateCachedPredicatesOnUpdatePod(newPod, oldPod) @@ -904,11 +904,11 @@ func (c *configFactory) deletePodFromCache(obj interface{}) { var ok bool pod, ok = t.Obj.(*v1.Pod) if !ok { - glog.Errorf("cannot convert to *v1.Pod: %v", t.Obj) + klog.Errorf("cannot convert to *v1.Pod: %v", t.Obj) return } default: - glog.Errorf("cannot convert to *v1.Pod: %v", t) + klog.Errorf("cannot convert to *v1.Pod: %v", t) return } // NOTE: Updates must be written to scheduler cache before invalidating @@ -917,7 +917,7 @@ func (c *configFactory) deletePodFromCache(obj interface{}) { // snapshotted before updates are written, we would update equivalence // cache with stale information which is based on snapshot of old cache. if err := c.schedulerCache.RemovePod(pod); err != nil { - glog.Errorf("scheduler cache RemovePod failed: %v", err) + klog.Errorf("scheduler cache RemovePod failed: %v", err) } c.invalidateCachedPredicatesOnDeletePod(pod) @@ -948,7 +948,7 @@ func (c *configFactory) invalidateCachedPredicatesOnDeletePod(pod *v1.Pod) { func (c *configFactory) addNodeToCache(obj interface{}) { node, ok := obj.(*v1.Node) if !ok { - glog.Errorf("cannot convert to *v1.Node: %v", obj) + klog.Errorf("cannot convert to *v1.Node: %v", obj) return } @@ -960,7 +960,7 @@ func (c *configFactory) addNodeToCache(obj interface{}) { } if err := c.schedulerCache.AddNode(node); err != nil { - glog.Errorf("scheduler cache AddNode failed: %v", err) + klog.Errorf("scheduler cache AddNode failed: %v", err) } c.podQueue.MoveAllToActiveQueue() @@ -970,12 +970,12 @@ func (c *configFactory) addNodeToCache(obj interface{}) { func (c *configFactory) updateNodeInCache(oldObj, newObj interface{}) { oldNode, ok := oldObj.(*v1.Node) if !ok { - glog.Errorf("cannot convert oldObj to *v1.Node: %v", oldObj) + klog.Errorf("cannot convert oldObj to *v1.Node: %v", oldObj) return } newNode, ok := newObj.(*v1.Node) if !ok { - glog.Errorf("cannot convert newObj to *v1.Node: %v", newObj) + klog.Errorf("cannot convert newObj to *v1.Node: %v", newObj) return } @@ -985,7 +985,7 @@ func (c *configFactory) updateNodeInCache(oldObj, newObj interface{}) { // snapshotted before updates are written, we would update equivalence // cache with stale information which is based on snapshot of old cache. 
if err := c.schedulerCache.UpdateNode(oldNode, newNode); err != nil { - glog.Errorf("scheduler cache UpdateNode failed: %v", err) + klog.Errorf("scheduler cache UpdateNode failed: %v", err) } c.invalidateCachedPredicatesOnNodeUpdate(newNode, oldNode) @@ -1019,11 +1019,11 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node, oldTaints, oldErr := helper.GetTaintsFromNodeAnnotations(oldNode.GetAnnotations()) if oldErr != nil { - glog.Errorf("Failed to get taints from old node annotation for equivalence cache") + klog.Errorf("Failed to get taints from old node annotation for equivalence cache") } newTaints, newErr := helper.GetTaintsFromNodeAnnotations(newNode.GetAnnotations()) if newErr != nil { - glog.Errorf("Failed to get taints from new node annotation for equivalence cache") + klog.Errorf("Failed to get taints from new node annotation for equivalence cache") } if !reflect.DeepEqual(oldTaints, newTaints) || !reflect.DeepEqual(oldNode.Spec.Taints, newNode.Spec.Taints) { @@ -1070,11 +1070,11 @@ func (c *configFactory) deleteNodeFromCache(obj interface{}) { var ok bool node, ok = t.Obj.(*v1.Node) if !ok { - glog.Errorf("cannot convert to *v1.Node: %v", t.Obj) + klog.Errorf("cannot convert to *v1.Node: %v", t.Obj) return } default: - glog.Errorf("cannot convert to *v1.Node: %v", t) + klog.Errorf("cannot convert to *v1.Node: %v", t) return } // NOTE: Updates must be written to scheduler cache before invalidating @@ -1083,7 +1083,7 @@ func (c *configFactory) deleteNodeFromCache(obj interface{}) { // snapshotted before updates are written, we would update equivalence // cache with stale information which is based on snapshot of old cache. if err := c.schedulerCache.RemoveNode(node); err != nil { - glog.Errorf("scheduler cache RemoveNode failed: %v", err) + klog.Errorf("scheduler cache RemoveNode failed: %v", err) } if c.enableEquivalenceClassCache { c.equivalencePodCache.InvalidateAllPredicatesOnNode(node.GetName()) @@ -1097,7 +1097,7 @@ func (c *configFactory) Create() (*Config, error) { // Creates a scheduler from the name of a registered algorithm provider. 
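// [Editor's sketch, not part of the patch] invalidateCachedPredicatesOnNodeUpdate
// above only discards cached predicate results when fields a predicate actually
// reads have changed, e.g. by deep-comparing old and new taints. The same
// change-detection in isolation (Taint and the cache map are stand-ins, not the
// real equivalence-cache types):
package main

import (
	"fmt"
	"reflect"
)

type Taint struct{ Key, Value, Effect string }

func invalidateOnTaintChange(cache map[string]bool, oldTaints, newTaints []Taint) {
	if reflect.DeepEqual(oldTaints, newTaints) {
		return // nothing a taint-based predicate cares about has changed
	}
	delete(cache, "PodToleratesNodeTaints") // drop only the affected predicate
}

func main() {
	cache := map[string]bool{"PodToleratesNodeTaints": true, "PodFitsResources": true}
	invalidateOnTaintChange(cache,
		[]Taint{{Key: "dedicated", Value: "gpu", Effect: "NoSchedule"}},
		nil)
	fmt.Println(cache) // taint predicate dropped, resource predicate kept
}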
func (c *configFactory) CreateFromProvider(providerName string) (*Config, error) { - glog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName) + klog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName) provider, err := GetAlgorithmProvider(providerName) if err != nil { return nil, err @@ -1107,7 +1107,7 @@ func (c *configFactory) CreateFromProvider(providerName string) (*Config, error) // Creates a scheduler from the configuration file func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*Config, error) { - glog.V(2).Infof("Creating scheduler from configuration: %v", policy) + klog.V(2).Infof("Creating scheduler from configuration: %v", policy) // validate the policy configuration if err := validation.ValidatePolicy(policy); err != nil { @@ -1116,7 +1116,7 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*Config, e predicateKeys := sets.NewString() if policy.Predicates == nil { - glog.V(2).Infof("Using predicates from algorithm provider '%v'", DefaultProvider) + klog.V(2).Infof("Using predicates from algorithm provider '%v'", DefaultProvider) provider, err := GetAlgorithmProvider(DefaultProvider) if err != nil { return nil, err @@ -1124,14 +1124,14 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*Config, e predicateKeys = provider.FitPredicateKeys } else { for _, predicate := range policy.Predicates { - glog.V(2).Infof("Registering predicate: %s", predicate.Name) + klog.V(2).Infof("Registering predicate: %s", predicate.Name) predicateKeys.Insert(RegisterCustomFitPredicate(predicate)) } } priorityKeys := sets.NewString() if policy.Priorities == nil { - glog.V(2).Infof("Using priorities from algorithm provider '%v'", DefaultProvider) + klog.V(2).Infof("Using priorities from algorithm provider '%v'", DefaultProvider) provider, err := GetAlgorithmProvider(DefaultProvider) if err != nil { return nil, err @@ -1139,7 +1139,7 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*Config, e priorityKeys = provider.PriorityFunctionKeys } else { for _, priority := range policy.Priorities { - glog.V(2).Infof("Registering priority: %s", priority.Name) + klog.V(2).Infof("Registering priority: %s", priority.Name) priorityKeys.Insert(RegisterCustomPriorityFunction(priority)) } } @@ -1148,7 +1148,7 @@ func (c *configFactory) CreateFromConfig(policy schedulerapi.Policy) (*Config, e if len(policy.ExtenderConfigs) != 0 { ignoredExtendedResources := sets.NewString() for ii := range policy.ExtenderConfigs { - glog.V(2).Infof("Creating extender with config %+v", policy.ExtenderConfigs[ii]) + klog.V(2).Infof("Creating extender with config %+v", policy.ExtenderConfigs[ii]) extender, err := core.NewHTTPExtender(&policy.ExtenderConfigs[ii]) if err != nil { return nil, err @@ -1196,7 +1196,7 @@ func (c *configFactory) getBinderFunc(extenders []algorithm.SchedulerExtender) f // Creates a scheduler from a set of registered fit predicate keys and priority keys. 
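// [Editor's sketch, not part of the patch] CreateFromProvider and
// CreateFromConfig above resolve scheduler behavior through name-keyed
// registries: look the name up, reuse an existing registration when present,
// and treat an unknown name as a fatal configuration error. A toy registry
// showing the same lookup contract (FitPredicate and the map are illustrative
// stand-ins, not the real factory types):
package main

import "log"

type FitPredicate func(pod, node string) bool

var fitPredicateMap = map[string]FitPredicate{
	"PodFitsResources": func(pod, node string) bool { return true },
}

// registerCustomFitPredicate mirrors the reuse-or-die flow: a known name is
// reused, an unknown one is a fatal misconfiguration.
func registerCustomFitPredicate(name string) FitPredicate {
	if p, ok := fitPredicateMap[name]; ok {
		log.Printf("Predicate type %s already registered, reusing.", name)
		return p
	}
	log.Fatalf("Invalid configuration: Predicate type not found for %s", name)
	return nil // unreachable
}

func main() {
	p := registerCustomFitPredicate("PodFitsResources")
	log.Printf("fits: %v", p("pod-a", "node-1"))
}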
func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*Config, error) { - glog.V(2).Infof("Creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys) + klog.V(2).Infof("Creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys) if c.GetHardPodAffinitySymmetricWeight() < 1 || c.GetHardPodAffinitySymmetricWeight() > 100 { return nil, fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 1-100", c.GetHardPodAffinitySymmetricWeight()) @@ -1225,7 +1225,7 @@ func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, // Init equivalence class cache if c.enableEquivalenceClassCache { c.equivalencePodCache = equivalence.NewCache(predicates.Ordering()) - glog.Info("Created equivalence class cache") + klog.Info("Created equivalence class cache") } algo := core.NewGenericScheduler( @@ -1331,10 +1331,10 @@ func (c *configFactory) getPluginArgs() (*PluginFactoryArgs, error) { func (c *configFactory) getNextPod() *v1.Pod { pod, err := c.podQueue.Pop() if err == nil { - glog.V(4).Infof("About to try and schedule pod %v/%v", pod.Namespace, pod.Name) + klog.V(4).Infof("About to try and schedule pod %v/%v", pod.Namespace, pod.Name) return pod } - glog.Errorf("Error while retrieving next pod from scheduling queue: %v", err) + klog.Errorf("Error while retrieving next pod from scheduling queue: %v", err) return nil } @@ -1433,10 +1433,10 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) core func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue internalqueue.SchedulingQueue) func(pod *v1.Pod, err error) { return func(pod *v1.Pod, err error) { if err == core.ErrNoNodesAvailable { - glog.V(4).Infof("Unable to schedule %v/%v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name) + klog.V(4).Infof("Unable to schedule %v/%v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name) } else { if _, ok := err.(*core.FitError); ok { - glog.V(4).Infof("Unable to schedule %v/%v: no fit: %v; waiting", pod.Namespace, pod.Name, err) + klog.V(4).Infof("Unable to schedule %v/%v: no fit: %v; waiting", pod.Namespace, pod.Name, err) } else if errors.IsNotFound(err) { if errStatus, ok := err.(errors.APIStatus); ok && errStatus.Status().Details.Kind == "node" { nodeName := errStatus.Status().Details.Name @@ -1458,7 +1458,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue } } } else { - glog.Errorf("Error scheduling %v/%v: %v; retrying", pod.Namespace, pod.Name, err) + klog.Errorf("Error scheduling %v/%v: %v; retrying", pod.Namespace, pod.Name, err) } } @@ -1480,7 +1480,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue if !util.PodPriorityEnabled() { entry := backoff.GetEntry(podID) if !entry.TryWait(backoff.MaxDuration()) { - glog.Warningf("Request for pod %v already in flight, abandoning", podID) + klog.Warningf("Request for pod %v already in flight, abandoning", podID) return } } @@ -1500,7 +1500,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue break } if errors.IsNotFound(err) { - glog.Warningf("A pod %v no longer exists", podID) + klog.Warningf("A pod %v no longer exists", podID) if c.volumeBinder != nil { // Volume binder only wants to keep unassigned pods @@ -1508,7 +1508,7 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff 
*util.PodBackoff, podQueue } return } - glog.Errorf("Error getting pod %v for retry: %v; retrying...", podID, err) + klog.Errorf("Error getting pod %v for retry: %v; retrying...", podID, err) if getBackoff = getBackoff * 2; getBackoff > maximalGetBackoff { getBackoff = maximalGetBackoff } @@ -1542,7 +1542,7 @@ type binder struct { // Bind just does a POST binding RPC. func (b *binder) Bind(binding *v1.Binding) error { - glog.V(3).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name) + klog.V(3).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name) return b.Client.CoreV1().Pods(binding.Namespace).Bind(binding) } @@ -1551,7 +1551,7 @@ type podConditionUpdater struct { } func (p *podConditionUpdater) Update(pod *v1.Pod, condition *v1.PodCondition) error { - glog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status) + klog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status) if podutil.UpdatePodCondition(&pod.Status, condition) { _, err := p.Client.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) return err diff --git a/pkg/scheduler/factory/plugins.go b/pkg/scheduler/factory/plugins.go index a83cf78e5ec4e..216a93f7e60a7 100644 --- a/pkg/scheduler/factory/plugins.go +++ b/pkg/scheduler/factory/plugins.go @@ -30,7 +30,7 @@ import ( schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/pkg/scheduler/volumebinder" - "github.com/golang/glog" + "k8s.io/klog" ) // PluginFactoryArgs are passed to all plugin factory functions. @@ -233,12 +233,12 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string { } } else if predicateFactory, ok = fitPredicateMap[policy.Name]; ok { // checking to see if a pre-defined predicate is requested - glog.V(2).Infof("Predicate type %s already registered, reusing.", policy.Name) + klog.V(2).Infof("Predicate type %s already registered, reusing.", policy.Name) return policy.Name } if predicateFactory == nil { - glog.Fatalf("Invalid configuration: Predicate type not found for %s", policy.Name) + klog.Fatalf("Invalid configuration: Predicate type not found for %s", policy.Name) } return RegisterFitPredicateFactory(policy.Name, predicateFactory) @@ -345,7 +345,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string { } } } else if existingPcf, ok := priorityFunctionMap[policy.Name]; ok { - glog.V(2).Infof("Priority type %s already registered, reusing.", policy.Name) + klog.V(2).Infof("Priority type %s already registered, reusing.", policy.Name) // set/update the weight based on the policy pcf = &PriorityConfigFactory{ Function: existingPcf.Function, @@ -355,7 +355,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string { } if pcf == nil { - glog.Fatalf("Invalid configuration: Priority type not found for %s", policy.Name) + klog.Fatalf("Invalid configuration: Priority type not found for %s", policy.Name) } return RegisterPriorityConfigFactory(policy.Name, *pcf) @@ -369,7 +369,7 @@ func buildScoringFunctionShapeFromRequestedToCapacityRatioArguments(arguments *s } shape, err := priorities.NewFunctionShape(points) if err != nil { - glog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error()) + klog.Fatalf("invalid RequestedToCapacityRatioPriority arguments: %s", err.Error()) } return shape } @@ -500,7 +500,7 @@ var validName = regexp.MustCompile("^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])$") func 
validateAlgorithmNameOrDie(name string) { if !validName.MatchString(name) { - glog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName) + klog.Fatalf("Algorithm name %v does not match the name validation regexp \"%v\".", name, validName) } } @@ -514,7 +514,7 @@ func validatePredicateOrDie(predicate schedulerapi.PredicatePolicy) { numArgs++ } if numArgs != 1 { - glog.Fatalf("Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s", numArgs, predicate.Name) + klog.Fatalf("Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s", numArgs, predicate.Name) } } } @@ -532,7 +532,7 @@ func validatePriorityOrDie(priority schedulerapi.PriorityPolicy) { numArgs++ } if numArgs != 1 { - glog.Fatalf("Exactly 1 priority argument is required, numArgs: %v, Priority: %s", numArgs, priority.Name) + klog.Fatalf("Exactly 1 priority argument is required, numArgs: %v, Priority: %s", numArgs, priority.Name) } } } diff --git a/pkg/scheduler/internal/cache/BUILD b/pkg/scheduler/internal/cache/BUILD index 53f7681782cf4..6d7e3da8bcc5b 100644 --- a/pkg/scheduler/internal/cache/BUILD +++ b/pkg/scheduler/internal/cache/BUILD @@ -18,7 +18,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/scheduler/internal/cache/cache.go b/pkg/scheduler/internal/cache/cache.go index 16f7b25799df8..535236e5c1fb5 100644 --- a/pkg/scheduler/internal/cache/cache.go +++ b/pkg/scheduler/internal/cache/cache.go @@ -29,7 +29,7 @@ import ( "k8s.io/kubernetes/pkg/features" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -205,7 +205,7 @@ func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error { cache.mu.RLock() defer cache.mu.RUnlock() - glog.V(5).Infof("Finished binding for pod %v. Can be expired.", key) + klog.V(5).Infof("Finished binding for pod %v. Can be expired.", key) currState, ok := cache.podStates[key] if ok && cache.assumedPods[key] { dl := now.Add(cache.ttl) @@ -289,7 +289,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error { case ok && cache.assumedPods[key]: if currState.pod.Spec.NodeName != pod.Spec.NodeName { // The pod was added to a different node than it was assumed to. - glog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) + klog.Warningf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) // Clean this up. cache.removePod(currState.pod) cache.addPod(pod) @@ -325,8 +325,8 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { // before Update event, in which case the state would change from Assumed to Added. 
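// [Editor's sketch, not part of the patch] The MakeDefaultErrorFunc hunks above
// double the wait after every failed Get of the pod and clamp it at
// maximalGetBackoff. The same capped exponential backoff in isolation (fetch is
// a hypothetical stand-in for the API read being retried):
package main

import (
	"errors"
	"log"
	"time"
)

const (
	initialGetBackoff = 100 * time.Millisecond
	maximalGetBackoff = time.Minute
)

func fetch(attempt int) error {
	if attempt < 3 {
		return errors.New("transient error")
	}
	return nil
}

func main() {
	getBackoff := initialGetBackoff
	for attempt := 0; ; attempt++ {
		if err := fetch(attempt); err == nil {
			log.Printf("succeeded after %d retries", attempt)
			return
		}
		// Double the delay, but never sleep longer than the cap.
		if getBackoff = getBackoff * 2; getBackoff > maximalGetBackoff {
			getBackoff = maximalGetBackoff
		}
		time.Sleep(getBackoff)
	}
}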
case ok && !cache.assumedPods[key]: if currState.pod.Spec.NodeName != newPod.Spec.NodeName { - glog.Errorf("Pod %v updated on a different node than previously added to.", key) - glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions") + klog.Errorf("Pod %v updated on a different node than previously added to.", key) + klog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions") } if err := cache.updatePod(oldPod, newPod); err != nil { return err @@ -353,8 +353,8 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { // before Remove event, in which case the state would change from Assumed to Added. case ok && !cache.assumedPods[key]: if currState.pod.Spec.NodeName != pod.Spec.NodeName { - glog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) - glog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions") + klog.Errorf("Pod %v was assumed to be on %v but got added to %v", key, pod.Spec.NodeName, currState.pod.Spec.NodeName) + klog.Fatalf("Schedulercache is corrupted and can badly affect scheduling decisions") } err := cache.removePod(currState.pod) if err != nil { @@ -526,14 +526,14 @@ func (cache *schedulerCache) cleanupAssumedPods(now time.Time) { panic("Key found in assumed set but not in podStates. Potentially a logical error.") } if !ps.bindingFinished { - glog.V(3).Infof("Couldn't expire cache for pod %v/%v. Binding is still in progress.", + klog.V(3).Infof("Couldn't expire cache for pod %v/%v. Binding is still in progress.", ps.pod.Namespace, ps.pod.Name) continue } if now.After(*ps.deadline) { - glog.Warningf("Pod %s/%s expired", ps.pod.Namespace, ps.pod.Name) + klog.Warningf("Pod %s/%s expired", ps.pod.Namespace, ps.pod.Name) if err := cache.expirePod(key, ps); err != nil { - glog.Errorf("ExpirePod failed for %s: %v", key, err) + klog.Errorf("ExpirePod failed for %s: %v", key, err) } } } diff --git a/pkg/scheduler/internal/cache/debugger/BUILD b/pkg/scheduler/internal/cache/debugger/BUILD index 42c9c060614a9..320c9734fba3d 100644 --- a/pkg/scheduler/internal/cache/debugger/BUILD +++ b/pkg/scheduler/internal/cache/debugger/BUILD @@ -16,7 +16,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/scheduler/internal/cache/debugger/comparer.go b/pkg/scheduler/internal/cache/debugger/comparer.go index 00a1b0c3d618e..e78df11184a51 100644 --- a/pkg/scheduler/internal/cache/debugger/comparer.go +++ b/pkg/scheduler/internal/cache/debugger/comparer.go @@ -20,10 +20,10 @@ import ( "sort" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/klog" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" @@ -39,8 +39,8 @@ type CacheComparer struct { // Compare compares the nodes and pods of NodeLister with Cache.Snapshot. 
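// [Editor's sketch, not part of the patch] Every file in this patch makes the
// same mechanical swap shown in the BUILD and import hunks above: depend on
// k8s.io/klog instead of github.com/golang/glog and rename the call sites.
// Unlike glog, klog does not register its flags on the global flag set
// automatically, so callers wire them up explicitly. Minimal use:
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc. on flag.CommandLine
	flag.Parse()
	defer klog.Flush() // klog buffers; flush before exit

	klog.Info("started")
	klog.V(2).Infof("verbose detail: %d", 42)
	klog.Warningf("something odd: %v", "example")
}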
func (c *CacheComparer) Compare() error { - glog.V(3).Info("cache comparer started") - defer glog.V(3).Info("cache comparer finished") + klog.V(3).Info("cache comparer started") + defer klog.V(3).Info("cache comparer finished") nodes, err := c.NodeLister.List(labels.Everything()) if err != nil { @@ -57,11 +57,11 @@ func (c *CacheComparer) Compare() error { waitingPods := c.PodQueue.WaitingPods() if missed, redundant := c.CompareNodes(nodes, snapshot.Nodes); len(missed)+len(redundant) != 0 { - glog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant) + klog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant) } if missed, redundant := c.ComparePods(pods, waitingPods, snapshot.Nodes); len(missed)+len(redundant) != 0 { - glog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant) + klog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant) } return nil diff --git a/pkg/scheduler/internal/cache/debugger/dumper.go b/pkg/scheduler/internal/cache/debugger/dumper.go index 213468c789c8e..b9084d377d965 100644 --- a/pkg/scheduler/internal/cache/debugger/dumper.go +++ b/pkg/scheduler/internal/cache/debugger/dumper.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/scheduler/cache" @@ -44,9 +44,9 @@ func (d *CacheDumper) DumpAll() { // dumpNodes writes NodeInfo to the scheduler logs. func (d *CacheDumper) dumpNodes() { snapshot := d.cache.Snapshot() - glog.Info("Dump of cached NodeInfo") + klog.Info("Dump of cached NodeInfo") for _, nodeInfo := range snapshot.Nodes { - glog.Info(printNodeInfo(nodeInfo)) + klog.Info(printNodeInfo(nodeInfo)) } } @@ -57,7 +57,7 @@ func (d *CacheDumper) dumpSchedulingQueue() { for _, p := range waitingPods { podData.WriteString(printPod(p)) } - glog.Infof("Dump of scheduling queue:\n%s", podData.String()) + klog.Infof("Dump of scheduling queue:\n%s", podData.String()) } // printNodeInfo writes parts of NodeInfo to a string. diff --git a/pkg/scheduler/internal/cache/node_tree.go b/pkg/scheduler/internal/cache/node_tree.go index 8e8b4f0a6a1bc..80ce6d195fa8c 100644 --- a/pkg/scheduler/internal/cache/node_tree.go +++ b/pkg/scheduler/internal/cache/node_tree.go @@ -23,7 +23,7 @@ import ( "k8s.io/api/core/v1" utilnode "k8s.io/kubernetes/pkg/util/node" - "github.com/golang/glog" + "k8s.io/klog" ) // NodeTree is a tree-like data structure that holds node names in each zone. Zone names are @@ -46,7 +46,7 @@ type nodeArray struct { func (na *nodeArray) next() (nodeName string, exhausted bool) { if len(na.nodes) == 0 { - glog.Error("The nodeArray is empty. It should have been deleted from NodeTree.") + klog.Error("The nodeArray is empty. 
It should have been deleted from NodeTree.") return "", false } if na.lastIndex >= len(na.nodes) { @@ -81,7 +81,7 @@ func (nt *NodeTree) addNode(n *v1.Node) { if na, ok := nt.tree[zone]; ok { for _, nodeName := range na.nodes { if nodeName == n.Name { - glog.Warningf("node %v already exist in the NodeTree", n.Name) + klog.Warningf("node %v already exist in the NodeTree", n.Name) return } } @@ -90,7 +90,7 @@ func (nt *NodeTree) addNode(n *v1.Node) { nt.zones = append(nt.zones, zone) nt.tree[zone] = &nodeArray{nodes: []string{n.Name}, lastIndex: 0} } - glog.V(5).Infof("Added node %v in group %v to NodeTree", n.Name, zone) + klog.V(5).Infof("Added node %v in group %v to NodeTree", n.Name, zone) nt.NumNodes++ } @@ -110,13 +110,13 @@ func (nt *NodeTree) removeNode(n *v1.Node) error { if len(na.nodes) == 0 { nt.removeZone(zone) } - glog.V(5).Infof("Removed node %v in group %v from NodeTree", n.Name, zone) + klog.V(5).Infof("Removed node %v in group %v from NodeTree", n.Name, zone) nt.NumNodes-- return nil } } } - glog.Errorf("Node %v in group %v was not found", n.Name, zone) + klog.Errorf("Node %v in group %v was not found", n.Name, zone) return fmt.Errorf("node %v in group %v was not found", n.Name, zone) } diff --git a/pkg/scheduler/internal/queue/BUILD b/pkg/scheduler/internal/queue/BUILD index 3be73a6295ddb..c675f85924820 100644 --- a/pkg/scheduler/internal/queue/BUILD +++ b/pkg/scheduler/internal/queue/BUILD @@ -13,7 +13,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/scheduler/internal/queue/scheduling_queue.go b/pkg/scheduler/internal/queue/scheduling_queue.go index 271264e065396..b9a1b5c8970ef 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue.go +++ b/pkg/scheduler/internal/queue/scheduling_queue.go @@ -32,7 +32,7 @@ import ( "reflect" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -219,7 +219,7 @@ func (p *PriorityQueue) addNominatedPodIfNeeded(pod *v1.Pod) { if len(nnn) > 0 { for _, np := range p.nominatedPods[nnn] { if np.UID == pod.UID { - glog.Errorf("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name) + klog.Errorf("Pod %v/%v already exists in the nominated map!", pod.Namespace, pod.Name) return } } @@ -258,10 +258,10 @@ func (p *PriorityQueue) Add(pod *v1.Pod) error { defer p.lock.Unlock() err := p.activeQ.Add(pod) if err != nil { - glog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) + klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) } else { if p.unschedulableQ.get(pod) != nil { - glog.Errorf("Error: pod %v/%v is already in the unschedulable queue.", pod.Namespace, pod.Name) + klog.Errorf("Error: pod %v/%v is already in the unschedulable queue.", pod.Namespace, pod.Name) p.deleteNominatedPodIfExists(pod) p.unschedulableQ.delete(pod) } @@ -284,7 +284,7 @@ func (p *PriorityQueue) AddIfNotPresent(pod *v1.Pod) error { } err := p.activeQ.Add(pod) if err != nil { - glog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) + klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) } else { p.addNominatedPodIfNeeded(pod) p.cond.Broadcast() 
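// [Editor's sketch, not part of the patch] nodeArray.next in the node_tree.go
// hunks above hands out node names one at a time and reports exhaustion so the
// tree can advance to the next zone, which is what spreads successive pods
// across zones. A stripped-down version of that iterator:
package main

import "fmt"

type nodeArray struct {
	nodes     []string
	lastIndex int
}

func (na *nodeArray) next() (nodeName string, exhausted bool) {
	if len(na.nodes) == 0 {
		return "", false // empty arrays should have been deleted from the tree
	}
	if na.lastIndex >= len(na.nodes) {
		return "", true
	}
	nodeName = na.nodes[na.lastIndex]
	na.lastIndex++
	return nodeName, false
}

func main() {
	na := &nodeArray{nodes: []string{"node-a", "node-b"}}
	for {
		name, exhausted := na.next()
		if exhausted {
			fmt.Println("zone exhausted, moving on")
			return
		}
		fmt.Println("next:", name)
	}
}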
@@ -433,7 +433,7 @@ func (p *PriorityQueue) MoveAllToActiveQueue() { defer p.lock.Unlock() for _, pod := range p.unschedulableQ.pods { if err := p.activeQ.Add(pod); err != nil { - glog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) + klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) } } p.unschedulableQ.clear() @@ -448,7 +448,7 @@ func (p *PriorityQueue) movePodsToActiveQueue(pods []*v1.Pod) { if err := p.activeQ.Add(pod); err == nil { p.unschedulableQ.delete(pod) } else { - glog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) + klog.Errorf("Error adding pod %v/%v to the scheduling queue: %v", pod.Namespace, pod.Name, err) } } p.receivedMoveRequest = true @@ -469,7 +469,7 @@ func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(up, &term) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) if err != nil { - glog.Errorf("Error getting label selectors for pod: %v.", up.Name) + klog.Errorf("Error getting label selectors for pod: %v.", up.Name) } if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) { podsToMove = append(podsToMove, up) diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 4e6a20f8b5c6e..b9b94d3970662 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -45,7 +45,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler/metrics" "k8s.io/kubernetes/pkg/scheduler/util" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -295,20 +295,20 @@ func (sched *Scheduler) schedule(pod *v1.Pod) (string, error) { // It returns the node name and an error if any. func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, error) { if !util.PodPriorityEnabled() || sched.config.DisablePreemption { - glog.V(3).Infof("Pod priority feature is not enabled or preemption is disabled by scheduler configuration." + + klog.V(3).Infof("Pod priority feature is not enabled or preemption is disabled by scheduler configuration." + " No preemption is performed.") return "", nil } preemptor, err := sched.config.PodPreemptor.GetUpdatedPod(preemptor) if err != nil { - glog.Errorf("Error getting the updated preemptor pod object: %v", err) + klog.Errorf("Error getting the updated preemptor pod object: %v", err) return "", err } node, victims, nominatedPodsToClear, err := sched.config.Algorithm.Preempt(preemptor, sched.config.NodeLister, scheduleErr) metrics.PreemptionVictims.Set(float64(len(victims))) if err != nil { - glog.Errorf("Error preempting victims to make room for %v/%v.", preemptor.Namespace, preemptor.Name) + klog.Errorf("Error preempting victims to make room for %v/%v.", preemptor.Namespace, preemptor.Name) return "", err } var nodeName = "" @@ -316,12 +316,12 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e nodeName = node.Name err = sched.config.PodPreemptor.SetNominatedNodeName(preemptor, nodeName) if err != nil { - glog.Errorf("Error in preemption process. Cannot update pod %v/%v annotations: %v", preemptor.Namespace, preemptor.Name, err) + klog.Errorf("Error in preemption process. 
Cannot update pod %v/%v annotations: %v", preemptor.Namespace, preemptor.Name, err) return "", err } for _, victim := range victims { if err := sched.config.PodPreemptor.DeletePod(victim); err != nil { - glog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err) + klog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err) return "", err } sched.config.Recorder.Eventf(victim, v1.EventTypeNormal, "Preempted", "by %v/%v on node %v", preemptor.Namespace, preemptor.Name, nodeName) @@ -334,7 +334,7 @@ func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, e for _, p := range nominatedPodsToClear { rErr := sched.config.PodPreemptor.RemoveNominatedNodeName(p) if rErr != nil { - glog.Errorf("Cannot remove nominated node annotation of pod: %v", rErr) + klog.Errorf("Cannot remove nominated node annotation of pod: %v", rErr) // We do not return as this error is not critical. } } @@ -376,14 +376,14 @@ func (sched *Scheduler) bindVolumes(assumed *v1.Pod) error { var reason string var eventType string - glog.V(5).Infof("Trying to bind volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name) + klog.V(5).Infof("Trying to bind volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name) err := sched.config.VolumeBinder.Binder.BindPodVolumes(assumed) if err != nil { - glog.V(1).Infof("Failed to bind volumes for pod \"%v/%v\": %v", assumed.Namespace, assumed.Name, err) + klog.V(1).Infof("Failed to bind volumes for pod \"%v/%v\": %v", assumed.Namespace, assumed.Name, err) // Unassume the Pod and retry scheduling if forgetErr := sched.config.SchedulerCache.ForgetPod(assumed); forgetErr != nil { - glog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr) + klog.Errorf("scheduler cache ForgetPod failed: %v", forgetErr) } reason = "VolumeBindingFailed" @@ -398,7 +398,7 @@ func (sched *Scheduler) bindVolumes(assumed *v1.Pod) error { return err } - glog.V(5).Infof("Success binding volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name) + klog.V(5).Infof("Success binding volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name) return nil } @@ -416,7 +416,7 @@ func (sched *Scheduler) assume(assumed *v1.Pod, host string) error { // snapshotted before updates are written, we would update equivalence // cache with stale information which is based on snapshot of old cache. if err := sched.config.SchedulerCache.AssumePod(assumed); err != nil { - glog.Errorf("scheduler cache AssumePod failed: %v", err) + klog.Errorf("scheduler cache AssumePod failed: %v", err) // This is most probably result of a BUG in retrying logic. // We report an error here so that pod scheduling can be retried. @@ -451,12 +451,12 @@ func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error { // it's atomic with setting host. 
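// [Editor's sketch, not part of the patch] preempt() above runs a fixed
// sequence: refresh the preemptor, pick a node and victims, record the
// nominated node, evict the victims, then clear stale nominations. The same
// skeleton with hypothetical stand-in hooks for the real PodPreemptor calls:
package main

import "log"

type decision struct {
	node    string
	victims []string
}

func preempt(preemptor string, pick func(string) (decision, error), deletePod func(string) error) (string, error) {
	d, err := pick(preemptor)
	if err != nil {
		log.Printf("Error preempting victims to make room for %s.", preemptor)
		return "", err
	}
	// Record the nomination before evicting, so the scheduler remembers the
	// choice even if eviction takes a while; the real code calls
	// SetNominatedNodeName at this point.
	for _, victim := range d.victims {
		if err := deletePod(victim); err != nil {
			log.Printf("Error preempting pod %s: %v", victim, err)
			return "", err
		}
	}
	return d.node, nil
}

func main() {
	node, err := preempt("ns/high-pri",
		func(string) (decision, error) {
			return decision{node: "node-1", victims: []string{"ns/low-pri"}}, nil
		},
		func(pod string) error { log.Printf("evicting %s", pod); return nil })
	log.Printf("nominated node: %s, err: %v", node, err)
}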
err := sched.config.GetBinder(assumed).Bind(b) if finErr := sched.config.SchedulerCache.FinishBinding(assumed); finErr != nil { - glog.Errorf("scheduler cache FinishBinding failed: %v", finErr) + klog.Errorf("scheduler cache FinishBinding failed: %v", finErr) } if err != nil { - glog.V(1).Infof("Failed to bind pod: %v/%v", assumed.Namespace, assumed.Name) + klog.V(1).Infof("Failed to bind pod: %v/%v", assumed.Namespace, assumed.Name) if err := sched.config.SchedulerCache.ForgetPod(assumed); err != nil { - glog.Errorf("scheduler cache ForgetPod failed: %v", err) + klog.Errorf("scheduler cache ForgetPod failed: %v", err) } sched.config.Error(assumed, err) sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "Binding rejected: %v", err) @@ -483,11 +483,11 @@ func (sched *Scheduler) scheduleOne() { } if pod.DeletionTimestamp != nil { sched.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name) - glog.V(3).Infof("Skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name) + klog.V(3).Infof("Skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name) return } - glog.V(3).Infof("Attempting to schedule pod: %v/%v", pod.Namespace, pod.Name) + klog.V(3).Infof("Attempting to schedule pod: %v/%v", pod.Namespace, pod.Name) // Synchronously attempt to find a fit for the pod. start := time.Now() @@ -508,7 +508,7 @@ func (sched *Scheduler) scheduleOne() { // schedule it. (hopefully) metrics.PodScheduleFailures.Inc() } else { - glog.Errorf("error selecting node for pod: %v", err) + klog.Errorf("error selecting node for pod: %v", err) metrics.PodScheduleErrors.Inc() } return @@ -527,7 +527,7 @@ func (sched *Scheduler) scheduleOne() { // This function modifies 'assumedPod' if volume binding is required. 
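// [Editor's sketch, not part of the patch] bind() above is the optimistic half
// of scheduling: the pod was already assumed into the cache, the API call runs
// afterwards, and a failed bind forgets the assumption so the pod can be
// retried. The control flow in miniature (cache and apiBind are hypothetical
// stand-ins, not the real SchedulerCache or client):
package main

import (
	"errors"
	"log"
)

type cache struct{ assumed map[string]string }

func (c *cache) assume(pod, node string) { c.assumed[pod] = node }
func (c *cache) forget(pod string)       { delete(c.assumed, pod) }

func apiBind(pod, node string) error {
	return errors.New("binding rejected") // simulate an apiserver failure
}

func bind(c *cache, pod, node string) error {
	c.assume(pod, node) // optimistic: the scheduler moves on without waiting
	if err := apiBind(pod, node); err != nil {
		log.Printf("Failed to bind pod: %s", pod)
		c.forget(pod) // roll back so the pod gets scheduled again
		return err
	}
	return nil
}

func main() {
	c := &cache{assumed: map[string]string{}}
	if err := bind(c, "ns/a", "node-1"); err != nil {
		log.Printf("retrying later: %v (assumed entries: %d)", err, len(c.assumed))
	}
}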
allBound, err := sched.assumeVolumes(assumedPod, suggestedHost) if err != nil { - glog.Errorf("error assuming volumes: %v", err) + klog.Errorf("error assuming volumes: %v", err) metrics.PodScheduleErrors.Inc() return } @@ -535,7 +535,7 @@ func (sched *Scheduler) scheduleOne() { // assume modifies `assumedPod` by setting NodeName=suggestedHost err = sched.assume(assumedPod, suggestedHost) if err != nil { - glog.Errorf("error assuming pod: %v", err) + klog.Errorf("error assuming pod: %v", err) metrics.PodScheduleErrors.Inc() return } @@ -545,7 +545,7 @@ func (sched *Scheduler) scheduleOne() { if !allBound { err := sched.bindVolumes(assumedPod) if err != nil { - glog.Errorf("error binding volumes: %v", err) + klog.Errorf("error binding volumes: %v", err) metrics.PodScheduleErrors.Inc() return } @@ -560,7 +560,7 @@ func (sched *Scheduler) scheduleOne() { }) metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start)) if err != nil { - glog.Errorf("error binding pod: %v", err) + klog.Errorf("error binding pod: %v", err) metrics.PodScheduleErrors.Inc() } else { metrics.PodScheduleSuccesses.Inc() diff --git a/pkg/scheduler/util/BUILD b/pkg/scheduler/util/BUILD index 57839b6708ea4..810d2c5cb0a9b 100644 --- a/pkg/scheduler/util/BUILD +++ b/pkg/scheduler/util/BUILD @@ -34,7 +34,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/scheduler/util/backoff_utils.go b/pkg/scheduler/util/backoff_utils.go index 50920ae86c83b..506cd1270acc0 100644 --- a/pkg/scheduler/util/backoff_utils.go +++ b/pkg/scheduler/util/backoff_utils.go @@ -24,7 +24,7 @@ import ( ktypes "k8s.io/apimachinery/pkg/types" - "github.com/golang/glog" + "k8s.io/klog" ) type clock interface { @@ -76,7 +76,7 @@ func (b *BackoffEntry) getBackoff(maxDuration time.Duration) time.Duration { newDuration = maxDuration } b.backoff = newDuration - glog.V(4).Infof("Backing off %s", duration.String()) + klog.V(4).Infof("Backing off %s", duration.String()) return duration } diff --git a/pkg/serviceaccount/BUILD b/pkg/serviceaccount/BUILD index 71a9836dce4b9..485dcf07d0a64 100644 --- a/pkg/serviceaccount/BUILD +++ b/pkg/serviceaccount/BUILD @@ -22,9 +22,9 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/gopkg.in/square/go-jose.v2:go_default_library", "//vendor/gopkg.in/square/go-jose.v2/jwt:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/serviceaccount/claims.go b/pkg/serviceaccount/claims.go index 7e36d39cc29ff..3b9cd08c286d8 100644 --- a/pkg/serviceaccount/claims.go +++ b/pkg/serviceaccount/claims.go @@ -21,8 +21,8 @@ import ( "fmt" "time" - "github.com/golang/glog" "gopkg.in/square/go-jose.v2/jwt" + "k8s.io/klog" apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/kubernetes/pkg/apis/core" @@ -97,7 +97,7 @@ var _ = Validator(&validator{}) func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{}) (*ServiceAccountInfo, error) { private, ok := privateObj.(*privateClaims) if !ok { - 
glog.Errorf("jwt validator expected private claim of type *privateClaims but got: %T", privateObj) + klog.Errorf("jwt validator expected private claim of type *privateClaims but got: %T", privateObj) return nil, errors.New("Token could not be validated.") } err := public.Validate(jwt.Expected{ @@ -108,7 +108,7 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ case err == jwt.ErrExpired: return nil, errors.New("Token has expired.") default: - glog.Errorf("unexpected validation error: %T", err) + klog.Errorf("unexpected validation error: %T", err) return nil, errors.New("Token could not be validated.") } @@ -132,15 +132,15 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ // Make sure service account still exists (name and UID) serviceAccount, err := v.getter.GetServiceAccount(namespace, saref.Name) if err != nil { - glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, saref.Name, err) + klog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, saref.Name, err) return nil, err } if serviceAccount.DeletionTimestamp != nil { - glog.V(4).Infof("Service account has been deleted %s/%s", namespace, saref.Name) + klog.V(4).Infof("Service account has been deleted %s/%s", namespace, saref.Name) return nil, fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, saref.Name) } if string(serviceAccount.UID) != saref.UID { - glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, saref.Name, string(serviceAccount.UID), saref.UID) + klog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, saref.Name, string(serviceAccount.UID), saref.UID) return nil, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, saref.UID) } @@ -148,15 +148,15 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ // Make sure token hasn't been invalidated by deletion of the secret secret, err := v.getter.GetSecret(namespace, secref.Name) if err != nil { - glog.V(4).Infof("Could not retrieve bound secret %s/%s for service account %s/%s: %v", namespace, secref.Name, namespace, saref.Name, err) + klog.V(4).Infof("Could not retrieve bound secret %s/%s for service account %s/%s: %v", namespace, secref.Name, namespace, saref.Name, err) return nil, errors.New("Token has been invalidated") } if secret.DeletionTimestamp != nil { - glog.V(4).Infof("Bound secret is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secref.Name, namespace, saref.Name) + klog.V(4).Infof("Bound secret is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secref.Name, namespace, saref.Name) return nil, errors.New("Token has been invalidated") } if secref.UID != string(secret.UID) { - glog.V(4).Infof("Secret UID no longer matches %s/%s: %q != %q", namespace, secref.Name, string(secret.UID), secref.UID) + klog.V(4).Infof("Secret UID no longer matches %s/%s: %q != %q", namespace, secref.Name, string(secret.UID), secref.UID) return nil, fmt.Errorf("Secret UID (%s) does not match claim (%s)", secret.UID, secref.UID) } } @@ -166,15 +166,15 @@ func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{ // Make sure token hasn't been invalidated by deletion of the pod pod, err := v.getter.GetPod(namespace, podref.Name) if err != nil { - glog.V(4).Infof("Could not retrieve bound pod %s/%s for service account %s/%s: %v", namespace, podref.Name, namespace, saref.Name, err) + 
klog.V(4).Infof("Could not retrieve bound pod %s/%s for service account %s/%s: %v", namespace, podref.Name, namespace, saref.Name, err) return nil, errors.New("Token has been invalidated") } if pod.DeletionTimestamp != nil { - glog.V(4).Infof("Bound pod is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, podref.Name, namespace, saref.Name) + klog.V(4).Infof("Bound pod is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, podref.Name, namespace, saref.Name) return nil, errors.New("Token has been invalidated") } if podref.UID != string(pod.UID) { - glog.V(4).Infof("Pod UID no longer matches %s/%s: %q != %q", namespace, podref.Name, string(pod.UID), podref.UID) + klog.V(4).Infof("Pod UID no longer matches %s/%s: %q != %q", namespace, podref.Name, string(pod.UID), podref.UID) return nil, fmt.Errorf("Pod UID (%s) does not match claim (%s)", pod.UID, podref.UID) } podName = podref.Name diff --git a/pkg/serviceaccount/legacy.go b/pkg/serviceaccount/legacy.go index 4d0a32c2cda94..57c482f0ba62d 100644 --- a/pkg/serviceaccount/legacy.go +++ b/pkg/serviceaccount/legacy.go @@ -21,8 +21,8 @@ import ( "errors" "fmt" - "github.com/golang/glog" "gopkg.in/square/go-jose.v2/jwt" + "k8s.io/klog" "k8s.io/api/core/v1" apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" @@ -65,7 +65,7 @@ var _ = Validator(&legacyValidator{}) func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, privateObj interface{}) (*ServiceAccountInfo, error) { private, ok := privateObj.(*legacyPrivateClaims) if !ok { - glog.Errorf("jwt validator expected private claim of type *legacyPrivateClaims but got: %T", privateObj) + klog.Errorf("jwt validator expected private claim of type *legacyPrivateClaims but got: %T", privateObj) return nil, errors.New("Token could not be validated.") } @@ -99,30 +99,30 @@ func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, private // Make sure token hasn't been invalidated by deletion of the secret secret, err := v.getter.GetSecret(namespace, secretName) if err != nil { - glog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err) + klog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err) return nil, errors.New("Token has been invalidated") } if secret.DeletionTimestamp != nil { - glog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) + klog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) return nil, errors.New("Token has been invalidated") } if bytes.Compare(secret.Data[v1.ServiceAccountTokenKey], []byte(tokenData)) != 0 { - glog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) + klog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) return nil, errors.New("Token does not match server's copy") } // Make sure service account still exists (name and UID) serviceAccount, err := v.getter.GetServiceAccount(namespace, serviceAccountName) if err != nil { - glog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err) + klog.V(4).Infof("Could not retrieve service 
account %s/%s: %v", namespace, serviceAccountName, err) return nil, err } if serviceAccount.DeletionTimestamp != nil { - glog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName) + klog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName) return nil, fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, serviceAccountName) } if string(serviceAccount.UID) != serviceAccountUID { - glog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID) + klog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID) return nil, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID) } } diff --git a/pkg/ssh/BUILD b/pkg/ssh/BUILD index ea52417688d62..2cc03ea717cc7 100644 --- a/pkg/ssh/BUILD +++ b/pkg/ssh/BUILD @@ -12,8 +12,8 @@ go_test( embed = [":go_default_library"], deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/crypto/ssh:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -25,9 +25,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/golang.org/x/crypto/ssh:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/ssh/ssh.go b/pkg/ssh/ssh.go index dc8aa3acc1ea4..bee21f6ee5ab1 100644 --- a/pkg/ssh/ssh.go +++ b/pkg/ssh/ssh.go @@ -37,9 +37,9 @@ import ( "sync" "time" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" "golang.org/x/crypto/ssh" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/runtime" @@ -79,7 +79,7 @@ type SSHTunnel struct { func (s *SSHTunnel) copyBytes(out io.Writer, in io.Reader) { if _, err := io.Copy(out, in); err != nil { - glog.Errorf("Error in SSH tunnel: %v", err) + klog.Errorf("Error in SSH tunnel: %v", err) } } @@ -353,8 +353,8 @@ func (l *SSHTunnelList) delayedHealthCheck(e sshTunnelEntry, delay time.Duration defer runtime.HandleCrash() time.Sleep(delay) if err := l.healthCheck(e); err != nil { - glog.Errorf("Healthcheck failed for tunnel to %q: %v", e.Address, err) - glog.Infof("Attempting once to re-establish tunnel to %q", e.Address) + klog.Errorf("Healthcheck failed for tunnel to %q: %v", e.Address, err) + klog.Infof("Attempting once to re-establish tunnel to %q", e.Address) l.removeAndReAdd(e) } }() @@ -391,7 +391,7 @@ func (l *SSHTunnelList) removeAndReAdd(e sshTunnelEntry) { } l.tunnelsLock.Unlock() if err := e.Tunnel.Close(); err != nil { - glog.Infof("Failed to close removed tunnel: %v", err) + klog.Infof("Failed to close removed tunnel: %v", err) } go l.createAndAddTunnel(e.Address) } @@ -399,9 +399,9 @@ func (l *SSHTunnelList) removeAndReAdd(e sshTunnelEntry) { func (l *SSHTunnelList) Dial(ctx context.Context, net, addr string) (net.Conn, error) { start := time.Now() id := mathrand.Int63() // So you can match begins/ends in the log. 
- glog.Infof("[%x: %v] Dialing...", id, addr) + klog.Infof("[%x: %v] Dialing...", id, addr) defer func() { - glog.Infof("[%x: %v] Dialed in %v.", id, addr, time.Since(start)) + klog.Infof("[%x: %v] Dialed in %v.", id, addr, time.Since(start)) }() tunnel, err := l.pickTunnel(strings.Split(addr, ":")[0]) if err != nil { @@ -423,7 +423,7 @@ func (l *SSHTunnelList) pickTunnel(addr string) (tunnel, error) { return entry.Tunnel, nil } } - glog.Warningf("SSH tunnel not found for address %q, picking random node", addr) + klog.Warningf("SSH tunnel not found for address %q, picking random node", addr) n := mathrand.Intn(len(l.entries)) return l.entries[n].Tunnel, nil } @@ -464,11 +464,11 @@ func (l *SSHTunnelList) Update(addrs []string) { for i := range l.entries { if _, ok := wantAddrsMap[l.entries[i].Address]; !ok { tunnelEntry := l.entries[i] - glog.Infof("Removing tunnel to deleted node at %q", tunnelEntry.Address) + klog.Infof("Removing tunnel to deleted node at %q", tunnelEntry.Address) go func() { defer runtime.HandleCrash() if err := tunnelEntry.Tunnel.Close(); err != nil { - glog.Errorf("Failed to close tunnel to %q: %v", tunnelEntry.Address, err) + klog.Errorf("Failed to close tunnel to %q: %v", tunnelEntry.Address, err) } }() } else { @@ -480,14 +480,14 @@ func (l *SSHTunnelList) Update(addrs []string) { } func (l *SSHTunnelList) createAndAddTunnel(addr string) { - glog.Infof("Trying to add tunnel to %q", addr) + klog.Infof("Trying to add tunnel to %q", addr) tunnel, err := l.tunnelCreator.NewSSHTunnel(l.user, l.keyfile, addr) if err != nil { - glog.Errorf("Failed to create tunnel for %q: %v", addr, err) + klog.Errorf("Failed to create tunnel for %q: %v", addr, err) return } if err := tunnel.Open(); err != nil { - glog.Errorf("Failed to open tunnel to %q: %v", addr, err) + klog.Errorf("Failed to open tunnel to %q: %v", addr, err) l.tunnelsLock.Lock() delete(l.adding, addr) l.tunnelsLock.Unlock() @@ -497,7 +497,7 @@ func (l *SSHTunnelList) createAndAddTunnel(addr string) { l.entries = append(l.entries, sshTunnelEntry{addr, tunnel}) delete(l.adding, addr) l.tunnelsLock.Unlock() - glog.Infof("Successfully added tunnel for %q", addr) + klog.Infof("Successfully added tunnel for %q", addr) } func EncodePrivateKey(private *rsa.PrivateKey) []byte { diff --git a/pkg/ssh/ssh_test.go b/pkg/ssh/ssh_test.go index a8098973d2878..6148233f7ece0 100644 --- a/pkg/ssh/ssh_test.go +++ b/pkg/ssh/ssh_test.go @@ -29,8 +29,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" - "github.com/golang/glog" "golang.org/x/crypto/ssh" + "k8s.io/klog" ) type testSSHServer struct { @@ -94,11 +94,11 @@ func runTestSSHServer(user, password string) (*testSSHServer, error) { conn, err := listener.Accept() if err != nil { - glog.Errorf("Failed to accept: %v", err) + klog.Errorf("Failed to accept: %v", err) } _, chans, reqs, err := ssh.NewServerConn(conn, config) if err != nil { - glog.Errorf("Failed handshake: %v", err) + klog.Errorf("Failed handshake: %v", err) } go ssh.DiscardRequests(reqs) for newChannel := range chans { @@ -108,11 +108,11 @@ func runTestSSHServer(user, password string) (*testSSHServer, error) { } channel, requests, err := newChannel.Accept() if err != nil { - glog.Errorf("Failed to accept channel: %v", err) + klog.Errorf("Failed to accept channel: %v", err) } for req := range requests { - glog.Infof("Got request: %v", req) + klog.Infof("Got request: %v", req) } channel.Close() diff --git a/pkg/util/async/BUILD b/pkg/util/async/BUILD index 57830ca887a78..c201c307be9e2 100644 --- a/pkg/util/async/BUILD +++ 
b/pkg/util/async/BUILD @@ -15,7 +15,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/util/async", deps = [ "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/util/async/bounded_frequency_runner.go b/pkg/util/async/bounded_frequency_runner.go index f03a7ec2d5d28..20a06ad668a8b 100644 --- a/pkg/util/async/bounded_frequency_runner.go +++ b/pkg/util/async/bounded_frequency_runner.go @@ -23,7 +23,7 @@ import ( "k8s.io/client-go/util/flowcontrol" - "github.com/golang/glog" + "k8s.io/klog" ) // BoundedFrequencyRunner manages runs of a user-provided function. @@ -167,13 +167,13 @@ func construct(name string, fn func(), minInterval, maxInterval time.Duration, b // Loop handles the periodic timer and run requests. This is expected to be // called as a goroutine. func (bfr *BoundedFrequencyRunner) Loop(stop <-chan struct{}) { - glog.V(3).Infof("%s Loop running", bfr.name) + klog.V(3).Infof("%s Loop running", bfr.name) bfr.timer.Reset(bfr.maxInterval) for { select { case <-stop: bfr.stop() - glog.V(3).Infof("%s Loop stopping", bfr.name) + klog.V(3).Infof("%s Loop stopping", bfr.name) return case <-bfr.timer.C(): bfr.tryRun() @@ -218,7 +218,7 @@ func (bfr *BoundedFrequencyRunner) tryRun() { bfr.lastRun = bfr.timer.Now() bfr.timer.Stop() bfr.timer.Reset(bfr.maxInterval) - glog.V(3).Infof("%s: ran, next possible in %v, periodic in %v", bfr.name, bfr.minInterval, bfr.maxInterval) + klog.V(3).Infof("%s: ran, next possible in %v, periodic in %v", bfr.name, bfr.minInterval, bfr.maxInterval) return } @@ -227,13 +227,13 @@ func (bfr *BoundedFrequencyRunner) tryRun() { elapsed := bfr.timer.Since(bfr.lastRun) // how long since last run nextPossible := bfr.minInterval - elapsed // time to next possible run nextScheduled := bfr.maxInterval - elapsed // time to next periodic run - glog.V(4).Infof("%s: %v since last run, possible in %v, scheduled in %v", bfr.name, elapsed, nextPossible, nextScheduled) + klog.V(4).Infof("%s: %v since last run, possible in %v, scheduled in %v", bfr.name, elapsed, nextPossible, nextScheduled) if nextPossible < nextScheduled { // Set the timer for ASAP, but don't drain here. Assuming Loop is running, // it might get a delivery in the mean time, but that is OK. bfr.timer.Stop() bfr.timer.Reset(nextPossible) - glog.V(3).Infof("%s: throttled, scheduling run in %v", bfr.name, nextPossible) + klog.V(3).Infof("%s: throttled, scheduling run in %v", bfr.name, nextPossible) } } diff --git a/pkg/util/bandwidth/BUILD b/pkg/util/bandwidth/BUILD index 2854892376d71..bc590bbfa7c27 100644 --- a/pkg/util/bandwidth/BUILD +++ b/pkg/util/bandwidth/BUILD @@ -22,7 +22,7 @@ go_library( ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], "//conditions:default": [], diff --git a/pkg/util/bandwidth/linux.go b/pkg/util/bandwidth/linux.go index b8936a34f014e..7050b4f763ca7 100644 --- a/pkg/util/bandwidth/linux.go +++ b/pkg/util/bandwidth/linux.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) // tcShaper provides an implementation of the BandwidthShaper interface on Linux using the 'tc' tool. 
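// [Editor's sketch, not part of the patch] SSHTunnelList.Update in the ssh.go
// hunks above is a reconcile loop: build the set of wanted addresses, tear down
// entries that are no longer wanted, and start tunnels for addresses that are
// missing. The same diffing logic without the SSH plumbing:
package main

import "fmt"

func reconcile(current, wanted []string) (keep, remove, add []string) {
	wantedSet := map[string]bool{}
	for _, a := range wanted {
		wantedSet[a] = true
	}
	currentSet := map[string]bool{}
	for _, a := range current {
		currentSet[a] = true
		if wantedSet[a] {
			keep = append(keep, a)
		} else {
			remove = append(remove, a) // the real code closes these tunnels async
		}
	}
	for _, a := range wanted {
		if !currentSet[a] {
			add = append(add, a) // the real code calls createAndAddTunnel
		}
	}
	return keep, remove, add
}

func main() {
	keep, remove, add := reconcile(
		[]string{"10.0.0.1", "10.0.0.2"},
		[]string{"10.0.0.2", "10.0.0.3"})
	fmt.Println("keep:", keep, "remove:", remove, "add:", add)
}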
@@ -53,10 +53,10 @@ func NewTCShaper(iface string) BandwidthShaper { } func (t *tcShaper) execAndLog(cmdStr string, args ...string) error { - glog.V(6).Infof("Running: %s %s", cmdStr, strings.Join(args, " ")) + klog.V(6).Infof("Running: %s %s", cmdStr, strings.Join(args, " ")) cmd := t.e.Command(cmdStr, args...) out, err := cmd.CombinedOutput() - glog.V(6).Infof("Output from tc: %s", string(out)) + klog.V(6).Infof("Output from tc: %s", string(out)) return err } @@ -259,7 +259,7 @@ func (t *tcShaper) ReconcileInterface() error { return err } if !exists { - glog.V(4).Info("Didn't find bandwidth interface, creating") + klog.V(4).Info("Didn't find bandwidth interface, creating") return t.initializeInterface() } fields := strings.Split(output, " ") diff --git a/pkg/util/coverage/coverage.go b/pkg/util/coverage/coverage.go index a6cdb2e73d487..2a36558e335d6 100644 --- a/pkg/util/coverage/coverage.go +++ b/pkg/util/coverage/coverage.go @@ -23,8 +23,8 @@ package coverage import ( "flag" "fmt" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" "os" "testing" "time" @@ -86,6 +86,6 @@ func FlushCoverage() { // This gets us atomic updates from the perspective of another process trying to access // the file. if err := os.Rename(tempCoveragePath(), coverageFile); err != nil { - glog.Errorf("Couldn't move coverage file from %s to %s", coverageFile, tempCoveragePath()) + klog.Errorf("Couldn't move coverage file from %s to %s", coverageFile, tempCoveragePath()) } } diff --git a/pkg/util/flag/BUILD b/pkg/util/flag/BUILD index ac88251b33c06..976dd85000b26 100644 --- a/pkg/util/flag/BUILD +++ b/pkg/util/flag/BUILD @@ -12,8 +12,8 @@ go_library( importpath = "k8s.io/kubernetes/pkg/util/flag", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/util/flag/flags.go b/pkg/util/flag/flags.go index b58a52cc0e7f7..1d57c3e8d6b3e 100644 --- a/pkg/util/flag/flags.go +++ b/pkg/util/flag/flags.go @@ -21,8 +21,8 @@ import ( "net" "strconv" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" ) @@ -30,7 +30,7 @@ import ( // PrintFlags logs the flags in the flagset func PrintFlags(flags *pflag.FlagSet) { flags.VisitAll(func(flag *pflag.Flag) { - glog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value) }) } diff --git a/pkg/util/goroutinemap/BUILD b/pkg/util/goroutinemap/BUILD index f1d77a0335476..92688fde68685 100644 --- a/pkg/util/goroutinemap/BUILD +++ b/pkg/util/goroutinemap/BUILD @@ -13,7 +13,7 @@ go_library( deps = [ "//pkg/util/goroutinemap/exponentialbackoff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/util/goroutinemap/goroutinemap.go b/pkg/util/goroutinemap/goroutinemap.go index 474e1215cc380..9b6eb73192707 100644 --- a/pkg/util/goroutinemap/goroutinemap.go +++ b/pkg/util/goroutinemap/goroutinemap.go @@ -25,8 +25,8 @@ import ( "fmt" "sync" - "github.com/golang/glog" k8sRuntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" ) @@ -135,7 +135,7 @@ func (grm *goRoutineMap) operationComplete( delete(grm.operations, operationName) if *err != nil { 
diff --git a/pkg/util/goroutinemap/BUILD b/pkg/util/goroutinemap/BUILD
index f1d77a0335476..92688fde68685 100644
--- a/pkg/util/goroutinemap/BUILD
+++ b/pkg/util/goroutinemap/BUILD
@@ -13,7 +13,7 @@ go_library(
     deps = [
         "//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/util/goroutinemap/goroutinemap.go b/pkg/util/goroutinemap/goroutinemap.go
index 474e1215cc380..9b6eb73192707 100644
--- a/pkg/util/goroutinemap/goroutinemap.go
+++ b/pkg/util/goroutinemap/goroutinemap.go
@@ -25,8 +25,8 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/golang/glog"
 	k8sRuntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
 )
@@ -135,7 +135,7 @@ func (grm *goRoutineMap) operationComplete(
 	delete(grm.operations, operationName)
 	if *err != nil {
 		// Log error
-		glog.Errorf("operation for %q failed with: %v",
+		klog.Errorf("operation for %q failed with: %v",
 			operationName,
 			*err)
 	}
@@ -147,7 +147,7 @@ func (grm *goRoutineMap) operationComplete(
 		grm.operations[operationName] = existingOp
 
 		// Log error
-		glog.Errorf("%v",
+		klog.Errorf("%v",
 			existingOp.expBackoff.GenerateNoRetriesPermittedMsg(operationName))
 	}
 }
diff --git a/pkg/util/ipconfig/BUILD b/pkg/util/ipconfig/BUILD
index ffcecee10316b..39504f89bbbbe 100644
--- a/pkg/util/ipconfig/BUILD
+++ b/pkg/util/ipconfig/BUILD
@@ -14,7 +14,7 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/util/ipconfig",
     deps = [
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )
diff --git a/pkg/util/ipconfig/ipconfig.go b/pkg/util/ipconfig/ipconfig.go
index 924c20b94a6f8..5764511602b9d 100644
--- a/pkg/util/ipconfig/ipconfig.go
+++ b/pkg/util/ipconfig/ipconfig.go
@@ -21,7 +21,7 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	utilexec "k8s.io/utils/exec"
 )
@@ -66,7 +66,7 @@ func (runner *runner) GetDNSSuffixSearchList() ([]string, error) {
 	// TODO: this does not work when the label is localized
 	suffixList := []string{}
 	if runtime.GOOS != "windows" {
-		glog.V(1).Infof("ipconfig not supported on GOOS=%s", runtime.GOOS)
+		klog.V(1).Infof("ipconfig not supported on GOOS=%s", runtime.GOOS)
 		return suffixList, nil
 	}
@@ -92,7 +92,7 @@ func (runner *runner) GetDNSSuffixSearchList() ([]string, error) {
 			}
 		}
 	} else {
-		glog.V(1).Infof("Running %s %s failed: %v", cmdIpconfig, cmdDefaultArgs, err)
+		klog.V(1).Infof("Running %s %s failed: %v", cmdIpconfig, cmdDefaultArgs, err)
 	}
 
 	return suffixList, err
diff --git a/pkg/util/ipset/BUILD b/pkg/util/ipset/BUILD
index 8be0d8d63d9dd..186fa432043a7 100644
--- a/pkg/util/ipset/BUILD
+++ b/pkg/util/ipset/BUILD
@@ -9,7 +9,7 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/util/ipset",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ],
 )
diff --git a/pkg/util/ipset/ipset.go b/pkg/util/ipset/ipset.go
index fd367f281211c..2d400a6244e7c 100644
--- a/pkg/util/ipset/ipset.go
+++ b/pkg/util/ipset/ipset.go
@@ -24,7 +24,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 	utilexec "k8s.io/utils/exec"
 )
@@ -111,12 +111,12 @@ func (set *IPSet) Validate() bool {
 	}
 	// check hash size value of ipset
 	if set.HashSize <= 0 {
-		glog.Errorf("Invalid hashsize value %d, should be >0", set.HashSize)
+		klog.Errorf("Invalid hashsize value %d, should be >0", set.HashSize)
 		return false
 	}
 	// check max elem value of ipset
 	if set.MaxElem <= 0 {
-		glog.Errorf("Invalid maxelem value %d, should be >0", set.MaxElem)
+		klog.Errorf("Invalid maxelem value %d, should be >0", set.MaxElem)
 		return false
 	}
 
@@ -167,7 +167,7 @@ type Entry struct {
 
 // Validate checks if a given ipset entry is valid or not. The set parameter is the ipset that entry belongs to.
 func (e *Entry) Validate(set *IPSet) bool {
 	if e.Port < 0 {
-		glog.Errorf("Entry %v port number %d should be >=0 for ipset %v", e, e.Port, set)
+		klog.Errorf("Entry %v port number %d should be >=0 for ipset %v", e, e.Port, set)
 		return false
 	}
 	switch e.SetType {
@@ -184,7 +184,7 @@ func (e *Entry) Validate(set *IPSet) bool {
 
 		// IP2 can not be empty for `hash:ip,port,ip` type ip set
 		if net.ParseIP(e.IP2) == nil {
-			glog.Errorf("Error parsing entry %v second ip address %v for ipset %v", e, e.IP2, set)
+			klog.Errorf("Error parsing entry %v second ip address %v for ipset %v", e, e.IP2, set)
 			return false
 		}
 	case HashIPPortNet:
@@ -195,22 +195,22 @@ func (e *Entry) Validate(set *IPSet) bool {
 
 		// Net can not be empty for `hash:ip,port,net` type ip set
 		if _, ipNet, err := net.ParseCIDR(e.Net); ipNet == nil {
-			glog.Errorf("Error parsing entry %v ip net %v for ipset %v, error: %v", e, e.Net, set, err)
+			klog.Errorf("Error parsing entry %v ip net %v for ipset %v, error: %v", e, e.Net, set, err)
 			return false
 		}
 	case BitmapPort:
 		// check if port number satisfies its ipset's requirement of port range
 		if set == nil {
-			glog.Errorf("Unable to reference ip set where the entry %v exists", e)
+			klog.Errorf("Unable to reference ip set where the entry %v exists", e)
 			return false
 		}
 		begin, end, err := parsePortRange(set.PortRange)
 		if err != nil {
-			glog.Errorf("Failed to parse set %v port range %s for ipset %v, error: %v", set, set.PortRange, set, err)
+			klog.Errorf("Failed to parse set %v port range %s for ipset %v, error: %v", set, set.PortRange, set, err)
 			return false
 		}
 		if e.Port < begin || e.Port > end {
-			glog.Errorf("Entry %v port number %d is not in the port range %s of its ipset %v", e, e.Port, set.PortRange, set)
+			klog.Errorf("Entry %v port number %d is not in the port range %s of its ipset %v", e, e.Port, set.PortRange, set)
 			return false
 		}
 	}
@@ -251,7 +251,7 @@ func (e *Entry) checkIPandProtocol(set *IPSet) bool {
 	}
 
 	if net.ParseIP(e.IP) == nil {
-		glog.Errorf("Error parsing entry %v ip address %v for ipset %v", e, e.IP, set)
+		klog.Errorf("Error parsing entry %v ip address %v for ipset %v", e, e.IP, set)
 		return false
 	}
 
@@ -424,17 +424,17 @@ func getIPSetVersionString(exec utilexec.Interface) (string, error) {
 func validatePortRange(portRange string) bool {
 	strs := strings.Split(portRange, "-")
 	if len(strs) != 2 {
-		glog.Errorf("port range should be in the format of `a-b`")
+		klog.Errorf("port range should be in the format of `a-b`")
 		return false
 	}
 	for i := range strs {
 		num, err := strconv.Atoi(strs[i])
 		if err != nil {
-			glog.Errorf("Failed to parse %s, error: %v", strs[i], err)
+			klog.Errorf("Failed to parse %s, error: %v", strs[i], err)
 			return false
 		}
 		if num < 0 {
-			glog.Errorf("port number %d should be >=0", num)
+			klog.Errorf("port number %d should be >=0", num)
 			return false
 		}
 	}
@@ -448,7 +448,7 @@ func validateIPSetType(set Type) bool {
 			return true
 		}
 	}
-	glog.Errorf("Currently supported ipset types are: %v, %s is not supported", ValidIPSetTypes, set)
+	klog.Errorf("Currently supported ipset types are: %v, %s is not supported", ValidIPSetTypes, set)
 	return false
 }
@@ -457,7 +457,7 @@ func validateHashFamily(family string) bool {
 	if family == ProtocolFamilyIPV4 || family == ProtocolFamilyIPV6 {
 		return true
 	}
-	glog.Errorf("Currently supported ip set hash families are: [%s, %s], %s is not supported", ProtocolFamilyIPV4, ProtocolFamilyIPV6, family)
+	klog.Errorf("Currently supported ip set hash families are: [%s, %s], %s is not supported", ProtocolFamilyIPV4, ProtocolFamilyIPV6, family)
 	return false
 }
@@ -485,7 +485,7 @@ func validateProtocol(protocol string) bool {
 	if protocol == ProtocolTCP || protocol == ProtocolUDP || protocol == ProtocolSCTP {
 		return true
 	}
-	glog.Errorf("Invalid entry's protocol: %s, supported protocols are [%s, %s, %s]", protocol, ProtocolTCP, ProtocolUDP, ProtocolSCTP)
+	klog.Errorf("Invalid entry's protocol: %s, supported protocols are [%s, %s, %s]", protocol, ProtocolTCP, ProtocolUDP, ProtocolSCTP)
 	return false
 }
diff --git a/pkg/util/iptables/BUILD b/pkg/util/iptables/BUILD
index 40529906a583e..9b289951f5201 100644
--- a/pkg/util/iptables/BUILD
+++ b/pkg/util/iptables/BUILD
@@ -22,7 +22,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/trace:go_default_library",
         "//vendor/github.com/godbus/dbus:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ] + select({
         "@io_bazel_rules_go//go/platform:linux": [
diff --git a/pkg/util/iptables/iptables.go b/pkg/util/iptables/iptables.go
index e81c21497ba76..c9f3d9860d1cc 100644
--- a/pkg/util/iptables/iptables.go
+++ b/pkg/util/iptables/iptables.go
@@ -26,10 +26,10 @@ import (
 	"time"
 
 	godbus "github.com/godbus/dbus"
-	"github.com/golang/glog"
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilversion "k8s.io/apimachinery/pkg/util/version"
 	utiltrace "k8s.io/apiserver/pkg/util/trace"
+	"k8s.io/klog"
 	utildbus "k8s.io/kubernetes/pkg/util/dbus"
 	utilexec "k8s.io/utils/exec"
 )
@@ -152,7 +152,7 @@ type runner struct {
 func newInternal(exec utilexec.Interface, dbus utildbus.Interface, protocol Protocol, lockfilePath string) Interface {
 	vstring, err := getIPTablesVersionString(exec, protocol)
 	if err != nil {
-		glog.Warningf("Error checking iptables version, assuming version at least %s: %v", MinCheckVersion, err)
+		klog.Warningf("Error checking iptables version, assuming version at least %s: %v", MinCheckVersion, err)
 		vstring = MinCheckVersion
 	}
@@ -197,7 +197,7 @@ const (
 func (runner *runner) connectToFirewallD() {
 	bus, err := runner.dbus.SystemBus()
 	if err != nil {
-		glog.V(1).Infof("Could not connect to D-Bus system bus: %s", err)
+		klog.V(1).Infof("Could not connect to D-Bus system bus: %s", err)
 		return
 	}
 	runner.hasListener = true
@@ -324,7 +324,7 @@ func (runner *runner) SaveInto(table Table, buffer *bytes.Buffer) error {
 	// run and return
 	iptablesSaveCmd := iptablesSaveCommand(runner.protocol)
 	args := []string{"-t", string(table)}
-	glog.V(4).Infof("running %s %v", iptablesSaveCmd, args)
+	klog.V(4).Infof("running %s %v", iptablesSaveCmd, args)
 	cmd := runner.exec.Command(iptablesSaveCmd, args...)
 	// Since CombinedOutput() doesn't support redirecting it to a buffer,
 	// we need to workaround it by redirecting stdout and stderr to buffer
@@ -380,7 +380,7 @@ func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFla
 		trace.Step("Locks grabbed")
 		defer func(locker iptablesLocker) {
 			if err := locker.Close(); err != nil {
-				glog.Errorf("Failed to close iptables locks: %v", err)
+				klog.Errorf("Failed to close iptables locks: %v", err)
 			}
 		}(locker)
 	}
@@ -388,7 +388,7 @@ func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFla
 	// run the command and return the output or an error including the output and error
 	fullArgs := append(runner.restoreWaitFlag, args...)
 	iptablesRestoreCmd := iptablesRestoreCommand(runner.protocol)
-	glog.V(4).Infof("running %s %v", iptablesRestoreCmd, fullArgs)
+	klog.V(4).Infof("running %s %v", iptablesRestoreCmd, fullArgs)
 	cmd := runner.exec.Command(iptablesRestoreCmd, fullArgs...)
 	cmd.SetStdin(bytes.NewBuffer(data))
 	b, err := cmd.CombinedOutput()
@@ -430,7 +430,7 @@ func (runner *runner) runContext(ctx context.Context, op operation, args []strin
 	iptablesCmd := iptablesCommand(runner.protocol)
 	fullArgs := append(runner.waitFlag, string(op))
 	fullArgs = append(fullArgs, args...)
-	glog.V(5).Infof("running iptables %s %v", string(op), args)
+	klog.V(5).Infof("running iptables %s %v", string(op), args)
 	if ctx == nil {
 		return runner.exec.Command(iptablesCmd, fullArgs...).CombinedOutput()
 	}
@@ -458,7 +458,7 @@ func trimhex(s string) string {
 // of hack and half-measures. We should nix this ASAP.
 func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...string) (bool, error) {
 	iptablesSaveCmd := iptablesSaveCommand(runner.protocol)
-	glog.V(1).Infof("running %s -t %s", iptablesSaveCmd, string(table))
+	klog.V(1).Infof("running %s -t %s", iptablesSaveCmd, string(table))
 	out, err := runner.exec.Command(iptablesSaveCmd, "-t", string(table)).CombinedOutput()
 	if err != nil {
 		return false, fmt.Errorf("error checking rule: %v", err)
@@ -497,7 +497,7 @@ func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...st
 		if sets.NewString(fields...).IsSuperset(argset) {
 			return true, nil
 		}
-		glog.V(5).Infof("DBG: fields is not a superset of args: fields=%v args=%v", fields, args)
+		klog.V(5).Infof("DBG: fields is not a superset of args: fields=%v args=%v", fields, args)
 	}
 
 	return false, nil
@@ -544,12 +544,12 @@ func makeFullArgs(table Table, chain Chain, args ...string) []string {
 func getIPTablesHasCheckCommand(vstring string) bool {
 	minVersion, err := utilversion.ParseGeneric(MinCheckVersion)
 	if err != nil {
-		glog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinCheckVersion, err)
+		klog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinCheckVersion, err)
 		return true
 	}
 	version, err := utilversion.ParseGeneric(vstring)
 	if err != nil {
-		glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
+		klog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
 		return true
 	}
 	return version.AtLeast(minVersion)
@@ -559,13 +559,13 @@ func getIPTablesHasCheckCommand(vstring string) bool {
 func getIPTablesWaitFlag(vstring string) []string {
 	version, err := utilversion.ParseGeneric(vstring)
 	if err != nil {
-		glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
+		klog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
 		return nil
 	}
 
 	minVersion, err := utilversion.ParseGeneric(WaitMinVersion)
 	if err != nil {
-		glog.Errorf("WaitMinVersion (%s) is not a valid version string: %v", WaitMinVersion, err)
+		klog.Errorf("WaitMinVersion (%s) is not a valid version string: %v", WaitMinVersion, err)
 		return nil
 	}
 	if version.LessThan(minVersion) {
@@ -574,7 +574,7 @@ func getIPTablesWaitFlag(vstring string) []string {
 
 	minVersion, err = utilversion.ParseGeneric(WaitSecondsMinVersion)
 	if err != nil {
-		glog.Errorf("WaitSecondsMinVersion (%s) is not a valid version string: %v", WaitSecondsMinVersion, err)
+		klog.Errorf("WaitSecondsMinVersion (%s) is not a valid version string: %v", WaitSecondsMinVersion, err)
 		return nil
 	}
 	if version.LessThan(minVersion) {
@@ -608,11 +608,11 @@ func getIPTablesVersionString(exec utilexec.Interface, protocol Protocol) (strin
 func getIPTablesRestoreWaitFlag(exec utilexec.Interface, protocol Protocol) []string {
 	vstring, err := getIPTablesRestoreVersionString(exec, protocol)
 	if err != nil || vstring == "" {
-		glog.V(3).Infof("couldn't get iptables-restore version; assuming it doesn't support --wait")
+		klog.V(3).Infof("couldn't get iptables-restore version; assuming it doesn't support --wait")
 		return nil
 	}
 	if _, err := utilversion.ParseGeneric(vstring); err != nil {
-		glog.V(3).Infof("couldn't parse iptables-restore version; assuming it doesn't support --wait")
+		klog.V(3).Infof("couldn't parse iptables-restore version; assuming it doesn't support --wait")
 		return nil
 	}
@@ -691,7 +691,7 @@ func (runner *runner) AddReloadFunc(reloadFunc func()) {
 
 // runs all reload funcs to re-sync iptables rules
 func (runner *runner) reload() {
-	glog.V(1).Infof("reloading iptables rules")
+	klog.V(1).Infof("reloading iptables rules")
 
 	for _, f := range runner.reloadFuncs {
 		f()
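The iptables runner above logs at verbosity levels from V(1) (rare events such as a firewalld-triggered reload) down to V(5) (per-invocation command lines), and klog keeps glog's semantics for these calls: V(n) is a cheap check that passes only when the -v flag is at least n. A self-contained sketch of that guard pattern (expensiveDump is a stand-in for illustration, not code from this patch):

	package main

	import (
		"flag"
		"fmt"

		"k8s.io/klog"
	)

	// expensiveDump stands in for argument construction you only want at high verbosity.
	func expensiveDump() string {
		return fmt.Sprintf("%d rules", 42)
	}

	func main() {
		klog.InitFlags(nil)
		flag.Set("v", "5") // normally supplied on the command line as -v=5
		flag.Parse()

		// klog.V(5) is true only when verbosity >= 5, so expensiveDump
		// never runs in normal operation.
		if klog.V(5) {
			klog.Infof("dump: %s", expensiveDump())
		}
		klog.Flush()
	}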
diff --git a/pkg/util/ipvs/BUILD b/pkg/util/ipvs/BUILD
index 28ef80b71226d..784a343e37d6b 100644
--- a/pkg/util/ipvs/BUILD
+++ b/pkg/util/ipvs/BUILD
@@ -41,7 +41,7 @@ go_library(
         "@io_bazel_rules_go//go/platform:linux": [
             "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
             "//vendor/github.com/docker/libnetwork/ipvs:go_default_library",
-            "//vendor/github.com/golang/glog:go_default_library",
+            "//vendor/k8s.io/klog:go_default_library",
         ],
         "//conditions:default": [],
     }),
diff --git a/pkg/util/ipvs/ipvs_linux.go b/pkg/util/ipvs/ipvs_linux.go
index b7fcca90531b9..b4f53be6fdb96 100644
--- a/pkg/util/ipvs/ipvs_linux.go
+++ b/pkg/util/ipvs/ipvs_linux.go
@@ -26,7 +26,7 @@ import (
 	"syscall"
 
 	libipvs "github.com/docker/libnetwork/ipvs"
-	"github.com/golang/glog"
+	"k8s.io/klog"
 	utilexec "k8s.io/utils/exec"
 )
@@ -43,7 +43,7 @@ type Protocol uint16
 func New(exec utilexec.Interface) Interface {
 	handle, err := libipvs.New("")
 	if err != nil {
-		glog.Errorf("IPVS interface can't be initialized, error: %v", err)
+		klog.Errorf("IPVS interface can't be initialized, error: %v", err)
 		return nil
 	}
 	return &runner{
diff --git a/pkg/util/ipvs/kernelcheck_linux.go b/pkg/util/ipvs/kernelcheck_linux.go
index 726606b97e062..286a3098c0c46 100644
--- a/pkg/util/ipvs/kernelcheck_linux.go
+++ b/pkg/util/ipvs/kernelcheck_linux.go
@@ -26,7 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilsexec "k8s.io/utils/exec"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 // RequiredIPVSKernelModulesAvailableCheck tests IPVS required kernel modules.
@@ -42,7 +42,7 @@ func (r RequiredIPVSKernelModulesAvailableCheck) Name() string {
 
 // Check tries to validate that the required IPVS kernel modules exist.
 // The name of the function can not be changed.
 func (r RequiredIPVSKernelModulesAvailableCheck) Check() (warnings, errors []error) {
-	glog.V(1).Infoln("validating the kernel module IPVS required exists in machine or not")
+	klog.V(1).Infoln("validating the kernel module IPVS required exists in machine or not")
 
 	kernelVersion, ipvsModules, err := GetKernelVersionAndIPVSMods(r.Executor)
 	if err != nil {
diff --git a/pkg/util/keymutex/BUILD b/pkg/util/keymutex/BUILD
index 256ed34181eb5..267a4b1f6336c 100644
--- a/pkg/util/keymutex/BUILD
+++ b/pkg/util/keymutex/BUILD
@@ -13,7 +13,7 @@ go_library(
         "keymutex.go",
     ],
     importpath = "k8s.io/kubernetes/pkg/util/keymutex",
-    deps = ["//vendor/github.com/golang/glog:go_default_library"],
+    deps = ["//vendor/k8s.io/klog:go_default_library"],
 )
 
 go_test(
diff --git a/pkg/util/keymutex/hashed.go b/pkg/util/keymutex/hashed.go
index 5fe9a025c2455..5176ae916c224 100644
--- a/pkg/util/keymutex/hashed.go
+++ b/pkg/util/keymutex/hashed.go
@@ -21,7 +21,7 @@ import (
 	"runtime"
 	"sync"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 // NewHashed returns a new instance of KeyMutex which hashes arbitrary keys to
@@ -44,16 +44,16 @@ type hashedKeyMutex struct {
 
 // Acquires a lock associated with the specified ID.
 func (km *hashedKeyMutex) LockKey(id string) {
-	glog.V(5).Infof("hashedKeyMutex.LockKey(...) called for id %q\r\n", id)
+	klog.V(5).Infof("hashedKeyMutex.LockKey(...) called for id %q\r\n", id)
 	km.mutexes[km.hash(id)%len(km.mutexes)].Lock()
-	glog.V(5).Infof("hashedKeyMutex.LockKey(...) for id %q completed.\r\n", id)
+	klog.V(5).Infof("hashedKeyMutex.LockKey(...) for id %q completed.\r\n", id)
 }
 
 // Releases the lock associated with the specified ID.
 func (km *hashedKeyMutex) UnlockKey(id string) error {
-	glog.V(5).Infof("hashedKeyMutex.UnlockKey(...) called for id %q\r\n", id)
+	klog.V(5).Infof("hashedKeyMutex.UnlockKey(...) called for id %q\r\n", id)
 	km.mutexes[km.hash(id)%len(km.mutexes)].Unlock()
-	glog.V(5).Infof("hashedKeyMutex.UnlockKey(...) for id %q completed.\r\n", id)
+	klog.V(5).Infof("hashedKeyMutex.UnlockKey(...) for id %q completed.\r\n", id)
 	return nil
 }
diff --git a/pkg/util/mount/BUILD b/pkg/util/mount/BUILD
index 8a7211ec4afe0..221afb7a9c65c 100644
--- a/pkg/util/mount/BUILD
+++ b/pkg/util/mount/BUILD
@@ -18,7 +18,7 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/util/mount",
     visibility = ["//visibility:public"],
     deps = [
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
     ] + select({
         "@io_bazel_rules_go//go/platform:android": [
@@ -79,8 +79,8 @@ go_test(
     ] + select({
         "@io_bazel_rules_go//go/platform:linux": [
             "//pkg/util/nsenter:go_default_library",
-            "//vendor/github.com/golang/glog:go_default_library",
             "//vendor/golang.org/x/sys/unix:go_default_library",
+            "//vendor/k8s.io/klog:go_default_library",
             "//vendor/k8s.io/utils/exec:go_default_library",
         ],
         "@io_bazel_rules_go//go/platform:windows": [
diff --git a/pkg/util/mount/exec_mount.go b/pkg/util/mount/exec_mount.go
index 226f02704cc11..634189dea9bc1 100644
--- a/pkg/util/mount/exec_mount.go
+++ b/pkg/util/mount/exec_mount.go
@@ -22,7 +22,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 // ExecMounter is a mounter that uses provided Exec interface to mount and
@@ -59,10 +59,10 @@ func (m *execMounter) Mount(source string, target string, fstype string, options
 
 // doExecMount calls exec(mount <what>) using given exec interface.
 func (m *execMounter) doExecMount(source, target, fstype string, options []string) error {
-	glog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options)
+	klog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options)
 	mountArgs := makeMountArgs(source, target, fstype, options)
 	output, err := m.exec.Run("mount", mountArgs...)
-	glog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output))
+	klog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output))
 	if err != nil {
 		return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n",
 			err, "mount", source, target, fstype, options, string(output))
@@ -75,9 +75,9 @@ func (m *execMounter) doExecMount(source, target, fstype string, options []strin
 func (m *execMounter) Unmount(target string) error {
 	outputBytes, err := m.exec.Run("umount", target)
 	if err == nil {
-		glog.V(5).Infof("Exec unmounted %s: %s", target, string(outputBytes))
+		klog.V(5).Infof("Exec unmounted %s: %s", target, string(outputBytes))
 	} else {
-		glog.V(5).Infof("Failed to exec unmount %s: err: %q, umount output: %s", target, err, string(outputBytes))
+		klog.V(5).Infof("Failed to exec unmount %s: err: %q, umount output: %s", target, err, string(outputBytes))
 	}
 
 	return err
diff --git a/pkg/util/mount/fake.go b/pkg/util/mount/fake.go
index 27279630c2f39..06e0fcccdc16e 100644
--- a/pkg/util/mount/fake.go
+++ b/pkg/util/mount/fake.go
@@ -22,7 +22,7 @@ import (
 	"path/filepath"
 	"sync"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 // FakeMounter implements mount.Interface for tests.
@@ -93,7 +93,7 @@ func (f *FakeMounter) Mount(source string, target string, fstype string, options
 		absTarget = target
 	}
 	f.MountPoints = append(f.MountPoints, MountPoint{Device: source, Path: absTarget, Type: fstype, Opts: opts})
-	glog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget)
+	klog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget)
 	f.Log = append(f.Log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype})
 	return nil
 }
@@ -111,7 +111,7 @@ func (f *FakeMounter) Unmount(target string) error {
 	newMountpoints := []MountPoint{}
 	for _, mp := range f.MountPoints {
 		if mp.Path == absTarget {
-			glog.V(5).Infof("Fake mounter: unmounted %s from %s", mp.Device, absTarget)
+			klog.V(5).Infof("Fake mounter: unmounted %s from %s", mp.Device, absTarget)
 			// Don't copy it to newMountpoints
 			continue
 		}
@@ -154,11 +154,11 @@ func (f *FakeMounter) IsLikelyNotMountPoint(file string) (bool, error) {
 
 	for _, mp := range f.MountPoints {
 		if mp.Path == absFile {
-			glog.V(5).Infof("isLikelyNotMountPoint for %s: mounted %s, false", file, mp.Path)
+			klog.V(5).Infof("isLikelyNotMountPoint for %s: mounted %s, false", file, mp.Path)
 			return false, nil
 		}
 	}
-	glog.V(5).Infof("isLikelyNotMountPoint for %s: true", file)
+	klog.V(5).Infof("isLikelyNotMountPoint for %s: true", file)
 	return true, nil
 }
diff --git a/pkg/util/mount/mount_linux.go b/pkg/util/mount/mount_linux.go
index c3f3a226afba3..6ebeff053b59d 100644
--- a/pkg/util/mount/mount_linux.go
+++ b/pkg/util/mount/mount_linux.go
@@ -30,9 +30,9 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/golang/glog"
 	"golang.org/x/sys/unix"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog"
 	utilfile "k8s.io/kubernetes/pkg/util/file"
 	utilio "k8s.io/kubernetes/pkg/util/io"
 	utilexec "k8s.io/utils/exec"
@@ -143,12 +143,12 @@ func (m *Mounter) doMount(mounterPath string, mountCmd string, source string, ta
 		// No code here, mountCmd and mountArgs are already populated.
 	}
 
-	glog.V(4).Infof("Mounting cmd (%s) with arguments (%s)", mountCmd, mountArgs)
+	klog.V(4).Infof("Mounting cmd (%s) with arguments (%s)", mountCmd, mountArgs)
 	command := exec.Command(mountCmd, mountArgs...)
 	output, err := command.CombinedOutput()
 	if err != nil {
 		args := strings.Join(mountArgs, " ")
-		glog.Errorf("Mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s\n", err, mountCmd, args, string(output))
+		klog.Errorf("Mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s\n", err, mountCmd, args, string(output))
 		return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s\nOutput: %s\n",
 			err, mountCmd, args, string(output))
 	}
@@ -161,7 +161,7 @@ func (m *Mounter) doMount(mounterPath string, mountCmd string, source string, ta
 // systemd-runs (needed by Mount()) works.
 func detectSystemd() bool {
 	if _, err := exec.LookPath("systemd-run"); err != nil {
-		glog.V(2).Infof("Detected OS without systemd")
+		klog.V(2).Infof("Detected OS without systemd")
 		return false
 	}
 	// Try to run systemd-run --scope /bin/true, that should be enough
@@ -171,12 +171,12 @@ func detectSystemd() bool {
 	cmd := exec.Command("systemd-run", "--description=Kubernetes systemd probe", "--scope", "true")
 	output, err := cmd.CombinedOutput()
 	if err != nil {
-		glog.V(2).Infof("Cannot run systemd-run, assuming non-systemd OS")
-		glog.V(4).Infof("systemd-run failed with: %v", err)
-		glog.V(4).Infof("systemd-run output: %s", string(output))
+		klog.V(2).Infof("Cannot run systemd-run, assuming non-systemd OS")
+		klog.V(4).Infof("systemd-run failed with: %v", err)
+		klog.V(4).Infof("systemd-run output: %s", string(output))
 		return false
 	}
-	glog.V(2).Infof("Detected OS with systemd")
+	klog.V(2).Infof("Detected OS with systemd")
 	return true
 }
@@ -208,7 +208,7 @@ func addSystemdScope(systemdRunPath, mountName, command string, args []string) (
 
 // Unmount unmounts the target.
 func (mounter *Mounter) Unmount(target string) error {
-	glog.V(4).Infof("Unmounting %s", target)
+	klog.V(4).Infof("Unmounting %s", target)
 	command := exec.Command("umount", target)
 	output, err := command.CombinedOutput()
 	if err != nil {
@@ -290,7 +290,7 @@ func exclusiveOpenFailsOnDevice(pathname string) (bool, error) {
 	}
 
 	if !isDevice {
-		glog.Errorf("Path %q is not referring to a device.", pathname)
+		klog.Errorf("Path %q is not referring to a device.", pathname)
 		return false, nil
 	}
 	fd, errno := unix.Open(pathname, unix.O_RDONLY|unix.O_EXCL, 0)
@@ -319,11 +319,11 @@ func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (str
 func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) {
 	refs, err := mounter.GetMountRefs(mountPath)
 	if err != nil {
-		glog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err)
+		klog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err)
 		return "", err
 	}
 	if len(refs) == 0 {
-		glog.V(4).Infof("Directory %s is not mounted", mountPath)
+		klog.V(4).Infof("Directory %s is not mounted", mountPath)
 		return "", fmt.Errorf("directory %s is not mounted", mountPath)
 	}
 	basemountPath := path.Join(pluginDir, MountsInGlobalPDPath)
@@ -331,7 +331,7 @@ func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (str
 		if strings.HasPrefix(ref, basemountPath) {
 			volumeID, err := filepath.Rel(basemountPath, ref)
 			if err != nil {
-				glog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err)
+				klog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err)
 				return "", err
 			}
 			return volumeID, nil
@@ -437,26 +437,26 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
 	if !readOnly {
 		// Run fsck on the disk to fix repairable issues, only do this for volumes requested as rw.
-		glog.V(4).Infof("Checking for issues with fsck on disk: %s", source)
+		klog.V(4).Infof("Checking for issues with fsck on disk: %s", source)
 		args := []string{"-a", source}
 		out, err := mounter.Exec.Run("fsck", args...)
 		if err != nil {
 			ee, isExitError := err.(utilexec.ExitError)
 			switch {
 			case err == utilexec.ErrExecutableNotFound:
-				glog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.")
+				klog.Warningf("'fsck' not found on system; continuing mount without running 'fsck'.")
 			case isExitError && ee.ExitStatus() == fsckErrorsCorrected:
-				glog.Infof("Device %s has errors which were corrected by fsck.", source)
+				klog.Infof("Device %s has errors which were corrected by fsck.", source)
 			case isExitError && ee.ExitStatus() == fsckErrorsUncorrected:
 				return fmt.Errorf("'fsck' found errors on device %s but could not correct them: %s.", source, string(out))
 			case isExitError && ee.ExitStatus() > fsckErrorsUncorrected:
-				glog.Infof("`fsck` error %s", string(out))
+				klog.Infof("`fsck` error %s", string(out))
 			}
 		}
 	}
 
 	// Try to mount the disk
-	glog.V(4).Infof("Attempting to mount disk: %s %s %s", fstype, source, target)
+	klog.V(4).Infof("Attempting to mount disk: %s %s %s", fstype, source, target)
 	mountErr := mounter.Interface.Mount(source, target, fstype, options)
 	if mountErr != nil {
 		// Mount failed. This indicates either that the disk is unformatted or
@@ -485,14 +485,14 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
 					source,
 				}
 			}
-			glog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args)
+			klog.Infof("Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", source, fstype, args)
 			_, err := mounter.Exec.Run("mkfs."+fstype, args...)
 			if err == nil {
 				// the disk has been formatted successfully try to mount it again.
-				glog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target)
+				klog.Infof("Disk successfully formatted (mkfs): %s - %s %s", fstype, source, target)
 				return mounter.Interface.Mount(source, target, fstype, options)
 			}
-			glog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", source, fstype, target, options, err)
+			klog.Errorf("format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", source, fstype, target, options, err)
 			return err
 		} else {
 			// Disk is already formatted and failed to mount
@@ -511,10 +511,10 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
 
 // GetDiskFormat uses 'blkid' to see if the given disk is unformatted
 func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) {
 	args := []string{"-p", "-s", "TYPE", "-s", "PTTYPE", "-o", "export", disk}
-	glog.V(4).Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args)
+	klog.V(4).Infof("Attempting to determine if disk %q is formatted using blkid with args: (%v)", disk, args)
 	dataOut, err := mounter.Exec.Run("blkid", args...)
 	output := string(dataOut)
-	glog.V(4).Infof("Output: %q, err: %v", output, err)
+	klog.V(4).Infof("Output: %q, err: %v", output, err)
 
 	if err != nil {
 		if exit, ok := err.(utilexec.ExitError); ok {
@@ -526,7 +526,7 @@ func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) {
 				return "", nil
 			}
 		}
-		glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
+		klog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
 		return "", err
 	}
@@ -552,7 +552,7 @@ func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) {
 	}
 
 	if len(pttype) > 0 {
-		glog.V(4).Infof("Disk %s detected partition table type: %s", disk, pttype)
+		klog.V(4).Infof("Disk %s detected partition table type: %s", disk, pttype)
 		// Returns a special non-empty string as filesystem type, then kubelet
 		// will not format it.
return "unknown data, probably partitions", nil @@ -686,11 +686,11 @@ func doMakeRShared(path string, mountInfoFilename string) error { return err } if shared { - glog.V(4).Infof("Directory %s is already on a shared mount", path) + klog.V(4).Infof("Directory %s is already on a shared mount", path) return nil } - glog.V(2).Infof("Bind-mounting %q with shared mount propagation", path) + klog.V(2).Infof("Bind-mounting %q with shared mount propagation", path) // mount --bind /var/lib/kubelet /var/lib/kubelet if err := syscall.Mount(path, path, "" /*fstype*/, syscall.MS_BIND, "" /*data*/); err != nil { return fmt.Errorf("failed to bind-mount %s: %v", path, err) @@ -766,7 +766,7 @@ func prepareSubpathTarget(mounter Interface, subpath Subpath) (bool, string, err } if !notMount { // It's already mounted - glog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", bindPathTarget) + klog.V(5).Infof("Skipping bind-mounting subpath %s: already mounted", bindPathTarget) return true, bindPathTarget, nil } @@ -819,7 +819,7 @@ func doBindSubPath(mounter Interface, subpath Subpath) (hostPath string, err err if err != nil { return "", fmt.Errorf("error resolving symlinks in %q: %v", subpath.Path, err) } - glog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, newPath, subpath.VolumePath) + klog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, newPath, subpath.VolumePath) subpath.VolumePath = newVolumePath subpath.Path = newPath @@ -841,9 +841,9 @@ func doBindSubPath(mounter Interface, subpath Subpath) (hostPath string, err err defer func() { // Cleanup subpath on error if !success { - glog.V(4).Infof("doBindSubPath() failed for %q, cleaning up subpath", bindPathTarget) + klog.V(4).Infof("doBindSubPath() failed for %q, cleaning up subpath", bindPathTarget) if cleanErr := cleanSubPath(mounter, subpath); cleanErr != nil { - glog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr) + klog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr) } } }() @@ -853,13 +853,13 @@ func doBindSubPath(mounter Interface, subpath Subpath) (hostPath string, err err // Do the bind mount options := []string{"bind"} - glog.V(5).Infof("bind mounting %q at %q", mountSource, bindPathTarget) + klog.V(5).Infof("bind mounting %q at %q", mountSource, bindPathTarget) if err = mounter.Mount(mountSource, bindPathTarget, "" /*fstype*/, options); err != nil { return "", fmt.Errorf("error mounting %s: %s", subpath.Path, err) } success = true - glog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) + klog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) return bindPathTarget, nil } @@ -871,7 +871,7 @@ func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error { func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error { // scan /var/lib/kubelet/pods//volume-subpaths//* subPathDir := filepath.Join(podDir, containerSubPathDirectoryName, volumeName) - glog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir) + klog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir) containerDirs, err := ioutil.ReadDir(subPathDir) if err != nil { @@ -883,10 +883,10 @@ func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error for _, containerDir := range containerDirs { if !containerDir.IsDir() { - glog.V(4).Infof("Container file is not a directory: %s", containerDir.Name()) + klog.V(4).Infof("Container file is not a directory: %s", containerDir.Name()) continue } - 
glog.V(4).Infof("Cleaning up subpath mounts for container %s", containerDir.Name()) + klog.V(4).Infof("Cleaning up subpath mounts for container %s", containerDir.Name()) // scan /var/lib/kubelet/pods//volume-subpaths///* fullContainerDirPath := filepath.Join(subPathDir, containerDir.Name()) @@ -903,27 +903,27 @@ func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error if err := os.Remove(fullContainerDirPath); err != nil { return fmt.Errorf("error deleting %s: %s", fullContainerDirPath, err) } - glog.V(5).Infof("Removed %s", fullContainerDirPath) + klog.V(5).Infof("Removed %s", fullContainerDirPath) } // Whole pod volume subpaths have been cleaned up, remove its subpath directory. if err := os.Remove(subPathDir); err != nil { return fmt.Errorf("error deleting %s: %s", subPathDir, err) } - glog.V(5).Infof("Removed %s", subPathDir) + klog.V(5).Infof("Removed %s", subPathDir) // Remove entire subpath directory if it's the last one podSubPathDir := filepath.Join(podDir, containerSubPathDirectoryName) if err := os.Remove(podSubPathDir); err != nil && !os.IsExist(err) { return fmt.Errorf("error deleting %s: %s", podSubPathDir, err) } - glog.V(5).Infof("Removed %s", podSubPathDir) + klog.V(5).Infof("Removed %s", podSubPathDir) return nil } // doCleanSubPath tears down the single subpath bind mount func doCleanSubPath(mounter Interface, fullContainerDirPath, subPathIndex string) error { // process /var/lib/kubelet/pods//volume-subpaths/// - glog.V(4).Infof("Cleaning up subpath mounts for subpath %v", subPathIndex) + klog.V(4).Infof("Cleaning up subpath mounts for subpath %v", subPathIndex) fullSubPath := filepath.Join(fullContainerDirPath, subPathIndex) notMnt, err := IsNotMountPoint(mounter, fullSubPath) if err != nil { @@ -934,13 +934,13 @@ func doCleanSubPath(mounter Interface, fullContainerDirPath, subPathIndex string if err = mounter.Unmount(fullSubPath); err != nil { return fmt.Errorf("error unmounting %s: %s", fullSubPath, err) } - glog.V(5).Infof("Unmounted %s", fullSubPath) + klog.V(5).Infof("Unmounted %s", fullSubPath) } // Remove it *non*-recursively, just in case there were some hiccups. if err = os.Remove(fullSubPath); err != nil { return fmt.Errorf("error deleting %s: %s", fullSubPath, err) } - glog.V(5).Infof("Removed %s", fullSubPath) + klog.V(5).Infof("Removed %s", fullSubPath) return nil } @@ -972,7 +972,7 @@ func removeEmptyDirs(baseDir, endDir string) error { s, err := os.Stat(curDir) if err != nil { if os.IsNotExist(err) { - glog.V(5).Infof("curDir %q doesn't exist, skipping", curDir) + klog.V(5).Infof("curDir %q doesn't exist, skipping", curDir) continue } return fmt.Errorf("error stat %q: %v", curDir, err) @@ -983,12 +983,12 @@ func removeEmptyDirs(baseDir, endDir string) error { err = os.Remove(curDir) if os.IsExist(err) { - glog.V(5).Infof("Directory %q not empty, not removing", curDir) + klog.V(5).Infof("Directory %q not empty, not removing", curDir) break } else if err != nil { return fmt.Errorf("error removing directory %q: %v", curDir, err) } - glog.V(5).Infof("Removed directory %q", curDir) + klog.V(5).Infof("Removed directory %q", curDir) } return nil } @@ -1055,7 +1055,7 @@ func getMode(pathname string) (os.FileMode, error) { // and base must be either already resolved symlinks or thet will be resolved in // kubelet's mount namespace (in case it runs containerized). 
 func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
-	glog.V(4).Infof("Creating directory %q within base %q", pathname, base)
+	klog.V(4).Infof("Creating directory %q within base %q", pathname, base)
 
 	if !PathWithinBase(pathname, base) {
 		return fmt.Errorf("path %s is outside of allowed base %s", pathname, base)
@@ -1068,7 +1068,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
 		if s.IsDir() {
 			// The directory already exists. It can be outside of the parent,
 			// but there is no race-proof check.
-			glog.V(4).Infof("Directory %s already exists", pathname)
+			klog.V(4).Infof("Directory %s already exists", pathname)
 			return nil
 		}
 		return &os.PathError{Op: "mkdir", Path: pathname, Err: syscall.ENOTDIR}
@@ -1088,7 +1088,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
 		return fmt.Errorf("path %s is outside of allowed base %s", fullExistingPath, err)
 	}
 
-	glog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...))
+	klog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...))
 	parentFD, err := doSafeOpen(fullExistingPath, base)
 	if err != nil {
 		return fmt.Errorf("cannot open directory %s: %s", existingPath, err)
@@ -1097,12 +1097,12 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
 	defer func() {
 		if parentFD != -1 {
 			if err = syscall.Close(parentFD); err != nil {
-				glog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err)
+				klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err)
 			}
 		}
 		if childFD != -1 {
 			if err = syscall.Close(childFD); err != nil {
-				glog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", childFD, pathname, err)
+				klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", childFD, pathname, err)
 			}
 		}
 	}()
@@ -1112,7 +1112,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
 	// created directory into symlink.
 	for _, dir := range toCreate {
 		currentPath = filepath.Join(currentPath, dir)
-		glog.V(4).Infof("Creating %s", dir)
+		klog.V(4).Infof("Creating %s", dir)
 		err = syscall.Mkdirat(parentFD, currentPath, uint32(perm))
 		if err != nil {
 			return fmt.Errorf("cannot create directory %s: %s", currentPath, err)
@@ -1131,7 +1131,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
 	// and user either gets error or the file that it can already access.
 		if err = syscall.Close(parentFD); err != nil {
-			glog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err)
+			klog.V(4).Infof("Closing FD %v failed for safemkdir(%v): %v", parentFD, pathname, err)
 		}
 		parentFD = childFD
 		childFD = -1
@@ -1180,7 +1180,7 @@ func findExistingPrefix(base, pathname string) (string, []string, error) {
 	}
 	defer func() {
 		if err = syscall.Close(fd); err != nil {
-			glog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err)
+			klog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err)
 		}
 	}()
 	for i, dir := range dirs {
@@ -1194,7 +1194,7 @@ func findExistingPrefix(base, pathname string) (string, []string, error) {
 			return base, nil, err
 		}
 		if err = syscall.Close(fd); err != nil {
-			glog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err)
+			klog.V(4).Infof("Closing FD %v failed for findExistingPrefix(%v): %v", fd, pathname, err)
 		}
 		fd = childFD
 		currentPath = filepath.Join(currentPath, dir)
@@ -1226,7 +1226,7 @@ func doSafeOpen(pathname string, base string) (int, error) {
 	defer func() {
 		if parentFD != -1 {
 			if err = syscall.Close(parentFD); err != nil {
-				glog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", parentFD, pathname, err)
+				klog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", parentFD, pathname, err)
 			}
 		}
 	}()
@@ -1235,7 +1235,7 @@ func doSafeOpen(pathname string, base string) (int, error) {
 	defer func() {
 		if childFD != -1 {
 			if err = syscall.Close(childFD); err != nil {
-				glog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", childFD, pathname, err)
+				klog.V(4).Infof("Closing FD %v failed for safeopen(%v): %v", childFD, pathname, err)
 			}
 		}
 	}()
@@ -1250,7 +1250,7 @@ func doSafeOpen(pathname string, base string) (int, error) {
 		return -1, fmt.Errorf("path %s is outside of allowed base %s", currentPath, base)
 	}
 
-	glog.V(5).Infof("Opening path %s", currentPath)
+	klog.V(5).Infof("Opening path %s", currentPath)
 	childFD, err = syscall.Openat(parentFD, seg, openFDFlags, 0)
 	if err != nil {
 		return -1, fmt.Errorf("cannot open %s: %s", currentPath, err)
diff --git a/pkg/util/mount/mount_linux_test.go b/pkg/util/mount/mount_linux_test.go
index 417592602dabf..89dbfdf041c6c 100644
--- a/pkg/util/mount/mount_linux_test.go
+++ b/pkg/util/mount/mount_linux_test.go
@@ -32,7 +32,7 @@ import (
 
 	"k8s.io/utils/exec"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 func TestReadProcMountsFrom(t *testing.T) {
@@ -634,7 +634,7 @@ func TestSafeMakeDir(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		glog.V(4).Infof("test %q", test.name)
+		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "safe-make-dir-"+test.name+"-")
 		if err != nil {
 			t.Fatalf(err.Error())
@@ -646,7 +646,7 @@ func TestSafeMakeDir(t *testing.T) {
 			t.Errorf("test %q: %s", test.name, err)
 		}
 		if err != nil {
-			glog.Infof("got error: %s", err)
+			klog.Infof("got error: %s", err)
 		}
 		if err == nil && test.expectError {
 			t.Errorf("test %q: expected error, got none", test.name)
@@ -792,7 +792,7 @@ func TestRemoveEmptyDirs(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		glog.V(4).Infof("test %q", test.name)
+		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "remove-empty-dirs-"+test.name+"-")
 		if err != nil {
 			t.Fatalf(err.Error())
@@ -963,7 +963,7 @@ func TestCleanSubPaths(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		glog.V(4).Infof("test %q", test.name)
+		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "clean-subpaths-"+test.name+"-")
 		if err != nil {
 			t.Fatalf(err.Error())
@@ -1219,7 +1219,7 @@ func TestBindSubPath(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		glog.V(4).Infof("test %q", test.name)
+		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "bind-subpath-"+test.name+"-")
 		if err != nil {
 			t.Fatalf(err.Error())
@@ -1651,7 +1651,7 @@ func TestSafeOpen(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		glog.V(4).Infof("test %q", test.name)
+		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "safe-open-"+test.name+"-")
 		if err != nil {
 			t.Fatalf(err.Error())
@@ -1664,7 +1664,7 @@ func TestSafeOpen(t *testing.T) {
 			t.Errorf("test %q: %s", test.name, err)
 		}
 		if err != nil {
-			glog.Infof("got error: %s", err)
+			klog.Infof("got error: %s", err)
 		}
 		if err == nil && test.expectError {
 			t.Errorf("test %q: expected error, got none", test.name)
@@ -1798,7 +1798,7 @@ func TestFindExistingPrefix(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		glog.V(4).Infof("test %q", test.name)
+		klog.V(4).Infof("test %q", test.name)
 		base, err := ioutil.TempDir("", "find-prefix-"+test.name+"-")
 		if err != nil {
 			t.Fatalf(err.Error())
@@ -1810,7 +1810,7 @@ func TestFindExistingPrefix(t *testing.T) {
 			t.Errorf("test %q: %s", test.name, err)
 		}
 		if err != nil {
-			glog.Infof("got error: %s", err)
+			klog.Infof("got error: %s", err)
 		}
 		if err == nil && test.expectError {
 			t.Errorf("test %q: expected error, got none", test.name)
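The test hunks above log through klog at non-zero verbosity, which only produces output if klog's flags are registered on the test binary. A common way to do that is a one-time registration in the test file's init(); the sketch below is illustrative (the package clause is an assumption, and this is a general pattern rather than a hunk from this patch):

	package mount

	import "k8s.io/klog"

	func init() {
		// Register -v, -logtostderr, etc. so `go test -args -v=5` controls klog output.
		klog.InitFlags(nil)
	}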
diff --git a/pkg/util/mount/mount_windows.go b/pkg/util/mount/mount_windows.go
index 535803abf502e..dfdcdfc337714 100644
--- a/pkg/util/mount/mount_windows.go
+++ b/pkg/util/mount/mount_windows.go
@@ -28,7 +28,7 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	utilfile "k8s.io/kubernetes/pkg/util/file"
 )
@@ -54,7 +54,7 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio
 	target = normalizeWindowsPath(target)
 
 	if source == "tmpfs" {
-		glog.V(3).Infof("azureMount: mounting source (%q), target (%q), with options (%q)", source, target, options)
+		klog.V(3).Infof("azureMount: mounting source (%q), target (%q), with options (%q)", source, target, options)
 		return os.MkdirAll(target, 0755)
 	}
@@ -63,7 +63,7 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio
 		return err
 	}
 
-	glog.V(4).Infof("azureMount: mount options(%q) source:%q, target:%q, fstype:%q, begin to mount",
+	klog.V(4).Infof("azureMount: mount options(%q) source:%q, target:%q, fstype:%q, begin to mount",
 		options, source, target, fstype)
 	bindSource := ""
@@ -73,7 +73,7 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio
 		bindSource = normalizeWindowsPath(source)
 	} else {
 		if len(options) < 2 {
-			glog.Warningf("azureMount: mount options(%q) command number(%d) less than 2, source:%q, target:%q, skip mounting",
+			klog.Warningf("azureMount: mount options(%q) command number(%d) less than 2, source:%q, target:%q, skip mounting",
 				options, len(options), source, target)
 			return nil
 		}
@@ -102,7 +102,7 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio
 	}
 
 	if output, err := exec.Command("cmd", "/c", "mklink", "/D", target, bindSource).CombinedOutput(); err != nil {
-		glog.Errorf("mklink failed: %v, source(%q) target(%q) output: %q", err, bindSource, target, string(output))
+		klog.Errorf("mklink failed: %v, source(%q) target(%q) output: %q", err, bindSource, target, string(output))
 		return err
 	}
@@ -111,10 +111,10 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio
 
 // Unmount unmounts the target.
 func (mounter *Mounter) Unmount(target string) error {
-	glog.V(4).Infof("azureMount: Unmount target (%q)", target)
+	klog.V(4).Infof("azureMount: Unmount target (%q)", target)
 	target = normalizeWindowsPath(target)
 	if output, err := exec.Command("cmd", "/c", "rmdir", target).CombinedOutput(); err != nil {
-		glog.Errorf("rmdir failed: %v, output: %q", err, string(output))
+		klog.Errorf("rmdir failed: %v, output: %q", err, string(output))
 		return err
 	}
 	return nil
@@ -168,7 +168,7 @@ func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (str
 func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (string, error) {
 	refs, err := mounter.GetMountRefs(mountPath)
 	if err != nil {
-		glog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err)
+		klog.V(4).Infof("GetMountRefs failed for mount path %q: %v", mountPath, err)
 		return "", err
 	}
 	if len(refs) == 0 {
@@ -179,7 +179,7 @@ func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (str
 		if strings.Contains(ref, basemountPath) {
 			volumeID, err := filepath.Rel(normalizeWindowsPath(basemountPath), ref)
 			if err != nil {
-				glog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err)
+				klog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err)
 				return "", err
 			}
 			return volumeID, nil
@@ -362,10 +362,10 @@ func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error {
 func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {
 	// Try to mount the disk
-	glog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, source, target)
+	klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, source, target)
 
 	if err := ValidateDiskNumber(source); err != nil {
-		glog.Errorf("diskMount: formatAndMount failed, err: %v", err)
+		klog.Errorf("diskMount: formatAndMount failed, err: %v", err)
 		return err
 	}
@@ -380,7 +380,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
 	if output, err := mounter.Exec.Run("powershell", "/c", cmd); err != nil {
 		return fmt.Errorf("diskMount: format disk failed, error: %v, output: %q", err, string(output))
 	}
-	glog.V(4).Infof("diskMount: Disk successfully formatted, disk: %q, fstype: %q", source, fstype)
+	klog.V(4).Infof("diskMount: Disk successfully formatted, disk: %q, fstype: %q", source, fstype)
 
 	driveLetter, err := getDriveLetterByDiskNumber(source, mounter.Exec)
 	if err != nil {
@@ -388,9 +388,9 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string,
 	}
 	driverPath := driveLetter + ":"
 	target = normalizeWindowsPath(target)
-	glog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, driverPath, target)
+	klog.V(4).Infof("Attempting to formatAndMount disk: %s %s %s", fstype, driverPath, target)
 	if output, err := mounter.Exec.Run("cmd", "/c", "mklink", "/D", target, driverPath); err != nil {
-		glog.Errorf("mklink failed: %v, output: %q", err, string(output))
+		klog.Errorf("mklink failed: %v, output: %q", err, string(output))
 		return err
 	}
 	return nil
@@ -499,7 +499,7 @@ func (mounter *Mounter) SafeMakeDir(subdir string, base string, perm os.FileMode
 }
 
 func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
-	glog.V(4).Infof("Creating directory %q within base %q", pathname, base)
+	klog.V(4).Infof("Creating directory %q within base %q", pathname, base)
 
 	if !PathWithinBase(pathname, base) {
 		return fmt.Errorf("path %s is outside of allowed base %s",
 			pathname, base)
@@ -512,7 +512,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
 		if s.IsDir() {
 			// The directory already exists. It can be outside of the parent,
 			// but there is no race-proof check.
-			glog.V(4).Infof("Directory %s already exists", pathname)
+			klog.V(4).Infof("Directory %s already exists", pathname)
 			return nil
 		}
 		return &os.PathError{Op: "mkdir", Path: pathname, Err: syscall.ENOTDIR}
@@ -547,13 +547,13 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
 		return err
 	}
 
-	glog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...))
+	klog.V(4).Infof("%q already exists, %q to create", fullExistingPath, filepath.Join(toCreate...))
 	currentPath := fullExistingPath
 	// create the directories one by one, making sure nobody can change
 	// created directory into symlink by lock that directory immediately
 	for _, dir := range toCreate {
 		currentPath = filepath.Join(currentPath, dir)
-		glog.V(4).Infof("Creating %s", dir)
+		klog.V(4).Infof("Creating %s", dir)
 		if err := os.Mkdir(currentPath, perm); err != nil {
 			return fmt.Errorf("cannot create directory %s: %s", currentPath, err)
 		}
diff --git a/pkg/util/mount/nsenter_mount.go b/pkg/util/mount/nsenter_mount.go
index 330883d400e78..ff2eceaf7c357 100644
--- a/pkg/util/mount/nsenter_mount.go
+++ b/pkg/util/mount/nsenter_mount.go
@@ -25,8 +25,8 @@ import (
 	"strings"
 	"syscall"
 
-	"github.com/golang/glog"
 	"golang.org/x/sys/unix"
+	"k8s.io/klog"
 	utilfile "k8s.io/kubernetes/pkg/util/file"
 	"k8s.io/kubernetes/pkg/util/nsenter"
 )
@@ -77,11 +77,11 @@ func (n *NsenterMounter) Mount(source string, target string, fstype string, opti
 // doNsenterMount nsenters the host's mount namespace and performs the
 // requested mount.
 func (n *NsenterMounter) doNsenterMount(source, target, fstype string, options []string) error {
-	glog.V(5).Infof("nsenter mount %s %s %s %v", source, target, fstype, options)
+	klog.V(5).Infof("nsenter mount %s %s %s %v", source, target, fstype, options)
 	cmd, args := n.makeNsenterArgs(source, target, fstype, options)
 	outputBytes, err := n.ne.Exec(cmd, args).CombinedOutput()
 	if len(outputBytes) != 0 {
-		glog.V(5).Infof("Output of mounting %s to %s: %v", source, target, string(outputBytes))
+		klog.V(5).Infof("Output of mounting %s to %s: %v", source, target, string(outputBytes))
 	}
 	return err
 }
@@ -131,10 +131,10 @@ func (n *NsenterMounter) Unmount(target string) error {
 	// No need to execute systemd-run here, it's enough that unmount is executed
 	// in the host's mount namespace. It will finish appropriate fuse daemon(s)
 	// running in any scope.
-	glog.V(5).Infof("nsenter unmount args: %v", args)
+	klog.V(5).Infof("nsenter unmount args: %v", args)
 	outputBytes, err := n.ne.Exec("umount", args).CombinedOutput()
 	if len(outputBytes) != 0 {
-		glog.V(5).Infof("Output of unmounting %s: %v", target, string(outputBytes))
+		klog.V(5).Infof("Output of unmounting %s: %v", target, string(outputBytes))
 	}
 	return err
 }
@@ -163,7 +163,7 @@ func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) {
 
 	// Check the directory exists
 	if _, err = os.Stat(file); os.IsNotExist(err) {
-		glog.V(5).Infof("findmnt: directory %s does not exist", file)
+		klog.V(5).Infof("findmnt: directory %s does not exist", file)
 		return true, err
 	}
@@ -178,10 +178,10 @@ func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) {
 	// Also add fstype output to make sure that the output of target file will give the full path
 	// TODO: Need more refactoring for this function. Track the solution with issue #26996
 	args := []string{"-o", "target,fstype", "--noheadings", "--first-only", "--target", resolvedFile}
-	glog.V(5).Infof("nsenter findmnt args: %v", args)
+	klog.V(5).Infof("nsenter findmnt args: %v", args)
 	out, err := n.ne.Exec("findmnt", args).CombinedOutput()
 	if err != nil {
-		glog.V(2).Infof("Failed findmnt command for path %s: %s %v", resolvedFile, out, err)
+		klog.V(2).Infof("Failed findmnt command for path %s: %s %v", resolvedFile, out, err)
 		// Different operating systems behave differently for paths which are not mount points.
 		// On older versions (e.g. 2.20.1) we'd get error, on newer ones (e.g. 2.26.2) we'd get "/".
 		// It's safer to assume that it's not a mount point.
@@ -192,13 +192,13 @@ func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) {
 		return false, err
 	}
 
-	glog.V(5).Infof("IsLikelyNotMountPoint findmnt output for path %s: %v:", resolvedFile, mountTarget)
+	klog.V(5).Infof("IsLikelyNotMountPoint findmnt output for path %s: %v:", resolvedFile, mountTarget)
 
 	if mountTarget == resolvedFile {
-		glog.V(5).Infof("IsLikelyNotMountPoint: %s is a mount point", resolvedFile)
+		klog.V(5).Infof("IsLikelyNotMountPoint: %s is a mount point", resolvedFile)
 		return false, nil
 	}
 
-	glog.V(5).Infof("IsLikelyNotMountPoint: %s is not a mount point", resolvedFile)
+	klog.V(5).Infof("IsLikelyNotMountPoint: %s is not a mount point", resolvedFile)
 	return true, nil
 }
@@ -375,7 +375,7 @@ func doNsEnterBindSubPath(mounter *NsenterMounter, subpath Subpath) (hostPath st
 	if err != nil {
 		return "", fmt.Errorf("error resolving symlinks in %q: %v", subpath.Path, err)
 	}
-	glog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, evaluatedHostSubpath, subpath.VolumePath)
+	klog.V(5).Infof("doBindSubPath %q (%q) for volumepath %q", subpath.Path, evaluatedHostSubpath, subpath.VolumePath)
 	subpath.VolumePath = mounter.ne.KubeletPath(evaluatedHostVolumePath)
 	subpath.Path = mounter.ne.KubeletPath(evaluatedHostSubpath)
@@ -398,9 +398,9 @@ func doNsEnterBindSubPath(mounter *NsenterMounter, subpath Subpath) (hostPath st
 	defer func() {
 		// Cleanup subpath on error
 		if !success {
-			glog.V(4).Infof("doNsEnterBindSubPath() failed for %q, cleaning up subpath", bindPathTarget)
+			klog.V(4).Infof("doNsEnterBindSubPath() failed for %q, cleaning up subpath", bindPathTarget)
 			if cleanErr := cleanSubPath(mounter, subpath); cleanErr != nil {
-				glog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr)
+				klog.Errorf("Failed to clean subpath %q: %v", bindPathTarget, cleanErr)
 			}
 		}
 	}()
@@ -408,7 +408,7 @@ func doNsEnterBindSubPath(mounter *NsenterMounter, subpath Subpath) (hostPath st
*NsenterMounter, subpath Subpath) (hostPath st // Leap of faith: optimistically expect that nobody has modified previously // expanded evalSubPath with evil symlinks and bind-mount it. // Mount is done on the host! don't use kubelet path! - glog.V(5).Infof("bind mounting %q at %q", evaluatedHostSubpath, bindPathTarget) + klog.V(5).Infof("bind mounting %q at %q", evaluatedHostSubpath, bindPathTarget) if err = mounter.Mount(evaluatedHostSubpath, bindPathTarget, "" /*fstype*/, []string{"bind"}); err != nil { return "", fmt.Errorf("error mounting %s: %s", evaluatedHostSubpath, err) } @@ -421,7 +421,7 @@ func doNsEnterBindSubPath(mounter *NsenterMounter, subpath Subpath) (hostPath st } success = true - glog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) + klog.V(3).Infof("Bound SubPath %s into %s", subpath.Path, bindPathTarget) return bindPathTarget, nil } diff --git a/pkg/util/netsh/BUILD b/pkg/util/netsh/BUILD index 980f16ba9add3..a356f094a9e39 100644 --- a/pkg/util/netsh/BUILD +++ b/pkg/util/netsh/BUILD @@ -14,7 +14,7 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/util/netsh", deps = [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/util/netsh/netsh.go b/pkg/util/netsh/netsh.go index 30b66536b1dbc..a9f1731b45dc7 100644 --- a/pkg/util/netsh/netsh.go +++ b/pkg/util/netsh/netsh.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" utilexec "k8s.io/utils/exec" ) @@ -68,7 +68,7 @@ func New(exec utilexec.Interface) Interface { // EnsurePortProxyRule checks if the specified redirect exists, if not creates it. func (runner *runner) EnsurePortProxyRule(args []string) (bool, error) { - glog.V(4).Infof("running netsh interface portproxy add v4tov4 %v", args) + klog.V(4).Infof("running netsh interface portproxy add v4tov4 %v", args) out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput() if err == nil { @@ -87,7 +87,7 @@ func (runner *runner) EnsurePortProxyRule(args []string) (bool, error) { // DeletePortProxyRule deletes the specified portproxy rule. If the rule did not exist, return error. 
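(Editor's aside: the netsh runner methods above and below all share one run-and-capture shape: log the invocation at V(4), execute through k8s.io/utils/exec, and keep the combined output for error reporting. A sketch of that shape, lifted out of the Windows-only context so it runs anywhere; ensureRule and the echo command are illustrative stand-ins, not code from the patch.)

```go
package main

import (
	"strings"

	"k8s.io/klog"
	utilexec "k8s.io/utils/exec"
)

// ensureRule distills the runner pattern: log the invocation, run the
// command, and return the combined output so callers can fold it into
// their error reporting.
func ensureRule(exec utilexec.Interface, cmd string, args ...string) (string, error) {
	klog.V(4).Infof("running %s %v", cmd, args)
	out, err := exec.Command(cmd, args...).CombinedOutput()
	return strings.TrimSpace(string(out)), err
}

func main() {
	out, err := ensureRule(utilexec.New(), "echo", "portproxy", "rule")
	if err != nil {
		klog.Errorf("command failed: %v, output: %q", err, out)
		return
	}
	klog.Infof("output: %q", out)
}
```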
func (runner *runner) DeletePortProxyRule(args []string) error { - glog.V(4).Infof("running netsh interface portproxy delete v4tov4 %v", args) + klog.V(4).Infof("running netsh interface portproxy delete v4tov4 %v", args) out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput() if err == nil { @@ -116,12 +116,12 @@ func (runner *runner) EnsureIPAddress(args []string, ip net.IP) (bool, error) { exists, _ := checkIPExists(ipToCheck, argsShowAddress, runner) if exists == true { - glog.V(4).Infof("not adding IP address %q as it already exists", ipToCheck) + klog.V(4).Infof("not adding IP address %q as it already exists", ipToCheck) return true, nil } // IP Address is not already added, add it now - glog.V(4).Infof("running netsh interface ipv4 add address %v", args) + klog.V(4).Infof("running netsh interface ipv4 add address %v", args) out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput() if err == nil { @@ -129,7 +129,7 @@ func (runner *runner) EnsureIPAddress(args []string, ip net.IP) (bool, error) { // Query all the IP addresses and see if the one we added is present // PS: We are using netsh interface ipv4 show address here to query all the IP addresses, instead of // querying net.InterfaceAddrs() as it returns the IP address as soon as it is added even though it is uninitialized - glog.V(3).Infof("Waiting until IP: %v is added to the network adapter", ipToCheck) + klog.V(3).Infof("Waiting until IP: %v is added to the network adapter", ipToCheck) for { if exists, _ := checkIPExists(ipToCheck, argsShowAddress, runner); exists { return true, nil @@ -149,7 +149,7 @@ func (runner *runner) EnsureIPAddress(args []string, ip net.IP) (bool, error) { // DeleteIPAddress checks if the specified IP address is present and, if so, deletes it. 
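(Editor's aside: the checkIPExists hunk just below scans the dump from `netsh interface ipv4 show address` line by line. A hedged approximation of that scan with a fabricated dump; containsIP simplifies the real helper, which also extracts and compares the parsed address.)

```go
package main

import (
	"fmt"
	"strings"
)

// containsIP walks the netsh address dump line by line and looks for the
// target address on lines that carry an IP field.
func containsIP(dump, ip string) bool {
	for _, line := range strings.Split(dump, "\n") {
		if strings.Contains(line, "IP") && strings.Contains(line, ip) {
			return true
		}
	}
	return false
}

func main() {
	dump := "Configuration for interface \"Ethernet\"\n    IP Address: 10.0.0.4\n"
	fmt.Println(containsIP(dump, "10.0.0.4")) // true
	fmt.Println(containsIP(dump, "10.0.0.5")) // false
}
```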
func (runner *runner) DeleteIPAddress(args []string) error { - glog.V(4).Infof("running netsh interface ipv4 delete address %v", args) + klog.V(4).Infof("running netsh interface ipv4 delete address %v", args) out, err := runner.exec.Command(cmdNetsh, args...).CombinedOutput() if err == nil { @@ -187,7 +187,7 @@ func checkIPExists(ipToCheck string, args []string, runner *runner) (bool, error return false, err } ipAddressString := string(ipAddress[:]) - glog.V(3).Infof("Searching for IP: %v in IP dump: %v", ipToCheck, ipAddressString) + klog.V(3).Infof("Searching for IP: %v in IP dump: %v", ipToCheck, ipAddressString) showAddressArray := strings.Split(ipAddressString, "\n") for _, showAddress := range showAddressArray { if strings.Contains(showAddress, "IP") { diff --git a/pkg/util/node/BUILD b/pkg/util/node/BUILD index 5a6dfba5f2bbc..7e1696cefea32 100644 --- a/pkg/util/node/BUILD +++ b/pkg/util/node/BUILD @@ -18,7 +18,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/util/node/node.go b/pkg/util/node/node.go index 76bf808ccd6d5..ff50385515076 100644 --- a/pkg/util/node/node.go +++ b/pkg/util/node/node.go @@ -24,7 +24,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -100,12 +100,12 @@ func GetNodeIP(client clientset.Interface, hostname string) net.IP { var nodeIP net.IP node, err := client.CoreV1().Nodes().Get(hostname, metav1.GetOptions{}) if err != nil { - glog.Warningf("Failed to retrieve node info: %v", err) + klog.Warningf("Failed to retrieve node info: %v", err) return nil } nodeIP, err = GetNodeHostIP(node) if err != nil { - glog.Warningf("Failed to retrieve node IP: %v", err) + klog.Warningf("Failed to retrieve node IP: %v", err) return nil } return nodeIP diff --git a/pkg/util/nsenter/BUILD b/pkg/util/nsenter/BUILD index 05765dcc9be68..9a94fae10156f 100644 --- a/pkg/util/nsenter/BUILD +++ b/pkg/util/nsenter/BUILD @@ -24,7 +24,7 @@ go_library( "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ diff --git a/pkg/util/nsenter/exec.go b/pkg/util/nsenter/exec.go index 201f1270c7725..134497f0a752a 100644 --- a/pkg/util/nsenter/exec.go +++ b/pkg/util/nsenter/exec.go @@ -23,7 +23,7 @@ import ( "fmt" "path/filepath" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/utils/exec" ) @@ -49,7 +49,7 @@ func NewNsenterExecutor(hostRootFsPath string, executor exec.Interface) *Executo func (nsExecutor *Executor) Command(cmd string, args ...string) exec.Cmd { fullArgs := append([]string{fmt.Sprintf("--mount=%s", nsExecutor.hostProcMountNsPath), "--"}, append([]string{cmd}, args...)...) - glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + klog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) return nsExecutor.executor.Command(nsenterPath, fullArgs...) 
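(Editor's aside: the nsenter Exec helpers in this stretch of the patch all assemble the same argument list. A sketch of that assembly; it assumes the host root filesystem is visible at /rootfs, as the kubelet's Nsenter wrapper does, and actually running it requires privileges.)

```go
package main

import (
	"fmt"
	"os/exec"

	"k8s.io/klog"
)

// nsenterArgs enters the mount namespace of PID 1 as seen through the host
// rootfs, then runs the requested command after the "--" separator.
func nsenterArgs(hostRootFs, cmd string, args ...string) []string {
	full := []string{fmt.Sprintf("--mount=%s/proc/1/ns/mnt", hostRootFs), "--", cmd}
	return append(full, args...)
}

func main() {
	args := nsenterArgs("/rootfs", "findmnt", "-o", "target,fstype", "--noheadings", "--target", "/var/lib/kubelet")
	klog.V(5).Infof("Running nsenter command: nsenter %v", args)
	// Requires root and an actual /rootfs bind mount; expect an error otherwise.
	out, err := exec.Command("nsenter", args...).CombinedOutput()
	if err != nil {
		klog.Errorf("nsenter failed: %v, output: %q", err, string(out))
		return
	}
	fmt.Print(string(out))
}
```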
} @@ -57,7 +57,7 @@ func (nsExecutor *Executor) Command(cmd string, args ...string) exec.Cmd { func (nsExecutor *Executor) CommandContext(ctx context.Context, cmd string, args ...string) exec.Cmd { fullArgs := append([]string{fmt.Sprintf("--mount=%s", nsExecutor.hostProcMountNsPath), "--"}, append([]string{cmd}, args...)...) - glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + klog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) return nsExecutor.executor.CommandContext(ctx, nsenterPath, fullArgs...) } diff --git a/pkg/util/nsenter/nsenter.go b/pkg/util/nsenter/nsenter.go index e928a57ac9fe0..56361e7846e7e 100644 --- a/pkg/util/nsenter/nsenter.go +++ b/pkg/util/nsenter/nsenter.go @@ -28,7 +28,7 @@ import ( "k8s.io/utils/exec" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -127,7 +127,7 @@ func (ne *Nsenter) Exec(cmd string, args []string) exec.Cmd { hostProcMountNsPath := filepath.Join(ne.hostRootFsPath, mountNsPath) fullArgs := append([]string{fmt.Sprintf("--mount=%s", hostProcMountNsPath), "--"}, append([]string{ne.AbsHostPath(cmd)}, args...)...) - glog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) + klog.V(5).Infof("Running nsenter command: %v %v", nsenterPath, fullArgs) return ne.executor.Command(nsenterPath, fullArgs...) } @@ -170,7 +170,7 @@ func (ne *Nsenter) EvalSymlinks(pathname string, mustExist bool) (string, error) } outBytes, err := ne.Exec("realpath", args).CombinedOutput() if err != nil { - glog.Infof("failed to resolve symbolic links on %s: %v", pathname, err) + klog.Infof("failed to resolve symbolic links on %s: %v", pathname, err) return "", err } return strings.TrimSpace(string(outBytes)), nil diff --git a/pkg/util/oom/BUILD b/pkg/util/oom/BUILD index 712fc9061a6e9..b27ec22804ebc 100644 --- a/pkg/util/oom/BUILD +++ b/pkg/util/oom/BUILD @@ -19,7 +19,7 @@ go_library( deps = select({ "@io_bazel_rules_go//go/platform:linux": [ "//pkg/kubelet/cm/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], }), diff --git a/pkg/util/oom/oom_linux.go b/pkg/util/oom/oom_linux.go index ad6d5c264b556..243c30d0cc40c 100644 --- a/pkg/util/oom/oom_linux.go +++ b/pkg/util/oom/oom_linux.go @@ -29,7 +29,7 @@ import ( cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util" - "github.com/golang/glog" + "k8s.io/klog" ) func NewOOMAdjuster() *OOMAdjuster { @@ -62,24 +62,24 @@ func applyOOMScoreAdj(pid int, oomScoreAdj int) error { maxTries := 2 oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj") value := strconv.Itoa(oomScoreAdj) - glog.V(4).Infof("attempting to set %q to %q", oomScoreAdjPath, value) + klog.V(4).Infof("attempting to set %q to %q", oomScoreAdjPath, value) var err error for i := 0; i < maxTries; i++ { err = ioutil.WriteFile(oomScoreAdjPath, []byte(value), 0700) if err != nil { if os.IsNotExist(err) { - glog.V(2).Infof("%q does not exist", oomScoreAdjPath) + klog.V(2).Infof("%q does not exist", oomScoreAdjPath) return os.ErrNotExist } - glog.V(3).Info(err) + klog.V(3).Info(err) time.Sleep(100 * time.Millisecond) continue } return nil } if err != nil { - glog.V(2).Infof("failed to set %q to %q: %v", oomScoreAdjPath, value, err) + klog.V(2).Infof("failed to set %q to %q: %v", oomScoreAdjPath, value, err) } return err } @@ -97,20 +97,20 @@ func (oomAdjuster *OOMAdjuster) applyOOMScoreAdjContainer(cgroupName string, oom return os.ErrNotExist } continueAdjusting = true - glog.V(10).Infof("Error getting process 
list for cgroup %s: %+v", cgroupName, err) + klog.V(10).Infof("Error getting process list for cgroup %s: %+v", cgroupName, err) } else if len(pidList) == 0 { - glog.V(10).Infof("Pid list is empty") + klog.V(10).Infof("Pid list is empty") continueAdjusting = true } else { for _, pid := range pidList { if !adjustedProcessSet[pid] { - glog.V(10).Infof("pid %d needs to be set", pid) + klog.V(10).Infof("pid %d needs to be set", pid) if err = oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err == nil { adjustedProcessSet[pid] = true } else if err == os.ErrNotExist { continue } else { - glog.V(10).Infof("cannot adjust oom score for pid %d - %v", pid, err) + klog.V(10).Infof("cannot adjust oom score for pid %d - %v", pid, err) continueAdjusting = true } // Processes can come and go while we try to apply oom score adjust value. So ignore errors here. diff --git a/pkg/util/procfs/BUILD b/pkg/util/procfs/BUILD index 1c02e42970111..223f2265fb6cf 100644 --- a/pkg/util/procfs/BUILD +++ b/pkg/util/procfs/BUILD @@ -19,7 +19,7 @@ go_library( deps = select({ "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], }), diff --git a/pkg/util/procfs/procfs_linux.go b/pkg/util/procfs/procfs_linux.go index 0741b617efe80..46059c33e77ed 100644 --- a/pkg/util/procfs/procfs_linux.go +++ b/pkg/util/procfs/procfs_linux.go @@ -32,8 +32,8 @@ import ( "syscall" "unicode" - "github.com/golang/glog" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" ) type ProcFS struct{} @@ -137,7 +137,7 @@ func getPids(re *regexp.Regexp) []int { cmdline, err := ioutil.ReadFile(filepath.Join("/proc", entry.Name(), "cmdline")) if err != nil { - glog.V(4).Infof("Error reading file %s: %+v", filepath.Join("/proc", entry.Name(), "cmdline"), err) + klog.V(4).Infof("Error reading file %s: %+v", filepath.Join("/proc", entry.Name(), "cmdline"), err) continue } diff --git a/pkg/util/resizefs/BUILD b/pkg/util/resizefs/BUILD index 9d1cf01487f07..04f11e1fc0c5a 100644 --- a/pkg/util/resizefs/BUILD +++ b/pkg/util/resizefs/BUILD @@ -23,7 +23,7 @@ go_library( ], "@io_bazel_rules_go//go/platform:linux": [ "//pkg/util/mount:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ "//pkg/util/mount:go_default_library", diff --git a/pkg/util/resizefs/resizefs_linux.go b/pkg/util/resizefs/resizefs_linux.go index 518eba2bb6032..4eabdb1ddc0b1 100644 --- a/pkg/util/resizefs/resizefs_linux.go +++ b/pkg/util/resizefs/resizefs_linux.go @@ -21,7 +21,7 @@ package resizefs import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" ) @@ -50,7 +50,7 @@ func (resizefs *ResizeFs) Resize(devicePath string, deviceMountPath string) (boo return false, nil } - glog.V(3).Infof("ResizeFS.Resize - Expanding mounted volume %s", devicePath) + klog.V(3).Infof("ResizeFS.Resize - Expanding mounted volume %s", devicePath) switch format { case "ext3", "ext4": return resizefs.extResize(devicePath) @@ -63,7 +63,7 @@ func (resizefs *ResizeFs) Resize(devicePath string, deviceMountPath string) (boo func (resizefs *ResizeFs) extResize(devicePath string) (bool, error) { output, err := resizefs.mounter.Exec.Run("resize2fs", devicePath) if err == nil { - glog.V(2).Infof("Device %s resized successfully", devicePath) + klog.V(2).Infof("Device %s resized 
successfully", devicePath) return true, nil } @@ -77,7 +77,7 @@ func (resizefs *ResizeFs) xfsResize(deviceMountPath string) (bool, error) { output, err := resizefs.mounter.Exec.Run("xfs_growfs", args...) if err == nil { - glog.V(2).Infof("Device %s resized successfully", deviceMountPath) + klog.V(2).Infof("Device %s resized successfully", deviceMountPath) return true, nil } diff --git a/pkg/volume/BUILD b/pkg/volume/BUILD index 4d77d6073264b..ba5d04ba07801 100644 --- a/pkg/volume/BUILD +++ b/pkg/volume/BUILD @@ -32,7 +32,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/awsebs/BUILD b/pkg/volume/awsebs/BUILD index e1c1fa595fa47..27213a3b62221 100644 --- a/pkg/volume/awsebs/BUILD +++ b/pkg/volume/awsebs/BUILD @@ -31,7 +31,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -54,7 +54,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/awsebs/attacher.go b/pkg/volume/awsebs/attacher.go index 6d325b978cf2a..a4a320633c2fd 100644 --- a/pkg/volume/awsebs/attacher.go +++ b/pkg/volume/awsebs/attacher.go @@ -23,7 +23,7 @@ import ( "strconv" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -79,7 +79,7 @@ func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName // succeeds in that case, so no need to do that separately. 
devicePath, err := attacher.awsVolumes.AttachDisk(volumeID, nodeName) if err != nil { - glog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err) + klog.Errorf("Error attaching volume %q to node %q: %+v", volumeID, nodeName, err) return "", err } @@ -88,14 +88,14 @@ func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, nodeName func (attacher *awsElasticBlockStoreAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { - glog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for AWS", nodeName) + klog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for AWS", nodeName) volumeNodeMap := map[types.NodeName][]*volume.Spec{ nodeName: specs, } nodeVolumesResult := make(map[*volume.Spec]bool) nodesVerificationMap, err := attacher.BulkVerifyVolumes(volumeNodeMap) if err != nil { - glog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err) + klog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err) return nodeVolumesResult, err } @@ -115,7 +115,7 @@ func (attacher *awsElasticBlockStoreAttacher) BulkVerifyVolumes(volumesByNode ma volumeSource, _, err := getVolumeSource(volumeSpec) if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err) + klog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err) continue } @@ -135,7 +135,7 @@ func (attacher *awsElasticBlockStoreAttacher) BulkVerifyVolumes(volumesByNode ma attachedResult, err := attacher.awsVolumes.DisksAreAttached(diskNamesByNode) if err != nil { - glog.Errorf("Error checking if volumes are attached to nodes err = %v", err) + klog.Errorf("Error checking if volumes are attached to nodes err = %v", err) return volumesAttachedCheck, err } @@ -175,15 +175,15 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, d for { select { case <-ticker.C: - glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID) + klog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID) devicePaths := getDiskByIDPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath) path, err := verifyDevicePath(devicePaths) if err != nil { // Log error, if any, and continue checking periodically. 
See issue #11321 - glog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err) + klog.Errorf("Error verifying AWS Volume (%q) is attached: %v", volumeID, err) } else if path != "" { // A device path has successfully been created for the PD - glog.Infof("Successfully found attached AWS Volume %q.", volumeID) + klog.Infof("Successfully found attached AWS Volume %q.", volumeID) return path, nil } case <-timer.C: @@ -267,7 +267,7 @@ func (detacher *awsElasticBlockStoreDetacher) Detach(volumeName string, nodeName volumeID := aws.KubernetesVolumeID(path.Base(volumeName)) if _, err := detacher.awsVolumes.DetachDisk(volumeID, nodeName); err != nil { - glog.Errorf("Error detaching volumeID %q: %v", volumeID, err) + klog.Errorf("Error detaching volumeID %q: %v", volumeID, err) return err } return nil diff --git a/pkg/volume/awsebs/attacher_test.go b/pkg/volume/awsebs/attacher_test.go index ae5fc9c8527a1..df92328f03ed2 100644 --- a/pkg/volume/awsebs/attacher_test.go +++ b/pkg/volume/awsebs/attacher_test.go @@ -20,7 +20,7 @@ import ( "errors" "testing" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -232,7 +232,7 @@ func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName t return "", errors.New("Unexpected AttachDisk call: wrong nodeName") } - glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret) + klog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret) return expected.retDeviceName, expected.ret } @@ -257,7 +257,7 @@ func (testcase *testcase) DetachDisk(diskName aws.KubernetesVolumeID, nodeName t return "", errors.New("Unexpected DetachDisk call: wrong nodeName") } - glog.V(4).Infof("DetachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret) + klog.V(4).Infof("DetachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceName, expected.ret) return expected.retDeviceName, expected.ret } diff --git a/pkg/volume/awsebs/aws_ebs.go b/pkg/volume/awsebs/aws_ebs.go index 31e61eb3264eb..c6270c1752a16 100644 --- a/pkg/volume/awsebs/aws_ebs.go +++ b/pkg/volume/awsebs/aws_ebs.go @@ -25,7 +25,7 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -118,13 +118,13 @@ func (plugin *awsElasticBlockStorePlugin) GetVolumeLimits() (map[string]int64, e instances, ok := cloud.Instances() if !ok { - glog.V(3).Infof("Failed to get instances from cloud provider") + klog.V(3).Infof("Failed to get instances from cloud provider") return volumeLimits, nil } instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName()) if err != nil { - glog.Errorf("Failed to get instance type from AWS cloud provider") + klog.Errorf("Failed to get instance type from AWS cloud provider") return volumeLimits, nil } @@ -205,7 +205,7 @@ func (plugin *awsElasticBlockStorePlugin) NewDeleter(spec *volume.Spec) (volume. 
func (plugin *awsElasticBlockStorePlugin) newDeleterInternal(spec *volume.Spec, manager ebsManager) (volume.Deleter, error) { if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AWSElasticBlockStore == nil { - glog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil") + klog.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil") return nil, fmt.Errorf("spec.PersistentVolumeSource.AWSElasticBlockStore is nil") } return &awsElasticBlockStoreDeleter{ @@ -273,7 +273,7 @@ func (plugin *awsElasticBlockStorePlugin) ConstructVolumeSpec(volName, mountPath if length == 3 { sourceName = awsURLNamePrefix + names[1] + "/" + volName // names[1] is the zone label } - glog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName) + klog.V(4).Infof("Convert aws volume name from %q to %q ", volumeID, sourceName) } awsVolume := &v1.Volume{ @@ -382,9 +382,9 @@ func (b *awsElasticBlockStoreMounter) SetUp(fsGroup *int64) error { func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error { // TODO: handle failed mounts here. notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err) + klog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mount point: %s %v", dir, err) + klog.Errorf("cannot validate mount point: %s %v", dir, err) return err } if !notMnt { @@ -407,27 +407,27 @@ func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error if err != nil { notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("failed to unmount %s: %v", dir, mntErr) + klog.Errorf("failed to unmount %s: %v", dir, mntErr) return err } notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed for %s: %v", dir, mntErr) return err } if !notMnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", dir) return err } } os.Remove(dir) - glog.Errorf("Mount of disk %s failed: %v", dir, err) + klog.Errorf("Mount of disk %s failed: %v", dir, err) return err } @@ -435,7 +435,7 @@ func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error volume.SetVolumeOwnership(b, fsGroup) } - glog.V(4).Infof("Successfully mounted %s", dir) + klog.V(4).Infof("Successfully mounted %s", dir) return nil } @@ -451,11 +451,11 @@ func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (stri basePath := filepath.Join(host.GetPluginDir(awsElasticBlockStorePluginName), mount.MountsInGlobalPDPath) rel, err := filepath.Rel(basePath, globalPath) if err != nil { - glog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err) + klog.Errorf("Failed to get volume id from global mount %s - %v", globalPath, err) return "", err } if strings.Contains(rel, "../") { - glog.Errorf("Unexpected mount path: %s", globalPath) + klog.Errorf("Unexpected mount path: %s", globalPath) return "", fmt.Errorf("unexpected mount path: " + globalPath) } // Reverse the :// replacement done in makeGlobalPDPath @@ -463,7 +463,7 @@ func getVolumeIDFromGlobalMount(host volume.VolumeHost, globalPath string) (stri if strings.HasPrefix(volumeID, "aws/") { volumeID = strings.Replace(volumeID, "aws/", "aws://", 1) } - glog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID) + klog.V(2).Info("Mapping mount dir ", globalPath, " to volumeID ", volumeID) return volumeID, nil } @@ -517,7 +517,7 @@ func (c *awsElasticBlockStoreProvisioner) Provision(selectedNode *v1.Node, allow volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c, selectedNode, allowedTopologies) if err != nil { - glog.Errorf("Provision failed: %v", err) + klog.Errorf("Provision failed: %v", err) return nil, err } diff --git a/pkg/volume/awsebs/aws_ebs_block.go b/pkg/volume/awsebs/aws_ebs_block.go index efe52a470b8a6..a6d64b5c49571 100644 --- a/pkg/volume/awsebs/aws_ebs_block.go +++ b/pkg/volume/awsebs/aws_ebs_block.go @@ -22,7 +22,7 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -43,7 +43,7 @@ func (plugin *awsElasticBlockStorePlugin) ConstructBlockVolumeSpec(podUID types. if err != nil { return nil, err } - glog.V(5).Infof("globalMapPathUUID: %s", globalMapPathUUID) + klog.V(5).Infof("globalMapPathUUID: %s", globalMapPathUUID) globalMapPath := filepath.Dir(globalMapPathUUID) if len(globalMapPath) <= 1 { diff --git a/pkg/volume/awsebs/aws_util.go b/pkg/volume/awsebs/aws_util.go index ce5b0b1bc415a..e1c85aa8b7065 100644 --- a/pkg/volume/awsebs/aws_util.go +++ b/pkg/volume/awsebs/aws_util.go @@ -24,7 +24,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -60,13 +60,13 @@ func (util *AWSDiskUtil) DeleteVolume(d *awsElasticBlockStoreDeleter) error { if err != nil { // AWS cloud provider returns volume.deletedVolumeInUseError when // necessary, no handling needed here. 
- glog.V(2).Infof("Error deleting EBS Disk volume %s: %v", d.volumeID, err) + klog.V(2).Infof("Error deleting EBS Disk volume %s: %v", d.volumeID, err) return err } if deleted { - glog.V(2).Infof("Successfully deleted EBS Disk volume %s", d.volumeID) + klog.V(2).Infof("Successfully deleted EBS Disk volume %s", d.volumeID) } else { - glog.V(2).Infof("Successfully deleted EBS Disk volume %s (actually already deleted)", d.volumeID) + klog.V(2).Infof("Successfully deleted EBS Disk volume %s (actually already deleted)", d.volumeID) } return nil } @@ -97,7 +97,7 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner, node * volumeOptions, err := populateVolumeOptions(c.plugin.GetPluginName(), c.options.PVC.Name, capacity, tags, c.options.Parameters, node, allowedTopologies, zonesWithNodes) if err != nil { - glog.V(2).Infof("Error populating EBS options: %v", err) + klog.V(2).Infof("Error populating EBS options: %v", err) return "", 0, nil, "", err } @@ -108,15 +108,15 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner, node * name, err := cloud.CreateDisk(volumeOptions) if err != nil { - glog.V(2).Infof("Error creating EBS Disk volume: %v", err) + klog.V(2).Infof("Error creating EBS Disk volume: %v", err) return "", 0, nil, "", err } - glog.V(2).Infof("Successfully created EBS Disk volume %s", name) + klog.V(2).Infof("Successfully created EBS Disk volume %s", name) labels, err := cloud.GetVolumeLabels(name) if err != nil { // We don't really want to leak the volume here... - glog.Errorf("error building labels for new EBS volume %q: %v", name, err) + klog.Errorf("error building labels for new EBS volume %q: %v", name, err) } fstype := "" @@ -230,14 +230,14 @@ func getDiskByIDPaths(volumeID aws.KubernetesVolumeID, partition string, deviceP // and we have to get the volume id from the nvme interface awsVolumeID, err := volumeID.MapToAWSVolumeID() if err != nil { - glog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, err) + klog.Warningf("error mapping volume %q to AWS volume: %v", volumeID, err) } else { // This is the magic name on which AWS presents NVME devices under /dev/disk/by-id/ // For example, vol-0fab1d5e3f72a5e23 creates a symlink at /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0fab1d5e3f72a5e23 nvmeName := "nvme-Amazon_Elastic_Block_Store_" + strings.Replace(string(awsVolumeID), "-", "", -1) nvmePath, err := findNvmeVolume(nvmeName) if err != nil { - glog.Warningf("error looking for nvme volume %q: %v", volumeID, err) + klog.Warningf("error looking for nvme volume %q: %v", volumeID, err) } else if nvmePath != "" { devicePaths = append(devicePaths, nvmePath) } @@ -263,14 +263,14 @@ func findNvmeVolume(findName string) (device string, err error) { stat, err := os.Lstat(p) if err != nil { if os.IsNotExist(err) { - glog.V(6).Infof("nvme path not found %q", p) + klog.V(6).Infof("nvme path not found %q", p) return "", nil } return "", fmt.Errorf("error getting stat of %q: %v", p, err) } if stat.Mode()&os.ModeSymlink != os.ModeSymlink { - glog.Warningf("nvme file %q found, but was not a symlink", p) + klog.Warningf("nvme file %q found, but was not a symlink", p) return "", nil } diff --git a/pkg/volume/azure_dd/BUILD b/pkg/volume/azure_dd/BUILD index 8330a7ab74086..fd28f977d79fa 100644 --- a/pkg/volume/azure_dd/BUILD +++ b/pkg/volume/azure_dd/BUILD @@ -41,7 +41,7 @@ go_library( "//staging/src/k8s.io/cloud-provider:go_default_library", 
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 5fd911f3724bc..5787518aa52c2 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -26,7 +26,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -62,13 +62,13 @@ var getLunMutex = keymutex.NewHashed(0) func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Warningf("failed to get azure disk spec (%v)", err) + klog.Warningf("failed to get azure disk spec (%v)", err) return "", err } instanceid, err := a.cloud.InstanceID(context.TODO(), nodeName) if err != nil { - glog.Warningf("failed to get azure instance id (%v)", err) + klog.Warningf("failed to get azure instance id (%v)", err) return "", fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) } @@ -80,31 +80,31 @@ func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) ( lun, err := diskController.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName) if err == cloudprovider.InstanceNotFound { // Log error and continue with attach - glog.Warningf( + klog.Warningf( "Error checking if volume is already attached to current node (%q). Will continue and try attach anyway. err=%v", instanceid, err) } if err == nil { // Volume is already attached to node. - glog.V(2).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun) + klog.V(2).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, instanceid, lun) } else { - glog.V(2).Infof("GetDiskLun returned: %v. Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName) + klog.V(2).Infof("GetDiskLun returned: %v. 
Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName) getLunMutex.LockKey(instanceid) defer getLunMutex.UnlockKey(instanceid) lun, err = diskController.GetNextDiskLun(nodeName) if err != nil { - glog.Warningf("no LUN available for instance %q (%v)", nodeName, err) + klog.Warningf("no LUN available for instance %q (%v)", nodeName, err) return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q (%v)", volumeSource.DiskName, instanceid, err) } - glog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName) + klog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName) isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode)) if err == nil { - glog.V(2).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName) + klog.V(2).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName) } else { - glog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err) + klog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, instanceid, err) return "", fmt.Errorf("Attach volume %q to instance %q failed with %v", volumeSource.DiskName, instanceid, err) } } @@ -119,7 +119,7 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty for _, spec := range specs { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err) + klog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err) continue } @@ -135,7 +135,7 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty attachedResult, err := diskController.DisksAreAttached(volumeIDList, nodeName) if err != nil { // Log error and continue with attach - glog.Errorf( + klog.Errorf( "azureDisk - Error checking if volumes (%v) are attached to current node (%q). 
err=%v", volumeIDList, nodeName, err) return volumesAttachedCheck, err @@ -145,7 +145,7 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty if !attached { spec := volumeSpecMap[volumeID] volumesAttachedCheck[spec] = false - glog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) + klog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) } } return volumesAttachedCheck, nil @@ -167,13 +167,13 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, var lun int32 if runtime.GOOS == "windows" { - glog.V(2).Infof("azureDisk - WaitForAttach: begin to GetDiskLun by diskName(%s), DataDiskURI(%s), nodeName(%s), devicePath(%s)", + klog.V(2).Infof("azureDisk - WaitForAttach: begin to GetDiskLun by diskName(%s), DataDiskURI(%s), nodeName(%s), devicePath(%s)", diskName, volumeSource.DataDiskURI, nodeName, devicePath) lun, err = diskController.GetDiskLun(diskName, volumeSource.DataDiskURI, nodeName) if err != nil { return "", err } - glog.V(2).Infof("azureDisk - WaitForAttach: GetDiskLun succeeded, got lun(%v)", lun) + klog.V(2).Infof("azureDisk - WaitForAttach: GetDiskLun succeeded, got lun(%v)", lun) } else { lun, err = getDiskLUN(devicePath) if err != nil { @@ -247,9 +247,9 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str // testing original mount point, make sure the mount link is valid if _, err := (&osIOHandler{}).ReadDir(deviceMountPath); err != nil { // mount link is invalid, now unmount and remount later - glog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", deviceMountPath, err) + klog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", deviceMountPath, err) if err := mounter.Unmount(deviceMountPath); err != nil { - glog.Errorf("azureDisk - Unmount deviceMountPath %s failed with %v", deviceMountPath, err) + klog.Errorf("azureDisk - Unmount deviceMountPath %s failed with %v", deviceMountPath, err) return err } notMnt = true @@ -284,11 +284,11 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro instanceid, err := d.cloud.InstanceID(context.TODO(), nodeName) if err != nil { - glog.Warningf("no instance id for node %q, skip detaching (%v)", nodeName, err) + klog.Warningf("no instance id for node %q, skip detaching (%v)", nodeName, err) return nil } - glog.V(2).Infof("detach %v from node %q", diskURI, nodeName) + klog.V(2).Infof("detach %v from node %q", diskURI, nodeName) diskController, err := getDiskController(d.plugin.host) if err != nil { @@ -300,10 +300,10 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro err = diskController.DetachDiskByName("", diskURI, nodeName) if err != nil { - glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err) + klog.Errorf("failed to detach azure disk %q, err %v", diskURI, err) } - glog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName) + klog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName) return err } @@ -311,9 +311,9 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error { err := util.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter(detacher.plugin.GetPluginName())) if err == nil { - glog.V(2).Infof("azureDisk - Device %s was 
unmounted", deviceMountPath) + klog.V(2).Infof("azureDisk - Device %s was unmounted", deviceMountPath) } else { - glog.Warningf("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error()) + klog.Warningf("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error()) } return err } diff --git a/pkg/volume/azure_dd/azure_common_linux.go b/pkg/volume/azure_dd/azure_common_linux.go index 383b46f46876a..6b693b17e2b04 100644 --- a/pkg/volume/azure_dd/azure_common_linux.go +++ b/pkg/volume/azure_dd/azure_common_linux.go @@ -24,7 +24,7 @@ import ( "strconv" libstrings "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" ) @@ -42,21 +42,21 @@ func listAzureDiskPath(io ioHandler) []string { } } } - glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList) + klog.V(12).Infof("Azure sys disks paths: %v", azureDiskList) return azureDiskList } // getDiskLinkByDevName get disk link by device name from devLinkPath, e.g. /dev/disk/azure/, /dev/disk/by-id/ func getDiskLinkByDevName(io ioHandler, devLinkPath, devName string) (string, error) { dirs, err := io.ReadDir(devLinkPath) - glog.V(12).Infof("azureDisk - begin to find %s from %s", devName, devLinkPath) + klog.V(12).Infof("azureDisk - begin to find %s from %s", devName, devLinkPath) if err == nil { for _, f := range dirs { diskPath := devLinkPath + f.Name() - glog.V(12).Infof("azureDisk - begin to Readlink: %s", diskPath) + klog.V(12).Infof("azureDisk - begin to Readlink: %s", diskPath) link, linkErr := io.Readlink(diskPath) if linkErr != nil { - glog.Warningf("azureDisk - read link (%s) error: %v", diskPath, linkErr) + klog.Warningf("azureDisk - read link (%s) error: %v", diskPath, linkErr) continue } if libstrings.HasSuffix(link, devName) { @@ -75,11 +75,11 @@ func scsiHostRescan(io ioHandler, exec mount.Exec) { name := scsi_path + f.Name() + "/scan" data := []byte("- - -") if err = io.WriteFile(name, data, 0666); err != nil { - glog.Warningf("failed to rescan scsi host %s", name) + klog.Warningf("failed to rescan scsi host %s", name) } } } else { - glog.Warningf("failed to read %s, err %v", scsi_path, err) + klog.Warningf("failed to read %s, err %v", scsi_path, err) } } @@ -101,10 +101,10 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st continue } if len(azureDisks) == 0 { - glog.V(4).Infof("/dev/disk/azure is not populated, now try to parse %v directly", name) + klog.V(4).Infof("/dev/disk/azure is not populated, now try to parse %v directly", name) target, err := strconv.Atoi(arr[0]) if err != nil { - glog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err) + klog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err) continue } // as observed, targets 0-3 are used by OS disks. 
Skip them @@ -118,7 +118,7 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st l, err := strconv.Atoi(arr[3]) if err != nil { // unknown path format, continue to read the next one - glog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err) + klog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err) continue } if lun == l { @@ -127,24 +127,24 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st vendorPath := filepath.Join(sys_path, name, "vendor") vendorBytes, err := io.ReadFile(vendorPath) if err != nil { - glog.Errorf("failed to read device vendor, err: %v", err) + klog.Errorf("failed to read device vendor, err: %v", err) continue } vendor := libstrings.TrimSpace(string(vendorBytes)) if libstrings.ToUpper(vendor) != "MSFT" { - glog.V(4).Infof("vendor doesn't match VHD, got %s", vendor) + klog.V(4).Infof("vendor doesn't match VHD, got %s", vendor) continue } modelPath := filepath.Join(sys_path, name, "model") modelBytes, err := io.ReadFile(modelPath) if err != nil { - glog.Errorf("failed to read device model, err: %v", err) + klog.Errorf("failed to read device model, err: %v", err) continue } model := libstrings.TrimSpace(string(modelBytes)) if libstrings.ToUpper(model) != "VIRTUAL DISK" { - glog.V(4).Infof("model doesn't match VHD, got %s", model) + klog.V(4).Infof("model doesn't match VHD, got %s", model) continue } @@ -154,7 +154,7 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st found := false devName := dev[0].Name() for _, diskName := range azureDisks { - glog.V(12).Infof("azureDisk - validating disk %q with sys disk %q", devName, diskName) + klog.V(12).Infof("azureDisk - validating disk %q with sys disk %q", devName, diskName) if devName == diskName { found = true break @@ -165,10 +165,10 @@ func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (st for _, devLinkPath := range devLinkPaths { diskPath, err := getDiskLinkByDevName(io, devLinkPath, devName) if err == nil { - glog.V(4).Infof("azureDisk - found %s by %s under %s", diskPath, devName, devLinkPath) + klog.V(4).Infof("azureDisk - found %s by %s under %s", diskPath, devName, devLinkPath) return diskPath, nil } - glog.Warningf("azureDisk - getDiskLinkByDevName by %s under %s failed, error: %v", devName, devLinkPath, err) + klog.Warningf("azureDisk - getDiskLinkByDevName by %s under %s failed, error: %v", devName, devLinkPath, err) } return "/dev/" + devName, nil } diff --git a/pkg/volume/azure_dd/azure_common_windows.go b/pkg/volume/azure_dd/azure_common_windows.go index 6ee1a6d055850..c48f191f3092d 100644 --- a/pkg/volume/azure_dd/azure_common_windows.go +++ b/pkg/volume/azure_dd/azure_common_windows.go @@ -24,7 +24,7 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" ) @@ -33,7 +33,7 @@ func scsiHostRescan(io ioHandler, exec mount.Exec) { cmd := "Update-HostStorageCache" output, err := exec.Run("powershell", "/c", cmd) if err != nil { - glog.Errorf("Update-HostStorageCache failed in scsiHostRescan, error: %v, output: %q", err, string(output)) + klog.Errorf("Update-HostStorageCache failed in scsiHostRescan, error: %v, output: %q", err, string(output)) } } @@ -42,7 +42,7 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error cmd := `Get-Disk | select number, location | ConvertTo-Json` output, err := exec.Run("powershell", "/c", cmd) if err != nil { - 
glog.Errorf("Get-Disk failed in findDiskByLun, error: %v, output: %q", err, string(output)) + klog.Errorf("Get-Disk failed in findDiskByLun, error: %v, output: %q", err, string(output)) return "", err } @@ -52,7 +52,7 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error var data []map[string]interface{} if err = json.Unmarshal(output, &data); err != nil { - glog.Errorf("Get-Disk output is not a json array, output: %q", string(output)) + klog.Errorf("Get-Disk output is not a json array, output: %q", string(output)) return "", err } @@ -66,27 +66,27 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error arr := strings.Split(location, " ") arrLen := len(arr) if arrLen < 3 { - glog.Warningf("unexpected json structure from Get-Disk, location: %q", jsonLocation) + klog.Warningf("unexpected json structure from Get-Disk, location: %q", jsonLocation) continue } - glog.V(4).Infof("found a disk, locatin: %q, lun: %q", location, arr[arrLen-1]) + klog.V(4).Infof("found a disk, locatin: %q, lun: %q", location, arr[arrLen-1]) //last element of location field is LUN number, e.g. // "location": "Integrated : Adapter 3 : Port 0 : Target 0 : LUN 1" l, err := strconv.Atoi(arr[arrLen-1]) if err != nil { - glog.Warningf("cannot parse element from data structure, location: %q, element: %q", location, arr[arrLen-1]) + klog.Warningf("cannot parse element from data structure, location: %q, element: %q", location, arr[arrLen-1]) continue } if l == lun { - glog.V(4).Infof("found a disk and lun, locatin: %q, lun: %d", location, lun) + klog.V(4).Infof("found a disk and lun, locatin: %q, lun: %d", location, lun) if d, ok := v["number"]; ok { if diskNum, ok := d.(float64); ok { - glog.V(2).Infof("azureDisk Mount: got disk number(%d) by LUN(%d)", int(diskNum), lun) + klog.V(2).Infof("azureDisk Mount: got disk number(%d) by LUN(%d)", int(diskNum), lun) return strconv.Itoa(int(diskNum)), nil } - glog.Warningf("LUN(%d) found, but could not get disk number(%q), location: %q", lun, d, location) + klog.Warningf("LUN(%d) found, but could not get disk number(%q), location: %q", lun, d, location) } return "", fmt.Errorf("LUN(%d) found, but could not get disk number, location: %q", lun, location) } @@ -99,7 +99,7 @@ func findDiskByLun(lun int, iohandler ioHandler, exec mount.Exec) (string, error func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) { if err := mount.ValidateDiskNumber(disk); err != nil { - glog.Errorf("azureDisk Mount: formatIfNotFormatted failed, err: %v\n", err) + klog.Errorf("azureDisk Mount: formatIfNotFormatted failed, err: %v\n", err) return } @@ -111,8 +111,8 @@ func formatIfNotFormatted(disk string, fstype string, exec mount.Exec) { cmd += fmt.Sprintf(" | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem %s -Confirm:$false", fstype) output, err := exec.Run("powershell", "/c", cmd) if err != nil { - glog.Errorf("azureDisk Mount: Get-Disk failed, error: %v, output: %q", err, string(output)) + klog.Errorf("azureDisk Mount: Get-Disk failed, error: %v, output: %q", err, string(output)) } else { - glog.Infof("azureDisk Mount: Disk successfully formatted, disk: %q, fstype: %q\n", disk, fstype) + klog.Infof("azureDisk Mount: Disk successfully formatted, disk: %q, fstype: %q\n", disk, fstype) } } diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index f5e4c206d63a5..961d5a35714a9 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -23,7 +23,7 @@ import 
( "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -149,20 +149,20 @@ func (plugin *azureDataDiskPlugin) GetVolumeLimits() (map[string]int64, error) { instances, ok := az.Instances() if !ok { - glog.Warningf("Failed to get instances from cloud provider") + klog.Warningf("Failed to get instances from cloud provider") return volumeLimits, nil } instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName()) if err != nil { - glog.Errorf("Failed to get instance type from Azure cloud provider, nodeName: %s", plugin.host.GetNodeName()) + klog.Errorf("Failed to get instance type from Azure cloud provider, nodeName: %s", plugin.host.GetNodeName()) return volumeLimits, nil } if vmSizeList == nil { result, err := az.VirtualMachineSizesClient.List(context.TODO(), az.Location) if err != nil || result.Value == nil { - glog.Errorf("failed to list vm sizes in GetVolumeLimits, plugin.host: %s, location: %s", plugin.host.GetHostName(), az.Location) + klog.Errorf("failed to list vm sizes in GetVolumeLimits, plugin.host: %s, location: %s", plugin.host.GetHostName(), az.Location) return volumeLimits, nil } vmSizeList = result.Value @@ -183,11 +183,11 @@ func getMaxDataDiskCount(instanceType string, sizeList *[]compute.VirtualMachine vmsize := strings.ToUpper(instanceType) for _, size := range *sizeList { if size.Name == nil || size.MaxDataDiskCount == nil { - glog.Errorf("failed to get vm size in getMaxDataDiskCount") + klog.Errorf("failed to get vm size in getMaxDataDiskCount") continue } if strings.ToUpper(*size.Name) == vmsize { - glog.V(2).Infof("got a matching size in getMaxDataDiskCount, Name: %s, MaxDataDiskCount: %d", *size.Name, *size.MaxDataDiskCount) + klog.V(2).Infof("got a matching size in getMaxDataDiskCount, Name: %s, MaxDataDiskCount: %d", *size.Name, *size.MaxDataDiskCount) return int64(*size.MaxDataDiskCount) } } @@ -208,7 +208,7 @@ func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessM func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) { azure, err := getCloud(plugin.host) if err != nil { - glog.Errorf("failed to get azure cloud in NewAttacher, plugin.host : %s, err:%v", plugin.host.GetHostName(), err) + klog.Errorf("failed to get azure cloud in NewAttacher, plugin.host : %s, err:%v", plugin.host.GetHostName(), err) return nil, err } @@ -221,7 +221,7 @@ func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) { func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) { azure, err := getCloud(plugin.host) if err != nil { - glog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName()) + klog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName()) return nil, err } diff --git a/pkg/volume/azure_dd/azure_dd_block.go b/pkg/volume/azure_dd/azure_dd_block.go index af1d195047e18..704304994639d 100644 --- a/pkg/volume/azure_dd/azure_dd_block.go +++ b/pkg/volume/azure_dd/azure_dd_block.go @@ -20,10 +20,10 @@ import ( "fmt" "path/filepath" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -44,7 +44,7 
@@ func (plugin *azureDataDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID, vo if err != nil { return nil, err } - glog.V(5).Infof("constructing block volume spec from globalMapPathUUID: %s", globalMapPathUUID) + klog.V(5).Infof("constructing block volume spec from globalMapPathUUID: %s", globalMapPathUUID) globalMapPath := filepath.Dir(globalMapPathUUID) if len(globalMapPath) <= 1 { @@ -63,7 +63,7 @@ func getVolumeSpecFromGlobalMapPath(globalMapPath, volumeName string) (*volume.S if len(diskName) <= 1 { return nil, fmt.Errorf("failed to get diskName from global path=%s", globalMapPath) } - glog.V(5).Infof("got diskName(%s) from globalMapPath: %s", globalMapPath, diskName) + klog.V(5).Infof("got diskName(%s) from globalMapPath: %s", globalMapPath, diskName) block := v1.PersistentVolumeBlock pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/volume/azure_dd/azure_mounter.go b/pkg/volume/azure_dd/azure_mounter.go index d8b7ae50df6b2..66c0e12910b80 100644 --- a/pkg/volume/azure_dd/azure_mounter.go +++ b/pkg/volume/azure_dd/azure_mounter.go @@ -21,8 +21,8 @@ import ( "os" "runtime" - "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) @@ -46,7 +46,7 @@ func (m *azureDiskMounter) GetAttributes() volume.Attributes { readOnly := false volumeSource, _, err := getVolumeSource(m.spec) if err != nil { - glog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err) + klog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err) } else if volumeSource.ReadOnly != nil { readOnly = *volumeSource.ReadOnly } @@ -74,7 +74,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { volumeSource, _, err := getVolumeSource(m.spec) if err != nil { - glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name()) + klog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name()) return err } @@ -82,20 +82,20 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { mountPoint, err := mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err) + klog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err) return err } if !mountPoint { // testing original mount point, make sure the mount link is valid _, err := (&osIOHandler{}).ReadDir(dir) if err == nil { - glog.V(4).Infof("azureDisk - already mounted to target %s", dir) + klog.V(4).Infof("azureDisk - already mounted to target %s", dir) return nil } // mount link is invalid, now unmount and remount later - glog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", dir, err) + klog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", dir, err) if err := mounter.Unmount(dir); err != nil { - glog.Errorf("azureDisk - Unmount directory %s failed with %v", dir, err) + klog.Errorf("azureDisk - Unmount directory %s failed with %v", dir, err) return err } mountPoint = true @@ -104,7 +104,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { if runtime.GOOS != "windows" { // in windows, we will use mklink to mount, will MkdirAll in Mount func if err := os.MkdirAll(dir, 0750); err != nil { - glog.Errorf("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err) + klog.Errorf("azureDisk - mkdir 
failed on disk %s on dir: %s (%v)", diskName, dir, err) return err } } @@ -119,7 +119,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { options = util.JoinMountOptions(m.options.MountOptions, options) } - glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir) + klog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir) isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk) @@ -131,7 +131,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { // Everything in the following control flow is meant as an // attempt cleanup a failed setupAt (bind mount) if mountErr != nil { - glog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr) + klog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr) mountPoint, err := mounter.IsLikelyNotMountPoint(dir) if err != nil { return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint check failed for disk:%s on dir:%s with error %v original-mountErr:%v", diskName, dir, err, mountErr) @@ -155,7 +155,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", dir, err, mountErr) } - glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, mountErr) + klog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, mountErr) return mountErr } @@ -163,7 +163,7 @@ func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { volume.SetVolumeOwnership(m, fsGroup) } - glog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir) + klog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir) return nil } @@ -175,11 +175,11 @@ func (u *azureDiskUnmounter) TearDownAt(dir string) error { if pathExists, pathErr := util.PathExists(dir); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) return nil } - glog.V(4).Infof("azureDisk - TearDownAt: %s", dir) + klog.V(4).Infof("azureDisk - TearDownAt: %s", dir) mounter := u.plugin.host.GetMounter(u.plugin.GetPluginName()) mountPoint, err := mounter.IsLikelyNotMountPoint(dir) if err != nil { diff --git a/pkg/volume/azure_file/BUILD b/pkg/volume/azure_file/BUILD index 3f278c7b22739..b4fa68effeadc 100644 --- a/pkg/volume/azure_file/BUILD +++ b/pkg/volume/azure_file/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/azure_file/azure_file.go b/pkg/volume/azure_file/azure_file.go index bc2c0deecc130..e0234ce780ba8 100644 --- a/pkg/volume/azure_file/azure_file.go +++ 
b/pkg/volume/azure_file/azure_file.go @@ -22,12 +22,12 @@ import ( "os" "runtime" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" @@ -237,20 +237,20 @@ func (b *azureFileMounter) SetUp(fsGroup *int64) error { func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("AzureFile mount set up: %s %v %v", dir, !notMnt, err) + klog.V(4).Infof("AzureFile mount set up: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { return err } if !notMnt { // testing original mount point, make sure the mount link is valid if _, err := ioutil.ReadDir(dir); err == nil { - glog.V(4).Infof("azureFile - already mounted to target %s", dir) + klog.V(4).Infof("azureFile - already mounted to target %s", dir) return nil } // mount link is invalid, now unmount and remount later - glog.Warningf("azureFile - ReadDir %s failed with %v, unmount this directory", dir, err) + klog.Warningf("azureFile - ReadDir %s failed with %v, unmount this directory", dir, err) if err := b.mounter.Unmount(dir); err != nil { - glog.Errorf("azureFile - Unmount directory %s failed with %v", dir, err) + klog.Errorf("azureFile - Unmount directory %s failed with %v", dir, err) return err } notMnt = true @@ -285,22 +285,22 @@ func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error { if err != nil { notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) return err } } @@ -376,7 +376,7 @@ func getStorageEndpointSuffix(cloudprovider cloudprovider.Interface) string { const publicCloudStorageEndpointSuffix = "core.windows.net" azure, err := getAzureCloud(cloudprovider) if err != nil { - glog.Warningf("No Azure cloud provider found. Using the Azure public cloud endpoint: %s", publicCloudStorageEndpointSuffix) + klog.Warningf("No Azure cloud provider found. 
Using the Azure public cloud endpoint: %s", publicCloudStorageEndpointSuffix) return publicCloudStorageEndpointSuffix } return azure.Environment.StorageEndpointSuffix diff --git a/pkg/volume/azure_file/azure_provision.go b/pkg/volume/azure_file/azure_provision.go index 96ba0b10cc9fa..60466cedab62f 100644 --- a/pkg/volume/azure_file/azure_provision.go +++ b/pkg/volume/azure_file/azure_provision.go @@ -21,7 +21,7 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-07-01/storage" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -56,7 +56,7 @@ type azureFileDeleter struct { func (plugin *azureFilePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) if err != nil { - glog.V(4).Infof("failed to get azure provider") + klog.V(4).Infof("failed to get azure provider") return nil, err } @@ -92,7 +92,7 @@ func (plugin *azureFilePlugin) newDeleterInternal(spec *volume.Spec, util azureU func (plugin *azureFilePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) if err != nil { - glog.V(4).Infof("failed to get azure provider") + klog.V(4).Infof("failed to get azure provider") return nil, err } if len(options.PVC.Spec.AccessModes) == 0 { @@ -120,7 +120,7 @@ func (f *azureFileDeleter) GetPath() string { } func (f *azureFileDeleter) Delete() error { - glog.V(4).Infof("deleting volume %s", f.shareName) + klog.V(4).Infof("deleting volume %s", f.shareName) return f.azureProvider.DeleteFileShare(f.accountName, f.accountKey, f.shareName) } diff --git a/pkg/volume/cephfs/BUILD b/pkg/volume/cephfs/BUILD index 63d03a82c05e0..eeb4ee0dce5b4 100644 --- a/pkg/volume/cephfs/BUILD +++ b/pkg/volume/cephfs/BUILD @@ -21,7 +21,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/cephfs/cephfs.go b/pkg/volume/cephfs/cephfs.go index c78fbee8c36a3..748fe11a47513 100644 --- a/pkg/volume/cephfs/cephfs.go +++ b/pkg/volume/cephfs/cephfs.go @@ -24,10 +24,10 @@ import ( "runtime" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -110,7 +110,7 @@ func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume. } for name, data := range secrets.Data { secret = string(data) - glog.V(4).Infof("found ceph secret info: %s", name) + klog.V(4).Infof("found ceph secret info: %s", name) } } return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter(plugin.GetPluginName()), secret) @@ -225,7 +225,7 @@ func (cephfsVolume *cephfsMounter) SetUp(fsGroup *int64) error { // SetUpAt attaches the disk and bind mounts to the volume path. 
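// Illustrative sketch, not part of the patch: every hunk in this section is
// the same mechanical substitution, swapping the github.com/golang/glog
// import for k8s.io/klog while leaving call sites untouched. One behavioral
// difference worth knowing: klog does not register its flags as an import
// side effect, so a standalone binary has to wire them up explicitly.
// A minimal migrated program:
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc. on flag.CommandLine
	flag.Parse()
	defer klog.Flush() // klog buffers output; flush before exiting

	klog.V(4).Infof("verbose message, emitted only with -v=4 or higher")
	klog.Warning("warnings and errors are always emitted")
}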
func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err) + klog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { return err } @@ -239,7 +239,7 @@ func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error { // check whether it belongs to fuse, if not, default to use kernel mount. if cephfsVolume.checkFuseMount() { - glog.V(4).Info("CephFS fuse mount.") + klog.V(4).Info("CephFS fuse mount.") err = cephfsVolume.execFuseMount(dir) // cleanup no matter if fuse mount fail. keyringPath := cephfsVolume.GetKeyringPath() @@ -252,10 +252,10 @@ func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error { return nil } // if cephfs fuse mount failed, fallback to kernel mount. - glog.V(2).Infof("CephFS fuse mount failed: %v, fallback to kernel mount.", err) + klog.V(2).Infof("CephFS fuse mount failed: %v, fallback to kernel mount.", err) } - glog.V(4).Info("CephFS kernel mount.") + klog.V(4).Info("CephFS kernel mount.") err = cephfsVolume.execMount(dir) if err != nil { @@ -336,7 +336,7 @@ func (cephfsVolume *cephfsMounter) checkFuseMount() bool { switch runtime.GOOS { case "linux": if _, err := execute.Run("/usr/bin/test", "-x", "/sbin/mount.fuse.ceph"); err == nil { - glog.V(4).Info("/sbin/mount.fuse.ceph exists, it should be fuse mount.") + klog.V(4).Info("/sbin/mount.fuse.ceph exists, it should be fuse mount.") return true } return false @@ -351,7 +351,7 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error { if cephfsVolume.secret != "" { // TODO: cephfs fuse currently doesn't support secret option, // remove keyring file create once secret option is supported. - glog.V(4).Info("cephfs mount begin using fuse.") + klog.V(4).Info("cephfs mount begin using fuse.") keyringPath := cephfsVolume.GetKeyringPath() os.MkdirAll(keyringPath, 0750) @@ -370,13 +370,13 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error { writerContext := fmt.Sprintf("cephfuse:%v.keyring", cephfsVolume.id) writer, err := util.NewAtomicWriter(keyringPath, writerContext) if err != nil { - glog.Errorf("failed to create atomic writer: %v", err) + klog.Errorf("failed to create atomic writer: %v", err) return err } err = writer.Write(payload) if err != nil { - glog.Errorf("failed to write payload to dir: %v", err) + klog.Errorf("failed to write payload to dir: %v", err) return err } @@ -419,7 +419,7 @@ func (cephfsVolume *cephfs) execFuseMount(mountpoint string) error { mountArgs = append(mountArgs, strings.Join(opt, ",")) } - glog.V(4).Infof("Mounting cmd ceph-fuse with arguments (%s)", mountArgs) + klog.V(4).Infof("Mounting cmd ceph-fuse with arguments (%s)", mountArgs) command := exec.Command("ceph-fuse", mountArgs...) 
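// Illustrative sketch, not part of the patch: execFuseMount in the
// surrounding hunks builds the ceph-fuse argument list and, because
// ceph-fuse daemonizes, decides success by scanning the combined output for
// "starting fuse". A trimmed version of that invocation using os/exec
// directly; extraOpts is assumed to be a plain list of mount options, joined
// the same way the plugin joins them:
package fusesketch

import (
	"fmt"
	"os/exec"
	"strings"

	"k8s.io/klog"
)

func runCephFuse(mountpoint string, extraOpts []string) error {
	args := []string{mountpoint}
	if len(extraOpts) > 0 {
		args = append(args, "-o", strings.Join(extraOpts, ","))
	}
	klog.V(4).Infof("mounting ceph-fuse with arguments (%v)", args)
	out, err := exec.Command("ceph-fuse", args...).CombinedOutput()
	// ceph-fuse forks into the background, so the exit code alone is not
	// enough; the plugin also checks for the startup banner in the output.
	if err != nil || !strings.Contains(string(out), "starting fuse") {
		return fmt.Errorf("ceph-fuse mount failed: %v, output: %q", err, string(out))
	}
	return nil
}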
output, err := command.CombinedOutput() if err != nil || !(strings.Contains(string(output), "starting fuse")) { diff --git a/pkg/volume/cinder/BUILD b/pkg/volume/cinder/BUILD index efd173464cc97..9435be1ec42a1 100644 --- a/pkg/volume/cinder/BUILD +++ b/pkg/volume/cinder/BUILD @@ -35,7 +35,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -59,7 +59,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/cinder/attacher.go b/pkg/volume/cinder/attacher.go index 2cdc6da7a5896..0db22a27386e0 100644 --- a/pkg/volume/cinder/attacher.go +++ b/pkg/volume/cinder/attacher.go @@ -24,10 +24,10 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -145,31 +145,31 @@ func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, nodeName types.Nod attached, err := attacher.cinderProvider.DiskIsAttached(instanceID, volumeID) if err != nil { // Log error and continue with attach - glog.Warningf( + klog.Warningf( "Error checking if volume (%q) is already attached to current instance (%q). Will continue and try attach anyway. err=%v", volumeID, instanceID, err) } if err == nil && attached { // Volume is already attached to instance. - glog.Infof("Attach operation is successful. volume %q is already attached to instance %q.", volumeID, instanceID) + klog.Infof("Attach operation is successful. 
volume %q is already attached to instance %q.", volumeID, instanceID) } else { _, err = attacher.cinderProvider.AttachDisk(instanceID, volumeID) if err == nil { if err = attacher.waitDiskAttached(instanceID, volumeID); err != nil { - glog.Errorf("Error waiting for volume %q to be attached from node %q: %v", volumeID, nodeName, err) + klog.Errorf("Error waiting for volume %q to be attached from node %q: %v", volumeID, nodeName, err) return "", err } - glog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID) + klog.Infof("Attach operation successful: volume %q attached to instance %q.", volumeID, instanceID) } else { - glog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceID, err) + klog.Infof("Attach volume %q to instance %q failed with: %v", volumeID, instanceID, err) return "", err } } devicePath, err := attacher.cinderProvider.GetAttachmentDiskPath(instanceID, volumeID) if err != nil { - glog.Infof("Can not get device path of volume %q which be attached to instance %q, failed with: %v", volumeID, instanceID, err) + klog.Infof("Cannot get device path of volume %q attached to instance %q, failed with: %v", volumeID, instanceID, err) return "", err } @@ -183,7 +183,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod for _, spec := range specs { volumeID, _, _, err := getVolumeInfo(spec) if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) + klog.Errorf("Error getting volume (%q) source: %v", spec.Name(), err) continue } @@ -195,7 +195,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod attachedResult, err := attacher.cinderProvider.DisksAreAttachedByName(nodeName, volumeIDList) if err != nil { // Log error and continue with attach - glog.Errorf( + klog.Errorf( "Error checking if Volumes (%v) are already attached to current node (%q). Will continue and try attach anyway. 
err=%v", volumeIDList, nodeName, err) return volumesAttachedCheck, err @@ -205,7 +205,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod if !attached { spec := volumeSpecMap[volumeID] volumesAttachedCheck[spec] = false - glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) + klog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) } } return volumesAttachedCheck, nil @@ -231,7 +231,7 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath for { select { case <-ticker.C: - glog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID) + klog.V(5).Infof("Checking Cinder disk %q is attached.", volumeID) probeAttachedVolume() if !attacher.cinderProvider.ShouldTrustDevicePath() { // Using the Cinder volume ID, find the real device path (See Issue #33128) @@ -239,11 +239,11 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath } exists, err := volumeutil.PathExists(devicePath) if exists && err == nil { - glog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath) + klog.Infof("Successfully found attached Cinder disk %q at %v.", volumeID, devicePath) return devicePath, nil } // Log an error, and continue checking periodically - glog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err) + klog.Errorf("Error: could not find attached Cinder disk %q (path: %q): %v", volumeID, devicePath, err) // Using exponential backoff instead of linear ticker.Stop() duration = time.Duration(float64(duration) * probeVolumeFactor) @@ -379,26 +379,26 @@ func (detacher *cinderDiskDetacher) Detach(volumeName string, nodeName types.Nod attached, instanceID, err := detacher.cinderProvider.DiskIsAttachedByName(nodeName, volumeID) if err != nil { // Log error and continue with detach - glog.Errorf( + klog.Errorf( "Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", volumeID, nodeName, err) } if err == nil && !attached { // Volume is already detached from node. - glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volumeID, nodeName) + klog.Infof("detach operation was successful. 
volume %q is already detached from node %q.", volumeID, nodeName) return nil } if err = detacher.cinderProvider.DetachDisk(instanceID, volumeID); err != nil { - glog.Errorf("Error detaching volume %q from node %q: %v", volumeID, nodeName, err) + klog.Errorf("Error detaching volume %q from node %q: %v", volumeID, nodeName, err) return err } if err = detacher.waitDiskDetached(instanceID, volumeID); err != nil { - glog.Errorf("Error waiting for volume %q to detach from node %q: %v", volumeID, nodeName, err) + klog.Errorf("Error waiting for volume %q to detach from node %q: %v", volumeID, nodeName, err) return err } - glog.Infof("detached volume %q from node %q", volumeID, nodeName) + klog.Infof("detached volume %q from node %q", volumeID, nodeName) return nil } diff --git a/pkg/volume/cinder/attacher_test.go b/pkg/volume/cinder/attacher_test.go index 5902caf8fc946..7b444d84667d6 100644 --- a/pkg/volume/cinder/attacher_test.go +++ b/pkg/volume/cinder/attacher_test.go @@ -31,8 +31,8 @@ import ( "fmt" "sort" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" ) const ( @@ -468,7 +468,7 @@ func (testcase *testcase) AttachDisk(instanceID, volumeID string) (string, error return "", errors.New("unexpected AttachDisk call: wrong instanceID") } - glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", volumeID, instanceID, expected.retDeviceName, expected.ret) + klog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", volumeID, instanceID, expected.retDeviceName, expected.ret) testcase.attachOrDetach = &attachStatus return expected.retDeviceName, expected.ret @@ -494,7 +494,7 @@ func (testcase *testcase) DetachDisk(instanceID, volumeID string) error { return errors.New("unexpected DetachDisk call: wrong instanceID") } - glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", volumeID, instanceID, expected.ret) + klog.V(4).Infof("DetachDisk call: %s, %s, returning %v", volumeID, instanceID, expected.ret) testcase.attachOrDetach = &detachStatus return expected.ret @@ -504,11 +504,11 @@ func (testcase *testcase) OperationPending(diskName string) (bool, string, error expected := &testcase.operationPending if expected.volumeStatus == VolumeStatusPending { - glog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret) + klog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret) return true, expected.volumeStatus, expected.ret } - glog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret) + klog.V(4).Infof("OperationPending call: %s, returning %v, %v, %v", diskName, expected.pending, expected.volumeStatus, expected.ret) return false, expected.volumeStatus, expected.ret } @@ -542,7 +542,7 @@ func (testcase *testcase) DiskIsAttached(instanceID, volumeID string) (bool, err return false, errors.New("unexpected DiskIsAttached call: wrong instanceID") } - glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", volumeID, instanceID, expected.isAttached, expected.ret) + klog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", volumeID, instanceID, expected.isAttached, expected.ret) return expected.isAttached, expected.ret } @@ -566,7 +566,7 @@ func (testcase *testcase) GetAttachmentDiskPath(instanceID, volumeID string) (st return "", errors.New("unexpected GetAttachmentDiskPath call: wrong instanceID") } - glog.V(4).Infof("GetAttachmentDiskPath call: %s, 
%s, returning %v, %v", volumeID, instanceID, expected.retPath, expected.ret) + klog.V(4).Infof("GetAttachmentDiskPath call: %s, %s, returning %v, %v", volumeID, instanceID, expected.retPath, expected.ret) return expected.retPath, expected.ret } @@ -610,7 +610,7 @@ func (testcase *testcase) DiskIsAttachedByName(nodeName types.NodeName, volumeID return false, instanceID, errors.New("unexpected DiskIsAttachedByName call: wrong instanceID") } - glog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret) + klog.V(4).Infof("DiskIsAttachedByName call: %s, %s, returning %v, %v, %v", volumeID, nodeName, expected.isAttached, expected.instanceID, expected.ret) return expected.isAttached, expected.instanceID, expected.ret } @@ -664,7 +664,7 @@ func (testcase *testcase) DisksAreAttached(instanceID string, volumeIDs []string return areAttached, errors.New("Unexpected DisksAreAttached call: wrong instanceID") } - glog.V(4).Infof("DisksAreAttached call: %v, %s, returning %v, %v", volumeIDs, instanceID, expected.areAttached, expected.ret) + klog.V(4).Infof("DisksAreAttached call: %v, %s, returning %v, %v", volumeIDs, instanceID, expected.areAttached, expected.ret) return expected.areAttached, expected.ret } @@ -694,7 +694,7 @@ func (testcase *testcase) DisksAreAttachedByName(nodeName types.NodeName, volume return areAttached, errors.New("Unexpected DisksAreAttachedByName call: wrong instanceID") } - glog.V(4).Infof("DisksAreAttachedByName call: %v, %s, returning %v, %v", volumeIDs, nodeName, expected.areAttached, expected.ret) + klog.V(4).Infof("DisksAreAttachedByName call: %v, %s, returning %v, %v", volumeIDs, nodeName, expected.areAttached, expected.ret) return expected.areAttached, expected.ret } diff --git a/pkg/volume/cinder/cinder.go b/pkg/volume/cinder/cinder.go index 8e585338f02c0..953c4660f4556 100644 --- a/pkg/volume/cinder/cinder.go +++ b/pkg/volume/cinder/cinder.go @@ -22,13 +22,13 @@ import ( "os" "path" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/keymutex" @@ -234,7 +234,7 @@ func (plugin *cinderPlugin) ConstructVolumeSpec(volumeName, mountPath string) (* if err != nil { return nil, err } - glog.V(4).Infof("Found volume %s mounted to %s", sourceName, mountPath) + klog.V(4).Infof("Found volume %s mounted to %s", sourceName, mountPath) cinderVolume := &v1.Volume{ Name: volumeName, VolumeSource: v1.VolumeSource{ @@ -263,7 +263,7 @@ func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resour return oldSize, err } - glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, int(newSize.Value())) + klog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, int(newSize.Value())) return expandedSize, nil } @@ -342,18 +342,18 @@ func (b *cinderVolumeMounter) SetUp(fsGroup *int64) error { // SetUp bind mounts to the volume path. 
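// Illustrative sketch, not part of the patch: the SetUpAt implementations in
// this section (azure_dd, azure_file, cephfs, cinder) all share one
// idempotent shape: probe the target with IsLikelyNotMountPoint, treat an
// existing mount as success, and clean up the directory if the mount fails.
// A condensed version, with a hypothetical Mounter interface standing in for
// the subset of pkg/util/mount used here:
package mountsketch

import (
	"fmt"
	"os"

	"k8s.io/klog"
)

// Mounter is a stand-in for the small part of pkg/util/mount exercised below.
type Mounter interface {
	IsLikelyNotMountPoint(dir string) (bool, error)
	Mount(source, target, fstype string, options []string) error
}

func setUpAt(m Mounter, source, dir string) error {
	notMnt, err := m.IsLikelyNotMountPoint(dir)
	if err != nil && !os.IsNotExist(err) {
		klog.Errorf("cannot validate mount point %s: %v", dir, err)
		return err
	}
	if !notMnt {
		// Already mounted: SetUpAt must be idempotent, so report success.
		klog.V(4).Infof("%s is already a mount point, nothing to do", dir)
		return nil
	}
	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}
	if err := m.Mount(source, dir, "", []string{"bind"}); err != nil {
		// Remove the directory so the next sync loop starts from a clean state.
		os.Remove(dir)
		return fmt.Errorf("failed to bind mount %s on %s: %v", source, dir, err)
	}
	klog.V(3).Infof("volume %s mounted to %s", source, dir)
	return nil
}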
func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir) + klog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir) b.plugin.volumeLocks.LockKey(b.pdName) defer b.plugin.volumeLocks.UnlockKey(b.pdName) notmnt, err := b.mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.Errorf("Cannot validate mount point: %s %v", dir, err) + klog.Errorf("Cannot validate mount point: %s %v", dir, err) return err } if !notmnt { - glog.V(4).Infof("Something is already mounted to target %s", dir) + klog.V(4).Infof("Something is already mounted to target %s", dir) return nil } globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName) @@ -364,46 +364,46 @@ func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } if err := os.MkdirAll(dir, 0750); err != nil { - glog.V(4).Infof("Could not create directory %s: %v", dir, err) + klog.V(4).Infof("Could not create directory %s: %v", dir, err) return err } mountOptions := util.JoinMountOptions(options, b.mountOptions) // Perform a bind mount to the full path to allow duplicate mounts of the same PD. - glog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, mountOptions) + klog.V(4).Infof("Attempting to mount cinder volume %s to %s with options %v", b.pdName, dir, mountOptions) err = b.mounter.Mount(globalPDPath, dir, "", options) if err != nil { - glog.V(4).Infof("Mount failed: %v", err) + klog.V(4).Infof("Mount failed: %v", err) notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notmnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notmnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath()) + klog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", b.GetPath()) return err } } os.Remove(dir) - glog.Errorf("Failed to mount %s: %v", dir, err) + klog.Errorf("Failed to mount %s: %v", dir, err) return err } if !b.readOnly { volume.SetVolumeOwnership(b, fsGroup) } - glog.V(3).Infof("Cinder volume %s mounted to %s", b.pdName, dir) + klog.V(3).Infof("Cinder volume %s mounted to %s", b.pdName, dir) return nil } @@ -432,18 +432,18 @@ func (c *cinderVolumeUnmounter) TearDownAt(dir string) error { if pathExists, pathErr := util.PathExists(dir); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) return nil } - glog.V(5).Infof("Cinder TearDown of %s", dir) + klog.V(5).Infof("Cinder TearDown of %s", dir) notmnt, err := c.mounter.IsLikelyNotMountPoint(dir) if err != nil { - glog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err) + klog.V(4).Infof("IsLikelyNotMountPoint check failed: %v", err) return err } if notmnt { - glog.V(4).Infof("Nothing is mounted to %s, ignoring", dir) + klog.V(4).Infof("Nothing is mounted to %s, ignoring", dir) return os.Remove(dir) } @@ -452,15 +452,15 @@ func (c *cinderVolumeUnmounter) TearDownAt(dir string) error { // NewMounter. We could then find volumeID there without probing MountRefs. refs, err := c.mounter.GetMountRefs(dir) if err != nil { - glog.V(4).Infof("GetMountRefs failed: %v", err) + klog.V(4).Infof("GetMountRefs failed: %v", err) return err } if len(refs) == 0 { - glog.V(4).Infof("Directory %s is not mounted", dir) + klog.V(4).Infof("Directory %s is not mounted", dir) return fmt.Errorf("directory %s is not mounted", dir) } c.pdName = path.Base(refs[0]) - glog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir) + klog.V(4).Infof("Found volume %s mounted to %s", c.pdName, dir) // lock the volume (and thus wait for any concurrrent SetUpAt to finish) c.plugin.volumeLocks.LockKey(c.pdName) @@ -469,23 +469,23 @@ func (c *cinderVolumeUnmounter) TearDownAt(dir string) error { // Reload list of references, there might be SetUpAt finished in the meantime refs, err = c.mounter.GetMountRefs(dir) if err != nil { - glog.V(4).Infof("GetMountRefs failed: %v", err) + klog.V(4).Infof("GetMountRefs failed: %v", err) return err } if err := c.mounter.Unmount(dir); err != nil { - glog.V(4).Infof("Unmount failed: %v", err) + klog.V(4).Infof("Unmount failed: %v", err) return err } - glog.V(3).Infof("Successfully unmounted: %s\n", dir) + klog.V(3).Infof("Successfully unmounted: %s\n", dir) notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if notmnt { if err := os.Remove(dir); err != nil { - glog.V(4).Infof("Failed to remove directory after unmount: %v", err) + klog.V(4).Infof("Failed to remove directory after unmount: %v", err) return err } } diff --git a/pkg/volume/cinder/cinder_block.go b/pkg/volume/cinder/cinder_block.go index 02a5d74450077..90e2056e0496d 100644 --- a/pkg/volume/cinder/cinder_block.go +++ b/pkg/volume/cinder/cinder_block.go @@ -20,9 +20,9 @@ import ( "fmt" "path/filepath" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -44,7 +44,7 @@ 
func (plugin *cinderPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeNam if err != nil { return nil, err } - glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) globalMapPath := filepath.Dir(globalMapPathUUID) if len(globalMapPath) <= 1 { diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go index accc40a2a6e1f..57c5254e80bb8 100644 --- a/pkg/volume/cinder/cinder_util.go +++ b/pkg/volume/cinder/cinder_util.go @@ -24,8 +24,8 @@ import ( "strings" "time" - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -95,7 +95,7 @@ func (util *DiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) er os.Remove(globalPDPath) return err } - glog.V(2).Infof("Safe mount successful: %q\n", devicePath) + klog.V(2).Infof("Safe mount successful: %q\n", devicePath) } return nil } @@ -109,7 +109,7 @@ func (util *DiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error { if err := os.Remove(globalPDPath); err != nil { return err } - glog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath) + klog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath) cloud, err := cd.plugin.getCloudProvider() if err != nil { @@ -122,7 +122,7 @@ func (util *DiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error { if err = cloud.DetachDisk(instanceid, cd.pdName); err != nil { return err } - glog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName) + klog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName) return nil } @@ -136,10 +136,10 @@ func (util *DiskUtil) DeleteVolume(cd *cinderVolumeDeleter) error { if err = cloud.DeleteVolume(cd.pdName); err != nil { // OpenStack cloud provider returns volume.tryAgainError when necessary, // no handling needed here. 
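// Illustrative sketch, not part of the patch: probeAttachedVolume (below)
// shells out to udevadm through k8s.io/utils/exec so that tests can
// substitute a fake executor. The settle step is best effort (its failure is
// only logged), while a failed trigger is returned to the caller. Assuming
// "settle" and "trigger" take no extra flags here:
package probesketch

import (
	"k8s.io/klog"
	utilexec "k8s.io/utils/exec"
)

func settleAndTrigger(executor utilexec.Interface) error {
	if out, err := executor.Command("udevadm", "settle").CombinedOutput(); err != nil {
		// Best effort: log and still attempt the trigger.
		klog.Errorf("udevadm settle failed: %v, output: %q", err, string(out))
	}
	if out, err := executor.Command("udevadm", "trigger").CombinedOutput(); err != nil {
		klog.Errorf("udevadm trigger failed: %v, output: %q", err, string(out))
		return err
	}
	klog.V(4).Info("successfully probed all attachments")
	return nil
}

// Typical call site: settleAndTrigger(utilexec.New())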
- glog.V(2).Infof("Error deleting cinder volume %s: %v", cd.pdName, err) + klog.V(2).Infof("Error deleting cinder volume %s: %v", cd.pdName, err) return err } - glog.V(2).Infof("Successfully deleted cinder volume %s", cd.pdName) + klog.V(2).Infof("Successfully deleted cinder volume %s", cd.pdName) return nil } @@ -149,7 +149,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) { zones := make(sets.String) nodes, err := kubeClient.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { - glog.V(2).Infof("Error listing nodes") + klog.V(2).Infof("Error listing nodes") return zones, err } for _, node := range nodes.Items { @@ -157,7 +157,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) { zones.Insert(zone) } } - glog.V(4).Infof("zones found: %v", zones) + klog.V(4).Infof("zones found: %v", zones) return zones, nil } @@ -201,7 +201,7 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, al // No zone specified, choose one randomly in the same region zones, err := getZonesFromNodes(c.plugin.host.GetKubeClient()) if err != nil { - glog.V(2).Infof("error getting zone information: %v", err) + klog.V(2).Infof("error getting zone information: %v", err) return "", 0, nil, "", err } // if we did not get any zones, lets leave it blank and gophercloud will @@ -209,7 +209,7 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, al if len(zones) > 0 { availability, err = volutil.SelectZoneForVolume(false, false, "", nil, zones, node, allowedTopologies, c.options.PVC.Name) if err != nil { - glog.V(2).Infof("error selecting zone for volume: %v", err) + klog.V(2).Infof("error selecting zone for volume: %v", err) return "", 0, nil, "", err } } @@ -217,10 +217,10 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, al volumeID, volumeAZ, volumeRegion, IgnoreVolumeAZ, err := cloud.CreateVolume(name, volSizeGiB, vtype, availability, c.options.CloudTags) if err != nil { - glog.V(2).Infof("Error creating cinder volume: %v", err) + klog.V(2).Infof("Error creating cinder volume: %v", err) return "", 0, nil, "", err } - glog.V(2).Infof("Successfully created cinder volume %s", volumeID) + klog.V(2).Infof("Successfully created cinder volume %s", volumeID) // these are needed that pod is spawning to same AZ volumeLabels = make(map[string]string) @@ -248,17 +248,17 @@ func probeAttachedVolume() error { cmdSettle := executor.Command("udevadm", argsSettle...) _, errSettle := cmdSettle.CombinedOutput() if errSettle != nil { - glog.Errorf("error running udevadm settle %v\n", errSettle) + klog.Errorf("error running udevadm settle %v\n", errSettle) } args := []string{"trigger"} cmd := executor.Command("udevadm", args...) 
_, err := cmd.CombinedOutput() if err != nil { - glog.Errorf("error running udevadm trigger %v\n", err) + klog.Errorf("error running udevadm trigger %v\n", err) return err } - glog.V(4).Infof("Successfully probed all attachments") + klog.V(4).Infof("Successfully probed all attachments") return nil } diff --git a/pkg/volume/configmap/BUILD b/pkg/volume/configmap/BUILD index 64bfe582e60ba..1047a1b090f2e 100644 --- a/pkg/volume/configmap/BUILD +++ b/pkg/volume/configmap/BUILD @@ -22,7 +22,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/configmap/configmap.go b/pkg/volume/configmap/configmap.go index 4fd1a7332d85a..823ce53d4b365 100644 --- a/pkg/volume/configmap/configmap.go +++ b/pkg/volume/configmap/configmap.go @@ -19,11 +19,11 @@ package configmap import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -180,7 +180,7 @@ func (b *configMapVolumeMounter) SetUp(fsGroup *int64) error { } func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir) + klog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir) // Wrap EmptyDir, let it do the setup. wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts) @@ -192,7 +192,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { configMap, err := b.getConfigMap(b.pod.Namespace, b.source.Name) if err != nil { if !(errors.IsNotFound(err) && optional) { - glog.Errorf("Couldn't get configMap %v/%v: %v", b.pod.Namespace, b.source.Name, err) + klog.Errorf("Couldn't get configMap %v/%v: %v", b.pod.Namespace, b.source.Name, err) return err } configMap = &v1.ConfigMap{ @@ -204,7 +204,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } totalBytes := totalBytes(configMap) - glog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes", + klog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes", b.pod.Namespace, b.source.Name, len(configMap.Data)+len(configMap.BinaryData), @@ -228,12 +228,12 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if !setupSuccess { unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID) if unmountCreateErr != nil { - glog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr) + klog.Errorf("error cleaning up mount %s after failure. 
Create unmounter failed with %v", b.volName, unmountCreateErr) return } tearDownErr := unmounter.TearDown() if tearDownErr != nil { - glog.Errorf("Error tearing down volume %s with : %v", b.volName, tearDownErr) + klog.Errorf("Error tearing down volume %s with : %v", b.volName, tearDownErr) } } }() @@ -241,19 +241,19 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName) writer, err := volumeutil.NewAtomicWriter(dir, writerContext) if err != nil { - glog.Errorf("Error creating atomic writer: %v", err) + klog.Errorf("Error creating atomic writer: %v", err) return err } err = writer.Write(payload) if err != nil { - glog.Errorf("Error writing payload to dir: %v", err) + klog.Errorf("Error writing payload to dir: %v", err) return err } err = volume.SetVolumeOwnership(b, fsGroup) if err != nil { - glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) + klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) return err } setupSuccess = true diff --git a/pkg/volume/csi/BUILD b/pkg/volume/csi/BUILD index c9adb17cc5a7b..770a422dac93b 100644 --- a/pkg/volume/csi/BUILD +++ b/pkg/volume/csi/BUILD @@ -31,8 +31,8 @@ go_library( "//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/listers/csi/v1alpha1:go_default_library", "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -70,7 +70,7 @@ go_test( "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake:go_default_library", "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/csi/csi_attacher.go b/pkg/volume/csi/csi_attacher.go index 34f75e9a65f67..bc86d12f9db8b 100644 --- a/pkg/volume/csi/csi_attacher.go +++ b/pkg/volume/csi/csi_attacher.go @@ -27,7 +27,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" csipb "github.com/container-storage-interface/spec/lib/go/csi/v0" "k8s.io/api/core/v1" @@ -60,23 +60,23 @@ var _ volume.DeviceMounter = &csiAttacher{} func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { if spec == nil { - glog.Error(log("attacher.Attach missing volume.Spec")) + klog.Error(log("attacher.Attach missing volume.Spec")) return "", errors.New("missing spec") } csiSource, err := getCSISourceFromSpec(spec) if err != nil { - glog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err)) + klog.Error(log("attacher.Attach failed to get CSI persistent source: %v", err)) return "", err } skip, err := c.plugin.skipAttach(csiSource.Driver) if err != nil { - glog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err)) + klog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err)) return "", err } if skip { - glog.V(4).Infof(log("skipping attach for driver %s", csiSource.Driver)) + klog.V(4).Infof(log("skipping attach for driver %s", csiSource.Driver)) return "", nil } @@ -102,23 +102,23 @@ func (c *csiAttacher) Attach(spec *volume.Spec, 
nodeName types.NodeName) (string alreadyExist := false if err != nil { if !apierrs.IsAlreadyExists(err) { - glog.Error(log("attacher.Attach failed: %v", err)) + klog.Error(log("attacher.Attach failed: %v", err)) return "", err } alreadyExist = true } if alreadyExist { - glog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle)) + klog.V(4).Info(log("attachment [%v] for volume [%v] already exists (will not be recreated)", attachID, csiSource.VolumeHandle)) } else { - glog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle)) + klog.V(4).Info(log("attachment [%v] for volume [%v] created successfully", attachID, csiSource.VolumeHandle)) } if _, err := c.waitForVolumeAttachment(csiSource.VolumeHandle, attachID, csiTimeout); err != nil { return "", err } - glog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID)) + klog.V(4).Info(log("attacher.Attach finished OK with VolumeAttachment object [%s]", attachID)) return attachID, nil } @@ -126,17 +126,17 @@ func (c *csiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1.Pod, timeout time.Duration) (string, error) { source, err := getCSISourceFromSpec(spec) if err != nil { - glog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err)) + klog.Error(log("attacher.WaitForAttach failed to extract CSI volume source: %v", err)) return "", err } skip, err := c.plugin.skipAttach(source.Driver) if err != nil { - glog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err)) + klog.Error(log("attacher.Attach failed to find if driver is attachable: %v", err)) return "", err } if skip { - glog.V(4).Infof(log("Driver is not attachable, skip waiting for attach")) + klog.V(4).Infof(log("Driver is not attachable, skip waiting for attach")) return "", nil } @@ -144,7 +144,7 @@ func (c *csiAttacher) WaitForAttach(spec *volume.Spec, attachID string, pod *v1. 
} func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, timeout time.Duration) (string, error) { - glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID)) + klog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID)) timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable defer timer.Stop() @@ -153,10 +153,10 @@ func (c *csiAttacher) waitForVolumeAttachment(volumeHandle, attachID string, tim } func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) (string, error) { - glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) + klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{}) if err != nil { - glog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err)) + klog.Error(log("attacher.WaitForAttach failed for volume [%s] (will continue to try): %v", volumeHandle, err)) return "", fmt.Errorf("volume %v has GET error for volume attachment %v: %v", volumeHandle, attachID, err) } successful, err := verifyAttachmentStatus(attach, volumeHandle) @@ -179,7 +179,7 @@ func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID str select { case event, ok := <-ch: if !ok { - glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID) + klog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID) return "", errors.New("volume attachment watch channel had been closed") } @@ -195,7 +195,7 @@ func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID str } case watch.Deleted: // if deleted, fail fast - glog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", attachID)) + klog.Error(log("VolumeAttachment [%s] has been deleted, will not continue to wait for attachment", attachID)) return "", errors.New("volume attachment has been deleted") case watch.Error: @@ -204,7 +204,7 @@ func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID str } case <-timer.C: - glog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID)) + klog.Error(log("attacher.WaitForAttach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID)) return "", fmt.Errorf("attachment timeout for volume %v", volumeHandle) } } @@ -213,7 +213,7 @@ func (c *csiAttacher) waitForVolumeAttachmentInternal(volumeHandle, attachID str func verifyAttachmentStatus(attachment *storage.VolumeAttachment, volumeHandle string) (bool, error) { // if being deleted, fail fast if attachment.GetDeletionTimestamp() != nil { - glog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachment.Name)) + klog.Error(log("VolumeAttachment [%s] has deletion timestamp, will not continue to wait for attachment", attachment.Name)) return false, errors.New("volume attachment is being deleted") } // attachment OK @@ -223,30 +223,30 @@ func verifyAttachmentStatus(attachment *storage.VolumeAttachment, volumeHandle s // driver reports attach error attachErr := attachment.Status.AttachError if attachErr != nil { - glog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message)) + klog.Error(log("attachment for %v failed: %v", volumeHandle, attachErr.Message)) 
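// Illustrative sketch, not part of the patch: the wait loops in this file
// combine an initial Get, a watch channel, and a timeout timer, and feed
// every observed VolumeAttachment through the same status check. A reduced
// version of that per-object decision, using the storage/v1beta1 types:
package attachsketch

import (
	"errors"

	storage "k8s.io/api/storage/v1beta1"
)

// attachmentDone reports whether waiting can stop, and with what outcome;
// (false, nil) means keep polling or watching.
func attachmentDone(att *storage.VolumeAttachment) (bool, error) {
	if att.GetDeletionTimestamp() != nil {
		// Being deleted: fail fast rather than wait out the timeout.
		return true, errors.New("volume attachment is being deleted")
	}
	if att.Status.Attached {
		return true, nil // the external attacher confirmed the attach
	}
	if attachErr := att.Status.AttachError; attachErr != nil {
		return true, errors.New(attachErr.Message)
	}
	return false, nil
}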
return false, errors.New(attachErr.Message) } return false, nil } func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { - glog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs))) + klog.V(4).Info(log("probing attachment status for %d volume(s) ", len(specs))) attached := make(map[*volume.Spec]bool) for _, spec := range specs { if spec == nil { - glog.Error(log("attacher.VolumesAreAttached missing volume.Spec")) + klog.Error(log("attacher.VolumesAreAttached missing volume.Spec")) return nil, errors.New("missing spec") } source, err := getCSISourceFromSpec(spec) if err != nil { - glog.Error(log("attacher.VolumesAreAttached failed: %v", err)) + klog.Error(log("attacher.VolumesAreAttached failed: %v", err)) continue } skip, err := c.plugin.skipAttach(source.Driver) if err != nil { - glog.Error(log("Failed to check CSIDriver for %s: %s", source.Driver, err)) + klog.Error(log("Failed to check CSIDriver for %s: %s", source.Driver, err)) } else { if skip { // This volume is not attachable, pretend it's attached @@ -256,14 +256,14 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No } attachID := getAttachmentName(source.VolumeHandle, source.Driver, string(nodeName)) - glog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID)) + klog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID)) attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{}) if err != nil { attached[spec] = false - glog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err)) + klog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err)) continue } - glog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached)) + klog.V(4).Info(log("attacher.VolumesAreAttached attachment [%v] has status.attached=%t", attachID, attach.Status.Attached)) attached[spec] = attach.Status.Attached } @@ -271,18 +271,18 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No } func (c *csiAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { - glog.V(4).Info(log("attacher.GetDeviceMountPath(%v)", spec)) + klog.V(4).Info(log("attacher.GetDeviceMountPath(%v)", spec)) deviceMountPath, err := makeDeviceMountPath(c.plugin, spec) if err != nil { - glog.Error(log("attacher.GetDeviceMountPath failed to make device mount path: %v", err)) + klog.Error(log("attacher.GetDeviceMountPath failed to make device mount path: %v", err)) return "", err } - glog.V(4).Infof("attacher.GetDeviceMountPath succeeded, deviceMountPath: %s", deviceMountPath) + klog.V(4).Infof("attacher.GetDeviceMountPath succeeded, deviceMountPath: %s", deviceMountPath) return deviceMountPath, nil } func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) (err error) { - glog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath)) + klog.V(4).Infof(log("attacher.MountDevice(%s, %s)", devicePath, deviceMountPath)) if deviceMountPath == "" { err = fmt.Errorf("attacher.MountDevice failed, deviceMountPath is empty") @@ -291,12 +291,12 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo mounted, err := isDirMounted(c.plugin, deviceMountPath) if err != nil { - glog.Error(log("attacher.MountDevice failed while checking mount status for dir [%s]", deviceMountPath)) 
+ klog.Error(log("attacher.MountDevice failed while checking mount status for dir [%s]", deviceMountPath)) return err } if mounted { - glog.V(4).Info(log("attacher.MountDevice skipping mount, dir already mounted [%s]", deviceMountPath)) + klog.V(4).Info(log("attacher.MountDevice skipping mount, dir already mounted [%s]", deviceMountPath)) return nil } @@ -306,35 +306,35 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo } csiSource, err := getCSISourceFromSpec(spec) if err != nil { - glog.Error(log("attacher.MountDevice failed to get CSI persistent source: %v", err)) + klog.Error(log("attacher.MountDevice failed to get CSI persistent source: %v", err)) return err } // Store volume metadata for UnmountDevice. Keep it around even if the // driver does not support NodeStage, UnmountDevice still needs it. if err = os.MkdirAll(deviceMountPath, 0750); err != nil { - glog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err)) + klog.Error(log("attacher.MountDevice failed to create dir %#v: %v", deviceMountPath, err)) return err } - glog.V(4).Info(log("created target path successfully [%s]", deviceMountPath)) + klog.V(4).Info(log("created target path successfully [%s]", deviceMountPath)) dataDir := filepath.Dir(deviceMountPath) data := map[string]string{ volDataKey.volHandle: csiSource.VolumeHandle, volDataKey.driverName: csiSource.Driver, } if err = saveVolumeData(dataDir, volDataFileName, data); err != nil { - glog.Error(log("failed to save volume info data: %v", err)) + klog.Error(log("failed to save volume info data: %v", err)) if cleanerr := os.RemoveAll(dataDir); err != nil { - glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr)) + klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, cleanerr)) } return err } defer func() { if err != nil { // clean up metadata - glog.Errorf(log("attacher.MountDevice failed: %v", err)) + klog.Errorf(log("attacher.MountDevice failed: %v", err)) if err := removeMountDir(c.plugin, deviceMountPath); err != nil { - glog.Error(log("attacher.MountDevice failed to remove mount dir after errir [%s]: %v", deviceMountPath, err)) + klog.Error(log("attacher.MountDevice failed to remove mount dir after errir [%s]: %v", deviceMountPath, err)) } } }() @@ -352,7 +352,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo return err } if !stageUnstageSet { - glog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")) + klog.Infof(log("attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")) // defer does *not* remove the metadata file and it's correct - UnmountDevice needs it there. 
return nil } @@ -391,7 +391,7 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo return err } - glog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath)) + klog.V(4).Infof(log("attacher.MountDevice successfully requested NodeStageVolume [%s]", deviceMountPath)) return nil } @@ -402,12 +402,12 @@ var _ volume.DeviceUnmounter = &csiAttacher{} func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error { // volumeName in format driverNamevolumeHandle generated by plugin.GetVolumeName() if volumeName == "" { - glog.Error(log("detacher.Detach missing value for parameter volumeName")) + klog.Error(log("detacher.Detach missing value for parameter volumeName")) return errors.New("missing expected parameter volumeName") } parts := strings.Split(volumeName, volNameSep) if len(parts) != 2 { - glog.Error(log("detacher.Detach insufficient info encoded in volumeName")) + klog.Error(log("detacher.Detach insufficient info encoded in volumeName")) return errors.New("volumeName missing expected data") } @@ -417,19 +417,19 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error { if err := c.k8s.StorageV1beta1().VolumeAttachments().Delete(attachID, nil); err != nil { if apierrs.IsNotFound(err) { // object deleted or never existed, done - glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID)) + klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volID)) return nil } - glog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err)) + klog.Error(log("detacher.Detach failed to delete VolumeAttachment [%s]: %v", attachID, err)) return err } - glog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID)) + klog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID)) return c.waitForVolumeDetachment(volID, attachID) } func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error { - glog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID)) + klog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID)) timeout := c.waitSleepTime * 10 timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable @@ -439,21 +439,21 @@ func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) err } func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID string, timer *time.Timer, timeout time.Duration) error { - glog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) + klog.V(4).Info(log("probing VolumeAttachment [id=%v]", attachID)) attach, err := c.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{}) if err != nil { if apierrs.IsNotFound(err) { //object deleted or never existed, done - glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle)) + klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] not found, object deleted", attachID, volumeHandle)) return nil } - glog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err)) + klog.Error(log("detacher.WaitForDetach failed for volume [%s] (will continue to try): %v", volumeHandle, err)) return err } // driver reports attach error detachErr := attach.Status.DetachError if detachErr != nil { - 
glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message)) + klog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message)) return errors.New(detachErr.Message) } @@ -468,7 +468,7 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str select { case event, ok := <-ch: if !ok { - glog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID) + klog.Errorf("[attachment.ID=%v] watch channel had been closed", attachID) return errors.New("volume attachment watch channel had been closed") } @@ -478,12 +478,12 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str // driver reports attach error detachErr := attach.Status.DetachError if detachErr != nil { - glog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message)) + klog.Error(log("detachment for VolumeAttachment [%v] for volume [%s] failed: %v", attachID, volumeHandle, detachErr.Message)) return errors.New(detachErr.Message) } case watch.Deleted: //object deleted - glog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] has been deleted", attachID, volumeHandle)) + klog.V(4).Info(log("VolumeAttachment object [%v] for volume [%v] has been deleted", attachID, volumeHandle)) return nil case watch.Error: @@ -492,14 +492,14 @@ func (c *csiAttacher) waitForVolumeDetachmentInternal(volumeHandle, attachID str } case <-timer.C: - glog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID)) + klog.Error(log("detacher.WaitForDetach timeout after %v [volume=%v; attachment.ID=%v]", timeout, volumeHandle, attachID)) return fmt.Errorf("detachment timeout for volume %v", volumeHandle) } } } func (c *csiAttacher) UnmountDevice(deviceMountPath string) error { - glog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath)) + klog.V(4).Info(log("attacher.UnmountDevice(%s)", deviceMountPath)) // Setup var driverName, volID string @@ -509,12 +509,12 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error { driverName = data[volDataKey.driverName] volID = data[volDataKey.volHandle] } else { - glog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err)) + klog.Error(log("UnmountDevice failed to load volume data file [%s]: %v", dataDir, err)) // The volume might have been mounted by old CSI volume plugin. Fall back to the old behavior: read PV from API server driverName, volID, err = getDriverAndVolNameFromDeviceMountPath(c.k8s, deviceMountPath) if err != nil { - glog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err)) + klog.Errorf(log("attacher.UnmountDevice failed to get driver and volume name from device mount path: %v", err)) return err } } @@ -529,11 +529,11 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error { // Check whether "STAGE_UNSTAGE_VOLUME" is set stageUnstageSet, err := hasStageUnstageCapability(ctx, csi) if err != nil { - glog.Errorf(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err)) + klog.Errorf(log("attacher.UnmountDevice failed to check whether STAGE_UNSTAGE_VOLUME set: %v", err)) return err } if !stageUnstageSet { - glog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. 
Skipping UnmountDevice...")) + klog.Infof(log("attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...")) // Just delete the global directory + json file if err := removeMountDir(c.plugin, deviceMountPath); err != nil { return fmt.Errorf("failed to clean up gloubal mount %s: %s", dataDir, err) @@ -548,7 +548,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error { deviceMountPath) if err != nil { - glog.Errorf(log("attacher.UnmountDevice failed: %v", err)) + klog.Errorf(log("attacher.UnmountDevice failed: %v", err)) return err } @@ -557,7 +557,7 @@ func (c *csiAttacher) UnmountDevice(deviceMountPath string) error { return fmt.Errorf("failed to clean up gloubal mount %s: %s", dataDir, err) } - glog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeStageVolume [%s]", deviceMountPath)) + klog.V(4).Infof(log("attacher.UnmountDevice successfully requested NodeStageVolume [%s]", deviceMountPath)) return nil } diff --git a/pkg/volume/csi/csi_attacher_test.go b/pkg/volume/csi/csi_attacher_test.go index 52ef4f38defff..72c0c236e219a 100644 --- a/pkg/volume/csi/csi_attacher_test.go +++ b/pkg/volume/csi/csi_attacher_test.go @@ -24,7 +24,6 @@ import ( "testing" "time" - "github.com/golang/glog" storage "k8s.io/api/storage/v1beta1" apierrs "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -39,6 +38,7 @@ import ( core "k8s.io/client-go/testing" utiltesting "k8s.io/client-go/util/testing" fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -85,11 +85,11 @@ func markVolumeAttached(t *testing.T, client clientset.Interface, watch *watch.R t.Error(err) } if attach != nil { - glog.Infof("stopping wait") + klog.Infof("stopping wait") break } } - glog.Infof("stopped wait") + klog.Infof("stopped wait") if attach == nil { t.Logf("attachment not found for id:%v", attachID) diff --git a/pkg/volume/csi/csi_block.go b/pkg/volume/csi/csi_block.go index 389b80a443d9a..41129fd4fb427 100644 --- a/pkg/volume/csi/csi_block.go +++ b/pkg/volume/csi/csi_block.go @@ -23,7 +23,7 @@ import ( "os" "path/filepath" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -51,7 +51,7 @@ var _ volume.BlockVolumeMapper = &csiBlockMapper{} // Example: plugins/kubernetes.io/csi/volumeDevices/{volumeID}/dev func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) { dir := getVolumeDevicePluginDir(spec.Name(), m.plugin.host) - glog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir)) + klog.V(4).Infof(log("blockMapper.GetGlobalMapPath = %s", dir)) return dir, nil } @@ -60,7 +60,7 @@ func (m *csiBlockMapper) GetGlobalMapPath(spec *volume.Spec) (string, error) { func (m *csiBlockMapper) GetPodDeviceMapPath() (string, string) { path := filepath.Join(m.plugin.host.GetPodVolumeDeviceDir(m.podUID, csiPluginName), m.specName, "dev") specName := m.specName - glog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, specName)) + klog.V(4).Infof(log("blockMapper.GetPodDeviceMapPath [path=%s; name=%s]", path, specName)) return path, specName } @@ -70,26 +70,26 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) { return "", errors.New("CSIBlockVolume feature not enabled") } - glog.V(4).Infof(log("blockMapper.SetupDevice called")) + klog.V(4).Infof(log("blockMapper.SetupDevice called")) if m.spec == nil { - 
glog.Error(log("blockMapper.Map spec is nil")) + klog.Error(log("blockMapper.Map spec is nil")) return "", fmt.Errorf("spec is nil") } csiSource, err := getCSISourceFromSpec(m.spec) if err != nil { - glog.Error(log("blockMapper.SetupDevice failed to get CSI persistent source: %v", err)) + klog.Error(log("blockMapper.SetupDevice failed to get CSI persistent source: %v", err)) return "", err } globalMapPath, err := m.GetGlobalMapPath(m.spec) if err != nil { - glog.Error(log("blockMapper.SetupDevice failed to get global map path: %v", err)) + klog.Error(log("blockMapper.SetupDevice failed to get global map path: %v", err)) return "", err } globalMapPathBlockFile := filepath.Join(globalMapPath, "file") - glog.V(4).Infof(log("blockMapper.SetupDevice global device map path file set [%s]", globalMapPathBlockFile)) + klog.V(4).Infof(log("blockMapper.SetupDevice global device map path file set [%s]", globalMapPathBlockFile)) csi := m.csiClient ctx, cancel := context.WithTimeout(context.Background(), csiTimeout) @@ -98,11 +98,11 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) { // Check whether "STAGE_UNSTAGE_VOLUME" is set stageUnstageSet, err := hasStageUnstageCapability(ctx, csi) if err != nil { - glog.Error(log("blockMapper.SetupDevice failed to check STAGE_UNSTAGE_VOLUME capability: %v", err)) + klog.Error(log("blockMapper.SetupDevice failed to check STAGE_UNSTAGE_VOLUME capability: %v", err)) return "", err } if !stageUnstageSet { - glog.Infof(log("blockMapper.SetupDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")) + klog.Infof(log("blockMapper.SetupDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...")) return "", nil } @@ -113,12 +113,12 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) { // search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{}) if err != nil { - glog.Error(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err)) + klog.Error(log("blockMapper.SetupDevice failed to get volume attachment [id=%v]: %v", attachID, err)) return "", err } if attachment == nil { - glog.Error(log("blockMapper.SetupDevice unable to find VolumeAttachment [id=%s]", attachID)) + klog.Error(log("blockMapper.SetupDevice unable to find VolumeAttachment [id=%s]", attachID)) return "", errors.New("no existing VolumeAttachment found") } publishVolumeInfo := attachment.Status.AttachmentMetadata @@ -134,22 +134,22 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) { // setup path globalMapPath and block file before call to NodeStageVolume if err := os.MkdirAll(globalMapPath, 0750); err != nil { - glog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPath, err)) + klog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPath, err)) return "", err } - glog.V(4).Info(log("blockMapper.SetupDevice created global device map path successfully [%s]", globalMapPath)) + klog.V(4).Info(log("blockMapper.SetupDevice created global device map path successfully [%s]", globalMapPath)) // create block device file blockFile, err := os.OpenFile(globalMapPathBlockFile, os.O_CREATE|os.O_RDWR, 0750) if err != nil { - glog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPathBlockFile, err)) + klog.Error(log("blockMapper.SetupDevice failed to create dir %s: %v", globalMapPathBlockFile, err)) return "", err } if err := blockFile.Close(); err != nil { - 
glog.Error(log("blockMapper.SetupDevice failed to close file %s: %v", globalMapPathBlockFile, err)) + klog.Error(log("blockMapper.SetupDevice failed to close file %s: %v", globalMapPathBlockFile, err)) return "", err } - glog.V(4).Info(log("blockMapper.SetupDevice created global map path block device file successfully [%s]", globalMapPathBlockFile)) + klog.V(4).Info(log("blockMapper.SetupDevice created global map path block device file successfully [%s]", globalMapPathBlockFile)) //TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI accessMode := v1.ReadWriteOnce @@ -167,14 +167,14 @@ func (m *csiBlockMapper) SetUpDevice() (string, error) { csiSource.VolumeAttributes) if err != nil { - glog.Error(log("blockMapper.SetupDevice failed: %v", err)) + klog.Error(log("blockMapper.SetupDevice failed: %v", err)) if err := os.RemoveAll(globalMapPath); err != nil { - glog.Error(log("blockMapper.SetupDevice failed to remove dir after a NodeStageVolume() error [%s]: %v", globalMapPath, err)) + klog.Error(log("blockMapper.SetupDevice failed to remove dir after a NodeStageVolume() error [%s]: %v", globalMapPath, err)) } return "", err } - glog.V(4).Infof(log("blockMapper.SetupDevice successfully requested NodeStageVolume [%s]", globalMapPathBlockFile)) + klog.V(4).Infof(log("blockMapper.SetupDevice successfully requested NodeStageVolume [%s]", globalMapPathBlockFile)) return globalMapPathBlockFile, nil } @@ -183,16 +183,16 @@ func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, vol return errors.New("CSIBlockVolume feature not enabled") } - glog.V(4).Infof(log("blockMapper.MapDevice mapping block device %s", devicePath)) + klog.V(4).Infof(log("blockMapper.MapDevice mapping block device %s", devicePath)) if m.spec == nil { - glog.Error(log("blockMapper.MapDevice spec is nil")) + klog.Error(log("blockMapper.MapDevice spec is nil")) return fmt.Errorf("spec is nil") } csiSource, err := getCSISourceFromSpec(m.spec) if err != nil { - glog.Error(log("blockMapper.MapDevice failed to get CSI persistent source: %v", err)) + klog.Error(log("blockMapper.MapDevice failed to get CSI persistent source: %v", err)) return err } @@ -203,11 +203,11 @@ func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, vol globalMapPathBlockFile := devicePath dir, _ := m.GetPodDeviceMapPath() targetBlockFilePath := filepath.Join(dir, "file") - glog.V(4).Infof(log("blockMapper.MapDevice target volume map file path %s", targetBlockFilePath)) + klog.V(4).Infof(log("blockMapper.MapDevice target volume map file path %s", targetBlockFilePath)) stageCapable, err := hasStageUnstageCapability(ctx, csi) if err != nil { - glog.Error(log("blockMapper.MapDevice failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err)) + klog.Error(log("blockMapper.MapDevice failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err)) return err } @@ -221,12 +221,12 @@ func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, vol // search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName attachment, err := m.k8s.StorageV1beta1().VolumeAttachments().Get(attachID, meta.GetOptions{}) if err != nil { - glog.Error(log("blockMapper.MapDevice failed to get volume attachment [id=%v]: %v", attachID, err)) + klog.Error(log("blockMapper.MapDevice failed to get volume attachment [id=%v]: %v", attachID, err)) return err } if attachment == nil { - glog.Error(log("blockMapper.MapDevice unable to find VolumeAttachment [id=%s]", attachID)) + 
klog.Error(log("blockMapper.MapDevice unable to find VolumeAttachment [id=%s]", attachID)) return errors.New("no existing VolumeAttachment found") } publishVolumeInfo := attachment.Status.AttachmentMetadata @@ -235,29 +235,29 @@ func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, vol if csiSource.NodePublishSecretRef != nil { nodePublishSecrets, err = getCredentialsFromSecret(m.k8s, csiSource.NodePublishSecretRef) if err != nil { - glog.Errorf("blockMapper.MapDevice failed to get NodePublishSecretRef %s/%s: %v", + klog.Errorf("blockMapper.MapDevice failed to get NodePublishSecretRef %s/%s: %v", csiSource.NodePublishSecretRef.Namespace, csiSource.NodePublishSecretRef.Name, err) return err } } if err := os.MkdirAll(dir, 0750); err != nil { - glog.Error(log("blockMapper.MapDevice failed to create dir %s: %v", dir, err)) + klog.Error(log("blockMapper.MapDevice failed to create dir %s: %v", dir, err)) return err } - glog.V(4).Info(log("blockMapper.MapDevice created target volume map path successfully [%s]", dir)) + klog.V(4).Info(log("blockMapper.MapDevice created target volume map path successfully [%s]", dir)) // create target map volume block file targetBlockFile, err := os.OpenFile(targetBlockFilePath, os.O_CREATE|os.O_RDWR, 0750) if err != nil { - glog.Error(log("blockMapper.MapDevice failed to create file %s: %v", targetBlockFilePath, err)) + klog.Error(log("blockMapper.MapDevice failed to create file %s: %v", targetBlockFilePath, err)) return err } if err := targetBlockFile.Close(); err != nil { - glog.Error(log("blockMapper.MapDevice failed to close file %s: %v", targetBlockFilePath, err)) + klog.Error(log("blockMapper.MapDevice failed to close file %s: %v", targetBlockFilePath, err)) return err } - glog.V(4).Info(log("blockMapper.MapDevice created target volume map file successfully [%s]", targetBlockFilePath)) + klog.V(4).Info(log("blockMapper.MapDevice created target volume map file successfully [%s]", targetBlockFilePath)) //TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI accessMode := v1.ReadWriteOnce @@ -280,9 +280,9 @@ func (m *csiBlockMapper) MapDevice(devicePath, globalMapPath, volumeMapPath, vol ) if err != nil { - glog.Errorf(log("blockMapper.MapDevice failed: %v", err)) + klog.Errorf(log("blockMapper.MapDevice failed: %v", err)) if err := os.RemoveAll(dir); err != nil { - glog.Error(log("blockMapper.MapDevice failed to remove mapped dir after a NodePublish() error [%s]: %v", dir, err)) + klog.Error(log("blockMapper.MapDevice failed to remove mapped dir after a NodePublish() error [%s]: %v", dir, err)) } return err } @@ -298,7 +298,7 @@ func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error return errors.New("CSIBlockVolume feature not enabled") } - glog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath)) + klog.V(4).Infof(log("unmapper.TearDownDevice(globalMapPath=%s; devicePath=%s)", globalMapPath, devicePath)) csi := m.csiClient ctx, cancel := context.WithTimeout(context.Background(), csiTimeout) @@ -306,20 +306,20 @@ func (m *csiBlockMapper) TearDownDevice(globalMapPath, devicePath string) error // unmap global device map path if err := csi.NodeUnstageVolume(ctx, m.volumeID, globalMapPath); err != nil { - glog.Errorf(log("blockMapper.TearDownDevice failed: %v", err)) + klog.Errorf(log("blockMapper.TearDownDevice failed: %v", err)) return err } - glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnstageVolume successfully [%s]", 
globalMapPath)) + klog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnstageVolume successfully [%s]", globalMapPath)) // request to remove pod volume map path also podVolumePath, volumeName := m.GetPodDeviceMapPath() podVolumeMapPath := filepath.Join(podVolumePath, volumeName) if err := csi.NodeUnpublishVolume(ctx, m.volumeID, podVolumeMapPath); err != nil { - glog.Error(log("blockMapper.TearDownDevice failed: %v", err)) + klog.Error(log("blockMapper.TearDownDevice failed: %v", err)) return err } - glog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnpublished successfully [%s]", podVolumeMapPath)) + klog.V(4).Infof(log("blockMapper.TearDownDevice NodeUnpublished successfully [%s]", podVolumeMapPath)) return nil } diff --git a/pkg/volume/csi/csi_client.go b/pkg/volume/csi/csi_client.go index 1d41400d1159b..d8355ae29be74 100644 --- a/pkg/volume/csi/csi_client.go +++ b/pkg/volume/csi/csi_client.go @@ -25,10 +25,10 @@ import ( "time" csipb "github.com/container-storage-interface/spec/lib/go/csi/v0" - "github.com/golang/glog" "google.golang.org/grpc" api "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" ) @@ -112,7 +112,7 @@ func (c *csiDriverClient) NodeGetInfo(ctx context.Context) ( maxVolumePerNode int64, accessibleTopology *csipb.Topology, err error) { - glog.V(4).Info(log("calling NodeGetInfo rpc")) + klog.V(4).Info(log("calling NodeGetInfo rpc")) nodeClient, closer, err := c.nodeClientCreator(c.driverName) if err != nil { @@ -141,7 +141,7 @@ func (c *csiDriverClient) NodePublishVolume( fsType string, mountOptions []string, ) error { - glog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath)) + klog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath)) if volID == "" { return errors.New("missing volume id") } @@ -190,7 +190,7 @@ func (c *csiDriverClient) NodePublishVolume( } func (c *csiDriverClient) NodeUnpublishVolume(ctx context.Context, volID string, targetPath string) error { - glog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath)) + klog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath)) if volID == "" { return errors.New("missing volume id") } @@ -222,7 +222,7 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context, nodeStageSecrets map[string]string, volumeAttribs map[string]string, ) error { - glog.V(4).Info(log("calling NodeStageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath)) + klog.V(4).Info(log("calling NodeStageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath)) if volID == "" { return errors.New("missing volume id") } @@ -266,7 +266,7 @@ func (c *csiDriverClient) NodeStageVolume(ctx context.Context, } func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingTargetPath string) error { - glog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath)) + klog.V(4).Info(log("calling NodeUnstageVolume rpc [volid=%s,staging_target_path=%s]", volID, stagingTargetPath)) if volID == "" { return errors.New("missing volume id") } @@ -289,7 +289,7 @@ func (c *csiDriverClient) NodeUnstageVolume(ctx context.Context, volID, stagingT } func (c *csiDriverClient) NodeGetCapabilities(ctx context.Context) ([]*csipb.NodeServiceCapability, error) { - glog.V(4).Info(log("calling NodeGetCapabilities rpc")) + klog.V(4).Info(log("calling 
NodeGetCapabilities rpc")) nodeClient, closer, err := c.nodeClientCreator(c.driverName) if err != nil { @@ -334,7 +334,7 @@ func newGrpcConn(driverName string) (*grpc.ClientConn, error) { addr = driver.driverEndpoint } network := "unix" - glog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr)) + klog.V(4).Infof(log("creating new gRPC connection for [%s://%s]", network, addr)) return grpc.Dial( addr, diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go index dfc9cd4a86d7b..b088411277425 100644 --- a/pkg/volume/csi/csi_mounter.go +++ b/pkg/volume/csi/csi_mounter.go @@ -23,7 +23,7 @@ import ( "os" "path" - "github.com/golang/glog" + "k8s.io/klog" api "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -75,7 +75,7 @@ var _ volume.Volume = &csiMountMgr{} func (c *csiMountMgr) GetPath() string { dir := path.Join(getTargetPath(c.podUID, c.specVolumeID, c.plugin.host), "/mount") - glog.V(4).Info(log("mounter.GetPath generated [%s]", dir)) + klog.V(4).Info(log("mounter.GetPath generated [%s]", dir)) return dir } @@ -96,22 +96,22 @@ func (c *csiMountMgr) SetUp(fsGroup *int64) error { } func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { - glog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir)) + klog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir)) mounted, err := isDirMounted(c.plugin, dir) if err != nil { - glog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir)) + klog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir)) return err } if mounted { - glog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir)) + klog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir)) return nil } csiSource, err := getCSISourceFromSpec(c.spec) if err != nil { - glog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err)) + klog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err)) return err } @@ -123,14 +123,14 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { deviceMountPath := "" stageUnstageSet, err := hasStageUnstageCapability(ctx, csi) if err != nil { - glog.Error(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capabilty: %v", err)) + klog.Error(log("mounter.SetUpAt failed to check for STAGE_UNSTAGE_VOLUME capability: %v", err)) return err } if stageUnstageSet { deviceMountPath, err = makeDeviceMountPath(c.plugin, c.spec) if err != nil { - glog.Error(log("mounter.SetUpAt failed to make device mount path: %v", err)) + klog.Error(log("mounter.SetUpAt failed to make device mount path: %v", err)) return err } } @@ -156,10 +156,10 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { // create target_dir before call to NodePublish if err := os.MkdirAll(dir, 0750); err != nil { - glog.Error(log("mouter.SetUpAt failed to create dir %#v: %v", dir, err)) + klog.Error(log("mounter.SetUpAt failed to create dir %#v: %v", dir, err)) return err } - glog.V(4).Info(log("created target path successfully [%s]", dir)) + klog.V(4).Info(log("created target path successfully [%s]", dir)) //TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI accessMode := api.ReadWriteOnce @@ -170,7 +170,7 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { // Inject pod information into volume_attributes podAttrs, err := c.podAttributes() if err != nil { - glog.Error(log("mouter.SetUpAt failed to assemble volume attributes: %v", err)) +
klog.Error(log("mouter.SetUpAt failed to assemble volume attributes: %v", err)) return err } if podAttrs != nil { @@ -199,9 +199,9 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { ) if err != nil { - glog.Errorf(log("mounter.SetupAt failed: %v", err)) + klog.Errorf(log("mounter.SetupAt failed: %v", err)) if removeMountDirErr := removeMountDir(c.plugin, dir); removeMountDirErr != nil { - glog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr)) + klog.Error(log("mounter.SetupAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, removeMountDirErr)) } return err } @@ -216,18 +216,18 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { // attempt to rollback mount. fsGrpErr := fmt.Errorf("applyFSGroup failed for vol %s: %v", c.volumeID, err) if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil { - glog.Error(log("NodeUnpublishVolume failed for [%s]: %v", c.volumeID, unpubErr)) + klog.Error(log("NodeUnpublishVolume failed for [%s]: %v", c.volumeID, unpubErr)) return fsGrpErr } if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil { - glog.Error(log("removeMountDir failed for [%s]: %v", dir, unmountErr)) + klog.Error(log("removeMountDir failed for [%s]: %v", dir, unmountErr)) return fsGrpErr } return fsGrpErr } - glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir)) + klog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir)) return nil } @@ -242,7 +242,7 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) { csiDriver, err := c.plugin.csiDriverLister.Get(c.driverName) if err != nil { if apierrs.IsNotFound(err) { - glog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", c.driverName)) + klog.V(4).Infof(log("CSIDriver %q not found, not adding pod information", c.driverName)) return nil, nil } return nil, err @@ -250,7 +250,7 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) { // if PodInfoOnMountVersion is not set or not v1 we do not set pod attributes if csiDriver.Spec.PodInfoOnMountVersion == nil || *csiDriver.Spec.PodInfoOnMountVersion != currentPodInfoMountVersion { - glog.V(4).Infof(log("CSIDriver %q does not require pod information", c.driverName)) + klog.V(4).Infof(log("CSIDriver %q does not require pod information", c.driverName)) return nil, nil } @@ -260,7 +260,7 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) { "csi.storage.k8s.io/pod.uid": string(c.pod.UID), "csi.storage.k8s.io/serviceAccount.name": c.pod.Spec.ServiceAccountName, } - glog.V(4).Infof(log("CSIDriver %q requires pod information", c.driverName)) + klog.V(4).Infof(log("CSIDriver %q requires pod information", c.driverName)) return attrs, nil } @@ -269,7 +269,7 @@ func (c *csiMountMgr) GetAttributes() volume.Attributes { path := c.GetPath() supportSelinux, err := mounter.GetSELinuxSupport(path) if err != nil { - glog.V(2).Info(log("error checking for SELinux support: %s", err)) + klog.V(2).Info(log("error checking for SELinux support: %s", err)) // Best guess supportSelinux = false } @@ -287,19 +287,19 @@ func (c *csiMountMgr) TearDown() error { return c.TearDownAt(c.GetPath()) } func (c *csiMountMgr) TearDownAt(dir string) error { - glog.V(4).Infof(log("Unmounter.TearDown(%s)", dir)) + klog.V(4).Infof(log("Unmounter.TearDown(%s)", dir)) // is dir even mounted ? 
// TODO (vladimirvivien) this check may not work for an emptyDir or local storage // see https://github.com/kubernetes/kubernetes/pull/56836#discussion_r155834524 mounted, err := isDirMounted(c.plugin, dir) if err != nil { - glog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err)) + klog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err)) return err } if !mounted { - glog.V(4).Info(log("unmounter.Teardown skipping unmount, dir not mounted [%s]", dir)) + klog.V(4).Info(log("unmounter.Teardown skipping unmount, dir not mounted [%s]", dir)) return nil } @@ -310,16 +310,16 @@ func (c *csiMountMgr) TearDownAt(dir string) error { defer cancel() if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil { - glog.Errorf(log("mounter.TearDownAt failed: %v", err)) + klog.Errorf(log("mounter.TearDownAt failed: %v", err)) return err } // clean mount point dir if err := removeMountDir(c.plugin, dir); err != nil { - glog.Error(log("mounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err)) + klog.Error(log("mounter.TearDownAt failed to clean mount dir [%s]: %v", dir, err)) return err } - glog.V(4).Infof(log("mounte.TearDownAt successfully unmounted dir [%s]", dir)) + klog.V(4).Infof(log("mounter.TearDownAt successfully unmounted dir [%s]", dir)) return nil } @@ -331,22 +331,22 @@ func (c *csiMountMgr) TearDownAt(dir string) error { func (c *csiMountMgr) applyFSGroup(fsType string, fsGroup *int64) error { if fsGroup != nil { if fsType == "" { - glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, fsType not provided")) + klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, fsType not provided")) return nil } accessModes := c.spec.PersistentVolume.Spec.AccessModes if c.spec.PersistentVolume.Spec.AccessModes == nil { - glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, access modes not provided")) + klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, access modes not provided")) return nil } if !hasReadWriteOnce(accessModes) { - glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, only support ReadWriteOnce access mode")) + klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, only support ReadWriteOnce access mode")) return nil } if c.readOnly { - glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, volume is readOnly")) + klog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, volume is readOnly")) return nil } @@ -355,7 +355,7 @@ func (c *csiMountMgr) applyFSGroup(fsType string, fsGroup *int64) error { return err } - glog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *fsGroup, c.volumeID)) + klog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *fsGroup, c.volumeID)) } return nil @@ -366,7 +366,7 @@ func isDirMounted(plug *csiPlugin, dir string) (bool, error) { mounter := plug.host.GetMounter(plug.GetPluginName()) notMnt, err := mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir)) + klog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir)) return false, err } return !notMnt, nil @@ -374,39 +374,39 @@ func isDirMounted(plug *csiPlugin, dir string) (bool, error) { // removeMountDir cleans the mount dir when dir is not mounted and removed the volume data file in dir func removeMountDir(plug *csiPlugin, mountPath string) error { -
glog.V(4).Info(log("removing mount path [%s]", mountPath)) + klog.V(4).Info(log("removing mount path [%s]", mountPath)) if pathExists, pathErr := util.PathExists(mountPath); pathErr != nil { - glog.Error(log("failed while checking mount path stat [%s]", pathErr)) + klog.Error(log("failed while checking mount path stat [%s]", pathErr)) return pathErr } else if !pathExists { - glog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath)) + klog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath)) return nil } mounter := plug.host.GetMounter(plug.GetPluginName()) notMnt, err := mounter.IsLikelyNotMountPoint(mountPath) if err != nil { - glog.Error(log("mount dir removal failed [%s]: %v", mountPath, err)) + klog.Error(log("mount dir removal failed [%s]: %v", mountPath, err)) return err } if notMnt { - glog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath)) + klog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath)) if err := os.Remove(mountPath); err != nil && !os.IsNotExist(err) { - glog.Error(log("failed to remove dir [%s]: %v", mountPath, err)) + klog.Error(log("failed to remove dir [%s]: %v", mountPath, err)) return err } // remove volume data file as well volPath := path.Dir(mountPath) dataFile := path.Join(volPath, volDataFileName) - glog.V(4).Info(log("also deleting volume info data file [%s]", dataFile)) + klog.V(4).Info(log("also deleting volume info data file [%s]", dataFile)) if err := os.Remove(dataFile); err != nil && !os.IsNotExist(err) { - glog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err)) + klog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err)) return err } // remove volume path - glog.V(4).Info(log("deleting volume path [%s]", volPath)) + klog.V(4).Info(log("deleting volume path [%s]", volPath)) if err := os.Remove(volPath); err != nil && !os.IsNotExist(err) { - glog.Error(log("failed to delete volume path [%s]: %v", volPath, err)) + klog.Error(log("failed to delete volume path [%s]: %v", volPath, err)) return err } } diff --git a/pkg/volume/csi/csi_mounter_test.go b/pkg/volume/csi/csi_mounter_test.go index 4aa1a44d79741..2b56a493323d2 100644 --- a/pkg/volume/csi/csi_mounter_test.go +++ b/pkg/volume/csi/csi_mounter_test.go @@ -27,7 +27,6 @@ import ( "reflect" - "github.com/golang/glog" api "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1beta1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,6 +37,7 @@ import ( fakeclient "k8s.io/client-go/kubernetes/fake" csiapi "k8s.io/csi-api/pkg/apis/csi/v1alpha1" fakecsi "k8s.io/csi-api/pkg/client/clientset/versioned/fake" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -145,7 +145,7 @@ func MounterSetUpTests(t *testing.T, podInfoEnabled bool) { emptyPodMountInfoVersion := "" for _, test := range tests { t.Run(test.name, func(t *testing.T) { - glog.Infof("Starting test %s", test.name) + klog.Infof("Starting test %s", test.name) fakeClient := fakeclient.NewSimpleClientset() fakeCSIClient := fakecsi.NewSimpleClientset( getCSIDriver("no-info", &emptyPodMountInfoVersion, nil), diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index 5c8240b81cd7c..271ca573aed25 100644 --- a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -27,7 +27,7 @@ import ( "context" - "github.com/golang/glog" + "k8s.io/klog" api "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -108,7 +108,7 @@ var PluginHandler = 
&RegistrationHandler{} // ValidatePlugin is called by kubelet's plugin watcher upon detection // of a new registration socket opened by CSI Driver registrar side car. func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error { - glog.Infof(log("Trying to register a new plugin with name: %s endpoint: %s versions: %s", + klog.Infof(log("Trying to register a new plugin with name: %s endpoint: %s versions: %s", pluginName, endpoint, strings.Join(versions, ","))) return nil @@ -116,7 +116,7 @@ func (h *RegistrationHandler) ValidatePlugin(pluginName string, endpoint string, // RegisterPlugin is called when a plugin can be registered func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string) error { - glog.Infof(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint)) + klog.Infof(log("Register new plugin with name: %s at endpoint: %s", pluginName, endpoint)) func() { // Storing endpoint of newly registered CSI driver into the map, where CSI driver name will be the key @@ -138,9 +138,9 @@ func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string) driverNodeID, maxVolumePerNode, accessibleTopology, err := csi.NodeGetInfo(ctx) if err != nil { - glog.Error(log("registrationHandler.RegisterPlugin failed at CSI.NodeGetInfo: %v", err)) + klog.Error(log("registrationHandler.RegisterPlugin failed at CSI.NodeGetInfo: %v", err)) if unregErr := unregisterDriver(pluginName); unregErr != nil { - glog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous: %v", unregErr)) + klog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous: %v", unregErr)) return unregErr } return err @@ -148,9 +148,9 @@ func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string) err = nim.InstallCSIDriver(pluginName, driverNodeID, maxVolumePerNode, accessibleTopology) if err != nil { - glog.Error(log("registrationHandler.RegisterPlugin failed at AddNodeInfo: %v", err)) + klog.Error(log("registrationHandler.RegisterPlugin failed at AddNodeInfo: %v", err)) if unregErr := unregisterDriver(pluginName); unregErr != nil { - glog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous error: %v", unregErr)) + klog.Error(log("registrationHandler.RegisterPlugin failed to unregister plugin due to previous error: %v", unregErr)) return unregErr } return err @@ -162,9 +162,9 @@ func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string) // DeRegisterPlugin is called when a plugin removed its socket, signaling // it is no longer available func (h *RegistrationHandler) DeRegisterPlugin(pluginName string) { - glog.V(4).Info(log("registrationHandler.DeRegisterPlugin request for plugin %s", pluginName)) + klog.V(4).Info(log("registrationHandler.DeRegisterPlugin request for plugin %s", pluginName)) if err := unregisterDriver(pluginName); err != nil { - glog.Error(log("registrationHandler.DeRegisterPlugin failed: %v", err)) + klog.Error(log("registrationHandler.DeRegisterPlugin failed: %v", err)) } } @@ -174,7 +174,7 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error { if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) { csiClient := host.GetCSIClient() if csiClient == nil { - glog.Warning("The client for CSI Custom Resources is not available, skipping informer initialization") + klog.Warning("The client for CSI Custom Resources is not available, skipping informer 
initialization") } else { // Start informer for CSIDrivers. factory := csiapiinformer.NewSharedInformerFactory(csiClient, csiResyncPeriod) @@ -203,7 +203,7 @@ func (p *csiPlugin) GetPluginName() string { func (p *csiPlugin) GetVolumeName(spec *volume.Spec) (string, error) { csi, err := getCSISourceFromSpec(spec) if err != nil { - glog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err)) + klog.Error(log("plugin.GetVolumeName failed to extract volume source from spec: %v", err)) return "", err } @@ -236,7 +236,7 @@ func (p *csiPlugin) NewMounter( k8s := p.host.GetKubeClient() if k8s == nil { - glog.Error(log("failed to get a kubernetes client")) + klog.Error(log("failed to get a kubernetes client")) return nil, errors.New("failed to get a Kubernetes client") } @@ -260,10 +260,10 @@ func (p *csiPlugin) NewMounter( dataDir := path.Dir(dir) // dropoff /mount at end if err := os.MkdirAll(dataDir, 0750); err != nil { - glog.Error(log("failed to create dir %#v: %v", dataDir, err)) + klog.Error(log("failed to create dir %#v: %v", dataDir, err)) return nil, err } - glog.V(4).Info(log("created path successfully [%s]", dataDir)) + klog.V(4).Info(log("created path successfully [%s]", dataDir)) // persist volume info data for teardown node := string(p.host.GetNodeName()) @@ -277,21 +277,21 @@ func (p *csiPlugin) NewMounter( } if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil { - glog.Error(log("failed to save volume info data: %v", err)) + klog.Error(log("failed to save volume info data: %v", err)) if err := os.RemoveAll(dataDir); err != nil { - glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err)) + klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err)) return nil, err } return nil, err } - glog.V(4).Info(log("mounter created successfully")) + klog.V(4).Info(log("mounter created successfully")) return mounter, nil } func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) { - glog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID)) + klog.V(4).Infof(log("setting up unmounter for [name=%v, podUID=%v]", specName, podUID)) unmounter := &csiMountMgr{ plugin: p, @@ -304,7 +304,7 @@ func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmo dataDir := path.Dir(dir) // dropoff /mount at end data, err := loadVolumeData(dataDir, volDataFileName) if err != nil { - glog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err)) + klog.Error(log("unmounter failed to load volume data file [%s]: %v", dir, err)) return nil, err } unmounter.driverName = data[volDataKey.driverName] @@ -315,15 +315,15 @@ func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmo } func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { - glog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath)) + klog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath)) volData, err := loadVolumeData(mountPath, volDataFileName) if err != nil { - glog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err)) + klog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err)) return nil, err } - glog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData)) + klog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData)) 
fsMode := api.PersistentVolumeFilesystem pv := &api.PersistentVolume{ @@ -365,7 +365,7 @@ var _ volume.DeviceMountableVolumePlugin = &csiPlugin{} func (p *csiPlugin) NewAttacher() (volume.Attacher, error) { k8s := p.host.GetKubeClient() if k8s == nil { - glog.Error(log("unable to get kubernetes client from host")) + klog.Error(log("unable to get kubernetes client from host")) return nil, errors.New("unable to get Kubernetes client") } @@ -383,7 +383,7 @@ func (p *csiPlugin) NewDeviceMounter() (volume.DeviceMounter, error) { func (p *csiPlugin) NewDetacher() (volume.Detacher, error) { k8s := p.host.GetKubeClient() if k8s == nil { - glog.Error(log("unable to get kubernetes client from host")) + klog.Error(log("unable to get kubernetes client from host")) return nil, errors.New("unable to get Kubernetes client") } @@ -420,12 +420,12 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt return nil, err } - glog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver)) + klog.V(4).Info(log("setting up block mapper for [volume=%v,driver=%v]", pvSource.VolumeHandle, pvSource.Driver)) client := newCsiDriverClient(pvSource.Driver) k8s := p.host.GetKubeClient() if k8s == nil { - glog.Error(log("failed to get a kubernetes client")) + klog.Error(log("failed to get a kubernetes client")) return nil, errors.New("failed to get a Kubernetes client") } @@ -445,10 +445,10 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt dataDir := getVolumeDeviceDataDir(spec.Name(), p.host) if err := os.MkdirAll(dataDir, 0750); err != nil { - glog.Error(log("failed to create data dir %s: %v", dataDir, err)) + klog.Error(log("failed to create data dir %s: %v", dataDir, err)) return nil, err } - glog.V(4).Info(log("created path successfully [%s]", dataDir)) + klog.V(4).Info(log("created path successfully [%s]", dataDir)) // persist volume info data for teardown node := string(p.host.GetNodeName()) @@ -462,9 +462,9 @@ func (p *csiPlugin) NewBlockVolumeMapper(spec *volume.Spec, podRef *api.Pod, opt } if err := saveVolumeData(dataDir, volDataFileName, volData); err != nil { - glog.Error(log("failed to save volume info data: %v", err)) + klog.Error(log("failed to save volume info data: %v", err)) if err := os.RemoveAll(dataDir); err != nil { - glog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err)) + klog.Error(log("failed to remove dir after error [%s]: %v", dataDir, err)) return nil, err } return nil, err @@ -478,7 +478,7 @@ func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (vo return nil, errors.New("CSIBlockVolume feature not enabled") } - glog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID)) + klog.V(4).Infof(log("setting up block unmapper for [Spec=%v, podUID=%v]", volName, podUID)) unmapper := &csiBlockMapper{ plugin: p, podUID: podUID, @@ -489,7 +489,7 @@ func (p *csiPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (vo dataDir := getVolumeDeviceDataDir(unmapper.specName, p.host) data, err := loadVolumeData(dataDir, volDataFileName) if err != nil { - glog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err)) + klog.Error(log("unmapper failed to load volume data file [%s]: %v", dataDir, err)) return nil, err } unmapper.driverName = data[volDataKey.driverName] @@ -504,16 +504,16 @@ func (p *csiPlugin) ConstructBlockVolumeSpec(podUID types.UID, specVolName, mapP return nil, 
errors.New("CSIBlockVolume feature not enabled") } - glog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath) + klog.V(4).Infof("plugin.ConstructBlockVolumeSpec [podUID=%s, specVolName=%s, path=%s]", string(podUID), specVolName, mapPath) dataDir := getVolumeDeviceDataDir(specVolName, p.host) volData, err := loadVolumeData(dataDir, volDataFileName) if err != nil { - glog.Error(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err)) + klog.Error(log("plugin.ConstructBlockVolumeSpec failed loading volume data using [%s]: %v", mapPath, err)) return nil, err } - glog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData)) + klog.V(4).Info(log("plugin.ConstructBlockVolumeSpec extracted [%#v]", volData)) blockMode := api.PersistentVolumeBlock pv := &api.PersistentVolume{ @@ -587,7 +587,7 @@ func unregisterDriver(driverName string) error { }() if err := nim.UninstallCSIDriver(driverName); err != nil { - glog.Errorf("Error uninstalling CSI driver: %v", err) + klog.Errorf("Error uninstalling CSI driver: %v", err) return err } diff --git a/pkg/volume/csi/csi_util.go b/pkg/volume/csi/csi_util.go index 00d40fef39ec2..3fe103584c414 100644 --- a/pkg/volume/csi/csi_util.go +++ b/pkg/volume/csi/csi_util.go @@ -22,10 +22,10 @@ import ( "os" "path" - "github.com/golang/glog" api "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "k8s.io/klog" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" "time" @@ -40,7 +40,7 @@ func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretRef credentials := map[string]string{} secret, err := k8s.CoreV1().Secrets(secretRef.Namespace).Get(secretRef.Name, meta.GetOptions{}) if err != nil { - glog.Errorf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err) + klog.Errorf("failed to find the secret %s in the namespace %s with error: %v\n", secretRef.Name, secretRef.Namespace, err) return credentials, err } for key, value := range secret.Data { @@ -53,18 +53,18 @@ func getCredentialsFromSecret(k8s kubernetes.Interface, secretRef *api.SecretRef // saveVolumeData persists parameter data as json file at the provided location func saveVolumeData(dir string, fileName string, data map[string]string) error { dataFilePath := path.Join(dir, fileName) - glog.V(4).Info(log("saving volume data file [%s]", dataFilePath)) + klog.V(4).Info(log("saving volume data file [%s]", dataFilePath)) file, err := os.Create(dataFilePath) if err != nil { - glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err)) + klog.Error(log("failed to save volume data file %s: %v", dataFilePath, err)) return err } defer file.Close() if err := json.NewEncoder(file).Encode(data); err != nil { - glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err)) + klog.Error(log("failed to save volume data file %s: %v", dataFilePath, err)) return err } - glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath)) + klog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath)) return nil } @@ -72,17 +72,17 @@ func saveVolumeData(dir string, fileName string, data map[string]string) error { func loadVolumeData(dir string, fileName string) (map[string]string, error) { // remove /mount at the end dataFileName := path.Join(dir, fileName) - glog.V(4).Info(log("loading volume data file [%s]", 
dataFileName)) + klog.V(4).Info(log("loading volume data file [%s]", dataFileName)) file, err := os.Open(dataFileName) if err != nil { - glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err)) + klog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err)) return nil, err } defer file.Close() data := map[string]string{} if err := json.NewDecoder(file).Decode(&data); err != nil { - glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err)) + klog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err)) return nil, err } diff --git a/pkg/volume/csi/nodeinfomanager/BUILD b/pkg/volume/csi/nodeinfomanager/BUILD index a240c7f5893e7..74543354b66f2 100644 --- a/pkg/volume/csi/nodeinfomanager/BUILD +++ b/pkg/volume/csi/nodeinfomanager/BUILD @@ -20,7 +20,7 @@ go_library( "//staging/src/k8s.io/client-go/util/retry:go_default_library", "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index 72057cae677b1..37f24151b7ef6 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -23,7 +23,6 @@ import ( "fmt" csipb "github.com/container-storage-interface/spec/lib/go/csi/v0" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -33,6 +32,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/util/retry" csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" nodeutil "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/volume" @@ -508,7 +508,7 @@ func (nim *nodeInfoManager) uninstallDriverFromCSINodeInfo(csiDriverName string) func updateMaxAttachLimit(driverName string, maxLimit int64) nodeUpdateFunc { return func(node *v1.Node) (*v1.Node, bool, error) { if maxLimit <= 0 { - glog.V(4).Infof("skipping adding attach limit for %s", driverName) + klog.V(4).Infof("skipping adding attach limit for %s", driverName) return node, false, nil } diff --git a/pkg/volume/downwardapi/BUILD b/pkg/volume/downwardapi/BUILD index deb9cf9b261e3..d223844116f0c 100644 --- a/pkg/volume/downwardapi/BUILD +++ b/pkg/volume/downwardapi/BUILD @@ -19,7 +19,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/downwardapi/downwardapi.go b/pkg/volume/downwardapi/downwardapi.go index 801ed712e2f95..391198ad9dd3f 100644 --- a/pkg/volume/downwardapi/downwardapi.go +++ b/pkg/volume/downwardapi/downwardapi.go @@ -29,7 +29,7 @@ import ( "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "github.com/golang/glog" + "k8s.io/klog" ) // ProbeVolumePlugins is the entry point for plugin detection in a package. 
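The csi_util.go hunks above revolve around saveVolumeData and loadVolumeData, which stash per-volume metadata as a JSON file beside the mount so unmounters and unmappers can recover the driver name and volume handle without a round trip to the API server. A runnable sketch of that round trip follows; the vol_data.json file name and the map keys are illustrative stand-ins, and the klog error reporting is trimmed to keep the sketch short.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

const volDataFileName = "vol_data.json" // assumed data-file name, for illustration

// saveVolumeData persists data as JSON at dir/fileName.
func saveVolumeData(dir, fileName string, data map[string]string) error {
	file, err := os.Create(filepath.Join(dir, fileName))
	if err != nil {
		return err
	}
	defer file.Close()
	return json.NewEncoder(file).Encode(data)
}

// loadVolumeData reads back the JSON file written by saveVolumeData.
func loadVolumeData(dir, fileName string) (map[string]string, error) {
	file, err := os.Open(filepath.Join(dir, fileName))
	if err != nil {
		return nil, err
	}
	defer file.Close()
	data := map[string]string{}
	if err := json.NewDecoder(file).Decode(&data); err != nil {
		return nil, err
	}
	return data, nil
}

func main() {
	dir, err := os.MkdirTemp("", "voldata")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	in := map[string]string{"driverName": "csi-hostpath", "volumeHandle": "vol-1"}
	if err := saveVolumeData(dir, volDataFileName, in); err != nil {
		panic(err)
	}
	out, err := loadVolumeData(dir, volDataFileName)
	if err != nil {
		panic(err)
	}
	fmt.Println(out["driverName"], out["volumeHandle"])
}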
@@ -170,23 +170,23 @@ func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error { } func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir) + klog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir) // Wrap EmptyDir. Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), b.pod, *b.opts) if err != nil { - glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) + klog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) return err } data, err := CollectData(b.source.Items, b.pod, b.plugin.host, b.source.DefaultMode) if err != nil { - glog.Errorf("Error preparing data for downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) + klog.Errorf("Error preparing data for downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) return err } setupSuccess := false if err := wrapped.SetUpAt(dir, fsGroup); err != nil { - glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) + klog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) return err } @@ -199,12 +199,12 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if !setupSuccess { unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID) if unmountCreateErr != nil { - glog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr) + klog.Errorf("error cleaning up mount %s after failure. 
Create unmounter failed with %v", b.volName, unmountCreateErr) return } tearDownErr := unmounter.TearDown() if tearDownErr != nil { - glog.Errorf("error tearing down volume %s with : %v", b.volName, tearDownErr) + klog.Errorf("error tearing down volume %s with : %v", b.volName, tearDownErr) } } }() @@ -212,19 +212,19 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName) writer, err := volumeutil.NewAtomicWriter(dir, writerContext) if err != nil { - glog.Errorf("Error creating atomic writer: %v", err) + klog.Errorf("Error creating atomic writer: %v", err) return err } err = writer.Write(data) if err != nil { - glog.Errorf("Error writing payload to dir: %v", err) + klog.Errorf("Error writing payload to dir: %v", err) return err } err = volume.SetVolumeOwnership(b, fsGroup) if err != nil { - glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) + klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) return err } @@ -255,7 +255,7 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu if fileInfo.FieldRef != nil { // TODO: unify with Kubelet.podFieldSelectorRuntimeValue if values, err := fieldpath.ExtractFieldPathAsString(pod, fileInfo.FieldRef.FieldPath); err != nil { - glog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error()) + klog.Errorf("Unable to extract field %s: %s", fileInfo.FieldRef.FieldPath, err.Error()) errlist = append(errlist, err) } else { fileProjection.Data = []byte(values) @@ -266,7 +266,7 @@ func CollectData(items []v1.DownwardAPIVolumeFile, pod *v1.Pod, host volume.Volu if err != nil { errlist = append(errlist, err) } else if values, err := resource.ExtractResourceValueByContainerNameAndNodeAllocatable(fileInfo.ResourceFieldRef, pod, containerName, nodeAllocatable); err != nil { - glog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error()) + klog.Errorf("Unable to extract field %s: %s", fileInfo.ResourceFieldRef.Resource, err.Error()) errlist = append(errlist, err) } else { fileProjection.Data = []byte(values) diff --git a/pkg/volume/emptydir/BUILD b/pkg/volume/emptydir/BUILD index ceeeb1031fd67..fd4eaff0332dd 100644 --- a/pkg/volume/emptydir/BUILD +++ b/pkg/volume/emptydir/BUILD @@ -25,7 +25,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//vendor/golang.org/x/sys/unix:go_default_library", diff --git a/pkg/volume/emptydir/empty_dir.go b/pkg/volume/emptydir/empty_dir.go index d01ac4edd55a0..0dd6c6508a008 100644 --- a/pkg/volume/emptydir/empty_dir.go +++ b/pkg/volume/emptydir/empty_dir.go @@ -21,11 +21,11 @@ import ( "os" "path" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/util/mount" stringsutil "k8s.io/kubernetes/pkg/util/strings" @@ -253,7 +253,7 @@ func (ed *emptyDir) setupTmpfs(dir string) error { return nil } - glog.V(3).Infof("pod %v: mounting tmpfs for volume %v", 
ed.pod.UID, ed.volName) + klog.V(3).Infof("pod %v: mounting tmpfs for volume %v", ed.pod.UID, ed.volName) return ed.mounter.Mount("tmpfs", dir, "tmpfs", nil /* options */) } @@ -281,7 +281,7 @@ func (ed *emptyDir) setupHugepages(dir string) error { return err } - glog.V(3).Infof("pod %v: mounting hugepages for volume %v", ed.pod.UID, ed.volName) + klog.V(3).Infof("pod %v: mounting hugepages for volume %v", ed.pod.UID, ed.volName) return ed.mounter.Mount("nodev", dir, "hugetlbfs", []string{pageSizeMountOption}) } @@ -349,7 +349,7 @@ func (ed *emptyDir) setupDir(dir string) error { } if fileinfo.Mode().Perm() != perm.Perm() { - glog.Errorf("Expected directory %q permissions to be: %s; got: %s", dir, perm.Perm(), fileinfo.Mode().Perm()) + klog.Errorf("Expected directory %q permissions to be: %s; got: %s", dir, perm.Perm(), fileinfo.Mode().Perm()) } } @@ -370,7 +370,7 @@ func (ed *emptyDir) TearDownAt(dir string) error { if pathExists, pathErr := volumeutil.PathExists(dir); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) return nil } diff --git a/pkg/volume/emptydir/empty_dir_linux.go b/pkg/volume/emptydir/empty_dir_linux.go index 880c2b37c3069..3f8b162827458 100644 --- a/pkg/volume/emptydir/empty_dir_linux.go +++ b/pkg/volume/emptydir/empty_dir_linux.go @@ -21,8 +21,8 @@ package emptydir import ( "fmt" - "github.com/golang/glog" "golang.org/x/sys/unix" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/util/mount" @@ -40,7 +40,7 @@ type realMountDetector struct { } func (m *realMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool, error) { - glog.V(5).Infof("Determining mount medium of %v", path) + klog.V(5).Infof("Determining mount medium of %v", path) notMnt, err := m.mounter.IsLikelyNotMountPoint(path) if err != nil { return v1.StorageMediumDefault, false, fmt.Errorf("IsLikelyNotMountPoint(%q): %v", path, err) @@ -50,7 +50,7 @@ func (m *realMountDetector) GetMountMedium(path string) (v1.StorageMedium, bool, return v1.StorageMediumDefault, false, fmt.Errorf("statfs(%q): %v", path, err) } - glog.V(5).Infof("Statfs_t of %v: %+v", path, buf) + klog.V(5).Infof("Statfs_t of %v: %+v", path, buf) if buf.Type == linuxTmpfsMagic { return v1.StorageMediumMemory, !notMnt, nil } else if int64(buf.Type) == linuxHugetlbfsMagic { diff --git a/pkg/volume/fc/BUILD b/pkg/volume/fc/BUILD index 9b8a102b84c09..8c7328601db81 100644 --- a/pkg/volume/fc/BUILD +++ b/pkg/volume/fc/BUILD @@ -27,7 +27,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/fc/attacher.go b/pkg/volume/fc/attacher.go index 3ef5dd20f9894..748c14f38e130 100644 --- a/pkg/volume/fc/attacher.go +++ b/pkg/volume/fc/attacher.go @@ -23,10 +23,10 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" @@ -78,7 +78,7 @@ func (attacher *fcAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty 
func (attacher *fcAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { mounter, err := volumeSpecToMounter(spec, attacher.host) if err != nil { - glog.Warningf("failed to get fc mounter: %v", err) + klog.Warningf("failed to get fc mounter: %v", err) return "", err } return attacher.manager.AttachDisk(*mounter) @@ -88,7 +88,7 @@ func (attacher *fcAttacher) GetDeviceMountPath( spec *volume.Spec) (string, error) { mounter, err := volumeSpecToMounter(spec, attacher.host) if err != nil { - glog.Warningf("failed to get fc mounter: %v", err) + klog.Warningf("failed to get fc mounter: %v", err) return "", err } @@ -158,7 +158,7 @@ func (detacher *fcDetacher) UnmountDevice(deviceMountPath string) error { // Specify device name for DetachDisk later devName, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath) if err != nil { - glog.Errorf("fc: failed to get device from mnt: %s\nError: %v", deviceMountPath, err) + klog.Errorf("fc: failed to get device from mnt: %s\nError: %v", deviceMountPath, err) return err } // Unmount for deviceMountPath(=globalPDPath) @@ -171,7 +171,7 @@ func (detacher *fcDetacher) UnmountDevice(deviceMountPath string) error { if err != nil { return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", devName, err) } - glog.V(4).Infof("fc: successfully detached disk: %s", devName) + klog.V(4).Infof("fc: successfully detached disk: %s", devName) return nil } @@ -206,7 +206,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost) (*fcDiskMoun if err != nil { return nil, err } - glog.V(5).Infof("fc: volumeSpecToMounter volumeMode %s", volumeMode) + klog.V(5).Infof("fc: volumeSpecToMounter volumeMode %s", volumeMode) return &fcDiskMounter{ fcDisk: fcDisk, fsType: fc.FSType, diff --git a/pkg/volume/fc/disk_manager.go b/pkg/volume/fc/disk_manager.go index c6cc815ac2d60..602b7aa0bdbd3 100644 --- a/pkg/volume/fc/disk_manager.go +++ b/pkg/volume/fc/disk_manager.go @@ -19,7 +19,7 @@ package fc import ( "os" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -43,14 +43,14 @@ func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mou noMnt, err := mounter.IsLikelyNotMountPoint(volPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mountpoint: %s", volPath) + klog.Errorf("cannot validate mountpoint: %s", volPath) return err } if !noMnt { return nil } if err := os.MkdirAll(volPath, 0750); err != nil { - glog.Errorf("failed to mkdir:%s", volPath) + klog.Errorf("failed to mkdir:%s", volPath) return err } // Perform a bind mount to the full path to allow duplicate mounts of the same disk. 
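The hunks through this section all apply the same mechanical substitution: the github.com/golang/glog import becomes k8s.io/klog, and each glog.* call site becomes klog.*. klog preserves glog's exported logging surface (Error/Errorf, Warning/Warningf, Infof, and the V(level) verbosity guard), which is why no call signature changes in any of these files. A minimal, self-contained sketch of the resulting call pattern; the flag wiring, volume name, and error value below are illustrative assumptions, not code from this patch:

    package main

    import (
        "errors"
        "flag"

        "k8s.io/klog"
    )

    func main() {
        // klog registers its flags (-v, -logtostderr, ...) only when asked to,
        // so a caller wires them up explicitly before flag.Parse().
        klog.InitFlags(nil)
        flag.Set("logtostderr", "true") // illustrative: log to stderr instead of files
        flag.Parse()
        defer klog.Flush()

        volName := "example-vol"            // placeholder name, not from this patch
        err := errors.New("device is busy") // placeholder error

        // Call sites look exactly like their glog counterparts:
        klog.Errorf("error tearing down volume %s: %v", volName, err)
        klog.V(4).Infof("volume %s: will retry on next sync loop", volName)
    }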
@@ -61,25 +61,25 @@ func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mou mountOptions := util.JoinMountOptions(options, b.mountOptions) err = mounter.Mount(globalPDPath, volPath, "", mountOptions) if err != nil { - glog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err) + klog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err) noMnt, mntErr := b.mounter.IsLikelyNotMountPoint(volPath) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !noMnt { if mntErr = b.mounter.Unmount(volPath); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } noMnt, mntErr = b.mounter.IsLikelyNotMountPoint(volPath) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !noMnt { // will most likely retry on next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", volPath) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", volPath) return err } } diff --git a/pkg/volume/fc/fc.go b/pkg/volume/fc/fc.go index d413e02dbf471..cbd247aebcaf3 100644 --- a/pkg/volume/fc/fc.go +++ b/pkg/volume/fc/fc.go @@ -22,11 +22,11 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" @@ -137,7 +137,7 @@ func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, if err != nil { return nil, err } - glog.V(5).Infof("fc: newMounterInternal volumeMode %s", volumeMode) + klog.V(5).Infof("fc: newMounterInternal volumeMode %s", volumeMode) return &fcDiskMounter{ fcDisk: fcDisk, fsType: fc.FSType, @@ -276,7 +276,7 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu FC: &v1.FCVolumeSource{TargetWWNs: []string{wwnLun[0]}, Lun: &lun32}, }, } - glog.V(5).Infof("ConstructVolumeSpec: TargetWWNs: %v, Lun: %v", + klog.V(5).Infof("ConstructVolumeSpec: TargetWWNs: %v, Lun: %v", fcVolume.VolumeSource.FC.TargetWWNs, *fcVolume.VolumeSource.FC.Lun) } else { fcVolume = &v1.Volume{ @@ -285,7 +285,7 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volu FC: &v1.FCVolumeSource{WWIDs: []string{volumeInfo}}, }, } - glog.V(5).Infof("ConstructVolumeSpec: WWIDs: %v", fcVolume.VolumeSource.FC.WWIDs) + klog.V(5).Infof("ConstructVolumeSpec: WWIDs: %v", fcVolume.VolumeSource.FC.WWIDs) } return volume.NewSpecFromVolume(fcVolume), nil } @@ -304,7 +304,7 @@ func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, m if err != nil { return nil, err } - glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) // Retrieve volumePluginDependentPath from globalMapPathUUID // globalMapPathUUID examples: @@ -328,13 +328,13 @@ func (plugin *fcPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, m lun32 := int32(lun) fcPV = createPersistentVolumeFromFCVolumeSource(volumeName, v1.FCVolumeSource{TargetWWNs: 
[]string{wwnLun[0]}, Lun: &lun32}) - glog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v", + klog.V(5).Infof("ConstructBlockVolumeSpec: TargetWWNs: %v, Lun: %v", fcPV.Spec.PersistentVolumeSource.FC.TargetWWNs, *fcPV.Spec.PersistentVolumeSource.FC.Lun) } else { fcPV = createPersistentVolumeFromFCVolumeSource(volumeName, v1.FCVolumeSource{WWIDs: []string{volumeInfo}}) - glog.V(5).Infof("ConstructBlockVolumeSpec: WWIDs: %v", fcPV.Spec.PersistentVolumeSource.FC.WWIDs) + klog.V(5).Infof("ConstructBlockVolumeSpec: WWIDs: %v", fcPV.Spec.PersistentVolumeSource.FC.WWIDs) } return volume.NewSpecFromPersistentVolume(fcPV, false), nil } @@ -363,7 +363,7 @@ func (fc *fcDisk) GetPath() string { func (fc *fcDisk) fcGlobalMapPath(spec *volume.Spec) (string, error) { mounter, err := volumeSpecToMounter(spec, fc.plugin.host) if err != nil { - glog.Warningf("failed to get fc mounter: %v", err) + klog.Warningf("failed to get fc mounter: %v", err) return "", err } return fc.manager.MakeGlobalVDPDName(*mounter.fcDisk), nil @@ -409,7 +409,7 @@ func (b *fcDiskMounter) SetUpAt(dir string, fsGroup *int64) error { // diskSetUp checks mountpoints and prevent repeated calls err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup) if err != nil { - glog.Errorf("fc: failed to setup") + klog.Errorf("fc: failed to setup") } return err } @@ -462,12 +462,12 @@ func (c *fcDiskUnmapper) TearDownDevice(mapPath, devicePath string) error { if err != nil { return fmt.Errorf("fc: failed to detach disk: %s\nError: %v", mapPath, err) } - glog.V(4).Infof("fc: %q is unmounted, deleting the directory", mapPath) + klog.V(4).Infof("fc: %q is unmounted, deleting the directory", mapPath) err = os.RemoveAll(mapPath) if err != nil { return fmt.Errorf("fc: failed to delete the directory: %s\nError: %v", mapPath, err) } - glog.V(4).Infof("fc: successfully detached disk: %s", mapPath) + klog.V(4).Infof("fc: successfully detached disk: %s", mapPath) return nil } diff --git a/pkg/volume/fc/fc_util.go b/pkg/volume/fc/fc_util.go index 9d71a00a23ddf..2beafee94bf7d 100644 --- a/pkg/volume/fc/fc_util.go +++ b/pkg/volume/fc/fc_util.go @@ -24,9 +24,9 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -69,7 +69,7 @@ func findDisk(wwn, lun string, io ioHandler, deviceUtil volumeutil.DeviceUtil) ( if strings.Contains(name, fcPath) { if disk, err1 := io.EvalSymlinks(devPath + name); err1 == nil { dm := deviceUtil.FindMultipathDeviceForDevice(disk) - glog.Infof("fc: find disk: %v, dm: %v", disk, dm) + klog.Infof("fc: find disk: %v, dm: %v", disk, dm) return disk, dm } } @@ -97,23 +97,23 @@ func findDiskWWIDs(wwid string, io ioHandler, deviceUtil volumeutil.DeviceUtil) if name == fcPath { disk, err := io.EvalSymlinks(devID + name) if err != nil { - glog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", devID+name, err) + klog.V(2).Infof("fc: failed to find a corresponding disk from symlink[%s], error %v", devID+name, err) return "", "" } dm := deviceUtil.FindMultipathDeviceForDevice(disk) - glog.Infof("fc: find disk: %v, dm: %v", disk, dm) + klog.Infof("fc: find disk: %v, dm: %v", disk, dm) return disk, dm } } } - glog.V(2).Infof("fc: failed to find a disk [%s]", devID+fcPath) + klog.V(2).Infof("fc: failed to find a disk [%s]", devID+fcPath) return "", "" } // Removes a scsi device based upon /dev/sdX 
name func removeFromScsiSubsystem(deviceName string, io ioHandler) { fileName := "/sys/block/" + deviceName + "/device/delete" - glog.V(4).Infof("fc: remove device from scsi-subsystem: path: %s", fileName) + klog.V(4).Infof("fc: remove device from scsi-subsystem: path: %s", fileName) data := []byte("1") io.WriteFile(fileName, data, 0666) } @@ -218,7 +218,7 @@ func (util *fcUtil) AttachDisk(b fcDiskMounter) (string, error) { if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { // If the volumeMode is 'Block', plugin don't have to format the volume. // The globalPDPath will be created by operationexecutor. Just return devicePath here. - glog.V(5).Infof("fc: AttachDisk volumeMode: %s, devicePath: %s", b.volumeMode, devicePath) + klog.V(5).Infof("fc: AttachDisk volumeMode: %s, devicePath: %s", b.volumeMode, devicePath) if b.volumeMode == v1.PersistentVolumeBlock { return devicePath, nil } @@ -235,7 +235,7 @@ func (util *fcUtil) AttachDisk(b fcDiskMounter) (string, error) { return devicePath, fmt.Errorf("Heuristic determination of mount point failed:%v", err) } if !noMnt { - glog.Infof("fc: %s already mounted", globalPDPath) + klog.Infof("fc: %s already mounted", globalPDPath) return devicePath, nil } @@ -262,17 +262,17 @@ func (util *fcUtil) DetachDisk(c fcDiskUnmounter, devicePath string) error { // Add single devicepath to devices devices = append(devices, dstPath) } - glog.V(4).Infof("fc: DetachDisk devicePath: %v, dstPath: %v, devices: %v", devicePath, dstPath, devices) + klog.V(4).Infof("fc: DetachDisk devicePath: %v, dstPath: %v, devices: %v", devicePath, dstPath, devices) var lastErr error for _, device := range devices { err := util.detachFCDisk(c.io, device) if err != nil { - glog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) + klog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) } } if lastErr != nil { - glog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr) + klog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr) return lastErr } return nil @@ -301,7 +301,7 @@ func (util *fcUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri } else { // TODO: FC plugin can't obtain the devicePath from kubelet because devicePath // in volume object isn't updated when volume is attached to kubelet node. - glog.Infof("fc: devicePath is empty. Try to retrieve FC configuration from global map path: %v", mapPath) + klog.Infof("fc: devicePath is empty. 
Try to retrieve FC configuration from global map path: %v", mapPath) } // Check if global map path is valid @@ -332,7 +332,7 @@ func (util *fcUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri for _, fi := range fis { if strings.Contains(fi.Name(), volumeInfo) { devicePath = path.Join(searchPath, fi.Name()) - glog.V(5).Infof("fc: updated devicePath: %s", devicePath) + klog.V(5).Infof("fc: updated devicePath: %s", devicePath) break } } @@ -343,7 +343,7 @@ func (util *fcUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri if err != nil { return err } - glog.V(4).Infof("fc: find destination device path from symlink: %v", dstPath) + klog.V(4).Infof("fc: find destination device path from symlink: %v", dstPath) var devices []string dm := c.deviceUtil.FindMultipathDeviceForDevice(dstPath) @@ -363,12 +363,12 @@ func (util *fcUtil) DetachBlockFCDisk(c fcDiskUnmapper, mapPath, devicePath stri for _, device := range devices { err = util.detachFCDisk(c.io, device) if err != nil { - glog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) + klog.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) lastErr = fmt.Errorf("fc: detachFCDisk failed. device: %v err: %v", device, err) } } if lastErr != nil { - glog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr) + klog.Errorf("fc: last error occurred during detach disk:\n%v", lastErr) return lastErr } return nil @@ -378,7 +378,7 @@ func checkPathExists(path string) (bool, error) { if pathExists, pathErr := volumeutil.PathExists(path); pathErr != nil { return pathExists, fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmap skipped because path does not exist: %v", path) + klog.Warningf("Warning: Unmap skipped because path does not exist: %v", path) return pathExists, nil } return true, nil diff --git a/pkg/volume/flexvolume/BUILD b/pkg/volume/flexvolume/BUILD index 38ff345cb350c..f337fc42bb364 100644 --- a/pkg/volume/flexvolume/BUILD +++ b/pkg/volume/flexvolume/BUILD @@ -39,7 +39,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//vendor/github.com/fsnotify/fsnotify:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/pkg/volume/flexvolume/attacher-defaults.go b/pkg/volume/flexvolume/attacher-defaults.go index 368ac1be0bdb9..3c82f264b9147 100644 --- a/pkg/volume/flexvolume/attacher-defaults.go +++ b/pkg/volume/flexvolume/attacher-defaults.go @@ -19,7 +19,7 @@ package flexvolume import ( "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/util/mount" @@ -30,13 +30,13 @@ type attacherDefaults flexVolumeAttacher // Attach is part of the volume.Attacher interface func (a *attacherDefaults) Attach(spec *volume.Spec, hostName types.NodeName) (string, error) { - glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default Attach for volume ", spec.Name(), ", host ", hostName) + klog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default Attach for volume ", spec.Name(), ", host ", hostName) return "", nil } // WaitForAttach is part of the volume.Attacher interface func (a *attacherDefaults) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) { - 
glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default WaitForAttach for volume ", spec.Name(), ", device ", devicePath) + klog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default WaitForAttach for volume ", spec.Name(), ", device ", devicePath) return devicePath, nil } @@ -47,7 +47,7 @@ func (a *attacherDefaults) GetDeviceMountPath(spec *volume.Spec, mountsDir strin // MountDevice is part of the volume.Attacher interface func (a *attacherDefaults) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error { - glog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name(), ", device ", devicePath, ", deviceMountPath ", deviceMountPath) + klog.Warning(logPrefix(a.plugin.flexVolumePlugin), "using default MountDevice for volume ", spec.Name(), ", device ", devicePath, ", deviceMountPath ", deviceMountPath) volSourceFSType, err := getFSType(spec) if err != nil { diff --git a/pkg/volume/flexvolume/attacher.go b/pkg/volume/flexvolume/attacher.go index 4d2169f36fc06..3b98eefa0792e 100644 --- a/pkg/volume/flexvolume/attacher.go +++ b/pkg/volume/flexvolume/attacher.go @@ -19,9 +19,9 @@ package flexvolume import ( "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" ) @@ -112,7 +112,7 @@ func (a *flexVolumeAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName t } else if err == nil { if !status.Attached { volumesAttachedCheck[spec] = false - glog.V(2).Infof("VolumesAreAttached: check volume (%q) is no longer attached", spec.Name()) + klog.V(2).Infof("VolumesAreAttached: check volume (%q) is no longer attached", spec.Name()) } } else { return nil, err diff --git a/pkg/volume/flexvolume/detacher-defaults.go b/pkg/volume/flexvolume/detacher-defaults.go index 181f87bde487e..f0f43262394c0 100644 --- a/pkg/volume/flexvolume/detacher-defaults.go +++ b/pkg/volume/flexvolume/detacher-defaults.go @@ -19,8 +19,8 @@ package flexvolume import ( "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume/util" ) @@ -28,18 +28,18 @@ type detacherDefaults flexVolumeDetacher // Detach is part of the volume.Detacher interface. func (d *detacherDefaults) Detach(volumeName string, hostName types.NodeName) error { - glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default Detach for volume ", volumeName, ", host ", hostName) + klog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default Detach for volume ", volumeName, ", host ", hostName) return nil } // WaitForDetach is part of the volume.Detacher interface. func (d *detacherDefaults) WaitForDetach(devicePath string, timeout time.Duration) error { - glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default WaitForDetach for device ", devicePath) + klog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default WaitForDetach for device ", devicePath) return nil } // UnmountDevice is part of the volume.Detacher interface. 
func (d *detacherDefaults) UnmountDevice(deviceMountPath string) error { - glog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default UnmountDevice for device mount path ", deviceMountPath) + klog.Warning(logPrefix(d.plugin.flexVolumePlugin), "using default UnmountDevice for device mount path ", deviceMountPath) return util.UnmountPath(deviceMountPath, d.plugin.host.GetMounter(d.plugin.GetPluginName())) } diff --git a/pkg/volume/flexvolume/detacher.go b/pkg/volume/flexvolume/detacher.go index 6b5760258469d..d98791f3d1bc4 100644 --- a/pkg/volume/flexvolume/detacher.go +++ b/pkg/volume/flexvolume/detacher.go @@ -20,8 +20,8 @@ import ( "fmt" "os" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) @@ -53,7 +53,7 @@ func (d *flexVolumeDetacher) UnmountDevice(deviceMountPath string) error { pathExists, pathErr := util.PathExists(deviceMountPath) if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath) return nil } if pathErr != nil && !util.IsCorruptedMnt(pathErr) { @@ -70,7 +70,7 @@ func (d *flexVolumeDetacher) UnmountDevice(deviceMountPath string) error { } if notmnt { - glog.Warningf("Warning: Path: %v already unmounted", deviceMountPath) + klog.Warningf("Warning: Path: %v already unmounted", deviceMountPath) } else { call := d.plugin.NewDriverCall(unmountDeviceCmd) call.Append(deviceMountPath) diff --git a/pkg/volume/flexvolume/driver-call.go b/pkg/volume/flexvolume/driver-call.go index 0a42a914247c1..c25b599919152 100644 --- a/pkg/volume/flexvolume/driver-call.go +++ b/pkg/volume/flexvolume/driver-call.go @@ -22,7 +22,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" ) @@ -141,13 +141,13 @@ func (dc *DriverCall) Run() (*DriverStatus, error) { } _, err := handleCmdResponse(dc.Command, output) if err == nil { - glog.Errorf("FlexVolume: driver bug: %s: exec error (%s) but no error in response.", execPath, execErr) + klog.Errorf("FlexVolume: driver bug: %s: exec error (%s) but no error in response.", execPath, execErr) return nil, execErr } if isCmdNotSupportedErr(err) { dc.plugin.unsupported(dc.Command) } else { - glog.Warningf("FlexVolume: driver call failed: executable: %s, args: %s, error: %s, output: %q", execPath, dc.args, execErr.Error(), output) + klog.Warningf("FlexVolume: driver call failed: executable: %s, args: %s, error: %s, output: %q", execPath, dc.args, execErr.Error(), output) } return nil, err } @@ -264,14 +264,14 @@ func handleCmdResponse(cmd string, output []byte) (*DriverStatus, error) { Capabilities: defaultCapabilities(), } if err := json.Unmarshal(output, &status); err != nil { - glog.Errorf("Failed to unmarshal output for command: %s, output: %q, error: %s", cmd, string(output), err.Error()) + klog.Errorf("Failed to unmarshal output for command: %s, output: %q, error: %s", cmd, string(output), err.Error()) return nil, err } else if status.Status == StatusNotSupported { - glog.V(5).Infof("%s command is not supported by the driver", cmd) + klog.V(5).Infof("%s command is not supported by the driver", cmd) return nil, errors.New(status.Status) } else if status.Status != StatusSuccess { errMsg := fmt.Sprintf("%s command failed, status: %s, reason: %s", cmd, status.Status, status.Message) - glog.Errorf(errMsg) + klog.Error(errMsg) return nil, fmt.Errorf("%s", errMsg) } diff --git
a/pkg/volume/flexvolume/expander-defaults.go b/pkg/volume/flexvolume/expander-defaults.go index e578cb32c63f1..4a33e184e38cf 100644 --- a/pkg/volume/flexvolume/expander-defaults.go +++ b/pkg/volume/flexvolume/expander-defaults.go @@ -17,8 +17,8 @@ limitations under the License. package flexvolume import ( - "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) @@ -32,14 +32,14 @@ func newExpanderDefaults(plugin *flexVolumePlugin) *expanderDefaults { } func (e *expanderDefaults) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { - glog.Warning(logPrefix(e.plugin), "using default expand for volume ", spec.Name(), ", to size ", newSize, " from ", oldSize) + klog.Warning(logPrefix(e.plugin), "using default expand for volume ", spec.Name(), ", to size ", newSize, " from ", oldSize) return newSize, nil } // the defaults for ExpandFS return a generic resize indicator that will trigger the operation executor to go ahead with // generic filesystem resize func (e *expanderDefaults) ExpandFS(spec *volume.Spec, devicePath, deviceMountPath string, _, _ resource.Quantity) error { - glog.Warning(logPrefix(e.plugin), "using default filesystem resize for volume ", spec.Name(), ", at ", devicePath) + klog.Warning(logPrefix(e.plugin), "using default filesystem resize for volume ", spec.Name(), ", at ", devicePath) _, err := util.GenericResizeFS(e.plugin.host, e.plugin.GetPluginName(), devicePath, deviceMountPath) return err } diff --git a/pkg/volume/flexvolume/mounter-defaults.go b/pkg/volume/flexvolume/mounter-defaults.go index b3cd9430ff532..930806118972e 100644 --- a/pkg/volume/flexvolume/mounter-defaults.go +++ b/pkg/volume/flexvolume/mounter-defaults.go @@ -17,7 +17,7 @@ limitations under the License. package flexvolume import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" ) @@ -27,7 +27,7 @@ type mounterDefaults flexVolumeMounter // SetUpAt is part of the volume.Mounter interface. // This implementation relies on the attacher's device mount path and does a bind mount to dir. func (f *mounterDefaults) SetUpAt(dir string, fsGroup *int64) error { - glog.Warning(logPrefix(f.plugin), "using default SetUpAt to ", dir) + klog.Warning(logPrefix(f.plugin), "using default SetUpAt to ", dir) src, err := f.plugin.getDeviceMountPath(f.spec) if err != nil { @@ -43,7 +43,7 @@ func (f *mounterDefaults) SetUpAt(dir string, fsGroup *int64) error { // Returns the default volume attributes. func (f *mounterDefaults) GetAttributes() volume.Attributes { - glog.V(5).Infof(logPrefix(f.plugin), "using default GetAttributes") + klog.V(5).Info(logPrefix(f.plugin), "using default GetAttributes") return volume.Attributes{ ReadOnly: f.readOnly, Managed: !f.readOnly, diff --git a/pkg/volume/flexvolume/plugin-defaults.go b/pkg/volume/flexvolume/plugin-defaults.go index 6f090b4e1884c..9f9e1587b0a24 100644 --- a/pkg/volume/flexvolume/plugin-defaults.go +++ b/pkg/volume/flexvolume/plugin-defaults.go @@ -17,7 +17,7 @@ limitations under the License.
package flexvolume import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" ) @@ -29,6 +29,6 @@ func logPrefix(plugin *flexVolumePlugin) string { } func (plugin *pluginDefaults) GetVolumeName(spec *volume.Spec) (string, error) { - glog.Warning(logPrefix((*flexVolumePlugin)(plugin)), "using default GetVolumeName for volume ", spec.Name()) + klog.Warning(logPrefix((*flexVolumePlugin)(plugin)), "using default GetVolumeName for volume ", spec.Name()) return spec.Name(), nil } diff --git a/pkg/volume/flexvolume/plugin.go b/pkg/volume/flexvolume/plugin.go index 8859bcb199911..2e2a9f7e532d0 100644 --- a/pkg/volume/flexvolume/plugin.go +++ b/pkg/volume/flexvolume/plugin.go @@ -23,7 +23,7 @@ import ( "strings" "sync" - "github.com/golang/glog" + "k8s.io/klog" api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -135,7 +135,7 @@ func (plugin *flexVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) return "", err } - glog.Warning(logPrefix(plugin), "GetVolumeName is not supported yet. Defaulting to PV or volume name: ", name) + klog.Warning(logPrefix(plugin), "GetVolumeName is not supported yet. Defaulting to PV or volume name: ", name) return name, nil } diff --git a/pkg/volume/flexvolume/probe.go b/pkg/volume/flexvolume/probe.go index 2a792c6f409a0..7a929e5ce99b1 100644 --- a/pkg/volume/flexvolume/probe.go +++ b/pkg/volume/flexvolume/probe.go @@ -17,7 +17,7 @@ limitations under the License. package flexvolume import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" "k8s.io/utils/exec" @@ -234,7 +234,7 @@ func (prober *flexVolumeProber) addWatchRecursive(filename string) error { addWatch := func(path string, info os.FileInfo, err error) error { if err == nil && info.IsDir() { if err := prober.watcher.AddWatch(path); err != nil { - glog.Errorf("Error recursively adding watch: %v", err) + klog.Errorf("Error recursively adding watch: %v", err) } } return nil @@ -247,10 +247,10 @@ func (prober *flexVolumeProber) addWatchRecursive(filename string) error { func (prober *flexVolumeProber) initWatcher() error { err := prober.watcher.Init(func(event fsnotify.Event) { if err := prober.handleWatchEvent(event); err != nil { - glog.Errorf("Flexvolume prober watch: %s", err) + klog.Errorf("Flexvolume prober watch: %s", err) } }, func(err error) { - glog.Errorf("Received an error from watcher: %s", err) + klog.Errorf("Received an error from watcher: %s", err) }) if err != nil { return fmt.Errorf("Error initializing watcher: %s", err) @@ -268,7 +268,7 @@ func (prober *flexVolumeProber) initWatcher() error { // Creates the plugin directory, if it doesn't already exist. func (prober *flexVolumeProber) createPluginDir() error { if _, err := prober.fs.Stat(prober.pluginDir); os.IsNotExist(err) { - glog.Warningf("Flexvolume plugin directory at %s does not exist. Recreating.", prober.pluginDir) + klog.Warningf("Flexvolume plugin directory at %s does not exist. Recreating.", prober.pluginDir) err := prober.fs.MkdirAll(prober.pluginDir, 0755) if err != nil { return fmt.Errorf("Error (re-)creating driver directory: %s", err) diff --git a/pkg/volume/flexvolume/unmounter-defaults.go b/pkg/volume/flexvolume/unmounter-defaults.go index 67d9facf79dd8..919a6be890edb 100644 --- a/pkg/volume/flexvolume/unmounter-defaults.go +++ b/pkg/volume/flexvolume/unmounter-defaults.go @@ -17,13 +17,13 @@ limitations under the License. 
package flexvolume import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume/util" ) type unmounterDefaults flexVolumeUnmounter func (f *unmounterDefaults) TearDownAt(dir string) error { - glog.Warning(logPrefix(f.plugin), "using default TearDownAt for ", dir) + klog.Warning(logPrefix(f.plugin), "using default TearDownAt for ", dir) return util.UnmountPath(dir, f.mounter) } diff --git a/pkg/volume/flexvolume/unmounter.go b/pkg/volume/flexvolume/unmounter.go index 406c7f84f0496..a62636aba4097 100644 --- a/pkg/volume/flexvolume/unmounter.go +++ b/pkg/volume/flexvolume/unmounter.go @@ -20,7 +20,7 @@ import ( "fmt" "os" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" "k8s.io/utils/exec" @@ -45,7 +45,7 @@ func (f *flexVolumeUnmounter) TearDownAt(dir string) error { pathExists, pathErr := util.PathExists(dir) if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) return nil } diff --git a/pkg/volume/flexvolume/util.go b/pkg/volume/flexvolume/util.go index 1efdb92112eac..4a3019e25e39a 100644 --- a/pkg/volume/flexvolume/util.go +++ b/pkg/volume/flexvolume/util.go @@ -21,7 +21,7 @@ import ( "fmt" "os" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -49,7 +49,7 @@ func addSecretsToOptions(options map[string]string, spec *volume.Spec, namespace } for name, data := range secrets { options[optionKeySecret+"/"+name] = base64.StdEncoding.EncodeToString([]byte(data)) - glog.V(1).Infof("found flex volume secret info: %s", name) + klog.V(1).Infof("found flex volume secret info: %s", name) } return nil @@ -141,7 +141,7 @@ func prepareForMount(mounter mount.Interface, deviceMountPath string) (bool, err func doMount(mounter mount.Interface, devicePath, deviceMountPath, fsType string, options []string) error { err := mounter.Mount(devicePath, deviceMountPath, fsType, options) if err != nil { - glog.Errorf("Failed to mount the volume at %s, device: %s, error: %s", deviceMountPath, devicePath, err.Error()) + klog.Errorf("Failed to mount the volume at %s, device: %s, error: %s", deviceMountPath, devicePath, err.Error()) return err } return nil @@ -150,7 +150,7 @@ func doMount(mounter mount.Interface, devicePath, deviceMountPath, fsType string func isNotMounted(mounter mount.Interface, deviceMountPath string) (bool, error) { notmnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath) if err != nil { - glog.Errorf("Error checking mount point %s, error: %v", deviceMountPath, err) + klog.Errorf("Error checking mount point %s, error: %v", deviceMountPath, err) return false, err } return notmnt, nil diff --git a/pkg/volume/flocker/BUILD b/pkg/volume/flocker/BUILD index d3c41b0fade82..0067622c19d86 100644 --- a/pkg/volume/flocker/BUILD +++ b/pkg/volume/flocker/BUILD @@ -27,7 +27,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//vendor/github.com/clusterhq/flocker-go:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/flocker/flocker.go b/pkg/volume/flocker/flocker.go index 8a250ace67acc..8651ba4908322 100644 --- a/pkg/volume/flocker/flocker.go +++ b/pkg/volume/flocker/flocker.go @@ -22,9 +22,9 @@ import ( "path" 
"time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/env" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" @@ -310,9 +310,9 @@ func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { // TODO: handle failed mounts here. notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("flockerVolume set up: %s %v %v, datasetUUID %v readOnly %v", dir, !notMnt, err, datasetUUID, b.readOnly) + klog.V(4).Infof("flockerVolume set up: %s %v %v, datasetUUID %v readOnly %v", dir, !notMnt, err, datasetUUID, b.readOnly) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mount point: %s %v", dir, err) + klog.Errorf("cannot validate mount point: %s %v", dir, err) return err } if !notMnt { @@ -320,7 +320,7 @@ func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } if err := os.MkdirAll(dir, 0750); err != nil { - glog.Errorf("mkdir failed on disk %s (%v)", dir, err) + klog.Errorf("mkdir failed on disk %s (%v)", dir, err) return err } @@ -331,33 +331,33 @@ func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } globalFlockerPath := makeGlobalFlockerPath(datasetUUID) - glog.V(4).Infof("attempting to mount %s", dir) + klog.V(4).Infof("attempting to mount %s", dir) err = b.mounter.Mount(globalFlockerPath, dir, "", options) if err != nil { notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("failed to unmount: %v", mntErr) + klog.Errorf("failed to unmount: %v", mntErr) return err } notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("isLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", dir) return err } } os.Remove(dir) - glog.Errorf("mount of disk %s failed: %v", dir, err) + klog.Errorf("mount of disk %s failed: %v", dir, err) return err } @@ -365,7 +365,7 @@ func (b *flockerVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { volume.SetVolumeOwnership(b, fsGroup) } - glog.V(4).Infof("successfully mounted %s", dir) + klog.V(4).Infof("successfully mounted %s", dir) return nil } diff --git a/pkg/volume/flocker/flocker_util.go b/pkg/volume/flocker/flocker_util.go index 85b8263ab0bef..e753da9acd038 100644 --- a/pkg/volume/flocker/flocker_util.go +++ b/pkg/volume/flocker/flocker_util.go @@ -25,7 +25,7 @@ import ( volutil "k8s.io/kubernetes/pkg/volume/util" flockerapi "github.com/clusterhq/flocker-go" - "github.com/golang/glog" + "k8s.io/klog" ) type flockerUtil struct{} @@ -68,7 +68,7 @@ func (util *flockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID // select random node node := nodes[rand.Intn(len(nodes))] - glog.V(2).Infof("selected flocker node with UUID '%s' to provision dataset", node.UUID) + klog.V(2).Infof("selected flocker node with UUID '%s' to provision dataset", node.UUID) capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() @@ -92,7 +92,7 @@ func (util *flockerUtil) CreateVolume(c *flockerVolumeProvisioner) (datasetUUID } datasetUUID = datasetState.DatasetID - glog.V(2).Infof("successfully created Flocker dataset with UUID '%s'", datasetUUID) + klog.V(2).Infof("successfully created Flocker dataset with UUID '%s'", datasetUUID) return } diff --git a/pkg/volume/gcepd/BUILD b/pkg/volume/gcepd/BUILD index 513e4bce7066a..938b575733086 100644 --- a/pkg/volume/gcepd/BUILD +++ b/pkg/volume/gcepd/BUILD @@ -33,7 +33,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) @@ -60,7 +60,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/gcepd/attacher.go b/pkg/volume/gcepd/attacher.go index 6ce2c8928a803..5567e29f28fff 100644 --- a/pkg/volume/gcepd/attacher.go +++ b/pkg/volume/gcepd/attacher.go @@ -24,10 +24,10 @@ import ( "strconv" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -85,17 +85,17 @@ func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, nodeName ty attached, err := attacher.gceDisks.DiskIsAttached(pdName, nodeName) if err != nil { // Log error and continue with attach - glog.Errorf( + klog.Errorf( "Error checking if PD (%q) is already attached to current node (%q). Will continue and try attach anyway. err=%v", pdName, nodeName, err) } if err == nil && attached { // Volume is already attached to node. - glog.Infof("Attach operation is successful. 
PD %q is already attached to node %q.", pdName, nodeName) + klog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, nodeName) } else { if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly, isRegionalPD(spec)); err != nil { - glog.Errorf("Error attaching PD %q to node %q: %+v", pdName, nodeName, err) + klog.Errorf("Error attaching PD %q to node %q: %+v", pdName, nodeName, err) return "", err } } @@ -111,7 +111,7 @@ func (attacher *gcePersistentDiskAttacher) VolumesAreAttached(specs []*volume.Sp volumeSource, _, err := getVolumeSource(spec) // If error is occurred, skip this volume and move to the next one if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) + klog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) continue } pdNameList = append(pdNameList, volumeSource.PDName) @@ -121,7 +121,7 @@ func (attacher *gcePersistentDiskAttacher) VolumesAreAttached(specs []*volume.Sp attachedResult, err := attacher.gceDisks.DisksAreAttached(pdNameList, nodeName) if err != nil { // Log error and continue with attach - glog.Errorf( + klog.Errorf( "Error checking if PDs (%v) are already attached to current node (%q). err=%v", pdNameList, nodeName, err) return volumesAttachedCheck, err @@ -131,7 +131,7 @@ func (attacher *gcePersistentDiskAttacher) VolumesAreAttached(specs []*volume.Sp if !attached { spec := volumePdNameMap[pdName] volumesAttachedCheck[spec] = false - glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", pdName, spec.Name()) + klog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", pdName, spec.Name()) } } return volumesAttachedCheck, nil @@ -156,7 +156,7 @@ func (attacher *gcePersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devi sdBefore, err := filepath.Glob(diskSDPattern) if err != nil { - glog.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err) + klog.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err) } sdBeforeSet := sets.NewString(sdBefore...) @@ -164,14 +164,14 @@ func (attacher *gcePersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devi for { select { case <-ticker.C: - glog.V(5).Infof("Checking GCE PD %q is attached.", pdName) + klog.V(5).Infof("Checking GCE PD %q is attached.", pdName) path, err := verifyDevicePath(devicePaths, sdBeforeSet, pdName) if err != nil { // Log error, if any, and continue checking periodically. 
See issue #11321 - glog.Errorf("Error verifying GCE PD (%q) is attached: %v", pdName, err) + klog.Errorf("Error verifying GCE PD (%q) is attached: %v", pdName, err) } else if path != "" { // A device path has successfully been created for the PD - glog.Infof("Successfully found attached GCE PD %q.", pdName) + klog.Infof("Successfully found attached GCE PD %q.", pdName) return path, nil } case <-timer.C: @@ -222,7 +222,7 @@ func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, device os.Remove(deviceMountPath) return err } - glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options) + klog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options) } return nil } @@ -265,19 +265,19 @@ func (detacher *gcePersistentDiskDetacher) Detach(volumeName string, nodeName ty attached, err := detacher.gceDisks.DiskIsAttached(pdName, nodeName) if err != nil { // Log error and continue with detach - glog.Errorf( + klog.Errorf( "Error checking if PD (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", pdName, nodeName, err) } if err == nil && !attached { // Volume is not attached to node. Success! - glog.Infof("Detach operation is successful. PD %q was not attached to node %q.", pdName, nodeName) + klog.Infof("Detach operation is successful. PD %q was not attached to node %q.", pdName, nodeName) return nil } if err = detacher.gceDisks.DetachDisk(pdName, nodeName); err != nil { - glog.Errorf("Error detaching PD %q from node %q: %v", pdName, nodeName, err) + klog.Errorf("Error detaching PD %q from node %q: %v", pdName, nodeName, err) return err } diff --git a/pkg/volume/gcepd/attacher_test.go b/pkg/volume/gcepd/attacher_test.go index df5490f2b97d9..6f9ab41c8295e 100644 --- a/pkg/volume/gcepd/attacher_test.go +++ b/pkg/volume/gcepd/attacher_test.go @@ -27,8 +27,8 @@ import ( "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "strings" ) @@ -346,7 +346,7 @@ func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, r return errors.New("Unexpected AttachDisk call: wrong regional") } - glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %v", diskName, nodeName, readOnly, expected.ret) + klog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %v", diskName, nodeName, readOnly, expected.ret) return expected.ret } @@ -371,7 +371,7 @@ func (testcase *testcase) DetachDisk(devicePath string, nodeName types.NodeName) return errors.New("Unexpected DetachDisk call: wrong nodeName") } - glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", devicePath, nodeName, expected.ret) + klog.V(4).Infof("DetachDisk call: %s, %s, returning %v", devicePath, nodeName, expected.ret) return expected.ret } @@ -396,7 +396,7 @@ func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeNam return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName") } - glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret) + klog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret) return expected.isAttached, expected.ret } diff --git 
a/pkg/volume/gcepd/gce_pd.go b/pkg/volume/gcepd/gce_pd.go index 4cbe76d4d06e4..455722033200e 100644 --- a/pkg/volume/gcepd/gce_pd.go +++ b/pkg/volume/gcepd/gce_pd.go @@ -24,12 +24,12 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -137,13 +137,13 @@ func (plugin *gcePersistentDiskPlugin) GetVolumeLimits() (map[string]int64, erro instances, ok := cloud.Instances() if !ok { - glog.Warning("Failed to get instances from cloud provider") + klog.Warning("Failed to get instances from cloud provider") return volumeLimits, nil } instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName()) if err != nil { - glog.Errorf("Failed to get instance type from GCE cloud provider") + klog.Errorf("Failed to get instance type from GCE cloud provider") return volumeLimits, nil } if strings.HasPrefix(instanceType, "n1-") { @@ -367,9 +367,9 @@ func (b *gcePersistentDiskMounter) SetUp(fsGroup *int64) error { func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { // TODO: handle failed mounts here. notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("GCE PersistentDisk set up: Dir (%s) PD name (%q) Mounted (%t) Error (%v), ReadOnly (%t)", dir, b.pdName, !notMnt, err, b.readOnly) + klog.V(4).Infof("GCE PersistentDisk set up: Dir (%s) PD name (%q) Mounted (%t) Error (%v), ReadOnly (%t)", dir, b.pdName, !notMnt, err, b.readOnly) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mount point: %s %v", dir, err) + klog.Errorf("cannot validate mount point: %s %v", dir, err) return err } if !notMnt { @@ -377,7 +377,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { } if err := os.MkdirAll(dir, 0750); err != nil { - glog.Errorf("mkdir failed on disk %s (%v)", dir, err) + klog.Errorf("mkdir failed on disk %s (%v)", dir, err) return err } @@ -388,7 +388,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { } globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName) - glog.V(4).Infof("attempting to mount %s", dir) + klog.V(4).Infof("attempting to mount %s", dir) mountOptions := util.JoinMountOptions(b.mountOptions, options) @@ -396,27 +396,27 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { if err != nil { notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", dir) return err } } os.Remove(dir) - glog.Errorf("Mount of disk %s failed: %v", dir, err) + klog.Errorf("Mount of disk %s failed: %v", dir, err) return err } @@ -424,7 +424,7 @@ func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { volume.SetVolumeOwnership(b, fsGroup) } - glog.V(4).Infof("Successfully mounted %s", dir) + klog.V(4).Infof("Successfully mounted %s", dir) return nil } diff --git a/pkg/volume/gcepd/gce_pd_block.go b/pkg/volume/gcepd/gce_pd_block.go index 417a5c9652ac1..f4e7e2c7252d8 100644 --- a/pkg/volume/gcepd/gce_pd_block.go +++ b/pkg/volume/gcepd/gce_pd_block.go @@ -22,9 +22,9 @@ import ( "path/filepath" "strconv" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -46,7 +46,7 @@ func (plugin *gcePersistentDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID if err != nil { return nil, err } - glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) globalMapPath := filepath.Dir(globalMapPathUUID) if len(globalMapPath) <= 1 { diff --git a/pkg/volume/gcepd/gce_util.go b/pkg/volume/gcepd/gce_util.go index 947cc13fe1481..153bab8716d14 100644 --- a/pkg/volume/gcepd/gce_util.go +++ b/pkg/volume/gcepd/gce_util.go @@ -24,11 +24,11 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" @@ -79,12 +79,12 @@ func (util *GCEDiskUtil) DeleteVolume(d *gcePersistentDiskDeleter) error { } if err = cloud.DeleteDisk(d.pdName); err != nil { - glog.V(2).Infof("Error deleting GCE PD volume %s: %v", d.pdName, err) + klog.V(2).Infof("Error deleting GCE PD volume %s: %v", d.pdName, err) // GCE cloud provider returns volume.deletedVolumeInUseError when // necessary, no handling needed here. return err } - glog.V(2).Infof("Successfully deleted GCE PD volume %s", d.pdName) + klog.V(2).Infof("Successfully deleted GCE PD volume %s", d.pdName) return nil } @@ -154,7 +154,7 @@ func (util *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node *v1. case replicationTypeRegionalPD: selectedZones, err := volumeutil.SelectZonesForVolume(zonePresent, zonesPresent, configuredZone, configuredZones, activezones, node, allowedTopologies, c.options.PVC.Name, maxRegionalPDZones) if err != nil { - glog.V(2).Infof("Error selecting zones for regional GCE PD volume: %v", err) + klog.V(2).Infof("Error selecting zones for regional GCE PD volume: %v", err) return "", 0, nil, "", err } if err = cloud.CreateRegionalDisk( @@ -163,10 +163,10 @@ func (util *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node *v1. 
selectedZones, int64(requestGB), *c.options.CloudTags); err != nil { - glog.V(2).Infof("Error creating regional GCE PD volume: %v", err) + klog.V(2).Infof("Error creating regional GCE PD volume: %v", err) return "", 0, nil, "", err } - glog.V(2).Infof("Successfully created Regional GCE PD volume %s", name) + klog.V(2).Infof("Successfully created Regional GCE PD volume %s", name) case replicationTypeNone: selectedZone, err := volumeutil.SelectZoneForVolume(zonePresent, zonesPresent, configuredZone, configuredZones, activezones, node, allowedTopologies, c.options.PVC.Name) @@ -179,10 +179,10 @@ func (util *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node *v1. selectedZone, int64(requestGB), *c.options.CloudTags); err != nil { - glog.V(2).Infof("Error creating single-zone GCE PD volume: %v", err) + klog.V(2).Infof("Error creating single-zone GCE PD volume: %v", err) return "", 0, nil, "", err } - glog.V(2).Infof("Successfully created single-zone GCE PD volume %s", name) + klog.V(2).Infof("Successfully created single-zone GCE PD volume %s", name) default: return "", 0, nil, "", fmt.Errorf("replication-type of '%s' is not supported", replicationType) @@ -191,7 +191,7 @@ func (util *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner, node *v1. labels, err := cloud.GetAutoLabelsForPD(name, "" /* zone */) if err != nil { // We don't really want to leak the volume here... - glog.Errorf("error getting labels for volume %q: %v", name, err) + klog.Errorf("error getting labels for volume %q: %v", name, err) } return name, int(requestGB), labels, fstype, nil @@ -203,7 +203,7 @@ func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String, diskName st // It's possible udevadm was called on other disks so it should not block this // call. If it did fail on this disk, then the devicePath will either // not exist or be wrong. If it's wrong, then the scsi_id check below will fail. - glog.Errorf("udevadmChangeToNewDrives failed with: %v", err) + klog.Errorf("udevadmChangeToNewDrives failed with: %v", err) } for _, path := range devicePaths { @@ -219,7 +219,7 @@ func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String, diskName st // The device link is not pointing to the correct device // Trigger udev on this device to try to fix the link if udevErr := udevadmChangeToDrive(path); udevErr != nil { - glog.Errorf("udevadmChangeToDrive %q failed with: %v", path, err) + klog.Errorf("udevadmChangeToDrive %q failed with: %v", path, err) } // Return error to retry WaitForAttach and verifyDevicePath @@ -241,7 +241,7 @@ func getScsiSerial(devicePath, diskName string) (string, error) { } if !exists { - glog.V(6).Infof("scsi_id doesn't exist; skipping check for %v", devicePath) + klog.V(6).Infof("scsi_id doesn't exist; skipping check for %v", devicePath) return diskName, nil } @@ -290,7 +290,7 @@ func getCloudProvider(cloudProvider cloudprovider.Interface) (*gcecloud.Cloud, e gceCloudProvider, ok := cloudProvider.(*gcecloud.Cloud) if !ok || gceCloudProvider == nil { // Retry on error. See issue #11321 - glog.Errorf("Failed to get GCE Cloud Provider. plugin.host.GetCloudProvider returned %v instead", cloudProvider) + klog.Errorf("Failed to get GCE Cloud Provider. plugin.host.GetCloudProvider returned %v instead", cloudProvider) time.Sleep(errorSleepDuration) continue } @@ -324,14 +324,14 @@ func udevadmChangeToNewDrives(sdBeforeSet sets.String) error { // drivePath must be the block device path to trigger on, in the format "/dev/sd*", or a symlink to it. 
// This is workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed. func udevadmChangeToDrive(drivePath string) error { - glog.V(5).Infof("udevadmChangeToDrive: drive=%q", drivePath) + klog.V(5).Infof("udevadmChangeToDrive: drive=%q", drivePath) // Evaluate symlink, if any drive, err := filepath.EvalSymlinks(drivePath) if err != nil { return fmt.Errorf("udevadmChangeToDrive: filepath.EvalSymlinks(%q) failed with %v", drivePath, err) } - glog.V(5).Infof("udevadmChangeToDrive: symlink path is %q", drive) + klog.V(5).Infof("udevadmChangeToDrive: symlink path is %q", drive) // Check to make sure input is "/dev/sd*" if !strings.Contains(drive, diskSDPath) { diff --git a/pkg/volume/glusterfs/BUILD b/pkg/volume/glusterfs/BUILD index bd7d5e0cc28d8..52e33003414f8 100644 --- a/pkg/volume/glusterfs/BUILD +++ b/pkg/volume/glusterfs/BUILD @@ -30,9 +30,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/heketi/heketi/client/api/go-client:go_default_library", "//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 81783c728a320..5e1811263d64d 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -26,7 +26,6 @@ import ( dstrings "strings" "sync" - "github.com/golang/glog" gcli "github.com/heketi/heketi/client/api/go-client" gapi "github.com/heketi/heketi/pkg/glusterfs/api" "k8s.io/api/core/v1" @@ -38,6 +37,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" @@ -174,10 +174,10 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu ep, err := kubeClient.Core().Endpoints(epNamespace).Get(epName, metav1.GetOptions{}) if err != nil { - glog.Errorf("failed to get endpoint %s: %v", epName, err) + klog.Errorf("failed to get endpoint %s: %v", epName, err) return nil, err } - glog.V(4).Infof("glusterfs pv endpoint %v", ep) + klog.V(4).Infof("glusterfs pv endpoint %v", ep) return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(plugin.GetPluginName())) } @@ -210,7 +210,7 @@ func (plugin *glusterfsPlugin) getEndpointNameAndNamespace(spec *volume.Spec, de func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endpoints, pod *v1.Pod, mounter mount.Interface) (volume.Mounter, error) { volPath, readOnly, err := getVolumeInfo(spec) if err != nil { - glog.Errorf("failed to get volumesource : %v", err) + klog.Errorf("failed to get volumesource : %v", err) return nil, err } return &glusterfsMounter{ @@ -298,7 +298,7 @@ func (b *glusterfsMounter) SetUp(fsGroup *int64) error { func (b *glusterfsMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("mount setup: %s %v %v", dir, !notMnt, err) + klog.V(4).Infof("mount setup: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { return err } @@ -354,11 +354,11 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { switch { case 
dstrings.HasPrefix(userOpt, "log-file"): - glog.V(4).Infof("log-file mount option has provided") + klog.V(4).Infof("log-file mount option has been provided") hasLogFile = true case dstrings.HasPrefix(userOpt, "log-level"): - glog.V(4).Infof("log-level mount option has provided") + klog.V(4).Infof("log-level mount option has been provided") hasLogLevel = true } @@ -418,7 +418,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { ip := addrlist[0] errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", mountOptions) if errs == nil { - glog.Infof("successfully mounted directory %s", dir) + klog.Infof("successfully mounted directory %s", dir) return nil } @@ -435,7 +435,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { } errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", noAutoMountOptions) if errs == nil { - glog.Infof("successfully mounted %s", dir) + klog.Infof("successfully mounted %s", dir) return nil } } @@ -573,7 +573,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll } pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { - glog.Error("failed to get existing persistent volumes") + klog.Error("failed to get existing persistent volumes") return err } @@ -587,21 +587,21 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll gidStr, ok := pv.Annotations[volutil.VolumeGidAnnotationKey] if !ok { - glog.Warningf("no GID found in pv %v", pvName) + klog.Warningf("no GID found in pv %v", pvName) continue } gid, err := convertGid(gidStr) if err != nil { - glog.Errorf("failed to parse gid %s: %v", gidStr, err) + klog.Errorf("failed to parse gid %s: %v", gidStr, err) continue } _, err = gidTable.Allocate(gid) if err == ErrConflict { - glog.Warningf("GID %v found in pv %v was already allocated", gid, pvName) + klog.Warningf("GID %v found in pv %v was already allocated", gid, pvName) } else if err != nil { - glog.Errorf("failed to store gid %v found in pv %v: %v", gid, pvName, err) + klog.Errorf("failed to store gid %v found in pv %v: %v", gid, pvName, err) return err } } @@ -677,7 +677,7 @@ func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) { } func (d *glusterfsVolumeDeleter) Delete() error { - glog.V(2).Infof("delete volume %s", d.glusterfsMounter.path) + klog.V(2).Infof("delete volume %s", d.glusterfsMounter.path) volumeName := d.glusterfsMounter.path volumeID, err := getVolumeID(d.spec, volumeName) @@ -696,11 +696,11 @@ func (d *glusterfsVolumeDeleter) Delete() error { } d.provisionerConfig = *cfg - glog.V(4).Infof("deleting volume %q", volumeID) + klog.V(4).Infof("deleting volume %q", volumeID) gid, exists, err := d.getGid() if err != nil { - glog.Error(err) + klog.Error(err) } else if exists { gidTable, err := d.plugin.getGidTable(class.Name, cfg.gidMin, cfg.gidMax) if err != nil { @@ -715,37 +715,37 @@ func (d *glusterfsVolumeDeleter) Delete() error { cli := gcli.NewClient(d.url, d.user, d.secretValue) if cli == nil { - glog.Errorf("failed to create glusterfs REST client") + klog.Errorf("failed to create glusterfs REST client") return fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed") } err = cli.VolumeDelete(volumeID) if err != nil { - glog.Errorf("failed to delete volume %s: %v", volumeName, err) + klog.Errorf("failed to delete volume %s: %v", volumeName, err) return err } - glog.V(2).Infof("volume %s deleted successfully", volumeName) + klog.V(2).Infof("volume %s 
deleted successfully", volumeName) //Deleter takes endpoint and namespace from pv spec. pvSpec := d.spec.Spec var dynamicEndpoint, dynamicNamespace string if pvSpec.ClaimRef == nil { - glog.Errorf("ClaimRef is nil") + klog.Errorf("ClaimRef is nil") return fmt.Errorf("ClaimRef is nil") } if pvSpec.ClaimRef.Namespace == "" { - glog.Errorf("namespace is nil") + klog.Errorf("namespace is nil") return fmt.Errorf("namespace is nil") } dynamicNamespace = pvSpec.ClaimRef.Namespace if pvSpec.Glusterfs.EndpointsName != "" { dynamicEndpoint = pvSpec.Glusterfs.EndpointsName } - glog.V(3).Infof("dynamic namespace and endpoint %v/%v", dynamicNamespace, dynamicEndpoint) + klog.V(3).Infof("dynamic namespace and endpoint %v/%v", dynamicNamespace, dynamicEndpoint) err = d.deleteEndpointService(dynamicNamespace, dynamicEndpoint) if err != nil { - glog.Errorf("failed to delete endpoint/service %v/%v: %v", dynamicNamespace, dynamicEndpoint, err) + klog.Errorf("failed to delete endpoint/service %v/%v: %v", dynamicNamespace, dynamicEndpoint, err) } else { - glog.V(1).Infof("endpoint %v/%v is deleted successfully ", dynamicNamespace, dynamicEndpoint) + klog.V(1).Infof("endpoint %v/%v is deleted successfully ", dynamicNamespace, dynamicEndpoint) } return nil } @@ -756,7 +756,7 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop } if p.options.PVC.Spec.Selector != nil { - glog.V(4).Infof("not able to parse your claim Selector") + klog.V(4).Infof("not able to parse your claim Selector") return nil, fmt.Errorf("not able to parse your claim Selector") } @@ -764,7 +764,7 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop return nil, fmt.Errorf("%s does not support block volume provisioning", p.plugin.GetPluginName()) } - glog.V(4).Infof("Provision VolumeOptions %v", p.options) + klog.V(4).Infof("Provision VolumeOptions %v", p.options) scName := v1helper.GetPersistentVolumeClaimClass(p.options.PVC) cfg, err := parseClassParameters(p.options.Parameters, p.plugin.host.GetKubeClient()) if err != nil { @@ -779,19 +779,19 @@ func (p *glusterfsVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTop gid, _, err := gidTable.AllocateNext() if err != nil { - glog.Errorf("failed to reserve GID from table: %v", err) + klog.Errorf("failed to reserve GID from table: %v", err) return nil, fmt.Errorf("failed to reserve GID from table: %v", err) } - glog.V(2).Infof("Allocated GID %d for PVC %s", gid, p.options.PVC.Name) + klog.V(2).Infof("Allocated GID %d for PVC %s", gid, p.options.PVC.Name) glusterfs, sizeGiB, volID, err := p.CreateVolume(gid) if err != nil { if releaseErr := gidTable.Release(gid); releaseErr != nil { - glog.Errorf("error when releasing GID in storageclass %s: %v", scName, releaseErr) + klog.Errorf("error when releasing GID in storageclass %s: %v", scName, releaseErr) } - glog.Errorf("failed to create volume: %v", err) + klog.Errorf("failed to create volume: %v", err) return nil, fmt.Errorf("failed to create volume: %v", err) } mode := v1.PersistentVolumeFilesystem @@ -834,20 +834,20 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi if err != nil { return nil, 0, "", err } - glog.V(2).Infof("create volume of size %dGiB", sz) + klog.V(2).Infof("create volume of size %dGiB", sz) if p.url == "" { - glog.Errorf("REST server endpoint is empty") + klog.Errorf("REST server endpoint is empty") return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty") } cli := gcli.NewClient(p.url, p.user, 
p.secretValue) if cli == nil { - glog.Errorf("failed to create glusterfs REST client") + klog.Errorf("failed to create glusterfs REST client") return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed") } if p.provisionerConfig.clusterID != "" { clusterIDs = dstrings.Split(p.clusterID, ",") - glog.V(4).Infof("provided clusterIDs %v", clusterIDs) + klog.V(4).Infof("provided clusterIDs %v", clusterIDs) } if p.provisionerConfig.volumeNamePrefix != "" { @@ -866,14 +866,14 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions, Snapshot: snaps} volume, err := cli.VolumeCreate(volumeReq) if err != nil { - glog.Errorf("failed to create volume: %v", err) + klog.Errorf("failed to create volume: %v", err) return nil, 0, "", fmt.Errorf("failed to create volume: %v", err) } - glog.V(1).Infof("volume with size %d and name %s created", volume.Size, volume.Name) + klog.V(1).Infof("volume with size %d and name %s created", volume.Size, volume.Name) volID = volume.Id dynamicHostIps, err := getClusterNodes(cli, volume.Cluster) if err != nil { - glog.Errorf("failed to get cluster nodes for volume %s: %v", volume, err) + klog.Errorf("failed to get cluster nodes for volume %s: %v", volume, err) return nil, 0, "", fmt.Errorf("failed to get cluster nodes for volume %s: %v", volume, err) } @@ -885,14 +885,14 @@ func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsPersi epNamespace := p.options.PVC.Namespace endpoint, service, err := p.createEndpointService(epNamespace, epServiceName, dynamicHostIps, p.options.PVC.Name) if err != nil { - glog.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err) + klog.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err) deleteErr := cli.VolumeDelete(volume.Id) if deleteErr != nil { - glog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr) + klog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr) } return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err) } - glog.V(3).Infof("dynamic endpoint %v and service %v ", endpoint, service) + klog.V(3).Infof("dynamic endpoint %v and service %v ", endpoint, service) return &v1.GlusterfsPersistentVolumeSource{ EndpointsName: endpoint.Name, EndpointsNamespace: &epNamespace, @@ -930,11 +930,11 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS } _, err = kubeClient.CoreV1().Endpoints(namespace).Create(endpoint) if err != nil && errors.IsAlreadyExists(err) { - glog.V(1).Infof("endpoint %s already exist in namespace %s", endpoint, namespace) + klog.V(1).Infof("endpoint %s already exist in namespace %s", endpoint, namespace) err = nil } if err != nil { - glog.Errorf("failed to create endpoint: %v", err) + klog.Errorf("failed to create endpoint: %v", err) return nil, nil, fmt.Errorf("failed to create endpoint: %v", err) } service = &v1.Service{ @@ -950,11 +950,11 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS {Protocol: "TCP", Port: 1}}}} _, err = kubeClient.CoreV1().Services(namespace).Create(service) if err != nil && errors.IsAlreadyExists(err) { - glog.V(1).Infof("service %s already exist in namespace %s", service, 
namespace) + klog.V(1).Infof("service %s already exist in namespace %s", service, namespace) err = nil } if err != nil { - glog.Errorf("failed to create service: %v", err) + klog.Errorf("failed to create service: %v", err) return nil, nil, fmt.Errorf("error creating service: %v", err) } return endpoint, service, nil @@ -967,10 +967,10 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi } err = kubeClient.CoreV1().Services(namespace).Delete(epServiceName, nil) if err != nil { - glog.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err) + klog.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err) return fmt.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err) } - glog.V(1).Infof("service/endpoint: %s/%s deleted successfully", namespace, epServiceName) + klog.V(1).Infof("service/endpoint: %s/%s deleted successfully", namespace, epServiceName) return nil } @@ -978,7 +978,7 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) { secretMap, err := volutil.GetSecretForPV(namespace, secretName, glusterfsPluginName, kubeClient) if err != nil { - glog.Errorf("failed to get secret: %s/%s: %v", namespace, secretName, err) + klog.Errorf("failed to get secret: %s/%s: %v", namespace, secretName, err) return "", fmt.Errorf("failed to get secret %s/%s: %v", namespace, secretName, err) } if len(secretMap) == 0 { @@ -1000,7 +1000,7 @@ func parseSecret(namespace, secretName string, kubeClient clientset.Interface) ( func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, err error) { clusterinfo, err := cli.ClusterInfo(cluster) if err != nil { - glog.Errorf("failed to get cluster details: %v", err) + klog.Errorf("failed to get cluster details: %v", err) return nil, fmt.Errorf("failed to get cluster details: %v", err) } @@ -1010,15 +1010,15 @@ func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, for _, node := range clusterinfo.Nodes { nodeInfo, err := cli.NodeInfo(string(node)) if err != nil { - glog.Errorf("failed to get host ipaddress: %v", err) + klog.Errorf("failed to get host ipaddress: %v", err) return nil, fmt.Errorf("failed to get host ipaddress: %v", err) } ipaddr := dstrings.Join(nodeInfo.NodeAddRequest.Hostnames.Storage, "") dynamicHostIps = append(dynamicHostIps, ipaddr) } - glog.V(3).Infof("host list :%v", dynamicHostIps) + klog.V(3).Infof("host list :%v", dynamicHostIps) if len(dynamicHostIps) == 0 { - glog.Errorf("no hosts found: %v", err) + klog.Errorf("no hosts found: %v", err) return nil, fmt.Errorf("no hosts found: %v", err) } return dynamicHostIps, nil @@ -1232,7 +1232,7 @@ func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) { func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { pvSpec := spec.PersistentVolume.Spec volumeName := pvSpec.Glusterfs.Path - glog.V(2).Infof("Received request to expand volume %s", volumeName) + klog.V(2).Infof("Received request to expand volume %s", volumeName) volumeID, err := getVolumeID(spec.PersistentVolume, volumeName) if err != nil { @@ -1249,12 +1249,12 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res return oldSize, err } - glog.V(4).Infof("expanding volume: %q", volumeID) + klog.V(4).Infof("expanding volume: %q", volumeID) 
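The expand path here reduces to three heketi REST calls: create a client, read the current size, then request growth by the delta. Below is a condensed, self-contained sketch of that sequence, not the tree's implementation; the endpoint and credentials are placeholders (the plugin reads them from the StorageClass parameters and its secret), and VolumeExpandRequest.Size is an increment in GiB rather than an absolute size.

    package glusterfs

    import (
        "fmt"

        gcli "github.com/heketi/heketi/client/api/go-client"
        gapi "github.com/heketi/heketi/pkg/glusterfs/api"
        "k8s.io/klog"
    )

    // expandVolume sketches the sequence implemented above: look up the
    // current size, then ask heketi to grow the volume by the difference.
    func expandVolume(volumeID string, newSizeGiB int) error {
        // Placeholder endpoint and credentials.
        cli := gcli.NewClient("http://heketi.example.com:8080", "admin", "examplekey")
        if cli == nil {
            return fmt.Errorf("failed to create glusterfs REST client")
        }
        info, err := cli.VolumeInfo(volumeID)
        if err != nil {
            return fmt.Errorf("error when fetching details of volume %s: %v", volumeID, err)
        }
        if info.Size >= newSizeGiB {
            klog.V(2).Infof("volume %s is already %dGiB, nothing to expand", volumeID, info.Size)
            return nil
        }
        res, err := cli.VolumeExpand(volumeID, &gapi.VolumeExpandRequest{Size: newSizeGiB - info.Size})
        if err != nil {
            return err
        }
        klog.V(2).Infof("volume %s expanded to new size %d successfully", volumeID, res.Size)
        return nil
    }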
//Create REST server connection cli := gcli.NewClient(cfg.url, cfg.user, cfg.secretValue) if cli == nil { - glog.Errorf("failed to create glusterfs REST client") + klog.Errorf("failed to create glusterfs REST client") return oldSize, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed") } @@ -1268,7 +1268,7 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res //Check the existing volume size currentVolumeInfo, err := cli.VolumeInfo(volumeID) if err != nil { - glog.Errorf("error when fetching details of volume %s: %v", volumeName, err) + klog.Errorf("error when fetching details of volume %s: %v", volumeName, err) return oldSize, err } @@ -1282,11 +1282,11 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res // Expand the volume volumeInfoRes, err := cli.VolumeExpand(volumeID, volumeExpandReq) if err != nil { - glog.Errorf("failed to expand volume %s: %v", volumeName, err) + klog.Errorf("failed to expand volume %s: %v", volumeName, err) return oldSize, err } - glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeName, volumeInfoRes.Size) + klog.V(2).Infof("volume %s expanded to new size %d successfully", volumeName, volumeInfoRes.Size) newVolumeSize := resource.MustParse(fmt.Sprintf("%dGi", volumeInfoRes.Size)) return newVolumeSize, nil } diff --git a/pkg/volume/glusterfs/glusterfs_util.go b/pkg/volume/glusterfs/glusterfs_util.go index 83d8a13c7696c..2b19bf709f8d3 100644 --- a/pkg/volume/glusterfs/glusterfs_util.go +++ b/pkg/volume/glusterfs/glusterfs_util.go @@ -21,7 +21,7 @@ import ( "fmt" "os" - "github.com/golang/glog" + "k8s.io/klog" ) // readGlusterLog will take the last 2 lines of the log file @@ -34,7 +34,7 @@ func readGlusterLog(path string, podName string) error { var line2 string linecount := 0 - glog.Infof("failure, now attempting to read the gluster log for pod %s", podName) + klog.Infof("failure, now attempting to read the gluster log for pod %s", podName) // Check and make sure path exists if len(path) == 0 { diff --git a/pkg/volume/iscsi/BUILD b/pkg/volume/iscsi/BUILD index 1fb3b31bf4e99..1143c892e86cb 100644 --- a/pkg/volume/iscsi/BUILD +++ b/pkg/volume/iscsi/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/iscsi/attacher.go b/pkg/volume/iscsi/attacher.go index 26e9e52808ba4..922bc31de6c6e 100644 --- a/pkg/volume/iscsi/attacher.go +++ b/pkg/volume/iscsi/attacher.go @@ -21,10 +21,10 @@ import ( "os" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" @@ -79,7 +79,7 @@ func (attacher *iscsiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) { mounter, err := volumeSpecToMounter(spec, attacher.host, attacher.targetLocks, pod) if err != nil { - glog.Warningf("failed to get iscsi mounter: %v", err) + klog.Warningf("failed to get iscsi mounter: %v", err) return "", err } return 
attacher.manager.AttachDisk(*mounter) @@ -89,7 +89,7 @@ func (attacher *iscsiAttacher) GetDeviceMountPath( spec *volume.Spec) (string, error) { mounter, err := volumeSpecToMounter(spec, attacher.host, attacher.targetLocks, nil) if err != nil { - glog.Warningf("failed to get iscsi mounter: %v", err) + klog.Warningf("failed to get iscsi mounter: %v", err) return "", err } if mounter.InitiatorName != "" { @@ -167,12 +167,12 @@ func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error { if err != nil { return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", deviceMountPath, err) } - glog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", deviceMountPath) + klog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", deviceMountPath) err = os.RemoveAll(deviceMountPath) if err != nil { return fmt.Errorf("iscsi: failed to delete the directory: %s\nError: %v", deviceMountPath, err) } - glog.V(4).Infof("iscsi: successfully detached disk: %s", deviceMountPath) + klog.V(4).Infof("iscsi: successfully detached disk: %s", deviceMountPath) return nil } @@ -206,7 +206,7 @@ func volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, targetLocks if err != nil { return nil, err } - glog.V(5).Infof("iscsi: VolumeSpecToMounter volumeMode %s", volumeMode) + klog.V(5).Infof("iscsi: VolumeSpecToMounter volumeMode %s", volumeMode) return &iscsiDiskMounter{ iscsiDisk: iscsiDisk, fsType: fsType, diff --git a/pkg/volume/iscsi/disk_manager.go b/pkg/volume/iscsi/disk_manager.go index aa1caeaf99cd6..aff2045d7fff8 100644 --- a/pkg/volume/iscsi/disk_manager.go +++ b/pkg/volume/iscsi/disk_manager.go @@ -19,7 +19,7 @@ package iscsi import ( "os" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -43,7 +43,7 @@ type diskManager interface { func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter mount.Interface, fsGroup *int64) error { notMnt, err := mounter.IsLikelyNotMountPoint(volPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mountpoint: %s", volPath) + klog.Errorf("cannot validate mountpoint: %s", volPath) return err } if !notMnt { @@ -51,7 +51,7 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter } if err := os.MkdirAll(volPath, 0750); err != nil { - glog.Errorf("failed to mkdir:%s", volPath) + klog.Errorf("failed to mkdir:%s", volPath) return err } // Perform a bind mount to the full path to allow duplicate mounts of the same disk. 
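The hunk below, and its near-identical twins in the local and nfs plugins later in this patch, share one recovery idiom: if the bind mount fails, check whether a half-set-up mount was left behind, unmount it, re-check, and otherwise leave the retry to the next kubelet sync loop. A minimal generic sketch of that idiom, assuming only the tree's mount.Interface:

    package volumeutil

    import (
        "k8s.io/klog"
        "k8s.io/kubernetes/pkg/util/mount"
    )

    // bindMountWithRollback mirrors the recovery idiom in the surrounding
    // hunks: on a failed mount, detect a half-finished mount point, try to
    // undo it, and return the original error so the next sync loop retries.
    func bindMountWithRollback(m mount.Interface, source, target string, options []string) error {
        err := m.Mount(source, target, "", options)
        if err == nil {
            return nil
        }
        klog.Errorf("failed to bind mount: source:%s, target:%s, err:%v", source, target, err)
        notMnt, mntErr := m.IsLikelyNotMountPoint(target)
        if mntErr != nil {
            klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
            return err
        }
        if !notMnt {
            if mntErr = m.Unmount(target); mntErr != nil {
                klog.Errorf("failed to unmount: %v", mntErr)
                return err
            }
            notMnt, mntErr = m.IsLikelyNotMountPoint(target)
            if mntErr != nil {
                klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
                return err
            }
            if !notMnt {
                // Still mounted despite the unmount; will most likely be
                // retried on the next sync loop.
                klog.Errorf("%s is still mounted, despite call to unmount()", target)
            }
        }
        return err
    }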
@@ -67,25 +67,25 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter mountOptions := util.JoinMountOptions(b.mountOptions, options) err = mounter.Mount(globalPDPath, volPath, "", mountOptions) if err != nil { - glog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err) + klog.Errorf("Failed to bind mount: source:%s, target:%s, err:%v", globalPDPath, volPath, err) noMnt, mntErr := b.mounter.IsLikelyNotMountPoint(volPath) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !noMnt { if mntErr = b.mounter.Unmount(volPath); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } noMnt, mntErr = b.mounter.IsLikelyNotMountPoint(volPath) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !noMnt { // will most likely retry on next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", volPath) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", volPath) return err } } diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index f7f2cc2b130d4..f8612d9dc1996 100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -23,10 +23,10 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" @@ -252,7 +252,7 @@ func (plugin *iscsiPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName if err != nil { return nil, err } - glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) // Retrieve volume information from globalMapPathUUID // globalMapPathUUID example: // plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} @@ -287,7 +287,7 @@ func (iscsi *iscsiDisk) GetPath() string { func (iscsi *iscsiDisk) iscsiGlobalMapPath(spec *volume.Spec) (string, error) { mounter, err := volumeSpecToMounter(spec, iscsi.plugin.host, iscsi.plugin.targetLocks, nil /* pod */) if err != nil { - glog.Warningf("failed to get iscsi mounter: %v", err) + klog.Warningf("failed to get iscsi mounter: %v", err) return "", err } return iscsi.manager.MakeGlobalVDPDName(*mounter.iscsiDisk), nil @@ -334,7 +334,7 @@ func (b *iscsiDiskMounter) SetUpAt(dir string, fsGroup *int64) error { // diskSetUp checks mountpoints and prevent repeated calls err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup) if err != nil { - glog.Errorf("iscsi: failed to setup") + klog.Errorf("iscsi: failed to setup") } return err } @@ -392,12 +392,12 @@ func (c *iscsiDiskUnmapper) TearDownDevice(mapPath, _ string) error { if err != nil { return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", mapPath, err) } - glog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", mapPath) + klog.V(4).Infof("iscsi: %q is unmounted, deleting the directory", mapPath) err = os.RemoveAll(mapPath) if err != nil { return fmt.Errorf("iscsi: failed to delete the directory: %s\nError: %v", mapPath, err) } - 
glog.V(4).Infof("iscsi: successfully detached disk: %s", mapPath) + klog.V(4).Infof("iscsi: successfully detached disk: %s", mapPath) return nil } @@ -582,7 +582,7 @@ func createSecretMap(spec *volume.Spec, plugin *iscsiPlugin, namespace string) ( } secret = make(map[string]string) for name, data := range secretObj.Data { - glog.V(4).Infof("retrieving CHAP secret name: %s", name) + klog.V(4).Infof("retrieving CHAP secret name: %s", name) secret[name] = string(data) } } @@ -649,7 +649,7 @@ func getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath string) (*volume.S ISCSIInterface: iface, }, ) - glog.V(5).Infof("ConstructBlockVolumeSpec: TargetPortal: %v, IQN: %v, Lun: %v, ISCSIInterface: %v", + klog.V(5).Infof("ConstructBlockVolumeSpec: TargetPortal: %v, IQN: %v, Lun: %v, ISCSIInterface: %v", iscsiPV.Spec.PersistentVolumeSource.ISCSI.TargetPortal, iscsiPV.Spec.PersistentVolumeSource.ISCSI.IQN, iscsiPV.Spec.PersistentVolumeSource.ISCSI.Lun, diff --git a/pkg/volume/iscsi/iscsi_util.go b/pkg/volume/iscsi/iscsi_util.go index 2e7eee8901a15..ae9c7a70fc8e4 100644 --- a/pkg/volume/iscsi/iscsi_util.go +++ b/pkg/volume/iscsi/iscsi_util.go @@ -27,9 +27,9 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" @@ -253,7 +253,7 @@ func scanOneLun(hostNumber int, lunNumber int) error { return fmt.Errorf("No data written to file: %s", filename) } - glog.V(3).Infof("Scanned SCSI host %d LUN %d", hostNumber, lunNumber) + klog.V(3).Infof("Scanned SCSI host %d LUN %d", hostNumber, lunNumber) return nil } @@ -291,7 +291,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { out, err := b.exec.Run("iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "show") if err != nil { - glog.Errorf("iscsi: could not read iface %s error: %s", b.Iface, string(out)) + klog.Errorf("iscsi: could not read iface %s error: %s", b.Iface, string(out)) return "", err } @@ -305,7 +305,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { newIface := bkpPortal[0] + ":" + b.VolName err = cloneIface(b, newIface) if err != nil { - glog.Errorf("iscsi: failed to clone iface: %s error: %v", b.Iface, err) + klog.Errorf("iscsi: failed to clone iface: %s error: %v", b.Iface, err) return "", err } // update iface name @@ -323,18 +323,18 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { if err != nil { return "", err } - glog.V(4).Infof("AttachDisk portal->host map for %s is %v", b.Iqn, portalHostMap) + klog.V(4).Infof("AttachDisk portal->host map for %s is %v", b.Iqn, portalHostMap) for i := 1; i <= maxAttachAttempts; i++ { for _, tp := range bkpPortal { if _, found := devicePaths[tp]; found { - glog.V(4).Infof("Device for portal %q already known", tp) + klog.V(4).Infof("Device for portal %q already known", tp) continue } hostNumber, loggedIn := portalHostMap[tp] if !loggedIn { - glog.V(4).Infof("Could not get SCSI host number for portal %s, will attempt login", tp) + klog.V(4).Infof("Could not get SCSI host number for portal %s, will attempt login", tp) // build discoverydb and discover iscsi target b.exec.Run("iscsiadm", "-m", "discoverydb", "-t", "sendtargets", "-p", tp, "-I", b.Iface, "-o", "new") @@ -374,7 +374,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { out, err = b.exec.Run("iscsiadm", "-m", "node", "-p", tp, "-T", b.Iqn, "-o", "update", "-n", 
"node.startup", "-v", "manual") if err != nil { // don't fail if we can't set startup mode, but log warning so there is a clue - glog.Warningf("Warning: Failed to set iSCSI login mode to manual. Error: %v", err) + klog.Warningf("Warning: Failed to set iSCSI login mode to manual. Error: %v", err) } // Rebuild the host map after logging in @@ -382,16 +382,16 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { if err != nil { return "", err } - glog.V(6).Infof("AttachDisk portal->host map for %s is %v", b.Iqn, portalHostMap) + klog.V(6).Infof("AttachDisk portal->host map for %s is %v", b.Iqn, portalHostMap) hostNumber, loggedIn = portalHostMap[tp] if !loggedIn { - glog.Warningf("Could not get SCSI host number for portal %s after logging in", tp) + klog.Warningf("Could not get SCSI host number for portal %s after logging in", tp) continue } } - glog.V(5).Infof("AttachDisk: scanning SCSI host %d LUN %s", hostNumber, b.Lun) + klog.V(5).Infof("AttachDisk: scanning SCSI host %d LUN %s", hostNumber, b.Lun) lunNumber, err := strconv.Atoi(b.Lun) if err != nil { return "", fmt.Errorf("AttachDisk: lun is not a number: %s\nError: %v", b.Lun, err) @@ -404,7 +404,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { } if iscsiTransport == "" { - glog.Errorf("iscsi: could not find transport name in iface %s", b.Iface) + klog.Errorf("iscsi: could not find transport name in iface %s", b.Iface) return "", fmt.Errorf("Could not parse iface file for %s", b.Iface) } if iscsiTransport == "tcp" { @@ -414,7 +414,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { } if exist := waitForPathToExist(&devicePath, multipathDeviceTimeout, iscsiTransport); !exist { - glog.Errorf("Could not attach disk: Timeout after 10s") + klog.Errorf("Could not attach disk: Timeout after 10s") // update last error lastErr = fmt.Errorf("Could not attach disk: Timeout after 10s") continue @@ -422,28 +422,28 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { devicePaths[tp] = devicePath } } - glog.V(4).Infof("iscsi: tried all devices for %q %d times, %d paths found", b.Iqn, i, len(devicePaths)) + klog.V(4).Infof("iscsi: tried all devices for %q %d times, %d paths found", b.Iqn, i, len(devicePaths)) if len(devicePaths) == 0 { // No path attached, report error and stop trying. 
kubelet will try again in a short while // delete cloned iface b.exec.Run("iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "delete") - glog.Errorf("iscsi: failed to get any path for iscsi disk, last err seen:\n%v", lastErr) + klog.Errorf("iscsi: failed to get any path for iscsi disk, last err seen:\n%v", lastErr) return "", fmt.Errorf("failed to get any path for iscsi disk, last err seen:\n%v", lastErr) } if len(devicePaths) == len(bkpPortal) { // We have all paths - glog.V(4).Infof("iscsi: all devices for %q found", b.Iqn) + klog.V(4).Infof("iscsi: all devices for %q found", b.Iqn) break } if len(devicePaths) >= minMultipathCount && i >= minAttachAttempts { // We have at least two paths for multipath and we tried the other paths long enough - glog.V(4).Infof("%d devices found for %q", len(devicePaths), b.Iqn) + klog.V(4).Infof("%d devices found for %q", len(devicePaths), b.Iqn) break } } if lastErr != nil { - glog.Errorf("iscsi: last error occurred during iscsi init:\n%v", lastErr) + klog.Errorf("iscsi: last error occurred during iscsi init:\n%v", lastErr) } devicePathList := []string{} @@ -466,7 +466,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { devicePath = devicePathList[0] } - glog.V(5).Infof("iscsi: AttachDisk devicePath: %s", devicePath) + klog.V(5).Infof("iscsi: AttachDisk devicePath: %s", devicePath) // run global mount path related operations based on volumeMode return globalPDPathOperation(b)(b, devicePath, util) } @@ -479,14 +479,14 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) { func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *ISCSIUtil) (string, error) { // TODO: remove feature gate check after no longer needed if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { - glog.V(5).Infof("iscsi: AttachDisk volumeMode: %s", b.volumeMode) + klog.V(5).Infof("iscsi: AttachDisk volumeMode: %s", b.volumeMode) if b.volumeMode == v1.PersistentVolumeBlock { // If the volumeMode is 'Block', plugin don't need to format the volume. 
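Both branches of the returned closure below persist the iSCSI disk configuration to a JSON file under the global path so that DetachDisk can later reconstruct how the device was attached without consulting the API server. A minimal sketch of that persistence step; the cut-down struct and file name here are hypothetical (the plugin serializes the full iscsiDisk):

    package iscsi

    import (
        "encoding/json"
        "os"
        "path/filepath"

        "k8s.io/klog"
    )

    // diskConfig is a hypothetical stand-in for the fields the detach path
    // needs later; the real plugin persists the full iscsiDisk struct.
    type diskConfig struct {
        Portals []string `json:"portals"`
        Iqn     string   `json:"iqn"`
        Iface   string   `json:"iface"`
        Lun     string   `json:"lun"`
    }

    // persistConfig writes the attach-time configuration next to the mount
    // so the detach path can recover it later.
    func persistConfig(globalPDPath string, c diskConfig) error {
        // File name is illustrative only.
        f, err := os.Create(filepath.Join(globalPDPath, "iscsi.json"))
        if err != nil {
            return err
        }
        defer f.Close()
        klog.V(4).Infof("persisting iscsi config under %s", globalPDPath)
        return json.NewEncoder(f).Encode(c)
    }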
return func(b iscsiDiskMounter, devicePath string, util *ISCSIUtil) (string, error) { globalPDPath := b.manager.MakeGlobalVDPDName(*b.iscsiDisk) // Create dir like /var/lib/kubelet/plugins/kubernetes.io/iscsi/volumeDevices/{ifaceName}/{portal-some_iqn-lun-lun_id} if err := os.MkdirAll(globalPDPath, 0750); err != nil { - glog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath) + klog.Errorf("iscsi: failed to mkdir %s, error: %v", globalPDPath, err) return "", err } // Persist iscsi disk config to json file for DetachDisk path @@ -506,12 +506,12 @@ func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *I } // Return confirmed devicePath to caller if !notMnt { - glog.Infof("iscsi: %s already mounted", globalPDPath) + klog.Infof("iscsi: %s already mounted", globalPDPath) return devicePath, nil } // Create dir like /var/lib/kubelet/plugins/kubernetes.io/iscsi/{ifaceName}/{portal-some_iqn-lun-lun_id} if err := os.MkdirAll(globalPDPath, 0750); err != nil { - glog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath) + klog.Errorf("iscsi: failed to mkdir %s, error: %v", globalPDPath, err) return "", err } // Persist iscsi disk config to json file for DetachDisk path @@ -519,7 +519,7 @@ func globalPDPathOperation(b iscsiDiskMounter) func(iscsiDiskMounter, string, *I err = b.mounter.FormatAndMount(devicePath, globalPDPath, b.fsType, nil) if err != nil { - glog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err) + klog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err) } return devicePath, nil @@ -541,7 +541,7 @@ func deleteDevice(deviceName string) error { } else if 0 == written { return fmt.Errorf("No data written to file: %s", filename) } - glog.V(4).Infof("Deleted block device: %s", deviceName) + klog.V(4).Infof("Deleted block device: %s", deviceName) return nil } @@ -550,13 +550,13 @@ func deleteDevice(deviceName string) error { func deleteDevices(c iscsiDiskUnmounter) error { lunNumber, err := strconv.Atoi(c.iscsiDisk.Lun) if err != nil { - glog.Errorf("iscsi delete devices: lun is not a number: %s\nError: %v", c.iscsiDisk.Lun, err) + klog.Errorf("iscsi delete devices: lun is not a number: %s\nError: %v", c.iscsiDisk.Lun, err) return err } // Enumerate the devices so we can delete them deviceNames, err := c.deviceUtil.FindDevicesForISCSILun(c.iscsiDisk.Iqn, lunNumber) if err != nil { - glog.Errorf("iscsi delete devices: could not get devices associated with LUN %d on target %s\nError: %v", + klog.Errorf("iscsi delete devices: could not get devices associated with LUN %d on target %s\nError: %v", lunNumber, c.iscsiDisk.Iqn, err) return err } @@ -573,15 +573,15 @@ func deleteDevices(c iscsiDiskUnmounter) error { for mpathDevice := range mpathDevices { _, err = c.exec.Run("multipath", "-f", mpathDevice) if err != nil { - glog.Warningf("Warning: Failed to flush multipath device map: %s\nError: %v", mpathDevice, err) + klog.Warningf("Warning: Failed to flush multipath device map: %s\nError: %v", mpathDevice, err) // Fall through -- keep deleting the block devices } - glog.V(4).Infof("Flushed multipath device: %s", mpathDevice) + klog.V(4).Infof("Flushed multipath device: %s", mpathDevice) } for _, deviceName := range deviceNames { err = deleteDevice(deviceName) if err != nil { - glog.Warningf("Warning: Failed to delete block device: %s\nError: %v", deviceName, err) + klog.Warningf("Warning: Failed to delete block device: %s\nError: %v", deviceName, err) // Fall through 
-- keep deleting other block devices } } @@ -593,7 +593,7 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { if pathExists, pathErr := volumeutil.PathExists(mntPath); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mntPath) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mntPath) return nil } @@ -603,7 +603,7 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { } if !notMnt { if err := c.mounter.Unmount(mntPath); err != nil { - glog.Errorf("iscsi detach disk: failed to unmount: %s\nError: %v", mntPath, err) + klog.Errorf("iscsi detach disk: failed to unmount: %s\nError: %v", mntPath, err) return err } } @@ -639,7 +639,7 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error { // Delete all the scsi devices and any multipath devices after unmounting if err = deleteDevices(c); err != nil { - glog.Warningf("iscsi detach disk: failed to delete devices\nError: %v", err) + klog.Warningf("iscsi detach disk: failed to delete devices\nError: %v", err) // Fall through -- even if deleting fails, a logout may fix problems } @@ -670,7 +670,7 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string) if pathExists, pathErr := volumeutil.PathExists(mapPath); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath) + klog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath) return nil } // If we arrive here, device is no longer used, see if need to logout the target @@ -711,7 +711,7 @@ func (util *ISCSIUtil) DetachBlockISCSIDisk(c iscsiDiskUnmapper, mapPath string) } devicePath := getDevByPath(portals[0], iqn, lun) - glog.V(5).Infof("iscsi: devicePath: %s", devicePath) + klog.V(5).Infof("iscsi: devicePath: %s", devicePath) if _, err = os.Stat(devicePath); err != nil { return fmt.Errorf("failed to validate devicePath: %s", devicePath) } @@ -735,16 +735,16 @@ func (util *ISCSIUtil) detachISCSIDisk(exec mount.Exec, portals []string, iqn, i logoutArgs = append(logoutArgs, []string{"-I", iface}...) deleteArgs = append(deleteArgs, []string{"-I", iface}...) } - glog.Infof("iscsi: log out target %s iqn %s iface %s", portal, iqn, iface) + klog.Infof("iscsi: log out target %s iqn %s iface %s", portal, iqn, iface) out, err := exec.Run("iscsiadm", logoutArgs...) if err != nil { - glog.Errorf("iscsi: failed to detach disk Error: %s", string(out)) + klog.Errorf("iscsi: failed to detach disk Error: %s", string(out)) } // Delete the node record - glog.Infof("iscsi: delete node record target %s iqn %s", portal, iqn) + klog.Infof("iscsi: delete node record target %s iqn %s", portal, iqn) out, err = exec.Run("iscsiadm", deleteArgs...) if err != nil { - glog.Errorf("iscsi: failed to delete node record Error: %s", string(out)) + klog.Errorf("iscsi: failed to delete node record Error: %s", string(out)) } } // Delete the iface after all sessions have logged out @@ -753,7 +753,7 @@ func (util *ISCSIUtil) detachISCSIDisk(exec mount.Exec, portals []string, iqn, i deleteArgs := []string{"-m", "iface", "-I", iface, "-o", "delete"} out, err := exec.Run("iscsiadm", deleteArgs...) 
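Each portal logout above is two iscsiadm invocations, and failures are logged rather than returned so that one stale portal cannot wedge the whole detach. A standalone sketch of that sequence using os/exec; the plugin itself routes these commands through its injected mount.Exec:

    package iscsi

    import (
        "os/exec"

        "k8s.io/klog"
    )

    // logoutTarget sketches the per-portal detach sequence: log the session
    // out, then delete the node record. Errors are logged, not returned,
    // matching the best-effort behavior of the surrounding code.
    func logoutTarget(portal, iqn, iface string) {
        logoutArgs := []string{"-m", "node", "-p", portal, "-T", iqn, "-I", iface, "--logout"}
        klog.Infof("iscsi: log out target %s iqn %s iface %s", portal, iqn, iface)
        if out, err := exec.Command("iscsiadm", logoutArgs...).CombinedOutput(); err != nil {
            klog.Errorf("iscsi: failed to detach disk Error: %s", string(out))
        }
        deleteArgs := []string{"-m", "node", "-p", portal, "-T", iqn, "-o", "delete"}
        klog.Infof("iscsi: delete node record target %s iqn %s", portal, iqn)
        if out, err := exec.Command("iscsiadm", deleteArgs...).CombinedOutput(); err != nil {
            klog.Errorf("iscsi: failed to delete node record Error: %s", string(out))
        }
    }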
if err != nil { - glog.Errorf("iscsi: failed to delete iface Error: %s", string(out)) + klog.Errorf("iscsi: failed to delete iface Error: %s", string(out)) } } diff --git a/pkg/volume/local/BUILD b/pkg/volume/local/BUILD index 8fe054c70bb75..a17a11a58bc55 100644 --- a/pkg/volume/local/BUILD +++ b/pkg/volume/local/BUILD @@ -20,7 +20,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/local/local.go b/pkg/volume/local/local.go index 574534ed55973..45249313424c5 100644 --- a/pkg/volume/local/local.go +++ b/pkg/volume/local/local.go @@ -23,7 +23,7 @@ import ( "runtime" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -257,7 +257,7 @@ func (plugin *localVolumePlugin) NewDeviceMounter() (volume.DeviceMounter, error } func (dm *deviceMounter) mountLocalBlockDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { - glog.V(4).Infof("local: mounting device %s to %s", devicePath, deviceMountPath) + klog.V(4).Infof("local: mounting device %s to %s", devicePath, deviceMountPath) notMnt, err := dm.mounter.IsLikelyNotMountPoint(deviceMountPath) if err != nil { if os.IsNotExist(err) { @@ -291,7 +291,7 @@ func (dm *deviceMounter) mountLocalBlockDevice(spec *volume.Spec, devicePath str os.Remove(deviceMountPath) return fmt.Errorf("local: failed to mount device %s at %s (fstype: %s), error %v", devicePath, deviceMountPath, fstype, err) } - glog.V(3).Infof("local: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype) + klog.V(3).Infof("local: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype) return nil } @@ -434,9 +434,9 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } notMnt, err := m.mounter.IsNotMountPoint(dir) - glog.V(4).Infof("LocalVolume mount setup: PodDir(%s) VolDir(%s) Mounted(%t) Error(%v), ReadOnly(%t)", dir, m.globalPath, !notMnt, err, m.readOnly) + klog.V(4).Infof("LocalVolume mount setup: PodDir(%s) VolDir(%s) Mounted(%t) Error(%v), ReadOnly(%t)", dir, m.globalPath, !notMnt, err, m.readOnly) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mount point: %s %v", dir, err) + klog.Errorf("cannot validate mount point: %s %v", dir, err) return err } @@ -446,7 +446,7 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { refs, err := m.mounter.GetMountRefs(m.globalPath) if fsGroup != nil { if err != nil { - glog.Errorf("cannot collect mounting information: %s %v", m.globalPath, err) + klog.Errorf("cannot collect mounting information: %s %v", m.globalPath, err) return err } @@ -468,7 +468,7 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if runtime.GOOS != "windows" { // skip below MkdirAll for windows since the "bind mount" logic is implemented differently in mount_wiondows.go if err := os.MkdirAll(dir, 0750); err != nil { - glog.Errorf("mkdir failed on disk %s (%v)", dir, err) + klog.Errorf("mkdir failed on disk %s (%v)", dir, err) return err } } @@ -479,29 +479,29 @@ func (m *localVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } mountOptions := util.JoinMountOptions(options, m.mountOptions) - glog.V(4).Infof("attempting to mount %s", 
dir) + klog.V(4).Infof("attempting to mount %s", dir) globalPath := util.MakeAbsolutePath(runtime.GOOS, m.globalPath) err = m.mounter.Mount(globalPath, dir, "", mountOptions) if err != nil { - glog.Errorf("Mount of volume %s failed: %v", dir, err) + klog.Errorf("Mount of volume %s failed: %v", dir, err) notMnt, mntErr := m.mounter.IsNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsNotMountPoint check failed: %v", mntErr) return err } if !notMnt { if mntErr = m.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notMnt, mntErr = m.mounter.IsNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsNotMountPoint check failed: %v", mntErr) return err } if !notMnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) return err } } @@ -541,7 +541,7 @@ func (u *localVolumeUnmounter) TearDown() error { // TearDownAt unmounts the bind mount func (u *localVolumeUnmounter) TearDownAt(dir string) error { - glog.V(4).Infof("Unmounting volume %q at path %q\n", u.volName, dir) + klog.V(4).Infof("Unmounting volume %q at path %q\n", u.volName, dir) return util.UnmountMountPoint(dir, u.mounter, true) /* extensiveMountPointCheck = true */ } @@ -556,7 +556,7 @@ var _ volume.BlockVolumeMapper = &localVolumeMapper{} // SetUpDevice provides physical device path for the local PV. func (m *localVolumeMapper) SetUpDevice() (string, error) { globalPath := util.MakeAbsolutePath(runtime.GOOS, m.globalPath) - glog.V(4).Infof("SetupDevice returning path %s", globalPath) + klog.V(4).Infof("SetupDevice returning path %s", globalPath) return globalPath, nil } @@ -573,7 +573,7 @@ var _ volume.BlockVolumeUnmapper = &localVolumeUnmapper{} // TearDownDevice will undo SetUpDevice procedure. In local PV, all of this already handled by operation_generator. 
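klog, unlike glog, does not register its flags in a package init(); this commit therefore calls klog.InitFlags() explicitly wherever flags are parsed, including several test init() methods. A minimal sketch of that wiring, and of the -v thresholds that the V(n) calls throughout these hunks rely on:

    package main

    import (
        "flag"

        "k8s.io/klog"
    )

    func main() {
        // klog requires explicit flag registration; passing nil registers
        // -v, -logtostderr, etc. on flag.CommandLine.
        klog.InitFlags(nil)
        flag.Set("v", "4")
        flag.Parse()

        klog.Infof("always emitted")
        klog.V(2).Infof("lifecycle event: emitted at -v=2 and above")
        klog.V(4).Infof("debug detail: emitted because -v=4")
        klog.V(6).Infof("trace detail: suppressed at -v=4")
        klog.Flush() // klog buffers; flush before exit
    }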
func (u *localVolumeUnmapper) TearDownDevice(mapPath, _ string) error { - glog.V(4).Infof("local: TearDownDevice completed for: %s", mapPath) + klog.V(4).Infof("local: TearDownDevice completed for: %s", mapPath) return nil } diff --git a/pkg/volume/nfs/BUILD b/pkg/volume/nfs/BUILD index 81fae48e2a262..28885d7fb4f9b 100644 --- a/pkg/volume/nfs/BUILD +++ b/pkg/volume/nfs/BUILD @@ -22,7 +22,7 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/nfs/nfs.go b/pkg/volume/nfs/nfs.go index 588687cdebb67..92384b3db0138 100644 --- a/pkg/volume/nfs/nfs.go +++ b/pkg/volume/nfs/nfs.go @@ -21,10 +21,10 @@ import ( "os" "runtime" - "github.com/golang/glog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -235,7 +235,7 @@ func (b *nfsMounter) SetUp(fsGroup *int64) error { func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsNotMountPoint(dir) - glog.V(4).Infof("NFS mount set up: %s %v %v", dir, !notMnt, err) + klog.V(4).Infof("NFS mount set up: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { return err } @@ -255,22 +255,22 @@ func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error { if err != nil { notMnt, mntErr := b.mounter.IsNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsNotMountPoint check failed: %v", mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notMnt, mntErr := b.mounter.IsNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsNotMountPoint check failed: %v", mntErr) return err } if !notMnt { // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", dir) return err } } diff --git a/pkg/volume/photon_pd/BUILD b/pkg/volume/photon_pd/BUILD index 0ae0a316aedc6..61d5467e4dfe6 100644 --- a/pkg/volume/photon_pd/BUILD +++ b/pkg/volume/photon_pd/BUILD @@ -25,7 +25,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -44,7 +44,7 @@ go_test( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/photon_pd/attacher.go b/pkg/volume/photon_pd/attacher.go index 016fd65b83919..d2570a4a7430c 100644 --- a/pkg/volume/photon_pd/attacher.go +++ b/pkg/volume/photon_pd/attacher.go @@ -25,9 +25,9 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/photon" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" @@ -50,7 +50,7 @@ var _ volume.DeviceMountableVolumePlugin = &photonPersistentDiskPlugin{} func (plugin *photonPersistentDiskPlugin) NewAttacher() (volume.Attacher, error) { photonCloud, err := getCloudProvider(plugin.host.GetCloudProvider()) if err != nil { - glog.Errorf("Photon Controller attacher: NewAttacher failed to get cloud provider") + klog.Errorf("Photon Controller attacher: NewAttacher failed to get cloud provider") return nil, err } @@ -74,22 +74,22 @@ func (attacher *photonPersistentDiskAttacher) Attach(spec *volume.Spec, nodeName hostName := string(nodeName) volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Photon Controller attacher: Attach failed to get volume source") + klog.Errorf("Photon Controller attacher: Attach failed to get volume source") return "", err } attached, err := attacher.photonDisks.DiskIsAttached(context.TODO(), volumeSource.PdID, nodeName) if err != nil { - glog.Warningf("Photon Controller: couldn't check if disk is Attached for host %s, will try attach disk: %+v", hostName, err) + klog.Warningf("Photon Controller: couldn't check if disk is Attached for host %s, will try attach disk: %+v", hostName, err) attached = false } if !attached { - glog.V(4).Infof("Photon Controller: Attach disk called for host %s", hostName) + klog.V(4).Infof("Photon Controller: Attach disk called for host %s", hostName) err = attacher.photonDisks.AttachDisk(context.TODO(), volumeSource.PdID, nodeName) if err != nil { - glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.PdID, nodeName, err) + klog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.PdID, nodeName, err) return "", err } } @@ -105,7 +105,7 @@ func (attacher *photonPersistentDiskAttacher) VolumesAreAttached(specs []*volume for _, spec := range specs { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) + klog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) continue } @@ -115,7 +115,7 @@ func (attacher *photonPersistentDiskAttacher) VolumesAreAttached(specs []*volume } attachedResult, err := 
attacher.photonDisks.DisksAreAttached(context.TODO(), pdIDList, nodeName) if err != nil { - glog.Errorf( + klog.Errorf( "Error checking if volumes (%v) are attached to current node (%q). err=%v", pdIDList, nodeName, err) return volumesAttachedCheck, err @@ -125,7 +125,7 @@ func (attacher *photonPersistentDiskAttacher) VolumesAreAttached(specs []*volume if !attached { spec := volumeSpecMap[pdID] volumesAttachedCheck[spec] = false - glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", pdID, spec.Name()) + klog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", pdID, spec.Name()) } } return volumesAttachedCheck, nil @@ -134,7 +134,7 @@ func (attacher *photonPersistentDiskAttacher) VolumesAreAttached(specs []*volume func (attacher *photonPersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Photon Controller attacher: WaitForAttach failed to get volume source") + klog.Errorf("Photon Controller attacher: WaitForAttach failed to get volume source") return "", err } @@ -154,14 +154,14 @@ func (attacher *photonPersistentDiskAttacher) WaitForAttach(spec *volume.Spec, d for { select { case <-ticker.C: - glog.V(4).Infof("Checking PD %s is attached", volumeSource.PdID) + klog.V(4).Infof("Checking PD %s is attached", volumeSource.PdID) checkPath, err := verifyDevicePath(devicePath) if err != nil { // Log error, if any, and continue checking periodically. See issue #11321 - glog.Warningf("Photon Controller attacher: WaitForAttach with devicePath %s Checking PD %s Error verify path", devicePath, volumeSource.PdID) + klog.Warningf("Photon Controller attacher: WaitForAttach with devicePath %s Checking PD %s Error verify path", devicePath, volumeSource.PdID) } else if checkPath != "" { // A device path has successfully been created for the VMDK - glog.V(4).Infof("Successfully found attached PD %s.", volumeSource.PdID) + klog.V(4).Infof("Successfully found attached PD %s.", volumeSource.PdID) // map path with spec.Name() volName := spec.Name() realPath, _ := filepath.EvalSymlinks(devicePath) @@ -180,7 +180,7 @@ func (attacher *photonPersistentDiskAttacher) WaitForAttach(spec *volume.Spec, d func (attacher *photonPersistentDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Photon Controller attacher: GetDeviceMountPath failed to get volume source") + klog.Errorf("Photon Controller attacher: GetDeviceMountPath failed to get volume source") return "", err } @@ -201,7 +201,7 @@ func (attacher *photonPersistentDiskAttacher) MountDevice(spec *volume.Spec, dev if err != nil { if os.IsNotExist(err) { if err := os.MkdirAll(deviceMountPath, 0750); err != nil { - glog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err) + klog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err) return err } notMnt = true @@ -212,7 +212,7 @@ func (attacher *photonPersistentDiskAttacher) MountDevice(spec *volume.Spec, dev volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Photon Controller attacher: MountDevice failed to get volume source. err: %s", err) + klog.Errorf("Photon Controller attacher: MountDevice failed to get volume source. 
err: %s", err) return err } @@ -226,7 +226,7 @@ func (attacher *photonPersistentDiskAttacher) MountDevice(spec *volume.Spec, dev os.Remove(deviceMountPath) return err } - glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options) + klog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options) } return nil } @@ -243,7 +243,7 @@ var _ volume.DeviceUnmounter = &photonPersistentDiskDetacher{} func (plugin *photonPersistentDiskPlugin) NewDetacher() (volume.Detacher, error) { photonCloud, err := getCloudProvider(plugin.host.GetCloudProvider()) if err != nil { - glog.Errorf("Photon Controller attacher: NewDetacher failed to get cloud provider. err: %s", err) + klog.Errorf("Photon Controller attacher: NewDetacher failed to get cloud provider. err: %s", err) return nil, err } @@ -265,19 +265,19 @@ func (detacher *photonPersistentDiskDetacher) Detach(volumeName string, nodeName attached, err := detacher.photonDisks.DiskIsAttached(context.TODO(), pdID, nodeName) if err != nil { // Log error and continue with detach - glog.Errorf( + klog.Errorf( "Error checking if persistent disk (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", pdID, hostName, err) } if err == nil && !attached { // Volume is already detached from node. - glog.V(4).Infof("detach operation was successful. persistent disk %q is already detached from node %q.", pdID, hostName) + klog.V(4).Infof("detach operation was successful. persistent disk %q is already detached from node %q.", pdID, hostName) return nil } if err := detacher.photonDisks.DetachDisk(context.TODO(), pdID, nodeName); err != nil { - glog.Errorf("Error detaching volume %q: %v", pdID, err) + klog.Errorf("Error detaching volume %q: %v", pdID, err) return err } return nil @@ -292,7 +292,7 @@ func (detacher *photonPersistentDiskDetacher) WaitForDetach(devicePath string, t for { select { case <-ticker.C: - glog.V(4).Infof("Checking device %q is detached.", devicePath) + klog.V(4).Infof("Checking device %q is detached.", devicePath) if pathExists, err := volumeutil.PathExists(devicePath); err != nil { return fmt.Errorf("Error checking if device path exists: %v", err) } else if !pathExists { diff --git a/pkg/volume/photon_pd/attacher_test.go b/pkg/volume/photon_pd/attacher_test.go index cf055e9182f8d..077131610b52f 100644 --- a/pkg/volume/photon_pd/attacher_test.go +++ b/pkg/volume/photon_pd/attacher_test.go @@ -26,8 +26,8 @@ import ( "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" ) func TestGetDeviceName_Volume(t *testing.T) { @@ -254,7 +254,7 @@ func (testcase *testcase) AttachDisk(ctx context.Context, diskName string, nodeN return errors.New("Unexpected AttachDisk call: wrong nodeName") } - glog.V(4).Infof("AttachDisk call: %s, %s, returning %v", diskName, nodeName, expected.ret) + klog.V(4).Infof("AttachDisk call: %s, %s, returning %v", diskName, nodeName, expected.ret) return expected.ret } @@ -279,7 +279,7 @@ func (testcase *testcase) DetachDisk(ctx context.Context, diskName string, nodeN return errors.New("Unexpected DetachDisk call: wrong nodeName") } - glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", diskName, nodeName, expected.ret) + klog.V(4).Infof("DetachDisk call: %s, %s, returning %v", diskName, 
nodeName, expected.ret) return expected.ret } @@ -304,7 +304,7 @@ func (testcase *testcase) DiskIsAttached(ctx context.Context, diskName string, n return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName") } - glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret) + klog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret) return expected.isAttached, expected.ret } diff --git a/pkg/volume/photon_pd/photon_pd.go b/pkg/volume/photon_pd/photon_pd.go index b8d8c936e002c..03cbd9b0ecb42 100644 --- a/pkg/volume/photon_pd/photon_pd.go +++ b/pkg/volume/photon_pd/photon_pd.go @@ -21,11 +21,11 @@ import ( "os" "path" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -62,7 +62,7 @@ func (plugin *photonPersistentDiskPlugin) GetPluginName() string { func (plugin *photonPersistentDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Photon volume plugin: GetVolumeName failed to get volume source") + klog.Errorf("Photon volume plugin: GetVolumeName failed to get volume source") return "", err } @@ -97,7 +97,7 @@ func (plugin *photonPersistentDiskPlugin) NewUnmounter(volName string, podUID ty func (plugin *photonPersistentDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Mounter, error) { vvol, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Photon volume plugin: newMounterInternal failed to get volume source") + klog.Errorf("Photon volume plugin: newMounterInternal failed to get volume source") return nil, err } @@ -202,12 +202,12 @@ func (b *photonPersistentDiskMounter) SetUp(fsGroup *int64) error { // SetUp attaches the disk and bind mounts to the volume path. func (b *photonPersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(4).Infof("Photon Persistent Disk setup %s to %s", b.pdID, dir) + klog.V(4).Infof("Photon Persistent Disk setup %s to %s", b.pdID, dir) // TODO: handle failed mounts here. notmnt, err := b.mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mount point: %s %v", dir, err) + klog.Errorf("cannot validate mount point: %s %v", dir, err) return err } if !notmnt { @@ -215,7 +215,7 @@ func (b *photonPersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error } if err := os.MkdirAll(dir, 0750); err != nil { - glog.Errorf("mkdir failed on disk %s (%v)", dir, err) + klog.Errorf("mkdir failed on disk %s (%v)", dir, err) return err } @@ -223,33 +223,33 @@ func (b *photonPersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error // Perform a bind mount to the full path to allow duplicate mounts of the same PD. 
globalPDPath := makeGlobalPDPath(b.plugin.host, b.pdID) - glog.V(4).Infof("attempting to mount %s", dir) + klog.V(4).Infof("attempting to mount %s", dir) mountOptions := util.JoinMountOptions(options, b.mountOption) err = b.mounter.Mount(globalPDPath, dir, "", mountOptions) if err != nil { notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notmnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notmnt { - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath()) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath()) return err } } os.Remove(dir) - glog.Errorf("Mount of disk %s failed: %v", dir, err) + klog.Errorf("Mount of disk %s failed: %v", dir, err) return err } diff --git a/pkg/volume/photon_pd/photon_util.go b/pkg/volume/photon_pd/photon_util.go index d3e8c29e57aeb..fe3a6c24c6d73 100644 --- a/pkg/volume/photon_pd/photon_util.go +++ b/pkg/volume/photon_pd/photon_util.go @@ -23,9 +23,9 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/photon" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -63,7 +63,7 @@ func scsiHostScan() { name := scsi_path + f.Name() + "/scan" data := []byte("- - -") ioutil.WriteFile(name, data, 0666) - glog.Errorf("scsiHostScan scan for %s", name) + klog.Errorf("scsiHostScan scan for %s", name) } } } @@ -75,7 +75,7 @@ func verifyDevicePath(path string) (string, error) { return path, nil } - glog.V(4).Infof("verifyDevicePath: path not exists yet") + klog.V(4).Infof("verifyDevicePath: path not exists yet") return "", nil } @@ -83,7 +83,7 @@ func verifyDevicePath(path string) (string, error) { func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pdID string, capacityGB int, fstype string, err error) { cloud, err := getCloudProvider(p.plugin.host.GetCloudProvider()) if err != nil { - glog.Errorf("Photon Controller Util: CreateVolume failed to get cloud provider. Error [%v]", err) + klog.Errorf("Photon Controller Util: CreateVolume failed to get cloud provider. 
Error [%v]", err) return "", 0, "", err } @@ -106,20 +106,20 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd volumeOptions.Flavor = value case volume.VolumeParameterFSType: fstype = value - glog.V(4).Infof("Photon Controller Util: Setting fstype to %s", fstype) + klog.V(4).Infof("Photon Controller Util: Setting fstype to %s", fstype) default: - glog.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName()) + klog.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName()) return "", 0, "", fmt.Errorf("Photon Controller Util: invalid option %s for volume plugin %s.", parameter, p.plugin.GetPluginName()) } } pdID, err = cloud.CreateDisk(volumeOptions) if err != nil { - glog.Errorf("Photon Controller Util: failed to CreateDisk. Error [%v]", err) + klog.Errorf("Photon Controller Util: failed to CreateDisk. Error [%v]", err) return "", 0, "", err } - glog.V(4).Infof("Successfully created Photon Controller persistent disk %s", name) + klog.V(4).Infof("Successfully created Photon Controller persistent disk %s", name) return pdID, volSizeGB, "", nil } @@ -127,28 +127,28 @@ func (util *PhotonDiskUtil) CreateVolume(p *photonPersistentDiskProvisioner) (pd func (util *PhotonDiskUtil) DeleteVolume(pd *photonPersistentDiskDeleter) error { cloud, err := getCloudProvider(pd.plugin.host.GetCloudProvider()) if err != nil { - glog.Errorf("Photon Controller Util: DeleteVolume failed to get cloud provider. Error [%v]", err) + klog.Errorf("Photon Controller Util: DeleteVolume failed to get cloud provider. Error [%v]", err) return err } if err = cloud.DeleteDisk(pd.pdID); err != nil { - glog.Errorf("Photon Controller Util: failed to DeleteDisk for pdID %s. Error [%v]", pd.pdID, err) + klog.Errorf("Photon Controller Util: failed to DeleteDisk for pdID %s. 
Error [%v]", pd.pdID, err) return err } - glog.V(4).Infof("Successfully deleted PhotonController persistent disk %s", pd.pdID) + klog.V(4).Infof("Successfully deleted PhotonController persistent disk %s", pd.pdID) return nil } func getCloudProvider(cloud cloudprovider.Interface) (*photon.PCCloud, error) { if cloud == nil { - glog.Errorf("Photon Controller Util: Cloud provider not initialized properly") + klog.Errorf("Photon Controller Util: Cloud provider not initialized properly") return nil, fmt.Errorf("Photon Controller Util: Cloud provider not initialized properly") } pcc := cloud.(*photon.PCCloud) if pcc == nil { - glog.Errorf("Invalid cloud provider: expected Photon Controller") + klog.Errorf("Invalid cloud provider: expected Photon Controller") return nil, fmt.Errorf("Invalid cloud provider: expected Photon Controller") } return pcc, nil diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go index abfb7bf8fd5ea..a0cb57164643c 100644 --- a/pkg/volume/plugins.go +++ b/pkg/volume/plugins.go @@ -22,7 +22,6 @@ import ( "strings" "sync" - "github.com/golang/glog" authenticationv1 "k8s.io/api/authentication/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -34,6 +33,7 @@ import ( "k8s.io/client-go/tools/record" cloudprovider "k8s.io/cloud-provider" csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume/util/recyclerclient" ) @@ -514,7 +514,7 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPlu } if err := pm.prober.Init(); err != nil { // Prober init failure should not affect the initialization of other plugins. - glog.Errorf("Error initializing dynamic plugin prober: %s", err) + klog.Errorf("Error initializing dynamic plugin prober: %s", err) pm.prober = &dummyPluginProber{} } @@ -539,12 +539,12 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPlu } err := plugin.Init(host) if err != nil { - glog.Errorf("Failed to load volume plugin %s, error: %s", name, err.Error()) + klog.Errorf("Failed to load volume plugin %s, error: %s", name, err.Error()) allErrs = append(allErrs, err) continue } pm.plugins[name] = plugin - glog.V(1).Infof("Loaded volume plugin %q", name) + klog.V(1).Infof("Loaded volume plugin %q", name) } return utilerrors.NewAggregate(allErrs) } @@ -560,7 +560,7 @@ func (pm *VolumePluginMgr) initProbedPlugin(probedPlugin VolumePlugin) error { return fmt.Errorf("Failed to load volume plugin %s, error: %s", name, err.Error()) } - glog.V(1).Infof("Loaded volume plugin %q", name) + klog.V(1).Infof("Loaded volume plugin %q", name) return nil } @@ -639,14 +639,14 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) { func (pm *VolumePluginMgr) refreshProbedPlugins() { events, err := pm.prober.Probe() if err != nil { - glog.Errorf("Error dynamically probing plugins: %s", err) + klog.Errorf("Error dynamically probing plugins: %s", err) return // Use cached plugins upon failure. 
} for _, event := range events { if event.Op == ProbeAddOrUpdate { if err := pm.initProbedPlugin(event.Plugin); err != nil { - glog.Errorf("Error initializing dynamically probed plugin %s; error: %s", + klog.Errorf("Error initializing dynamically probed plugin %s; error: %s", event.Plugin.GetPluginName(), err) continue } @@ -655,7 +655,7 @@ func (pm *VolumePluginMgr) refreshProbedPlugins() { // Plugin is not available on ProbeRemove event, only PluginName delete(pm.probedPlugins, event.PluginName) } else { - glog.Errorf("Unknown Operation on PluginName: %s.", + klog.Errorf("Unknown Operation on PluginName: %s.", event.Plugin.GetPluginName()) } } @@ -839,10 +839,10 @@ func (pm *VolumePluginMgr) FindExpandablePluginBySpec(spec *Spec) (ExpandableVol if spec.IsKubeletExpandable() { // for kubelet expandable volumes, return a noop plugin that // returns success for expand on the controller - glog.Warningf("FindExpandablePluginBySpec(%s) -> returning noopExpandableVolumePluginInstance", spec.Name()) + klog.Warningf("FindExpandablePluginBySpec(%s) -> returning noopExpandableVolumePluginInstance", spec.Name()) return &noopExpandableVolumePluginInstance{spec}, nil } - glog.Warningf("FindExpandablePluginBySpec(%s) -> err:%v", spec.Name(), err) + klog.Warningf("FindExpandablePluginBySpec(%s) -> err:%v", spec.Name(), err) return nil, err } diff --git a/pkg/volume/portworx/BUILD b/pkg/volume/portworx/BUILD index 19190114902ab..e43ba8d037fbb 100644 --- a/pkg/volume/portworx/BUILD +++ b/pkg/volume/portworx/BUILD @@ -39,12 +39,12 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/libopenstorage/openstorage/api:go_default_library", "//vendor/github.com/libopenstorage/openstorage/api/client:go_default_library", "//vendor/github.com/libopenstorage/openstorage/api/client/volume:go_default_library", "//vendor/github.com/libopenstorage/openstorage/api/spec:go_default_library", "//vendor/github.com/libopenstorage/openstorage/volume:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/portworx/portworx.go b/pkg/volume/portworx/portworx.go index 8c875e7dd772a..212a4c23d7965 100644 --- a/pkg/volume/portworx/portworx.go +++ b/pkg/volume/portworx/portworx.go @@ -20,11 +20,11 @@ import ( "fmt" "os" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -180,13 +180,13 @@ func (plugin *portworxVolumePlugin) ExpandVolumeDevice( spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) { - glog.V(4).Infof("Expanding: %s from %v to %v", spec.Name(), oldSize, newSize) + klog.V(4).Infof("Expanding: %s from %v to %v", spec.Name(), oldSize, newSize) err := plugin.util.ResizeVolume(spec, newSize, plugin.host) if err != nil { return oldSize, err } - glog.V(4).Infof("Successfully resized %s to %v", spec.Name(), newSize) + klog.V(4).Infof("Successfully resized %s to %v", spec.Name(), newSize) return newSize, nil } @@ -290,9 +290,9 @@ func (b *portworxVolumeMounter) SetUp(fsGroup *int64) error { // SetUpAt attaches the disk and bind mounts to the volume path. 
func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.Infof("Portworx Volume set up. Dir: %s %v %v", dir, !notMnt, err) + klog.Infof("Portworx Volume set up. Dir: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { - glog.Errorf("Cannot validate mountpoint: %s", dir) + klog.Errorf("Cannot validate mountpoint: %s", dir) return err } if !notMnt { @@ -306,7 +306,7 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } - glog.V(4).Infof("Portworx Volume %s attached", b.volumeID) + klog.V(4).Infof("Portworx Volume %s attached", b.volumeID) if err := os.MkdirAll(dir, 0750); err != nil { return err @@ -318,7 +318,7 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if !b.readOnly { volume.SetVolumeOwnership(b, fsGroup) } - glog.Infof("Portworx Volume %s setup at %s", b.volumeID, dir) + klog.Infof("Portworx Volume %s setup at %s", b.volumeID, dir) return nil } @@ -341,7 +341,7 @@ func (c *portworxVolumeUnmounter) TearDown() error { // Unmounts the bind mount, and detaches the disk only if the PD // resource was the last reference to that disk on the kubelet. func (c *portworxVolumeUnmounter) TearDownAt(dir string) error { - glog.Infof("Portworx Volume TearDown of %s", dir) + klog.Infof("Portworx Volume TearDown of %s", dir) if err := c.manager.UnmountVolume(c, dir); err != nil { return err diff --git a/pkg/volume/portworx/portworx_util.go b/pkg/volume/portworx/portworx_util.go index ab68e40726863..62b3e3f4d3178 100644 --- a/pkg/volume/portworx/portworx_util.go +++ b/pkg/volume/portworx/portworx_util.go @@ -19,7 +19,6 @@ package portworx import ( "fmt" - "github.com/golang/glog" osdapi "github.com/libopenstorage/openstorage/api" osdclient "github.com/libopenstorage/openstorage/api/client" volumeclient "github.com/libopenstorage/openstorage/api/client/volume" @@ -28,6 +27,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" @@ -51,11 +51,11 @@ type PortworxVolumeUtil struct { func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int64, map[string]string, error) { driver, err := util.getPortworxDriver(p.plugin.host, false /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. 
Err: %v", err) return "", 0, nil, err } - glog.Infof("Creating Portworx volume for PVC: %v", p.options.PVC.Name) + klog.Infof("Creating Portworx volume for PVC: %v", p.options.PVC.Name) capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] // Portworx Volumes are specified in GiB @@ -95,7 +95,7 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri for k, v := range p.options.PVC.Annotations { if _, present := spec.VolumeLabels[k]; present { - glog.Warningf("not saving annotation: %s=%s in spec labels due to an existing key", k, v) + klog.Warningf("not saving annotation: %s=%s in spec labels due to an existing key", k, v) continue } spec.VolumeLabels[k] = v @@ -103,11 +103,11 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri volumeID, err := driver.Create(locator, source, spec) if err != nil { - glog.Errorf("Error creating Portworx Volume : %v", err) + klog.Errorf("Error creating Portworx Volume : %v", err) return "", 0, nil, err } - glog.Infof("Successfully created Portworx volume for PVC: %v", p.options.PVC.Name) + klog.Infof("Successfully created Portworx volume for PVC: %v", p.options.PVC.Name) return volumeID, requestGiB, nil, err } @@ -115,13 +115,13 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (stri func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { driver, err := util.getPortworxDriver(d.plugin.host, false /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. Err: %v", err) return err } err = driver.Delete(d.volumeID) if err != nil { - glog.Errorf("Error deleting Portworx Volume (%v): %v", d.volName, err) + klog.Errorf("Error deleting Portworx Volume (%v): %v", d.volName, err) return err } return nil @@ -131,13 +131,13 @@ func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error { func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter, attachOptions map[string]string) (string, error) { driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. Err: %v", err) return "", err } devicePath, err := driver.Attach(m.volName, attachOptions) if err != nil { - glog.Errorf("Error attaching Portworx Volume (%v): %v", m.volName, err) + klog.Errorf("Error attaching Portworx Volume (%v): %v", m.volName, err) return "", err } return devicePath, nil @@ -147,13 +147,13 @@ func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter, attachOpt func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. 
Err: %v", err) return err } err = driver.Detach(u.volName, false /*doNotForceDetach*/) if err != nil { - glog.Errorf("Error detaching Portworx Volume (%v): %v", u.volName, err) + klog.Errorf("Error detaching Portworx Volume (%v): %v", u.volName, err) return err } return nil @@ -163,13 +163,13 @@ func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error { func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error { driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. Err: %v", err) return err } err = driver.Mount(m.volName, mountPath) if err != nil { - glog.Errorf("Error mounting Portworx Volume (%v) on Path (%v): %v", m.volName, mountPath, err) + klog.Errorf("Error mounting Portworx Volume (%v) on Path (%v): %v", m.volName, mountPath, err) return err } return nil @@ -179,13 +179,13 @@ func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error { driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. Err: %v", err) return err } err = driver.Unmount(u.volName, mountPath) if err != nil { - glog.Errorf("Error unmounting Portworx Volume (%v) on Path (%v): %v", u.volName, mountPath, err) + klog.Errorf("Error unmounting Portworx Volume (%v) on Path (%v): %v", u.volName, mountPath, err) return err } return nil @@ -194,7 +194,7 @@ func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountP func (util *PortworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource.Quantity, volumeHost volume.VolumeHost) error { driver, err := util.getPortworxDriver(volumeHost, false /*localOnly*/) if err != nil || driver == nil { - glog.Errorf("Failed to get portworx driver. Err: %v", err) + klog.Errorf("Failed to get portworx driver. Err: %v", err) return err } @@ -210,7 +210,7 @@ func (util *PortworxVolumeUtil) ResizeVolume(spec *volume.Spec, newSize resource vol := vols[0] newSizeInBytes := uint64(volutil.RoundUpToGiB(newSize) * volutil.GIB) if vol.Spec.Size >= newSizeInBytes { - glog.Infof("Portworx volume: %s already at size: %d greater than or equal to new "+ + klog.Infof("Portworx volume: %s already at size: %d greater than or equal to new "+ "requested size: %d. Skipping resize.", spec.Name(), vol.Spec.Size, newSizeInBytes) return nil } @@ -247,7 +247,7 @@ func isClientValid(client *osdclient.Client) (bool, error) { _, err := client.Versions(osdapi.OsdVolumePath) if err != nil { - glog.Errorf("portworx client failed driver versions check. Err: %v", err) + klog.Errorf("portworx client failed driver versions check. 
Err: %v", err) return false, err } @@ -285,7 +285,7 @@ func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost, if err != nil { return nil, err } else { - glog.V(4).Infof("Using portworx local service at: %v as api endpoint", volumeHost.GetHostName()) + klog.V(4).Infof("Using portworx local service at: %v as api endpoint", volumeHost.GetHostName()) return volumeclient.VolumeDriver(util.portworxClient), nil } } @@ -301,31 +301,31 @@ func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost, // Create client from portworx service kubeClient := volumeHost.GetKubeClient() if kubeClient == nil { - glog.Error("Failed to get kubeclient when creating portworx client") + klog.Error("Failed to get kubeclient when creating portworx client") return nil, nil } opts := metav1.GetOptions{} svc, err := kubeClient.CoreV1().Services(api.NamespaceSystem).Get(pxServiceName, opts) if err != nil { - glog.Errorf("Failed to get service. Err: %v", err) + klog.Errorf("Failed to get service. Err: %v", err) return nil, err } if svc == nil { - glog.Errorf("Service: %v not found. Consult Portworx docs to deploy it.", pxServiceName) + klog.Errorf("Service: %v not found. Consult Portworx docs to deploy it.", pxServiceName) return nil, err } util.portworxClient, err = createDriverClient(svc.Spec.ClusterIP) if err != nil || util.portworxClient == nil { - glog.Errorf("Failed to connect to portworx service. Err: %v", err) + klog.Errorf("Failed to connect to portworx service. Err: %v", err) return nil, err } - glog.Infof("Using portworx cluster service at: %v as api endpoint", svc.Spec.ClusterIP) + klog.Infof("Using portworx cluster service at: %v as api endpoint", svc.Spec.ClusterIP) } else { - glog.Infof("Using portworx service at: %v as api endpoint", volumeHost.GetHostName()) + klog.Infof("Using portworx service at: %v as api endpoint", volumeHost.GetHostName()) } return volumeclient.VolumeDriver(util.portworxClient), nil diff --git a/pkg/volume/projected/BUILD b/pkg/volume/projected/BUILD index a1f2a07faf7a6..1d7df6fe8ff6e 100644 --- a/pkg/volume/projected/BUILD +++ b/pkg/volume/projected/BUILD @@ -42,7 +42,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/projected/projected.go b/pkg/volume/projected/projected.go index 376199e120e6b..efffb178cd8ac 100644 --- a/pkg/volume/projected/projected.go +++ b/pkg/volume/projected/projected.go @@ -34,7 +34,7 @@ import ( "k8s.io/kubernetes/pkg/volume/secret" volumeutil "k8s.io/kubernetes/pkg/volume/util" - "github.com/golang/glog" + "k8s.io/klog" ) // ProbeVolumePlugins is the entry point for plugin detection in a package. 
@@ -188,7 +188,7 @@ func (s *projectedVolumeMounter) SetUp(fsGroup *int64) error { } func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(3).Infof("Setting up volume %v for pod %v at %v", s.volName, s.pod.UID, dir) + klog.V(3).Infof("Setting up volume %v for pod %v at %v", s.volName, s.pod.UID, dir) wrapped, err := s.plugin.host.NewWrapperMounter(s.volName, wrappedVolumeSpec(), s.pod, *s.opts) if err != nil { @@ -197,7 +197,7 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { data, err := s.collectData() if err != nil { - glog.Errorf("Error preparing data for projected volume %v for pod %v/%v: %s", s.volName, s.pod.Namespace, s.pod.Name, err.Error()) + klog.Errorf("Error preparing data for projected volume %v for pod %v/%v: %s", s.volName, s.pod.Namespace, s.pod.Name, err.Error()) return err } @@ -215,12 +215,12 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if !setupSuccess { unmounter, unmountCreateErr := s.plugin.NewUnmounter(s.volName, s.podUID) if unmountCreateErr != nil { - glog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", s.volName, unmountCreateErr) + klog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", s.volName, unmountCreateErr) return } tearDownErr := unmounter.TearDown() if tearDownErr != nil { - glog.Errorf("error tearing down volume %s with : %v", s.volName, tearDownErr) + klog.Errorf("error tearing down volume %s with : %v", s.volName, tearDownErr) } } }() @@ -228,19 +228,19 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { writerContext := fmt.Sprintf("pod %v/%v volume %v", s.pod.Namespace, s.pod.Name, s.volName) writer, err := volumeutil.NewAtomicWriter(dir, writerContext) if err != nil { - glog.Errorf("Error creating atomic writer: %v", err) + klog.Errorf("Error creating atomic writer: %v", err) return err } err = writer.Write(data) if err != nil { - glog.Errorf("Error writing payload to dir: %v", err) + klog.Errorf("Error writing payload to dir: %v", err) return err } err = volume.SetVolumeOwnership(s, fsGroup) if err != nil { - glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) + klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) return err } setupSuccess = true @@ -266,7 +266,7 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec secretapi, err := s.plugin.getSecret(s.pod.Namespace, source.Secret.Name) if err != nil { if !(errors.IsNotFound(err) && optional) { - glog.Errorf("Couldn't get secret %v/%v: %v", s.pod.Namespace, source.Secret.Name, err) + klog.Errorf("Couldn't get secret %v/%v: %v", s.pod.Namespace, source.Secret.Name, err) errlist = append(errlist, err) continue } @@ -279,7 +279,7 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec } secretPayload, err := secret.MakePayload(source.Secret.Items, secretapi, s.source.DefaultMode, optional) if err != nil { - glog.Errorf("Couldn't get secret payload %v/%v: %v", s.pod.Namespace, source.Secret.Name, err) + klog.Errorf("Couldn't get secret payload %v/%v: %v", s.pod.Namespace, source.Secret.Name, err) errlist = append(errlist, err) continue } @@ -291,7 +291,7 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec configMap, err := s.plugin.getConfigMap(s.pod.Namespace, source.ConfigMap.Name) if err != nil { if !(errors.IsNotFound(err) && optional) { - glog.Errorf("Couldn't 
get configMap %v/%v: %v", s.pod.Namespace, source.ConfigMap.Name, err) + klog.Errorf("Couldn't get configMap %v/%v: %v", s.pod.Namespace, source.ConfigMap.Name, err) errlist = append(errlist, err) continue } @@ -304,7 +304,7 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec } configMapPayload, err := configmap.MakePayload(source.ConfigMap.Items, configMap, s.source.DefaultMode, optional) if err != nil { - glog.Errorf("Couldn't get configMap payload %v/%v: %v", s.pod.Namespace, source.ConfigMap.Name, err) + klog.Errorf("Couldn't get configMap payload %v/%v: %v", s.pod.Namespace, source.ConfigMap.Name, err) errlist = append(errlist, err) continue } @@ -364,7 +364,7 @@ func (c *projectedVolumeUnmounter) TearDown() error { } func (c *projectedVolumeUnmounter) TearDownAt(dir string) error { - glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir) + klog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir) wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID) if err != nil { diff --git a/pkg/volume/quobyte/BUILD b/pkg/volume/quobyte/BUILD index b120d9c2f7c75..1142f5f633a6e 100644 --- a/pkg/volume/quobyte/BUILD +++ b/pkg/volume/quobyte/BUILD @@ -23,9 +23,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/github.com/quobyte/api:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/quobyte/quobyte.go b/pkg/volume/quobyte/quobyte.go index 22be50e8f0cd6..c9d9e773fa7bb 100644 --- a/pkg/volume/quobyte/quobyte.go +++ b/pkg/volume/quobyte/quobyte.go @@ -22,12 +22,12 @@ import ( "path" gostrings "strings" - "github.com/golang/glog" "github.com/pborman/uuid" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -94,16 +94,16 @@ func (plugin *quobytePlugin) CanSupport(spec *volume.Spec) bool { qm, _ := mounter.(*quobyteMounter) pluginDir := plugin.host.GetPluginDir(strings.EscapeQualifiedNameForDisk(quobytePluginName)) if mounted, err := qm.pluginDirIsMounted(pluginDir); mounted && err == nil { - glog.V(4).Infof("quobyte: can support") + klog.V(4).Infof("quobyte: can support") return true } } else { - glog.V(4).Infof("quobyte: Error: %v", err) + klog.V(4).Infof("quobyte: Error: %v", err) } exec := plugin.host.GetExec(plugin.GetPluginName()) if out, err := exec.Run("ls", "/sbin/mount.quobyte"); err == nil { - glog.V(4).Infof("quobyte: can support: %s", string(out)) + klog.V(4).Infof("quobyte: can support: %s", string(out)) return true } @@ -260,7 +260,7 @@ func (mounter *quobyteMounter) SetUpAt(dir string, fsGroup *int64) error { return fmt.Errorf("quobyte: mount failed: %v", err) } - glog.V(4).Infof("quobyte: mount set up: %s", dir) + klog.V(4).Infof("quobyte: mount set up: %s", dir) return nil } diff --git a/pkg/volume/quobyte/quobyte_util.go b/pkg/volume/quobyte/quobyte_util.go index d61879b7bb6cc..107c1454bed8d 100644 --- a/pkg/volume/quobyte/quobyte_util.go +++ b/pkg/volume/quobyte/quobyte_util.go @@ -25,8 +25,8 @@ import ( 
"k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/volume/util" - "github.com/golang/glog" quobyteapi "github.com/quobyte/api" + "k8s.io/klog" ) type quobyteVolumeManager struct { @@ -63,7 +63,7 @@ func (manager *quobyteVolumeManager) createVolume(provisioner *quobyteVolumeProv } } - glog.V(4).Infof("Created Quobyte volume %s", provisioner.volume) + klog.V(4).Infof("Created Quobyte volume %s", provisioner.volume) return &v1.QuobyteVolumeSource{ Registry: provisioner.registry, Volume: provisioner.volume, @@ -96,7 +96,7 @@ func (mounter *quobyteMounter) pluginDirIsMounted(pluginDir string) (bool, error } if mountPoint.Path == pluginDir { - glog.V(4).Infof("quobyte: found mountpoint %s in /proc/mounts", mountPoint.Path) + klog.V(4).Infof("quobyte: found mountpoint %s in /proc/mounts", mountPoint.Path) return true, nil } } diff --git a/pkg/volume/rbd/BUILD b/pkg/volume/rbd/BUILD index 41bf823ac4b54..ccd413eefecc4 100644 --- a/pkg/volume/rbd/BUILD +++ b/pkg/volume/rbd/BUILD @@ -35,7 +35,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/rbd/attacher.go b/pkg/volume/rbd/attacher.go index 70fc15ea618ee..97d944f2b5c9f 100644 --- a/pkg/volume/rbd/attacher.go +++ b/pkg/volume/rbd/attacher.go @@ -21,9 +21,9 @@ import ( "os" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" @@ -107,17 +107,17 @@ func (attacher *rbdAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName t // attach volume onto the node. // This method is idempotent, callers are responsible for retrying on failure. func (attacher *rbdAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) { - glog.V(4).Infof("rbd: waiting for attach volume (name: %s) for pod (name: %s, uid: %s)", spec.Name(), pod.Name, pod.UID) + klog.V(4).Infof("rbd: waiting for attach volume (name: %s) for pod (name: %s, uid: %s)", spec.Name(), pod.Name, pod.UID) mounter, err := attacher.plugin.createMounterFromVolumeSpecAndPod(spec, pod) if err != nil { - glog.Warningf("failed to create mounter: %v", spec) + klog.Warningf("failed to create mounter: %v", spec) return "", err } realDevicePath, err := attacher.manager.AttachDisk(*mounter) if err != nil { return "", err } - glog.V(3).Infof("rbd: successfully wait for attach volume (spec: %s, pool: %s, image: %s) at %s", spec.Name(), mounter.Pool, mounter.Image, realDevicePath) + klog.V(3).Infof("rbd: successfully wait for attach volume (spec: %s, pool: %s, image: %s) at %s", spec.Name(), mounter.Pool, mounter.Image, realDevicePath) return realDevicePath, nil } @@ -138,7 +138,7 @@ func (attacher *rbdAttacher) GetDeviceMountPath(spec *volume.Spec) (string, erro // mount device at the given mount path. // This method is idempotent, callers are responsible for retrying on failure. 
func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { - glog.V(4).Infof("rbd: mouting device %s to %s", devicePath, deviceMountPath) + klog.V(4).Infof("rbd: mouting device %s to %s", devicePath, deviceMountPath) notMnt, err := attacher.mounter.IsLikelyNotMountPoint(deviceMountPath) if err != nil { if os.IsNotExist(err) { @@ -171,7 +171,7 @@ func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, d os.Remove(deviceMountPath) return fmt.Errorf("rbd: failed to mount device %s at %s (fstype: %s), error %v", devicePath, deviceMountPath, fstype, err) } - glog.V(3).Infof("rbd: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype) + klog.V(3).Infof("rbd: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype) return nil } @@ -200,7 +200,7 @@ func (detacher *rbdDetacher) UnmountDevice(deviceMountPath string) error { if pathExists, pathErr := volutil.PathExists(deviceMountPath); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath) return nil } devicePath, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath) @@ -208,23 +208,23 @@ func (detacher *rbdDetacher) UnmountDevice(deviceMountPath string) error { return err } // Unmount the device from the device mount point. - glog.V(4).Infof("rbd: unmouting device mountpoint %s", deviceMountPath) + klog.V(4).Infof("rbd: unmouting device mountpoint %s", deviceMountPath) if err = detacher.mounter.Unmount(deviceMountPath); err != nil { return err } - glog.V(3).Infof("rbd: successfully umount device mountpath %s", deviceMountPath) + klog.V(3).Infof("rbd: successfully umount device mountpath %s", deviceMountPath) - glog.V(4).Infof("rbd: detaching device %s", devicePath) + klog.V(4).Infof("rbd: detaching device %s", devicePath) err = detacher.manager.DetachDisk(detacher.plugin, deviceMountPath, devicePath) if err != nil { return err } - glog.V(3).Infof("rbd: successfully detach device %s", devicePath) + klog.V(3).Infof("rbd: successfully detach device %s", devicePath) err = os.Remove(deviceMountPath) if err != nil { return err } - glog.V(3).Infof("rbd: successfully remove device mount point %s", deviceMountPath) + klog.V(3).Infof("rbd: successfully remove device mount point %s", deviceMountPath) return nil } diff --git a/pkg/volume/rbd/disk_manager.go b/pkg/volume/rbd/disk_manager.go index 6f62f485444e1..6067916da7537 100644 --- a/pkg/volume/rbd/disk_manager.go +++ b/pkg/volume/rbd/disk_manager.go @@ -26,9 +26,9 @@ import ( "fmt" "os" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" @@ -61,7 +61,7 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. globalPDPath := manager.MakeGlobalPDName(*b.rbd) notMnt, err := mounter.IsLikelyNotMountPoint(globalPDPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mountpoint: %s", globalPDPath) + klog.Errorf("cannot validate mountpoint: %s", globalPDPath) return err } if notMnt { @@ -70,7 +70,7 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. 
notMnt, err = mounter.IsLikelyNotMountPoint(volPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mountpoint: %s", volPath) + klog.Errorf("cannot validate mountpoint: %s", volPath) return err } if !notMnt { @@ -78,7 +78,7 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. } if err := os.MkdirAll(volPath, 0750); err != nil { - glog.Errorf("failed to mkdir:%s", volPath) + klog.Errorf("failed to mkdir:%s", volPath) return err } // Perform a bind mount to the full path to allow duplicate mounts of the same disk. @@ -89,10 +89,10 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. mountOptions := util.JoinMountOptions(b.mountOptions, options) err = mounter.Mount(globalPDPath, volPath, "", mountOptions) if err != nil { - glog.Errorf("failed to bind mount:%s", globalPDPath) + klog.Errorf("failed to bind mount:%s", globalPDPath) return err } - glog.V(3).Infof("rbd: successfully bind mount %s to %s with options %v", globalPDPath, volPath, mountOptions) + klog.V(3).Infof("rbd: successfully bind mount %s to %s with options %v", globalPDPath, volPath, mountOptions) if !b.ReadOnly { volume.SetVolumeOwnership(&b, fsGroup) @@ -105,28 +105,28 @@ func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount. func diskTearDown(manager diskManager, c rbdUnmounter, volPath string, mounter mount.Interface) error { notMnt, err := mounter.IsLikelyNotMountPoint(volPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate mountpoint: %s", volPath) + klog.Errorf("cannot validate mountpoint: %s", volPath) return err } if notMnt { - glog.V(3).Infof("volume path %s is not a mountpoint, deleting", volPath) + klog.V(3).Infof("volume path %s is not a mountpoint, deleting", volPath) return os.Remove(volPath) } // Unmount the bind-mount inside this pod. if err := mounter.Unmount(volPath); err != nil { - glog.Errorf("failed to umount %s", volPath) + klog.Errorf("failed to umount %s", volPath) return err } notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath) if mntErr != nil && !os.IsNotExist(mntErr) { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return mntErr } if notMnt { if err := os.Remove(volPath); err != nil { - glog.V(2).Info("Error removing mountpoint ", volPath, ": ", err) + klog.V(2).Info("Error removing mountpoint ", volPath, ": ", err) return err } } diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index 4d4f61fbb263d..d9197428f8ba8 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -23,7 +23,6 @@ import ( "regexp" dstrings "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" @@ -385,7 +385,7 @@ func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*vol // the deprecated format: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/{pool}-image-{image}. // So we will try to check whether this old style global device mount path exist or not. // If existed, extract the sourceName from this old style path, otherwise return an error. 
- glog.V(3).Infof("SourceName %s wrong, fallback to old format", sourceName) + klog.V(3).Infof("SourceName %s wrong, fallback to old format", sourceName) sourceName, err = plugin.getDeviceNameFromOldMountPath(mounter, mountPath) if err != nil { return nil, err @@ -415,7 +415,7 @@ func (plugin *rbdPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, if err != nil { return nil, err } - glog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) + klog.V(5).Infof("globalMapPathUUID: %v, err: %v", globalMapPathUUID, err) globalMapPath := filepath.Dir(globalMapPathUUID) if len(globalMapPath) == 1 { return nil, fmt.Errorf("failed to retrieve volume plugin information from globalMapPathUUID: %v", globalMapPathUUID) @@ -680,10 +680,10 @@ func (r *rbdVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologie r.rbdMounter.Image = image rbd, sizeMB, err := r.manager.CreateImage(r) if err != nil { - glog.Errorf("rbd: create volume failed, err: %v", err) + klog.Errorf("rbd: create volume failed, err: %v", err) return nil, err } - glog.Infof("successfully created rbd image %q", image) + klog.Infof("successfully created rbd image %q", image) pv := new(v1.PersistentVolume) metav1.SetMetaDataAnnotation(&pv.ObjectMeta, volutil.VolumeDynamicallyCreatedByKey, "rbd-dynamic-provisioner") @@ -824,12 +824,12 @@ func (b *rbdMounter) SetUp(fsGroup *int64) error { func (b *rbdMounter) SetUpAt(dir string, fsGroup *int64) error { // diskSetUp checks mountpoints and prevent repeated calls - glog.V(4).Infof("rbd: attempting to setup at %s", dir) + klog.V(4).Infof("rbd: attempting to setup at %s", dir) err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup) if err != nil { - glog.Errorf("rbd: failed to setup at %s %v", dir, err) + klog.Errorf("rbd: failed to setup at %s %v", dir, err) } - glog.V(3).Infof("rbd: successfully setup at %s", dir) + klog.V(3).Infof("rbd: successfully setup at %s", dir) return err } @@ -847,18 +847,18 @@ func (c *rbdUnmounter) TearDown() error { } func (c *rbdUnmounter) TearDownAt(dir string) error { - glog.V(4).Infof("rbd: attempting to teardown at %s", dir) + klog.V(4).Infof("rbd: attempting to teardown at %s", dir) if pathExists, pathErr := volutil.PathExists(dir); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) return nil } err := diskTearDown(c.manager, *c, dir, c.mounter) if err != nil { return err } - glog.V(3).Infof("rbd: successfully teardown at %s", dir) + klog.V(3).Infof("rbd: successfully teardown at %s", dir) return nil } @@ -971,13 +971,13 @@ func (rbd *rbdDiskUnmapper) TearDownDevice(mapPath, _ string) error { if err != nil { return fmt.Errorf("rbd: failed to detach disk: %s\nError: %v", mapPath, err) } - glog.V(4).Infof("rbd: %q is unmapped, deleting the directory", mapPath) + klog.V(4).Infof("rbd: %q is unmapped, deleting the directory", mapPath) err = os.RemoveAll(mapPath) if err != nil { return fmt.Errorf("rbd: failed to delete the directory: %s\nError: %v", mapPath, err) } - glog.V(4).Infof("rbd: successfully detached disk: %s", mapPath) + klog.V(4).Infof("rbd: successfully detached disk: %s", mapPath) return nil } @@ -1077,7 +1077,7 @@ func getVolumeAccessModes(spec *volume.Spec) ([]v1.PersistentVolumeAccessMode, e func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (string, error) { secret, 
err := volutil.GetSecretForPod(pod, secretName, kubeClient) if err != nil { - glog.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName) + klog.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName) return "", fmt.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName) } return parseSecretMap(secret) @@ -1086,7 +1086,7 @@ func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interfa func parsePVSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) { secret, err := volutil.GetSecretForPV(namespace, secretName, rbdPluginName, kubeClient) if err != nil { - glog.Errorf("failed to get secret from [%q/%q]", namespace, secretName) + klog.Errorf("failed to get secret from [%q/%q]", namespace, secretName) return "", fmt.Errorf("failed to get secret from [%q/%q]", namespace, secretName) } return parseSecretMap(secret) diff --git a/pkg/volume/rbd/rbd_util.go b/pkg/volume/rbd/rbd_util.go index 0edd02b35d425..2e3de22644299 100644 --- a/pkg/volume/rbd/rbd_util.go +++ b/pkg/volume/rbd/rbd_util.go @@ -32,11 +32,11 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" fileutil "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/node" @@ -82,21 +82,21 @@ func getRbdDevFromImageAndPool(pool string, image string) (string, bool) { poolFile := path.Join(sys_path, name, "pool") poolBytes, err := ioutil.ReadFile(poolFile) if err != nil { - glog.V(4).Infof("error reading %s: %v", poolFile, err) + klog.V(4).Infof("error reading %s: %v", poolFile, err) continue } if strings.TrimSpace(string(poolBytes)) != pool { - glog.V(4).Infof("device %s is not %q: %q", name, pool, string(poolBytes)) + klog.V(4).Infof("device %s is not %q: %q", name, pool, string(poolBytes)) continue } imgFile := path.Join(sys_path, name, "name") imgBytes, err := ioutil.ReadFile(imgFile) if err != nil { - glog.V(4).Infof("error reading %s: %v", imgFile, err) + klog.V(4).Infof("error reading %s: %v", imgFile, err) continue } if strings.TrimSpace(string(imgBytes)) != image { - glog.V(4).Infof("device %s is not %q: %q", name, image, string(imgBytes)) + klog.V(4).Infof("device %s is not %q: %q", name, image, string(imgBytes)) continue } // Found a match, check if device exists. 
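
The getRbdDevFromImageAndPool hunks above only change the logger, but the loop they instrument is easy to lose in the diff noise. A condensed sketch under stated assumptions: the real function also verifies the device node exists before returning, and the /dev/rbd<id> naming used here is the conventional kernel layout, reproduced for illustration only.

package main

import (
	"fmt"
	"io/ioutil"
	"path"
	"strings"
)

// findRbdDev condenses the sysfs scan traced by the klog.V(4) lines above:
// every /sys/bus/rbd/devices/<id> directory exposes "pool" and "name" files,
// and a device matches when both agree with the requested image.
func findRbdDev(sysPath, pool, image string) (string, bool) {
	entries, err := ioutil.ReadDir(sysPath)
	if err != nil {
		return "", false
	}
	for _, e := range entries {
		read := func(file string) string {
			b, err := ioutil.ReadFile(path.Join(sysPath, e.Name(), file))
			if err != nil {
				return "" // mirrors the "log and continue" on read errors above
			}
			return strings.TrimSpace(string(b))
		}
		if read("pool") == pool && read("name") == image {
			// Assumption: kernel rbd devices are named /dev/rbd<id>; the
			// patched code additionally stats the node before trusting it.
			return "/dev/rbd" + e.Name(), true
		}
	}
	return "", false
}

func main() {
	dev, ok := findRbdDev("/sys/bus/rbd/devices", "kube", "pvc-1234")
	fmt.Println(dev, ok)
}
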
@@ -119,7 +119,7 @@ func getMaxNbds() (int, error) { return 0, fmt.Errorf("rbd-nbd: failed to retrieve max_nbds from %s err: %q", maxNbdsPath, err) } - glog.V(4).Infof("found nbds max parameters file at %s", maxNbdsPath) + klog.V(4).Infof("found nbds max parameters file at %s", maxNbdsPath) maxNbdBytes, err := ioutil.ReadFile(maxNbdsPath) if err != nil { @@ -131,7 +131,7 @@ func getMaxNbds() (int, error) { return 0, fmt.Errorf("rbd-nbd: failed to read max_nbds err: %q", err) } - glog.V(4).Infof("rbd-nbd: max_nbds: %d", maxNbds) + klog.V(4).Infof("rbd-nbd: max_nbds: %d", maxNbds) return maxNbds, nil } @@ -148,7 +148,7 @@ func getNbdDevFromImageAndPool(pool string, image string) (string, bool) { maxNbds, maxNbdsErr := getMaxNbds() if maxNbdsErr != nil { - glog.V(4).Infof("error reading nbds_max %v", maxNbdsErr) + klog.V(4).Infof("error reading nbds_max %v", maxNbdsErr) return "", false } @@ -156,18 +156,18 @@ func getNbdDevFromImageAndPool(pool string, image string) (string, bool) { nbdPath := basePath + strconv.Itoa(i) _, err := os.Lstat(nbdPath) if err != nil { - glog.V(4).Infof("error reading nbd info directory %s: %v", nbdPath, err) + klog.V(4).Infof("error reading nbd info directory %s: %v", nbdPath, err) continue } pidBytes, err := ioutil.ReadFile(path.Join(nbdPath, "pid")) if err != nil { - glog.V(5).Infof("did not find valid pid file in dir %s: %v", nbdPath, err) + klog.V(5).Infof("did not find valid pid file in dir %s: %v", nbdPath, err) continue } cmdlineFileName := path.Join("/proc", strings.TrimSpace(string(pidBytes)), "cmdline") rawCmdline, err := ioutil.ReadFile(cmdlineFileName) if err != nil { - glog.V(4).Infof("failed to read cmdline file %s: %v", cmdlineFileName, err) + klog.V(4).Infof("failed to read cmdline file %s: %v", cmdlineFileName, err) continue } cmdlineArgs := strings.FieldsFunc(string(rawCmdline), func(r rune) bool { @@ -177,17 +177,17 @@ func getNbdDevFromImageAndPool(pool string, image string) (string, bool) { // Only accepted pattern of cmdline is from execRbdMap: // rbd-nbd map pool/image ... 
if len(cmdlineArgs) < 3 || cmdlineArgs[0] != "rbd-nbd" || cmdlineArgs[1] != "map" { - glog.V(4).Infof("nbd device %s is not used by rbd", nbdPath) + klog.V(4).Infof("nbd device %s is not used by rbd", nbdPath) continue } if cmdlineArgs[2] != imgPath { - glog.V(4).Infof("rbd-nbd device %s did not match expected image path: %s with path found: %s", + klog.V(4).Infof("rbd-nbd device %s did not match expected image path: %s with path found: %s", nbdPath, imgPath, cmdlineArgs[2]) continue } devicePath := path.Join("/dev", "nbd"+strconv.Itoa(i)) if _, err := os.Lstat(devicePath); err != nil { - glog.Warningf("Stat device %s for imgpath %s failed %v", devicePath, imgPath, err) + klog.Warningf("Stat device %s for imgpath %s failed %v", devicePath, imgPath, err) continue } return devicePath, true @@ -233,14 +233,14 @@ func execRbdMap(b rbdMounter, rbdCmd string, mon string) ([]byte, error) { func checkRbdNbdTools(e mount.Exec) bool { _, err := e.Run("modprobe", "nbd") if err != nil { - glog.V(5).Infof("rbd-nbd: nbd modprobe failed with error %v", err) + klog.V(5).Infof("rbd-nbd: nbd modprobe failed with error %v", err) return false } if _, err := e.Run("rbd-nbd", "--version"); err != nil { - glog.V(5).Infof("rbd-nbd: getting rbd-nbd version failed with error %v", err) + klog.V(5).Infof("rbd-nbd: getting rbd-nbd version failed with error %v", err) return false } - glog.V(3).Infof("rbd-nbd tools were found.") + klog.V(3).Infof("rbd-nbd tools were found.") return true } @@ -251,7 +251,7 @@ func makePDNameInternal(host volume.VolumeHost, pool string, image string) strin info, err := os.Stat(deprecatedDir) if err == nil && info.IsDir() { // The device mount path has already been created with the deprecated format, return it. - glog.V(5).Infof("Deprecated format path %s found", deprecatedDir) + klog.V(5).Infof("Deprecated format path %s found", deprecatedDir) return deprecatedDir } // Return the canonical format path. @@ -331,7 +331,7 @@ func (util *RBDUtil) rbdUnlock(b rbdMounter) error { args = append(args, secret_opt...) cmd, err = b.exec.Run("rbd", args...) output = string(cmd) - glog.V(4).Infof("lock list output %q", output) + klog.V(4).Infof("lock list output %q", output) if err != nil { return err } @@ -349,9 +349,9 @@ func (util *RBDUtil) rbdUnlock(b rbdMounter) error { args = append(args, secret_opt...) cmd, err = b.exec.Run("rbd", args...) 
if err == nil { - glog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) + klog.V(4).Infof("rbd: successfully remove lock (locker_id: %s) on image: %s/%s with id %s mon %s", lock_id, b.Pool, b.Image, b.Id, mon) } else { - glog.Warningf("rbd: failed to remove lock (lock_id: %s) on image: %s/%s with id %s mon %s: %v", lock_id, b.Pool, b.Image, b.Id, mon, err) + klog.Warningf("rbd: failed to remove lock (lock_id: %s) on image: %s/%s with id %s mon %s: %v", lock_id, b.Pool, b.Image, b.Id, mon, err) } } @@ -424,19 +424,19 @@ func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) { } mon := util.kernelRBDMonitorsOpt(b.Mon) - glog.V(1).Infof("rbd: map mon %s", mon) + klog.V(1).Infof("rbd: map mon %s", mon) _, err := b.exec.Run("modprobe", "rbd") if err != nil { - glog.Warningf("rbd: failed to load rbd kernel module:%v", err) + klog.Warningf("rbd: failed to load rbd kernel module:%v", err) } output, err = execRbdMap(b, "rbd", mon) if err != nil { if !nbdToolsFound { - glog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output)) + klog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output)) return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output)) } - glog.V(3).Infof("rbd: map failed with %v, %s. Retrying with rbd-nbd", err, string(output)) + klog.V(3).Infof("rbd: map failed with %v, %s. Retrying with rbd-nbd", err, string(output)) errList := []error{err} outputList := output output, err = execRbdMap(b, "rbd-nbd", mon) @@ -481,7 +481,7 @@ func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, devic if err != nil { return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %v", device, err, output)) } - glog.V(3).Infof("rbd: successfully unmap device %s", device) + klog.V(3).Infof("rbd: successfully unmap device %s", device) // Currently, we don't persist rbd info on the disk, but for backward // compatbility, we need to clean it if found. 
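
The AttachDisk hunk above keeps the kernel-client-first, rbd-nbd-fallback ordering; only its log calls change. Because that retry logic spans several log statements, here is a condensed, illustrative sketch of just the control flow. It shells out directly and drops the monitor/id/keyring argument construction that execRbdMap performs, so treat it as a reading aid, not the plugin's implementation:

package main

import (
	"fmt"
	"os/exec"
)

// mapImage sketches the fallback the patched AttachDisk logs at V(1)/V(3):
// try the kernel rbd client first, and retry with rbd-nbd only when the
// userspace tools were detected earlier (see checkRbdNbdTools above).
func mapImage(pool, image string, nbdToolsFound bool) (string, error) {
	spec := pool + "/" + image
	out, err := exec.Command("rbd", "map", spec).CombinedOutput()
	if err == nil {
		return string(out), nil
	}
	if !nbdToolsFound {
		return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, out)
	}
	// Fallback path, logged at V(3) in the patch: "Retrying with rbd-nbd".
	out, err = exec.Command("rbd-nbd", "map", spec).CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("rbd-nbd: map failed %v, output: %s", err, out)
	}
	return string(out), nil
}

func main() {
	dev, err := mapImage("kube", "pvc-1234", false)
	fmt.Println(dev, err)
}
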
@@ -491,13 +491,13 @@ func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, devic return err } if exists { - glog.V(3).Infof("rbd: old rbd.json is found under %s, cleaning it", deviceMountPath) + klog.V(3).Infof("rbd: old rbd.json is found under %s, cleaning it", deviceMountPath) err = util.cleanOldRBDFile(plugin, rbdFile) if err != nil { - glog.Errorf("rbd: failed to clean %s", rbdFile) + klog.Errorf("rbd: failed to clean %s", rbdFile) return err } - glog.V(3).Infof("rbd: successfully remove %s", rbdFile) + klog.V(3).Infof("rbd: successfully remove %s", rbdFile) } return nil } @@ -508,7 +508,7 @@ func (util *RBDUtil) DetachBlockDisk(disk rbdDiskUnmapper, mapPath string) error if pathExists, pathErr := volutil.PathExists(mapPath); pathErr != nil { return fmt.Errorf("Error checking if path exists: %v", pathErr) } else if !pathExists { - glog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath) + klog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath) return nil } // If we arrive here, device is no longer used, see if we need to logout of the target @@ -529,10 +529,10 @@ func (util *RBDUtil) DetachBlockDisk(disk rbdDiskUnmapper, mapPath string) error // Any nbd device must be unmapped by rbd-nbd if strings.HasPrefix(device, "/dev/nbd") { rbdCmd = "rbd-nbd" - glog.V(4).Infof("rbd: using rbd-nbd for unmap function") + klog.V(4).Infof("rbd: using rbd-nbd for unmap function") } else { rbdCmd = "rbd" - glog.V(4).Infof("rbd: using rbd for unmap function") + klog.V(4).Infof("rbd: using rbd for unmap function") } // rbd unmap @@ -540,7 +540,7 @@ func (util *RBDUtil) DetachBlockDisk(disk rbdDiskUnmapper, mapPath string) error if err != nil { return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %s", device, err, string(output))) } - glog.V(3).Infof("rbd: successfully unmap device %s", device) + klog.V(3).Infof("rbd: successfully unmap device %s", device) return nil } @@ -564,7 +564,7 @@ func (util *RBDUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error { } if err != nil { - glog.Errorf("failed to load rbd info from %s: %v", rbdFile, err) + klog.Errorf("failed to load rbd info from %s: %v", rbdFile, err) return err } // Remove rbd lock if found. 
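
The DetachBlockDisk hunks above swap two V(4) lines around the unmap-tool choice, and the choice itself, a one-line prefix test, is easy to miss among the log churn. A condensed, illustrative sketch (direct exec.Command instead of the plugin's mount.Exec, error text shortened):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// unmapDevice condenses the selection shown in the diff: anything mapped at
// /dev/nbd* was created by rbd-nbd and must be unmapped by it, while kernel
// rbd devices use the plain rbd client.
func unmapDevice(device string) error {
	rbdCmd := "rbd"
	if strings.HasPrefix(device, "/dev/nbd") {
		rbdCmd = "rbd-nbd"
	}
	out, err := exec.Command(rbdCmd, "unmap", device).CombinedOutput()
	if err != nil {
		return fmt.Errorf("rbd: failed to unmap device %s, error %v, output: %s", device, err, out)
	}
	return nil
}

func main() {
	fmt.Println(unmapDevice("/dev/rbd0"))
}
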
@@ -589,9 +589,9 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVo volSz := fmt.Sprintf("%d", sz) mon := util.kernelRBDMonitorsOpt(p.Mon) if p.rbdMounter.imageFormat == rbdImageFormat2 { - glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) + klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) } else { - glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) + klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) } args := []string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", p.rbdMounter.imageFormat} if p.rbdMounter.imageFormat == rbdImageFormat2 { @@ -603,7 +603,7 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVo output, err = p.exec.Run("rbd", args...) if err != nil { - glog.Warningf("failed to create rbd image, output %v", string(output)) + klog.Warningf("failed to create rbd image, output %v", string(output)) return nil, 0, fmt.Errorf("failed to create rbd image: %v, command output: %s", err, string(output)) } @@ -621,19 +621,19 @@ func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error { return fmt.Errorf("error %v, rbd output: %v", err, rbdOutput) } if found { - glog.Info("rbd is still being used ", p.rbdMounter.Image) + klog.Info("rbd is still being used ", p.rbdMounter.Image) return fmt.Errorf("rbd image %s/%s is still being used, rbd output: %v", p.rbdMounter.Pool, p.rbdMounter.Image, rbdOutput) } // rbd rm. mon := util.kernelRBDMonitorsOpt(p.rbdMounter.Mon) - glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) + klog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret) output, err = p.exec.Run("rbd", "rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key="+p.rbdMounter.adminSecret) if err == nil { return nil } - glog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output)) + klog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output)) return fmt.Errorf("error %v, rbd output: %v", err, string(output)) } @@ -658,14 +658,14 @@ func (util *RBDUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resourc // rbd resize. 
mon := util.kernelRBDMonitorsOpt(rbdExpander.rbdMounter.Mon) - glog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret) + klog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key %s", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminId, rbdExpander.rbdMounter.adminSecret) output, err = rbdExpander.exec.Run("rbd", "resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminId, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret) if err == nil { return newSizeQuant, nil } - glog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output)) + klog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output)) return oldSize, err } @@ -701,14 +701,14 @@ func (util *RBDUtil) rbdInfo(b *rbdMounter) (int, error) { // # image does not exist (exit=2) // rbd: error opening image 1234: (2) No such file or directory // - glog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) + klog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) cmd, err = b.exec.Run("rbd", "info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret) output = string(cmd) if err, ok := err.(*exec.Error); ok { if err.Err == exec.ErrNotFound { - glog.Errorf("rbd cmd not found") + klog.Errorf("rbd cmd not found") // fail fast if rbd command is not found. return 0, err } @@ -767,14 +767,14 @@ func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, string, error) { // # image does not exist (exit=2) // rbd: error opening image kubernetes-dynamic-pvc-: (2) No such file or directory // - glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) + klog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, id, secret) cmd, err = b.exec.Run("rbd", "status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret) output = string(cmd) if err, ok := err.(*exec.Error); ok { if err.Err == exec.ErrNotFound { - glog.Errorf("rbd cmd not found") + klog.Errorf("rbd cmd not found") // fail fast if command not found return false, output, err } @@ -786,10 +786,10 @@ func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, string, error) { } if strings.Contains(output, imageWatcherStr) { - glog.V(4).Infof("rbd: watchers on %s: %s", b.Image, output) + klog.V(4).Infof("rbd: watchers on %s: %s", b.Image, output) return true, output, nil } else { - glog.Warningf("rbd: no watchers on %s", b.Image) + klog.Warningf("rbd: no watchers on %s", b.Image) return false, output, nil } } diff --git a/pkg/volume/scaleio/BUILD b/pkg/volume/scaleio/BUILD index 767ad4a40334e..526fcbe582bfd 100644 --- a/pkg/volume/scaleio/BUILD +++ b/pkg/volume/scaleio/BUILD @@ -25,7 +25,7 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//vendor/github.com/codedellemc/goscaleio/types/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -52,7 +52,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//vendor/github.com/codedellemc/goscaleio:go_default_library", 
"//vendor/github.com/codedellemc/goscaleio/types/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/scaleio/sio_client.go b/pkg/volume/scaleio/sio_client.go index 28d8ba79a4b5d..2c7041f478ea7 100644 --- a/pkg/volume/scaleio/sio_client.go +++ b/pkg/volume/scaleio/sio_client.go @@ -33,7 +33,7 @@ import ( sio "github.com/codedellemc/goscaleio" siotypes "github.com/codedellemc/goscaleio/types/v1" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -97,7 +97,7 @@ func newSioClient(gateway, username, password string, sslEnabled bool, exec moun } r, err := regexp.Compile(`^emc-vol-\w*-\w*$`) if err != nil { - glog.Error(log("failed to compile regex: %v", err)) + klog.Error(log("failed to compile regex: %v", err)) return nil, err } client.diskRegex = r @@ -113,10 +113,10 @@ func (c *sioClient) init() error { if c.inited { return nil } - glog.V(4).Infoln(log("initializing scaleio client")) + klog.V(4).Infoln(log("initializing scaleio client")) client, err := sio.NewClientWithArgs(c.gateway, "", c.insecure, c.certsEnabled) if err != nil { - glog.Error(log("failed to create client: %v", err)) + klog.Error(log("failed to create client: %v", err)) return err } c.client = client @@ -127,24 +127,24 @@ func (c *sioClient) init() error { Username: c.username, Password: c.password}, ); err != nil { - glog.Error(log("client authentication failed: %v", err)) + klog.Error(log("client authentication failed: %v", err)) return err } // retrieve system if c.system, err = c.findSystem(c.sysName); err != nil { - glog.Error(log("unable to find system %s: %v", c.sysName, err)) + klog.Error(log("unable to find system %s: %v", c.sysName, err)) return err } // retrieve protection domain if c.protectionDomain, err = c.findProtectionDomain(c.pdName); err != nil { - glog.Error(log("unable to find protection domain %s: %v", c.protectionDomain, err)) + klog.Error(log("unable to find protection domain %s: %v", c.protectionDomain, err)) return err } // retrieve storage pool if c.storagePool, err = c.findStoragePool(c.spName); err != nil { - glog.Error(log("unable to find storage pool %s: %v", c.storagePool, err)) + klog.Error(log("unable to find storage pool %s: %v", c.storagePool, err)) return err } c.inited = true @@ -157,7 +157,7 @@ func (c *sioClient) Volumes() ([]*siotypes.Volume, error) { } vols, err := c.getVolumes() if err != nil { - glog.Error(log("failed to retrieve volumes: %v", err)) + klog.Error(log("failed to retrieve volumes: %v", err)) return nil, err } return vols, nil @@ -170,12 +170,12 @@ func (c *sioClient) Volume(id sioVolumeID) (*siotypes.Volume, error) { vols, err := c.getVolumesByID(id) if err != nil { - glog.Error(log("failed to retrieve volume by id: %v", err)) + klog.Error(log("failed to retrieve volume by id: %v", err)) return nil, err } vol := vols[0] if vol == nil { - glog.V(4).Info(log("volume not found, id %s", id)) + klog.V(4).Info(log("volume not found, id %s", id)) return nil, errors.New("volume not found") } return vol, nil @@ -186,20 +186,20 @@ func (c *sioClient) FindVolume(name string) (*siotypes.Volume, error) { return nil, err } - glog.V(4).Info(log("searching for volume %s", name)) + klog.V(4).Info(log("searching for volume %s", name)) volumes, err := c.getVolumesByName(name) if err != nil { - glog.Error(log("failed to find volume by name %v", err)) + klog.Error(log("failed to find volume by name %v", err)) return nil, err } for _, volume := range volumes { if volume.Name == name { - 
glog.V(4).Info(log("found volume %s", name)) + klog.V(4).Info(log("found volume %s", name)) return volume, nil } } - glog.V(4).Info(log("volume not found, name %s", name)) + klog.V(4).Info(log("volume not found, name %s", name)) return nil, errors.New("volume not found") } @@ -215,7 +215,7 @@ func (c *sioClient) CreateVolume(name string, sizeGB int64) (*siotypes.Volume, e } createResponse, err := c.client.CreateVolume(params, c.storagePool.Name) if err != nil { - glog.Error(log("failed to create volume %s: %v", name, err)) + klog.Error(log("failed to create volume %s: %v", name, err)) return nil, err } return c.Volume(sioVolumeID(createResponse.ID)) @@ -225,13 +225,13 @@ func (c *sioClient) CreateVolume(name string, sizeGB int64) (*siotypes.Volume, e // is true, ScaleIO will allow other SDC to map to that volume. func (c *sioClient) AttachVolume(id sioVolumeID, multipleMappings bool) error { if err := c.init(); err != nil { - glog.Error(log("failed to init'd client in attach volume: %v", err)) + klog.Error(log("failed to init'd client in attach volume: %v", err)) return err } iid, err := c.IID() if err != nil { - glog.Error(log("failed to get instanceIID for attach volume: %v", err)) + klog.Error(log("failed to get instanceIID for attach volume: %v", err)) return err } @@ -244,11 +244,11 @@ func (c *sioClient) AttachVolume(id sioVolumeID, multipleMappings bool) error { volClient.Volume = &siotypes.Volume{ID: string(id)} if err := volClient.MapVolumeSdc(params); err != nil { - glog.Error(log("failed to attach volume id %s: %v", id, err)) + klog.Error(log("failed to attach volume id %s: %v", id, err)) return err } - glog.V(4).Info(log("volume %s attached successfully", id)) + klog.V(4).Info(log("volume %s attached successfully", id)) return nil } @@ -307,11 +307,11 @@ func (c *sioClient) IID() (string, error) { } sdc, err := c.sysClient.FindSdc("SdcGUID", guid) if err != nil { - glog.Error(log("failed to retrieve sdc info %s", err)) + klog.Error(log("failed to retrieve sdc info %s", err)) return "", err } c.instanceID = sdc.Sdc.ID - glog.V(4).Info(log("retrieved instanceID %s", c.instanceID)) + klog.V(4).Info(log("retrieved instanceID %s", c.instanceID)) } return c.instanceID, nil } @@ -320,11 +320,11 @@ func (c *sioClient) IID() (string, error) { // it attempts to fallback to using drv_cfg binary func (c *sioClient) getGUID() (string, error) { if c.sdcGUID == "" { - glog.V(4).Info(log("sdc guid label not set, falling back to using drv_cfg")) + klog.V(4).Info(log("sdc guid label not set, falling back to using drv_cfg")) cmd := c.getSdcCmd() output, err := c.exec.Run(cmd, "--query_guid") if err != nil { - glog.Error(log("drv_cfg --query_guid failed: %v", err)) + klog.Error(log("drv_cfg --query_guid failed: %v", err)) return "", err } c.sdcGUID = strings.TrimSpace(string(output)) @@ -343,7 +343,7 @@ func (c *sioClient) getSioDiskPaths() ([]os.FileInfo, error) { // sioDiskIDPath may not exist yet which is fine return []os.FileInfo{}, nil } - glog.Error(log("failed to ReadDir %s: %v", sioDiskIDPath, err)) + klog.Error(log("failed to ReadDir %s: %v", sioDiskIDPath, err)) return nil, err } @@ -391,7 +391,7 @@ func (c *sioClient) Devs() (map[string]string, error) { volumeID := parts[3] devPath, err := filepath.EvalSymlinks(fmt.Sprintf("%s/%s", sioDiskIDPath, f.Name())) if err != nil { - glog.Error(log("devicepath-to-volID mapping error: %v", err)) + klog.Error(log("devicepath-to-volID mapping error: %v", err)) return nil, err } // map volumeID to devicePath @@ -417,18 +417,18 @@ func (c *sioClient) 
WaitForAttachedDevice(token string) (string, error) { case <-ticker.C: devMap, err := c.Devs() if err != nil { - glog.Error(log("failed while waiting for volume to attach: %v", err)) + klog.Error(log("failed while waiting for volume to attach: %v", err)) return "", err } go func() { - glog.V(4).Info(log("waiting for volume %s to be mapped/attached", token)) + klog.V(4).Info(log("waiting for volume %s to be mapped/attached", token)) }() if path, ok := devMap[token]; ok { - glog.V(4).Info(log("device %s mapped to vol %s", path, token)) + klog.V(4).Info(log("device %s mapped to vol %s", path, token)) return path, nil } case <-timer.C: - glog.Error(log("timed out while waiting for volume to be mapped to a device")) + klog.Error(log("timed out while waiting for volume to be mapped to a device")) return "", fmt.Errorf("volume attach timeout") } } @@ -451,18 +451,18 @@ func (c *sioClient) WaitForDetachedDevice(token string) error { case <-ticker.C: devMap, err := c.Devs() if err != nil { - glog.Error(log("failed while waiting for volume to unmap/detach: %v", err)) + klog.Error(log("failed while waiting for volume to unmap/detach: %v", err)) return err } go func() { - glog.V(4).Info(log("waiting for volume %s to be unmapped/detached", token)) + klog.V(4).Info(log("waiting for volume %s to be unmapped/detached", token)) }() // cant find vol id, then ok. if _, ok := devMap[token]; !ok { return nil } case <-timer.C: - glog.Error(log("timed out while waiting for volume %s to be unmapped/detached", token)) + klog.Error(log("timed out while waiting for volume %s to be unmapped/detached", token)) return fmt.Errorf("volume detach timeout") } } @@ -477,7 +477,7 @@ func (c *sioClient) findSystem(sysname string) (sys *siotypes.System, err error) } systems, err := c.client.GetInstance("") if err != nil { - glog.Error(log("failed to retrieve instances: %v", err)) + klog.Error(log("failed to retrieve instances: %v", err)) return nil, err } for _, sys = range systems { @@ -485,7 +485,7 @@ func (c *sioClient) findSystem(sysname string) (sys *siotypes.System, err error) return sys, nil } } - glog.Error(log("system %s not found", sysname)) + klog.Error(log("system %s not found", sysname)) return nil, errors.New("system not found") } @@ -494,13 +494,13 @@ func (c *sioClient) findProtectionDomain(pdname string) (*siotypes.ProtectionDom if c.sysClient != nil { protectionDomain, err := c.sysClient.FindProtectionDomain("", pdname, "") if err != nil { - glog.Error(log("failed to retrieve protection domains: %v", err)) + klog.Error(log("failed to retrieve protection domains: %v", err)) return nil, err } c.pdClient.ProtectionDomain = protectionDomain return protectionDomain, nil } - glog.Error(log("protection domain %s not set", pdname)) + klog.Error(log("protection domain %s not set", pdname)) return nil, errors.New("protection domain not set") } @@ -509,13 +509,13 @@ func (c *sioClient) findStoragePool(spname string) (*siotypes.StoragePool, error if c.pdClient != nil { sp, err := c.pdClient.FindStoragePool("", spname, "") if err != nil { - glog.Error(log("failed to retrieve storage pool: %v", err)) + klog.Error(log("failed to retrieve storage pool: %v", err)) return nil, err } c.spClient.StoragePool = sp return sp, nil } - glog.Error(log("storage pool %s not set", spname)) + klog.Error(log("storage pool %s not set", spname)) return nil, errors.New("storage pool not set") } diff --git a/pkg/volume/scaleio/sio_mgr.go b/pkg/volume/scaleio/sio_mgr.go index 02fbbee6eefb0..a322276b1ddae 100644 --- 
a/pkg/volume/scaleio/sio_mgr.go +++ b/pkg/volume/scaleio/sio_mgr.go @@ -22,7 +22,7 @@ import ( "k8s.io/kubernetes/pkg/util/mount" - "github.com/golang/glog" + "k8s.io/klog" siotypes "github.com/codedellemc/goscaleio/types/v1" ) @@ -57,22 +57,22 @@ func newSioMgr(configs map[string]string, exec mount.Exec) (*sioMgr, error) { // getClient safely returns an sioInterface func (m *sioMgr) getClient() (sioInterface, error) { if m.client == nil { - glog.V(4).Info(log("creating scaleio client")) + klog.V(4).Info(log("creating scaleio client")) configs := m.configData username := configs[confKey.username] password := configs[confKey.password] gateway := configs[confKey.gateway] b, err := strconv.ParseBool(configs[confKey.sslEnabled]) if err != nil { - glog.Error(log("failed to parse sslEnabled, must be either \"true\" or \"false\"")) + klog.Error(log("failed to parse sslEnabled, must be either \"true\" or \"false\"")) return nil, err } certsEnabled := b - glog.V(4).Info(log("creating new client for gateway %s", gateway)) + klog.V(4).Info(log("creating new client for gateway %s", gateway)) client, err := newSioClient(gateway, username, password, certsEnabled, m.exec) if err != nil { - glog.Error(log("failed to create scaleio client: %v", err)) + klog.Error(log("failed to create scaleio client: %v", err)) return nil, err } @@ -85,7 +85,7 @@ func (m *sioMgr) getClient() (sioInterface, error) { m.client = client - glog.V(4).Info(log("client created successfully [gateway=%s]", gateway)) + klog.V(4).Info(log("client created successfully [gateway=%s]", gateway)) } return m.client, nil } @@ -97,13 +97,13 @@ func (m *sioMgr) CreateVolume(volName string, sizeGB int64) (*siotypes.Volume, e return nil, err } - glog.V(4).Infof("scaleio: creating volume %s", volName) + klog.V(4).Infof("scaleio: creating volume %s", volName) vol, err := client.CreateVolume(volName, sizeGB) if err != nil { - glog.V(4).Infof("scaleio: failed creating volume %s: %v", volName, err) + klog.V(4).Infof("scaleio: failed creating volume %s: %v", volName, err) return nil, err } - glog.V(4).Infof("scaleio: created volume %s successfully", volName) + klog.V(4).Infof("scaleio: created volume %s successfully", volName) return vol, nil } @@ -112,17 +112,17 @@ func (m *sioMgr) CreateVolume(volName string, sizeGB int64) (*siotypes.Volume, e func (m *sioMgr) AttachVolume(volName string, multipleMappings bool) (string, error) { client, err := m.getClient() if err != nil { - glog.Error(log("attach volume failed: %v", err)) + klog.Error(log("attach volume failed: %v", err)) return "", err } - glog.V(4).Infoln(log("attaching volume %s", volName)) + klog.V(4).Infoln(log("attaching volume %s", volName)) iid, err := client.IID() if err != nil { - glog.Error(log("failed to get instanceID")) + klog.Error(log("failed to get instanceID")) return "", err } - glog.V(4).Info(log("attaching volume %s to host instance %s", volName, iid)) + klog.V(4).Info(log("attaching volume %s to host instance %s", volName, iid)) devs, err := client.Devs() if err != nil { @@ -131,29 +131,29 @@ func (m *sioMgr) AttachVolume(volName string, multipleMappings bool) (string, er vol, err := client.FindVolume(volName) if err != nil { - glog.Error(log("failed to find volume %s: %v", volName, err)) + klog.Error(log("failed to find volume %s: %v", volName, err)) return "", err } // handle vol if already attached if len(vol.MappedSdcInfo) > 0 { if m.isSdcMappedToVol(iid, vol) { - glog.V(4).Info(log("skippping attachment, volume %s already attached to sdc %s", volName, iid)) + 
klog.V(4).Info(log("skippping attachment, volume %s already attached to sdc %s", volName, iid)) return devs[vol.ID], nil } } // attach volume, get deviceName if err := client.AttachVolume(sioVolumeID(vol.ID), multipleMappings); err != nil { - glog.Error(log("attachment for volume %s failed :%v", volName, err)) + klog.Error(log("attachment for volume %s failed :%v", volName, err)) return "", err } device, err := client.WaitForAttachedDevice(vol.ID) if err != nil { - glog.Error(log("failed while waiting for device to attach: %v", err)) + klog.Error(log("failed while waiting for device to attach: %v", err)) return "", err } - glog.V(4).Info(log("volume %s attached successfully as %s to instance %s", volName, device, iid)) + klog.V(4).Info(log("volume %s attached successfully as %s to instance %s", volName, device, iid)) return device, nil } @@ -165,7 +165,7 @@ func (m *sioMgr) IsAttached(volName string) (bool, error) { } iid, err := client.IID() if err != nil { - glog.Error("scaleio: failed to get instanceID") + klog.Error("scaleio: failed to get instanceID") return false, err } @@ -184,7 +184,7 @@ func (m *sioMgr) DetachVolume(volName string) error { } iid, err := client.IID() if err != nil { - glog.Error(log("failed to get instanceID: %v", err)) + klog.Error(log("failed to get instanceID: %v", err)) return err } @@ -193,7 +193,7 @@ func (m *sioMgr) DetachVolume(volName string) error { return err } if !m.isSdcMappedToVol(iid, vol) { - glog.Warning(log( + klog.Warning(log( "skipping detached, vol %s not attached to instance %s", volName, iid, )) @@ -201,11 +201,11 @@ func (m *sioMgr) DetachVolume(volName string) error { } if err := client.DetachVolume(sioVolumeID(vol.ID)); err != nil { - glog.Error(log("failed to detach vol %s: %v", volName, err)) + klog.Error(log("failed to detach vol %s: %v", volName, err)) return err } - glog.V(4).Info(log("volume %s detached successfully", volName)) + klog.V(4).Info(log("volume %s detached successfully", volName)) return nil } @@ -223,11 +223,11 @@ func (m *sioMgr) DeleteVolume(volName string) error { } if err := client.DeleteVolume(sioVolumeID(vol.ID)); err != nil { - glog.Error(log("failed to delete volume %s: %v", volName, err)) + klog.Error(log("failed to delete volume %s: %v", volName, err)) return err } - glog.V(4).Info(log("deleted volume %s successfully", volName)) + klog.V(4).Info(log("deleted volume %s successfully", volName)) return nil } @@ -235,7 +235,7 @@ func (m *sioMgr) DeleteVolume(volName string) error { // isSdcMappedToVol returns true if the sdc is mapped to the volume func (m *sioMgr) isSdcMappedToVol(sdcID string, vol *siotypes.Volume) bool { if len(vol.MappedSdcInfo) == 0 { - glog.V(4).Info(log("no attachment found")) + klog.V(4).Info(log("no attachment found")) return false } diff --git a/pkg/volume/scaleio/sio_plugin.go b/pkg/volume/scaleio/sio_plugin.go index c82b6d0fb1378..eda8556f7a9d5 100644 --- a/pkg/volume/scaleio/sio_plugin.go +++ b/pkg/volume/scaleio/sio_plugin.go @@ -19,9 +19,9 @@ package scaleio import ( "errors" - "github.com/golang/glog" api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/volume" ) @@ -108,7 +108,7 @@ func (p *sioPlugin) NewMounter( // NewUnmounter creates a representation of the volume to unmount func (p *sioPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmounter, error) { - glog.V(4).Info(log("Unmounter for %s", specName)) + klog.V(4).Info(log("Unmounter for %s", specName)) return &sioVolume{ podUID: 
podUID, @@ -161,7 +161,7 @@ var _ volume.DeletableVolumePlugin = &sioPlugin{} func (p *sioPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { attribs, err := getVolumeSourceAttribs(spec) if err != nil { - glog.Error(log("deleter failed to extract volume attributes from spec: %v", err)) + klog.Error(log("deleter failed to extract volume attributes from spec: %v", err)) return nil, err } @@ -187,11 +187,11 @@ func (p *sioPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { var _ volume.ProvisionableVolumePlugin = &sioPlugin{} func (p *sioPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { - glog.V(4).Info(log("creating Provisioner")) + klog.V(4).Info(log("creating Provisioner")) configData := options.Parameters if configData == nil { - glog.Error(log("provisioner missing parameters, unable to continue")) + klog.Error(log("provisioner missing parameters, unable to continue")) return nil, errors.New("option parameters missing") } diff --git a/pkg/volume/scaleio/sio_util.go b/pkg/volume/scaleio/sio_util.go index d4a1edcd8f79c..e1b5116318fd8 100644 --- a/pkg/volume/scaleio/sio_util.go +++ b/pkg/volume/scaleio/sio_util.go @@ -24,7 +24,7 @@ import ( "path" "strconv" - "github.com/golang/glog" + "k8s.io/klog" api "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/volume" @@ -140,7 +140,7 @@ func validateConfigs(config map[string]string) error { func applyConfigDefaults(config map[string]string) { b, err := strconv.ParseBool(config[confKey.sslEnabled]) if err != nil { - glog.Warning(log("failed to parse param sslEnabled, setting it to false")) + klog.Warning(log("failed to parse param sslEnabled, setting it to false")) b = false } config[confKey.sslEnabled] = strconv.FormatBool(b) @@ -148,7 +148,7 @@ func applyConfigDefaults(config map[string]string) { config[confKey.fsType] = defaultString(config[confKey.fsType], "xfs") b, err = strconv.ParseBool(config[confKey.readOnly]) if err != nil { - glog.Warning(log("failed to parse param readOnly, setting it to false")) + klog.Warning(log("failed to parse param readOnly, setting it to false")) b = false } config[confKey.readOnly] = strconv.FormatBool(b) @@ -163,21 +163,21 @@ func defaultString(val, defVal string) string { // loadConfig loads configuration data from a file on disk func loadConfig(configName string) (map[string]string, error) { - glog.V(4).Info(log("loading config file %s", configName)) + klog.V(4).Info(log("loading config file %s", configName)) file, err := os.Open(configName) if err != nil { - glog.Error(log("failed to open config file %s: %v", configName, err)) + klog.Error(log("failed to open config file %s: %v", configName, err)) return nil, err } defer file.Close() data := map[string]string{} if err := gob.NewDecoder(file).Decode(&data); err != nil { - glog.Error(log("failed to parse config data %s: %v", configName, err)) + klog.Error(log("failed to parse config data %s: %v", configName, err)) return nil, err } applyConfigDefaults(data) if err := validateConfigs(data); err != nil { - glog.Error(log("failed to load ConfigMap %s: %v", err)) + klog.Error(log("failed to load ConfigMap %s: %v", configName, err)) return nil, err } @@ -186,31 +186,31 @@ func loadConfig(configName string) (map[string]string, error) { // saveConfig saves the configuration data to local disk func saveConfig(configName string, data map[string]string) error { - glog.V(4).Info(log("saving config file %s", configName)) + klog.V(4).Info(log("saving config file %s", configName)) dir := path.Dir(configName) if _, err := os.Stat(dir);
err != nil { if !os.IsNotExist(err) { return err } - glog.V(4).Info(log("creating config dir for config data: %s", dir)) + klog.V(4).Info(log("creating config dir for config data: %s", dir)) if err := os.MkdirAll(dir, 0750); err != nil { - glog.Error(log("failed to create config data dir %v", err)) + klog.Error(log("failed to create config data dir: %v", err)) return err } } file, err := os.Create(configName) if err != nil { - glog.V(4).Info(log("failed to save config data file %s: %v", configName, err)) + klog.V(4).Info(log("failed to save config data file %s: %v", configName, err)) return err } defer file.Close() if err := gob.NewEncoder(file).Encode(data); err != nil { - glog.Error(log("failed to save config %s: %v", configName, err)) + klog.Error(log("failed to save config %s: %v", configName, err)) return err } - glog.V(4).Info(log("config data file saved successfully as %s", configName)) + klog.V(4).Info(log("config data file saved successfully as %s", configName)) return nil } @@ -221,7 +221,7 @@ func attachSecret(plug *sioPlugin, namespace string, configData map[string]strin kubeClient := plug.host.GetKubeClient() secretMap, err := volutil.GetSecretForPV(namespace, secretRefName, sioPluginName, kubeClient) if err != nil { - glog.Error(log("failed to get secret: %v", err)) + klog.Error(log("failed to get secret: %v", err)) return secretNotFoundErr } // merge secret data @@ -251,11 +251,11 @@ func getSdcGUIDLabel(plug *sioPlugin) (string, error) { } label, ok := nodeLabels[sdcGUIDLabelName] if !ok { - glog.V(4).Info(log("node label %s not found", sdcGUIDLabelName)) + klog.V(4).Info(log("node label %s not found", sdcGUIDLabelName)) return "", nil } - glog.V(4).Info(log("found node label %s=%s", sdcGUIDLabelName, label)) + klog.V(4).Info(log("found node label %s=%s", sdcGUIDLabelName, label)) return label, nil } @@ -284,7 +284,7 @@ func getVolumeSourceAttribs(spec *volume.Spec) (*volSourceAttribs, error) { attribs.readOnly = pSource.ReadOnly } else { msg := log("failed to get ScaleIOVolumeSource or ScaleIOPersistentVolumeSource from spec") - glog.Error(msg) + klog.Error(msg) return nil, errors.New(msg) } return attribs, nil diff --git a/pkg/volume/scaleio/sio_volume.go b/pkg/volume/scaleio/sio_volume.go index 420afb36ba940..4a5cb35221eb0 100644 --- a/pkg/volume/scaleio/sio_volume.go +++ b/pkg/volume/scaleio/sio_volume.go @@ -23,12 +23,12 @@ import ( "strconv" "strings" - "github.com/golang/glog" api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -85,20 +85,20 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error { v.plugin.volumeMtx.LockKey(v.volSpecName) defer v.plugin.volumeMtx.UnlockKey(v.volSpecName) - glog.V(4).Info(log("setting up volume for PV.spec %s", v.volSpecName)) + klog.V(4).Info(log("setting up volume for PV.spec %s", v.volSpecName)) if err := v.setSioMgr(); err != nil { - glog.Error(log("setup failed to create scalio manager: %v", err)) + klog.Error(log("setup failed to create scaleio manager: %v", err)) return err } mounter := v.plugin.host.GetMounter(v.plugin.GetPluginName()) notDevMnt, err := mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.Error(log("IsLikelyNotMountPoint test failed for dir %v", dir)) + klog.Error(log("IsLikelyNotMountPoint test failed for dir %v", dir))
return err } if !notDevMnt { - glog.V(4).Info(log("skipping setup, dir %s already a mount point", v.volName)) + klog.V(4).Info(log("skipping setup, dir %s already a mount point", dir)) return nil } @@ -114,12 +114,12 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error { } } } - glog.V(4).Info(log("multiple mapping enabled = %v", enableMultiMaps)) + klog.V(4).Info(log("multiple mapping enabled = %v", enableMultiMaps)) volName := v.volName devicePath, err := v.sioMgr.AttachVolume(volName, enableMultiMaps) if err != nil { - glog.Error(log("setup of volume %v: %v", v.volSpecName, err)) + klog.Error(log("setup of volume %v failed: %v", v.volSpecName, err)) return err } options := []string{} @@ -134,31 +134,31 @@ func (v *sioVolume) SetUpAt(dir string, fsGroup *int64) error { options = append(options, "ro") } - glog.V(4).Info(log("mounting device %s -> %s", devicePath, dir)) + klog.V(4).Info(log("mounting device %s -> %s", devicePath, dir)) if err := os.MkdirAll(dir, 0750); err != nil { - glog.Error(log("failed to create dir %#v: %v", dir, err)) + klog.Error(log("failed to create dir %#v: %v", dir, err)) return err } - glog.V(4).Info(log("setup created mount point directory %s", dir)) + klog.V(4).Info(log("setup created mount point directory %s", dir)) diskMounter := util.NewSafeFormatAndMountFromHost(v.plugin.GetPluginName(), v.plugin.host) err = diskMounter.FormatAndMount(devicePath, dir, v.fsType, options) if err != nil { - glog.Error(log("mount operation failed during setup: %v", err)) + klog.Error(log("mount operation failed during setup: %v", err)) if err := os.Remove(dir); err != nil && !os.IsNotExist(err) { - glog.Error(log("failed to remove dir %s during a failed mount at setup: %v", dir, err)) + klog.Error(log("failed to remove dir %s during a failed mount at setup: %v", dir, err)) return err } return err } if !v.readOnly && fsGroup != nil { - glog.V(4).Info(log("applying value FSGroup ownership")) + klog.V(4).Info(log("applying FSGroup ownership")) volume.SetVolumeOwnership(v, fsGroup) } - glog.V(4).Info(log("successfully setup PV %s: volume %s mapped as %s mounted at %s", v.volSpecName, v.volName, devicePath, dir)) + klog.V(4).Info(log("successfully setup PV %s: volume %s mapped as %s mounted at %s", v.volSpecName, v.volName, devicePath, dir)) return nil } @@ -188,21 +188,21 @@ func (v *sioVolume) TearDownAt(dir string) error { mounter := v.plugin.host.GetMounter(v.plugin.GetPluginName()) dev, _, err := mount.GetDeviceNameFromMount(mounter, dir) if err != nil { - glog.Errorf(log("failed to get reference count for volume: %s", dir)) + klog.Error(log("failed to get reference count for volume: %s", dir)) return err } - glog.V(4).Info(log("attempting to unmount %s", dir)) + klog.V(4).Info(log("attempting to unmount %s", dir)) if err := util.UnmountPath(dir, mounter); err != nil { - glog.Error(log("teardown failed while unmounting dir %s: %v ", dir, err)) + klog.Error(log("teardown failed while unmounting dir %s: %v", dir, err)) return err } - glog.V(4).Info(log("dir %s unmounted successfully", dir)) + klog.V(4).Info(log("dir %s unmounted successfully", dir)) // detach/unmap deviceBusy, err := mounter.DeviceOpened(dev) if err != nil { - glog.Error(log("teardown unable to get status for device %s: %v", dev, err)) + klog.Error(log("teardown unable to get status for device %s: %v", dev, err)) return err } @@ -210,16 +210,16 @@ func (v *sioVolume) TearDownAt(dir string) error { // use "last attempt wins" strategy to detach volume from node // only allow volume to detach
when it is not busy (not being used by other pods) if !deviceBusy { - glog.V(4).Info(log("teardown is attempting to detach/unmap volume for PV %s", v.volSpecName)) + klog.V(4).Info(log("teardown is attempting to detach/unmap volume for PV %s", v.volSpecName)) if err := v.resetSioMgr(); err != nil { - glog.Error(log("teardown failed, unable to reset scalio mgr: %v", err)) + klog.Error(log("teardown failed, unable to reset scaleio mgr: %v", err)) } volName := v.volName if err := v.sioMgr.DetachVolume(volName); err != nil { - glog.Warning(log("warning: detaching failed for volume %s: %v", volName, err)) + klog.Warning(log("detaching failed for volume %s: %v", volName, err)) return nil } - glog.V(4).Infof(log("teardown of volume %v detached successfully", volName)) + klog.V(4).Info(log("teardown of volume %v detached successfully", volName)) } return nil } @@ -230,20 +230,20 @@ func (v *sioVolume) TearDownAt(dir string) error { var _ volume.Deleter = &sioVolume{} func (v *sioVolume) Delete() error { - glog.V(4).Info(log("deleting pvc %s", v.volSpecName)) + klog.V(4).Info(log("deleting pvc %s", v.volSpecName)) if err := v.setSioMgrFromSpec(); err != nil { - glog.Error(log("delete failed while setting sio manager: %v", err)) + klog.Error(log("delete failed while setting sio manager: %v", err)) return err } err := v.sioMgr.DeleteVolume(v.volName) if err != nil { - glog.Error(log("failed to delete volume %s: %v", v.volName, err)) + klog.Error(log("failed to delete volume %s: %v", v.volName, err)) return err } - glog.V(4).Info(log("successfully deleted PV %s with volume %s", v.volSpecName, v.volName)) + klog.V(4).Info(log("successfully deleted PV %s with volume %s", v.volSpecName, v.volName)) return nil } @@ -253,7 +253,7 @@ func (v *sioVolume) Delete() error { var _ volume.Provisioner = &sioVolume{} func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.TopologySelectorTerm) (*api.PersistentVolume, error) { - glog.V(4).Info(log("attempting to dynamically provision pvc %v", v.options.PVC.Name)) + klog.V(4).Info(log("attempting to dynamically provision pvc %v", v.options.PVC.Name)) if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) { return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes()) @@ -278,13 +278,13 @@ func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.To if volSizeBytes < eightGig { volSizeGB = int64(util.RoundUpSize(eightGig, oneGig)) - glog.V(4).Info(log("capacity less than 8Gi found, adjusted to %dGi", volSizeGB)) + klog.V(4).Info(log("capacity less than 8Gi found, adjusted to %dGi", volSizeGB)) } // create sio manager if err := v.setSioMgrFromConfig(); err != nil { - glog.Error(log("provision failed while setting up sio mgr: %v", err)) + klog.Error(log("provision failed while setting up sio mgr: %v", err)) return nil, err } @@ -292,7 +292,7 @@ func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.To volName := genName vol, err := v.sioMgr.CreateVolume(volName, volSizeGB) if err != nil { - glog.Error(log("provision failed while creating volume: %v", err)) + klog.Error(log("provision failed while creating volume: %v", err)) return nil, err } @@ -300,12 +300,12 @@ func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.To v.configData[confKey.volumeName] = volName sslEnabled, err := strconv.ParseBool(v.configData[confKey.sslEnabled]) if err != nil { -
glog.Warning(log("failed to parse parameter sslEnabled, setting to false")) + klog.Warning(log("failed to parse parameter sslEnabled, setting to false")) sslEnabled = false } readOnly, err := strconv.ParseBool(v.configData[confKey.readOnly]) if err != nil { - glog.Warning(log("failed to parse parameter readOnly, setting it to false")) + klog.Warning(log("failed to parse parameter readOnly, setting it to false")) readOnly = false } @@ -348,24 +348,24 @@ func (v *sioVolume) Provision(selectedNode *api.Node, allowedTopologies []api.To pv.Spec.AccessModes = v.plugin.GetAccessModes() } - glog.V(4).Info(log("provisioner created pv %v and volume %s successfully", pvName, vol.Name)) + klog.V(4).Info(log("provisioner created pv %v and volume %s successfully", pvName, vol.Name)) return pv, nil } // setSioMgr creates scaleio mgr from cached config data if found // otherwise, setups new config data and create mgr func (v *sioVolume) setSioMgr() error { - glog.V(4).Info(log("setting up sio mgr for spec %s", v.volSpecName)) + klog.V(4).Info(log("setting up sio mgr for spec %s", v.volSpecName)) podDir := v.plugin.host.GetPodPluginDir(v.podUID, sioPluginName) configName := path.Join(podDir, sioConfigFileName) if v.sioMgr == nil { configData, err := loadConfig(configName) // try to load config if exist if err != nil { if !os.IsNotExist(err) { - glog.Error(log("failed to load config %s : %v", configName, err)) + klog.Error(log("failed to load config %s : %v", configName, err)) return err } - glog.V(4).Info(log("previous config file not found, creating new one")) + klog.V(4).Info(log("previous config file not found, creating new one")) // prepare config data configData = make(map[string]string) mapVolumeSpec(configData, v.spec) @@ -376,31 +376,31 @@ func (v *sioVolume) setSioMgr() error { configData[confKey.volSpecName] = v.volSpecName if err := validateConfigs(configData); err != nil { - glog.Error(log("config setup failed: %s", err)) + klog.Error(log("config setup failed: %s", err)) return err } // persist config if err := saveConfig(configName, configData); err != nil { - glog.Error(log("failed to save config data: %v", err)) + klog.Error(log("failed to save config data: %v", err)) return err } } // merge in secret if err := attachSecret(v.plugin, v.secretNamespace, configData); err != nil { - glog.Error(log("failed to load secret: %v", err)) + klog.Error(log("failed to load secret: %v", err)) return err } // merge in Sdc Guid label value if err := attachSdcGUID(v.plugin, configData); err != nil { - glog.Error(log("failed to retrieve sdc guid: %v", err)) + klog.Error(log("failed to retrieve sdc guid: %v", err)) return err } mgr, err := newSioMgr(configData, v.plugin.host.GetExec(v.plugin.GetPluginName())) if err != nil { - glog.Error(log("failed to reset sio manager: %v", err)) + klog.Error(log("failed to reset sio manager: %v", err)) return err } @@ -417,7 +417,7 @@ func (v *sioVolume) resetSioMgr() error { // load config data from disk configData, err := loadConfig(configName) if err != nil { - glog.Error(log("failed to load config data: %v", err)) + klog.Error(log("failed to load config data: %v", err)) return err } v.secretName = configData[confKey.secretName] @@ -427,20 +427,20 @@ func (v *sioVolume) resetSioMgr() error { // attach secret if err := attachSecret(v.plugin, v.secretNamespace, configData); err != nil { - glog.Error(log("failed to load secret: %v", err)) + klog.Error(log("failed to load secret: %v", err)) return err } // merge in Sdc Guid label value if err := attachSdcGUID(v.plugin, 
configData); err != nil { - glog.Error(log("failed to retrieve sdc guid: %v", err)) + klog.Error(log("failed to retrieve sdc guid: %v", err)) return err } mgr, err := newSioMgr(configData, v.plugin.host.GetExec(v.plugin.GetPluginName())) if err != nil { - glog.Error(log("failed to reset scaleio mgr: %v", err)) + klog.Error(log("failed to reset scaleio mgr: %v", err)) return err } v.sioMgr = mgr @@ -451,14 +451,14 @@ func (v *sioVolume) resetSioMgr() error { // setSioFromConfig sets up scaleio mgr from an available config data map // designed to be called from dynamic provisioner func (v *sioVolume) setSioMgrFromConfig() error { - glog.V(4).Info(log("setting scaleio mgr from available config")) + klog.V(4).Info(log("setting scaleio mgr from available config")) if v.sioMgr == nil { applyConfigDefaults(v.configData) v.configData[confKey.volSpecName] = v.volSpecName if err := validateConfigs(v.configData); err != nil { - glog.Error(log("config data setup failed: %s", err)) + klog.Error(log("config data setup failed: %s", err)) return err } @@ -469,14 +469,14 @@ func (v *sioVolume) setSioMgrFromConfig() error { } if err := attachSecret(v.plugin, v.secretNamespace, data); err != nil { - glog.Error(log("failed to load secret: %v", err)) + klog.Error(log("failed to load secret: %v", err)) return err } mgr, err := newSioMgr(data, v.plugin.host.GetExec(v.plugin.GetPluginName())) if err != nil { - glog.Error(log("failed while setting scaleio mgr from config: %v", err)) + klog.Error(log("failed while setting scaleio mgr from config: %v", err)) return err } v.sioMgr = mgr @@ -487,7 +487,7 @@ func (v *sioVolume) setSioMgrFromConfig() error { // setSioMgrFromSpec sets the scaleio manager from a spec object. // The spec may be complete or incomplete depending on lifecycle phase. 
func (v *sioVolume) setSioMgrFromSpec() error { - glog.V(4).Info(log("setting sio manager from spec")) + klog.V(4).Info(log("setting sio manager from spec")) if v.sioMgr == nil { // get config data form spec volume source configData := map[string]string{} @@ -499,20 +499,20 @@ func (v *sioVolume) setSioMgrFromSpec() error { configData[confKey.volSpecName] = v.volSpecName if err := validateConfigs(configData); err != nil { - glog.Error(log("config setup failed: %s", err)) + klog.Error(log("config setup failed: %s", err)) return err } // attach secret object to config data if err := attachSecret(v.plugin, v.secretNamespace, configData); err != nil { - glog.Error(log("failed to load secret: %v", err)) + klog.Error(log("failed to load secret: %v", err)) return err } mgr, err := newSioMgr(configData, v.plugin.host.GetExec(v.plugin.GetPluginName())) if err != nil { - glog.Error(log("failed to reset sio manager: %v", err)) + klog.Error(log("failed to reset sio manager: %v", err)) return err } v.sioMgr = mgr diff --git a/pkg/volume/scaleio/sio_volume_test.go b/pkg/volume/scaleio/sio_volume_test.go index dbc7b8361e815..199be8c918dcc 100644 --- a/pkg/volume/scaleio/sio_volume_test.go +++ b/pkg/volume/scaleio/sio_volume_test.go @@ -23,7 +23,7 @@ import ( "strings" "testing" - "github.com/golang/glog" + "k8s.io/klog" api "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -328,7 +328,7 @@ func TestVolumeProvisioner(t *testing.T) { t.Errorf("expected volume name to be %s, got %s", actualVolName, vol.Name) } if vol.SizeInKb != 8*1024*1024 { - glog.V(4).Info(log("unexpected volume size")) + klog.V(4).Info(log("unexpected volume size")) } // mount dynamic vol diff --git a/pkg/volume/secret/BUILD b/pkg/volume/secret/BUILD index e07df1456f357..d44d59dd61433 100644 --- a/pkg/volume/secret/BUILD +++ b/pkg/volume/secret/BUILD @@ -22,7 +22,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/secret/secret.go b/pkg/volume/secret/secret.go index 92445b8d0581c..14485181f92c3 100644 --- a/pkg/volume/secret/secret.go +++ b/pkg/volume/secret/secret.go @@ -19,11 +19,11 @@ package secret import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -179,7 +179,7 @@ func (b *secretVolumeMounter) SetUp(fsGroup *int64) error { } func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir) + klog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir) // Wrap EmptyDir, let it do the setup. 
wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts) @@ -191,7 +191,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { secret, err := b.getSecret(b.pod.Namespace, b.source.SecretName) if err != nil { if !(errors.IsNotFound(err) && optional) { - glog.Errorf("Couldn't get secret %v/%v: %v", b.pod.Namespace, b.source.SecretName, err) + klog.Errorf("Couldn't get secret %v/%v: %v", b.pod.Namespace, b.source.SecretName, err) return err } secret = &v1.Secret{ @@ -203,7 +203,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } totalBytes := totalSecretBytes(secret) - glog.V(3).Infof("Received secret %v/%v containing (%v) pieces of data, %v total bytes", + klog.V(3).Infof("Received secret %v/%v containing (%v) pieces of data, %v total bytes", b.pod.Namespace, b.source.SecretName, len(secret.Data), @@ -227,12 +227,12 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if !setupSuccess { unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID) if unmountCreateErr != nil { - glog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr) + klog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr) return } tearDownErr := unmounter.TearDown() if tearDownErr != nil { - glog.Errorf("error tearing down volume %s with : %v", b.volName, tearDownErr) + klog.Errorf("error tearing down volume %s: %v", b.volName, tearDownErr) } } }() @@ -240,19 +240,19 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName) writer, err := volumeutil.NewAtomicWriter(dir, writerContext) if err != nil { - glog.Errorf("Error creating atomic writer: %v", err) + klog.Errorf("Error creating atomic writer: %v", err) return err } err = writer.Write(payload) if err != nil { - glog.Errorf("Error writing payload to dir: %v", err) + klog.Errorf("Error writing payload to dir: %v", err) return err } err = volume.SetVolumeOwnership(b, fsGroup) if err != nil { - glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) + klog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) return err } setupSuccess = true @@ -282,7 +282,7 @@ func MakePayload(mappings []v1.KeyToPath, secret *v1.Secret, defaultMode *int32, continue } errMsg := "references non-existent secret key" - glog.Errorf(errMsg) + klog.Error(errMsg) return nil, fmt.Errorf(errMsg) } diff --git a/pkg/volume/storageos/BUILD b/pkg/volume/storageos/BUILD index ee84092563fd1..c89c5fee55fc1 100644 --- a/pkg/volume/storageos/BUILD +++ b/pkg/volume/storageos/BUILD @@ -24,9 +24,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/storageos/go-api:go_default_library", "//vendor/github.com/storageos/go-api/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/storageos/storageos.go b/pkg/volume/storageos/storageos.go index 1130ae5b5edbf..9cb56b353aa80 100644 --- a/pkg/volume/storageos/storageos.go +++ b/pkg/volume/storageos/storageos.go @@ -24,7 +24,7 @@ import ( "path/filepath" "strings" -
"github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -343,14 +343,14 @@ func (b *storageosMounter) CanMount() error { func (b *storageosMounter) SetUp(fsGroup *int64) error { // Need a namespace to find the volume, try pod's namespace if not set. if b.volNamespace == "" { - glog.V(2).Infof("Setting StorageOS volume namespace to pod namespace: %s", b.podNamespace) + klog.V(2).Infof("Setting StorageOS volume namespace to pod namespace: %s", b.podNamespace) b.volNamespace = b.podNamespace } // Attach the StorageOS volume as a block device devicePath, err := b.manager.AttachVolume(b) if err != nil { - glog.Errorf("Failed to attach StorageOS volume %s: %s", b.volName, err.Error()) + klog.Errorf("Failed to attach StorageOS volume %s: %s", b.volName, err.Error()) return err } @@ -360,7 +360,7 @@ func (b *storageosMounter) SetUp(fsGroup *int64) error { if err != nil { return err } - glog.V(4).Infof("Successfully mounted StorageOS volume %s into global mount directory", b.volName) + klog.V(4).Infof("Successfully mounted StorageOS volume %s into global mount directory", b.volName) // Bind mount the volume into the pod return b.SetUpAt(b.GetPath(), fsGroup) @@ -369,9 +369,9 @@ func (b *storageosMounter) SetUp(fsGroup *int64) error { // SetUp bind mounts the disk global mount to the give volume path. func (b *storageosMounter) SetUpAt(dir string, fsGroup *int64) error { notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("StorageOS volume set up: %s %v %v", dir, !notMnt, err) + klog.V(4).Infof("StorageOS volume set up: %s %v %v", dir, !notMnt, err) if err != nil && !os.IsNotExist(err) { - glog.Errorf("Cannot validate mount point: %s %v", dir, err) + klog.Errorf("Cannot validate mount point: %s %v", dir, err) return err } if !notMnt { @@ -379,7 +379,7 @@ func (b *storageosMounter) SetUpAt(dir string, fsGroup *int64) error { } if err = os.MkdirAll(dir, 0750); err != nil { - glog.Errorf("mkdir failed on disk %s (%v)", dir, err) + klog.Errorf("mkdir failed on disk %s (%v)", dir, err) return err } @@ -391,39 +391,39 @@ func (b *storageosMounter) SetUpAt(dir string, fsGroup *int64) error { mountOptions := util.JoinMountOptions(b.mountOptions, options) globalPDPath := makeGlobalPDName(b.plugin.host, b.pvName, b.volNamespace, b.volName) - glog.V(4).Infof("Attempting to bind mount to pod volume at %s", dir) + klog.V(4).Infof("Attempting to bind mount to pod volume at %s", dir) err = b.mounter.Mount(globalPDPath, dir, "", mountOptions) if err != nil { notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notMnt { - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) + klog.Errorf("%s is still mounted, despite call to unmount(). 
Will try again next sync loop.", dir) return err } } os.Remove(dir) - glog.Errorf("Mount of disk %s failed: %v", dir, err) + klog.Errorf("Mount of disk %s failed: %v", dir, err) return err } if !b.readOnly { volume.SetVolumeOwnership(b, fsGroup) } - glog.V(4).Infof("StorageOS volume setup complete on %s", dir) + klog.V(4).Infof("StorageOS volume setup complete on %s", dir) return nil } @@ -487,7 +487,7 @@ func (b *storageosUnmounter) GetPath() string { // resource was the last reference to that disk on the kubelet. func (b *storageosUnmounter) TearDown() error { if len(b.volNamespace) == 0 || len(b.volName) == 0 { - glog.Warningf("volNamespace: %q, volName: %q not set, skipping TearDown", b.volNamespace, b.volName) + klog.Warningf("volNamespace: %q, volName: %q not set, skipping TearDown", b.volNamespace, b.volName) return fmt.Errorf("pvName not specified for TearDown, waiting for next sync loop") } // Unmount from pod @@ -495,7 +495,7 @@ func (b *storageosUnmounter) TearDown() error { err := b.TearDownAt(mountPath) if err != nil { - glog.Errorf("Unmount from pod failed: %v", err) + klog.Errorf("Unmount from pod failed: %v", err) return err } @@ -503,25 +503,25 @@ func (b *storageosUnmounter) TearDown() error { globalPDPath := makeGlobalPDName(b.plugin.host, b.pvName, b.volNamespace, b.volName) devicePath, _, err := mount.GetDeviceNameFromMount(b.mounter, globalPDPath) if err != nil { - glog.Errorf("Detach failed when getting device from global mount: %v", err) + klog.Errorf("Detach failed when getting device from global mount: %v", err) return err } // Unmount from plugin's disk global mount dir. err = b.TearDownAt(globalPDPath) if err != nil { - glog.Errorf("Detach failed during unmount: %v", err) + klog.Errorf("Detach failed during unmount: %v", err) return err } // Detach loop device err = b.manager.DetachVolume(b, devicePath) if err != nil { - glog.Errorf("Detach device %s failed for volume %s: %v", devicePath, b.pvName, err) + klog.Errorf("Detach device %s failed for volume %s: %v", devicePath, b.pvName, err) return err } - glog.V(4).Infof("Successfully unmounted StorageOS volume %s and detached devices", b.pvName) + klog.V(4).Infof("Successfully unmounted StorageOS volume %s and detached devices", b.pvName) return nil } @@ -530,10 +530,10 @@ func (b *storageosUnmounter) TearDown() error { // resource was the last reference to that disk on the kubelet. 
func (b *storageosUnmounter) TearDownAt(dir string) error { if err := util.UnmountPath(dir, b.mounter); err != nil { - glog.V(4).Infof("Unmounted StorageOS volume %s failed with: %v", b.pvName, err) + klog.V(4).Infof("Unmounting StorageOS volume %s failed with: %v", b.pvName, err) } if err := b.manager.UnmountVolume(b); err != nil { - glog.V(4).Infof("Mount reference for volume %s could not be removed from StorageOS: %v", b.pvName, err) + klog.V(4).Infof("Mount reference for volume %s could not be removed from StorageOS: %v", b.pvName, err) } return nil } @@ -616,7 +616,7 @@ func (c *storageosProvisioner) Provision(selectedNode *v1.Node, allowedTopologie vol, err := c.manager.CreateVolume(c) if err != nil { - glog.Errorf("failed to create volume: %v", err) + klog.Errorf("failed to create volume: %v", err) return nil, err } if vol.FSType == "" { @@ -717,7 +717,7 @@ func getAPICfg(spec *volume.Spec, pod *v1.Pod, kubeClient clientset.Interface) ( func parsePodSecret(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (*storageosAPIConfig, error) { secret, err := util.GetSecretForPod(pod, secretName, kubeClient) if err != nil { - glog.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName) + klog.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName) return nil, fmt.Errorf("failed to get secret from [%q/%q]", pod.Namespace, secretName) } return parseAPIConfig(secret) @@ -728,7 +728,7 @@ func parsePVSecret(namespace, secretName string, kubeClient clientset.Interfa func parsePVSecret(namespace, secretName string, kubeClient clientset.Interface) (*storageosAPIConfig, error) { secret, err := util.GetSecretForPV(namespace, secretName, storageosPluginName, kubeClient) if err != nil { - glog.Errorf("failed to get secret from [%q/%q]", namespace, secretName) + klog.Errorf("failed to get secret from [%q/%q]", namespace, secretName) return nil, fmt.Errorf("failed to get secret from [%q/%q]", namespace, secretName) } return parseAPIConfig(secret) diff --git a/pkg/volume/storageos/storageos_util.go b/pkg/volume/storageos/storageos_util.go index beac89240871b..c102e4226d5d6 100644 --- a/pkg/volume/storageos/storageos_util.go +++ b/pkg/volume/storageos/storageos_util.go @@ -25,9 +25,9 @@ import ( "k8s.io/kubernetes/pkg/util/mount" - "github.com/golang/glog" storageosapi "github.com/storageos/go-api" storageostypes "github.com/storageos/go-api/types" + "k8s.io/klog" ) const ( @@ -88,7 +88,7 @@ func (u *storageosUtil) NewAPI(apiCfg *storageosAPIConfig) error { apiPass: defaultAPIPassword, apiVersion: defaultAPIVersion, } - glog.V(4).Infof("Using default StorageOS API settings: addr %s, version: %s", apiCfg.apiAddr, defaultAPIVersion) + klog.V(4).Infof("Using default StorageOS API settings: addr %s, version: %s", apiCfg.apiAddr, defaultAPIVersion) } api, err := storageosapi.NewVersionedClient(apiCfg.apiAddr, defaultAPIVersion) @@ -122,7 +122,7 @@ func (u *storageosUtil) CreateVolume(p *storageosProvisioner) (*storageosVolume, vol, err := u.api.VolumeCreate(opts) if err != nil { - glog.Errorf("volume create failed for volume %q (%v)", opts.Name, err) + klog.Errorf("volume create failed for volume %q (%v)", opts.Name, err) return nil, err } return &storageosVolume{ @@ -157,7 +157,7 @@ func (u *storageosUtil) AttachVolume(b *storageosMounter) (string, error) { vol, err := u.api.Volume(b.volNamespace, b.volName) if err != nil { - glog.Warningf("volume retrieve
failed for volume %q with namespace %q (%v)", b.volName, b.volNamespace, err) return "", err } @@ -170,14 +170,14 @@ func (u *storageosUtil) AttachVolume(b *storageosMounter) (string, error) { Namespace: vol.Namespace, } if err := u.api.VolumeUnmount(opts); err != nil { - glog.Warningf("Couldn't clear existing StorageOS mount reference: %v", err) + klog.Warningf("Couldn't clear existing StorageOS mount reference: %v", err) } } srcPath := path.Join(b.deviceDir, vol.ID) dt, err := pathDeviceType(srcPath) if err != nil { - glog.Warningf("volume source path %q for volume %q not ready (%v)", srcPath, b.volName, err) + klog.Warningf("volume source path %q for volume %q not ready (%v)", srcPath, b.volName, err) return "", err } @@ -217,7 +217,7 @@ func (u *storageosUtil) MountVolume(b *storageosMounter, mntDevice, deviceMountP } } if err = os.MkdirAll(deviceMountPath, 0750); err != nil { - glog.Errorf("mkdir failed on disk %s (%v)", deviceMountPath, err) + klog.Errorf("mkdir failed on disk %s (%v)", deviceMountPath, err) return err } options := []string{} @@ -255,7 +255,7 @@ func (u *storageosUtil) UnmountVolume(b *storageosUnmounter) error { if err := u.NewAPI(b.apiCfg); err != nil { // We can't always get the config we need, so allow the unmount to // succeed even if we can't remove the mount reference from the API. - glog.V(4).Infof("Could not remove mount reference in the StorageOS API as no credentials available to the unmount operation") + klog.V(4).Infof("Could not remove mount reference in the StorageOS API as no credentials available to the unmount operation") return nil } @@ -291,11 +291,11 @@ func (u *storageosUtil) DeviceDir(b *storageosMounter) string { ctrl, err := u.api.Controller(b.plugin.host.GetHostName()) if err != nil { - glog.Warningf("node device path lookup failed: %v", err) + klog.Warningf("node device path lookup failed: %v", err) return defaultDeviceDir } if ctrl == nil || ctrl.DeviceDir == "" { - glog.Warningf("node device path not set, using default: %s", defaultDeviceDir) + klog.Warningf("node device path not set, using default: %s", defaultDeviceDir) return defaultDeviceDir } return ctrl.DeviceDir @@ -327,7 +327,7 @@ func attachFileDevice(path string, exec mount.Exec) (string, error) { // If no existing loop device for the path, create one if blockDevicePath == "" { - glog.V(4).Infof("Creating device for path: %s", path) + klog.V(4).Infof("Creating device for path: %s", path) blockDevicePath, err = makeLoopDevice(path, exec) if err != nil { return "", err @@ -349,7 +349,7 @@ func getLoopDevice(path string, exec mount.Exec) (string, error) { args := []string{"-j", path} out, err := exec.Run(losetupPath, args...) if err != nil { - glog.V(2).Infof("Failed device discover command for path %s: %v", path, err) + klog.V(2).Infof("Failed device discover command for path %s: %v", path, err) return "", err } return parseLosetupOutputForDevice(out) @@ -359,7 +359,7 @@ func makeLoopDevice(path string, exec mount.Exec) (string, error) { args := []string{"-f", "-P", "--show", path} out, err := exec.Run(losetupPath, args...) 
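// A note on the losetup invocations above (standard util-linux behavior):
// -j lists any loop device already backed by the given file, which is what
// getLoopDevice parses; -f attaches the file to the first unused /dev/loopN;
// -P asks the kernel to scan the backing file's partition table; and --show
// prints the device node that was allocated, which is the value
// parseLosetupOutputForDevice extracts from the command output.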
if err != nil { - glog.V(2).Infof("Failed device create command for path %s: %v", path, err) + klog.V(2).Infof("Failed device create command for path %s: %v", path, err) return "", err } return parseLosetupOutputForDevice(out) diff --git a/pkg/volume/util/BUILD b/pkg/volume/util/BUILD index cafe94a22356d..c6ac0cf233bfc 100644 --- a/pkg/volume/util/BUILD +++ b/pkg/volume/util/BUILD @@ -41,8 +41,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/util/atomic_writer.go b/pkg/volume/util/atomic_writer.go index 99cc0c15d8561..7b3d034012f4d 100644 --- a/pkg/volume/util/atomic_writer.go +++ b/pkg/volume/util/atomic_writer.go @@ -27,7 +27,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" ) @@ -121,7 +121,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { // (1) cleanPayload, err := validatePayload(payload) if err != nil { - glog.Errorf("%s: invalid payload: %v", w.logContext, err) + klog.Errorf("%s: invalid payload: %v", w.logContext, err) return err } @@ -130,7 +130,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { oldTsDir, err := os.Readlink(dataDirPath) if err != nil { if !os.IsNotExist(err) { - glog.Errorf("%s: error reading link for data directory: %v", w.logContext, err) + klog.Errorf("%s: error reading link for data directory: %v", w.logContext, err) return err } // although Readlink() returns "" on err, don't be fragile by relying on it (since it's not specified in docs) @@ -145,40 +145,40 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { // (3) pathsToRemove, err = w.pathsToRemove(cleanPayload, oldTsPath) if err != nil { - glog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err) + klog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err) return err } // (4) if should, err := shouldWritePayload(cleanPayload, oldTsPath); err != nil { - glog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err) + klog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err) return err } else if !should && len(pathsToRemove) == 0 { - glog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir) + klog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir) return nil } else { - glog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir) + klog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir) } } // (5) tsDir, err := w.newTimestampDir() if err != nil { - glog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err) + klog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err) return err } tsDirName := filepath.Base(tsDir) // (6) if err = w.writePayloadToDir(cleanPayload, tsDir); err != nil { - glog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err) + klog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err) 
return err } - glog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir) + klog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir) // (7) if err = w.createUserVisibleFiles(cleanPayload); err != nil { - glog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err) + klog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err) return err } @@ -186,7 +186,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { newDataDirPath := path.Join(w.targetDir, newDataDirName) if err = os.Symlink(tsDirName, newDataDirPath); err != nil { os.RemoveAll(tsDir) - glog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err) + klog.Errorf("%s: error creating symbolic link for atomic update: %v", w.logContext, err) return err } @@ -201,20 +201,20 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error { if err != nil { os.Remove(newDataDirPath) os.RemoveAll(tsDir) - glog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err) + klog.Errorf("%s: error renaming symbolic link for data directory %s: %v", w.logContext, newDataDirPath, err) return err } // (10) if err = w.removeUserVisiblePaths(pathsToRemove); err != nil { - glog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err) + klog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err) return err } // (11) if len(oldTsDir) > 0 { if err = os.RemoveAll(oldTsPath); err != nil { - glog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err) + klog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err) return err } } @@ -329,7 +329,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir } else if err != nil { return nil, err } - glog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List()) + klog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List()) newPaths := sets.NewString() for file := range payload { @@ -341,10 +341,10 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir subPath = strings.TrimSuffix(subPath, string(os.PathSeparator)) } } - glog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List()) + klog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List()) result := paths.Difference(newPaths) - glog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result) + klog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result) return result, nil } @@ -353,7 +353,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir func (w *AtomicWriter) newTimestampDir() (string, error) { tsDir, err := ioutil.TempDir(w.targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05.")) if err != nil { - glog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err) + klog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err) return "", err } @@ -362,7 +362,7 @@ func (w *AtomicWriter) newTimestampDir() (string, error) { // regardless of the process' umask. 
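// (ioutil.TempDir itself always creates the directory with mode 0700, and a
// plain mkdir would be filtered through the umask; chmod(2) is not, so the
// explicit Chmod below is what actually guarantees 0755.)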
err = os.Chmod(tsDir, 0755) if err != nil { - glog.Errorf("%s: unable to set mode on new temp directory: %v", w.logContext, err) + klog.Errorf("%s: unable to set mode on new temp directory: %v", w.logContext, err) return "", err } @@ -380,13 +380,13 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir err := os.MkdirAll(baseDir, os.ModePerm) if err != nil { - glog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err) + klog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err) return err } err = ioutil.WriteFile(fullPath, content, mode) if err != nil { - glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err) + klog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err) return err } // Chmod is needed because ioutil.WriteFile() ends up calling @@ -395,7 +395,7 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir // in the file no matter what the umask is. err = os.Chmod(fullPath, mode) if err != nil { - glog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err) + klog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err) } } @@ -445,7 +445,7 @@ func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error { continue } if err := os.Remove(path.Join(w.targetDir, p)); err != nil { - glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err) + klog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err) lasterr = err } } diff --git a/pkg/volume/util/device_util_linux.go b/pkg/volume/util/device_util_linux.go index a3d292d0f963a..66e8564915b07 100644 --- a/pkg/volume/util/device_util_linux.go +++ b/pkg/volume/util/device_util_linux.go @@ -21,7 +21,7 @@ package util import ( "errors" "fmt" - "github.com/golang/glog" + "k8s.io/klog" "path" "strconv" "strings" @@ -109,7 +109,7 @@ func (handler *deviceHandler) GetISCSIPortalHostMapForTarget(targetIqn string) ( } hostNumber, err := strconv.Atoi(strings.TrimPrefix(hostName, "host")) if err != nil { - glog.Errorf("Could not get number from iSCSI host: %s", hostName) + klog.Errorf("Could not get number from iSCSI host: %s", hostName) continue } @@ -222,7 +222,7 @@ func (handler *deviceHandler) FindDevicesForISCSILun(targetIqn string, lun int) } hostNumber, err := strconv.Atoi(strings.TrimPrefix(hostName, "host")) if err != nil { - glog.Errorf("Could not get number from iSCSI host: %s", hostName) + klog.Errorf("Could not get number from iSCSI host: %s", hostName) continue } diff --git a/pkg/volume/util/nestedpendingoperations/BUILD b/pkg/volume/util/nestedpendingoperations/BUILD index 4622d91fbb1cd..945d33dfdf27d 100644 --- a/pkg/volume/util/nestedpendingoperations/BUILD +++ b/pkg/volume/util/nestedpendingoperations/BUILD @@ -15,7 +15,7 @@ go_library( "//pkg/volume/util/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go b/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go index 526ea403ceeac..d9d277e0b2199 100644 --- a/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go +++ 
b/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go @@ -28,9 +28,9 @@ import ( "fmt" "sync" - "github.com/golang/glog" "k8s.io/api/core/v1" k8sRuntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" "k8s.io/kubernetes/pkg/volume/util/types" ) @@ -240,7 +240,7 @@ func (grm *nestedPendingOperations) operationComplete( if *err != nil { // Log error logOperationName := getOperationName(volumeName, podName) - glog.Errorf("operation %s failed with: %v", + klog.Errorf("operation %s failed with: %v", logOperationName, *err) } @@ -252,7 +252,7 @@ func (grm *nestedPendingOperations) operationComplete( if getOpErr != nil { // Failed to find existing operation logOperationName := getOperationName(volumeName, podName) - glog.Errorf("Operation %s completed. error: %v. exponentialBackOffOnError is enabled, but failed to get operation to update.", + klog.Errorf("Operation %s completed. error: %v. exponentialBackOffOnError is enabled, but failed to get operation to update.", logOperationName, *err) return @@ -264,7 +264,7 @@ func (grm *nestedPendingOperations) operationComplete( // Log error operationName := getOperationName(volumeName, podName) - glog.Errorf("%v", grm.operations[existingOpIndex].expBackoff. + klog.Errorf("%v", grm.operations[existingOpIndex].expBackoff. GenerateNoRetriesPermittedMsg(operationName)) } diff --git a/pkg/volume/util/operationexecutor/BUILD b/pkg/volume/util/operationexecutor/BUILD index 2f42e04ea9f0f..3b2cd65d5d760 100644 --- a/pkg/volume/util/operationexecutor/BUILD +++ b/pkg/volume/util/operationexecutor/BUILD @@ -30,7 +30,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index 8983ae48d39e6..745910dc220d5 100644 --- a/pkg/volume/util/operationexecutor/operation_executor.go +++ b/pkg/volume/util/operationexecutor/operation_executor.go @@ -24,7 +24,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -627,14 +627,14 @@ func (oe *operationExecutor) VerifyVolumesAreAttached( for node, nodeAttachedVolumes := range attachedVolumes { for _, volumeAttached := range nodeAttachedVolumes { if volumeAttached.VolumeSpec == nil { - glog.Errorf("VerifyVolumesAreAttached: nil spec for volume %s", volumeAttached.VolumeName) + klog.Errorf("VerifyVolumesAreAttached: nil spec for volume %s", volumeAttached.VolumeName) continue } volumePlugin, err := oe.operationGenerator.GetVolumePluginMgr().FindPluginBySpec(volumeAttached.VolumeSpec) if err != nil || volumePlugin == nil { - glog.Errorf( + klog.Errorf( "VolumesAreAttached.FindPluginBySpec failed for volume %q (spec.Name: %q) on node %q with error: %v", volumeAttached.VolumeName, volumeAttached.VolumeSpec.Name(), @@ -673,7 +673,7 @@ func (oe *operationExecutor) VerifyVolumesAreAttached( // If node doesn't support Bulk volume polling it is best to poll individually nodeError := oe.VerifyVolumesAreAttachedPerNode(nodeAttachedVolumes, node, actualStateOfWorld) if nodeError != nil { - glog.Errorf("BulkVerifyVolumes.VerifyVolumesAreAttached verifying volumes on node %q with %v", node, nodeError) + 
klog.Errorf("BulkVerifyVolumes.VerifyVolumesAreAttached verifying volumes on node %q with %v", node, nodeError) } break } @@ -686,14 +686,14 @@ func (oe *operationExecutor) VerifyVolumesAreAttached( volumeSpecMapByPlugin[pluginName], actualStateOfWorld) if err != nil { - glog.Errorf("BulkVerifyVolumes.GenerateBulkVolumeVerifyFunc error bulk verifying volumes for plugin %q with %v", pluginName, err) + klog.Errorf("BulkVerifyVolumes.GenerateBulkVolumeVerifyFunc error bulk verifying volumes for plugin %q with %v", pluginName, err) } // Ugly hack to ensure - we don't do parallel bulk polling of same volume plugin uniquePluginName := v1.UniqueVolumeName(pluginName) err = oe.pendingOperations.Run(uniquePluginName, "" /* Pod Name */, generatedOperations) if err != nil { - glog.Errorf("BulkVerifyVolumes.Run Error bulk volume verification for plugin %q with %v", pluginName, err) + klog.Errorf("BulkVerifyVolumes.Run Error bulk volume verification for plugin %q with %v", pluginName, err) } } } @@ -862,7 +862,7 @@ func (oe *operationExecutor) ReconstructVolumeOperation( // Filesystem Volume case if volumeMode == v1.PersistentVolumeFilesystem { // Create volumeSpec from mount path - glog.V(5).Infof("Starting operationExecutor.ReconstructVolumepodName") + klog.V(5).Infof("Starting operationExecutor.ReconstructVolumepodName") volumeSpec, err := plugin.ConstructVolumeSpec(volumeSpecName, mountPath) if err != nil { return nil, err @@ -872,7 +872,7 @@ func (oe *operationExecutor) ReconstructVolumeOperation( // Block Volume case // Create volumeSpec from mount path - glog.V(5).Infof("Starting operationExecutor.ReconstructVolume") + klog.V(5).Infof("Starting operationExecutor.ReconstructVolume") if mapperPlugin == nil { return nil, fmt.Errorf("Could not find block volume plugin %q (spec.Name: %q) pod %q (UID: %q)", pluginName, diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index 67ba7f6d04c4f..7950f9597b479 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -22,7 +22,6 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,6 +29,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" + "k8s.io/klog" expandcache "k8s.io/kubernetes/pkg/controller/volume/expand/cache" "k8s.io/kubernetes/pkg/features" kevents "k8s.io/kubernetes/pkg/kubelet/events" @@ -139,13 +139,13 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( // Iterate each volume spec and put them into a map index by the pluginName for _, volumeAttached := range attachedVolumes { if volumeAttached.VolumeSpec == nil { - glog.Errorf("VerifyVolumesAreAttached.GenerateVolumesAreAttachedFunc: nil spec for volume %s", volumeAttached.VolumeName) + klog.Errorf("VerifyVolumesAreAttached.GenerateVolumesAreAttachedFunc: nil spec for volume %s", volumeAttached.VolumeName) continue } volumePlugin, err := og.volumePluginMgr.FindPluginBySpec(volumeAttached.VolumeSpec) if err != nil || volumePlugin == nil { - glog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error()) + klog.Errorf(volumeAttached.GenerateErrorDetailed("VolumesAreAttached.FindPluginBySpec failed", err).Error()) continue } volumeSpecList, pluginExists := 
volumesPerPlugin[volumePlugin.GetPluginName()] @@ -165,7 +165,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( attachableVolumePlugin, err := og.volumePluginMgr.FindAttachablePluginByName(pluginName) if err != nil || attachableVolumePlugin == nil { - glog.Errorf( + klog.Errorf( "VolumeAreAttached.FindAttachablePluginBySpec failed for plugin %q with: %v", pluginName, err) @@ -174,7 +174,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher() if newAttacherErr != nil { - glog.Errorf( + klog.Errorf( "VolumesAreAttached.NewAttacher failed for getting plugin %q with: %v", pluginName, newAttacherErr) @@ -183,7 +183,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( attached, areAttachedErr := volumeAttacher.VolumesAreAttached(volumesSpecs, nodeName) if areAttachedErr != nil { - glog.Errorf( + klog.Errorf( "VolumesAreAttached failed for checking on node %q with: %v", nodeName, areAttachedErr) @@ -193,7 +193,7 @@ func (og *operationGenerator) GenerateVolumesAreAttachedFunc( for spec, check := range attached { if !check { actualStateOfWorld.MarkVolumeAsDetached(volumeSpecMap[spec], nodeName) - glog.V(1).Infof("VerifyVolumesAreAttached determined volume %q (spec.Name: %q) is no longer attached to node %q, therefore it was marked as detached.", + klog.V(1).Infof("VerifyVolumesAreAttached determined volume %q (spec.Name: %q) is no longer attached to node %q, therefore it was marked as detached.", volumeSpecMap[spec], spec.Name(), nodeName) } } @@ -218,7 +218,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( attachableVolumePlugin, err := og.volumePluginMgr.FindAttachablePluginByName(pluginName) if err != nil || attachableVolumePlugin == nil { - glog.Errorf( + klog.Errorf( "BulkVerifyVolume.FindAttachablePluginBySpec failed for plugin %q with: %v", pluginName, err) @@ -228,7 +228,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher() if newAttacherErr != nil { - glog.Errorf( + klog.Errorf( "BulkVerifyVolume.NewAttacher failed for getting plugin %q with: %v", attachableVolumePlugin, newAttacherErr) @@ -237,13 +237,13 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( bulkVolumeVerifier, ok := volumeAttacher.(volume.BulkVolumeVerifier) if !ok { - glog.Errorf("BulkVerifyVolume failed to type assert attacher %q", bulkVolumeVerifier) + klog.Errorf("BulkVerifyVolume failed to type assert attacher %q", bulkVolumeVerifier) return nil, nil } attached, bulkAttachErr := bulkVolumeVerifier.BulkVerifyVolumes(pluginNodeVolumes) if bulkAttachErr != nil { - glog.Errorf("BulkVerifyVolume.BulkVerifyVolumes Error checking volumes are attached with %v", bulkAttachErr) + klog.Errorf("BulkVerifyVolume.BulkVerifyVolumes Error checking volumes are attached with %v", bulkAttachErr) return nil, nil } @@ -252,7 +252,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( nodeVolumeSpecs, nodeChecked := attached[nodeName] if !nodeChecked { - glog.V(2).Infof("VerifyVolumesAreAttached.BulkVerifyVolumes failed for node %q and leaving volume %q as attached", + klog.V(2).Infof("VerifyVolumesAreAttached.BulkVerifyVolumes failed for node %q and leaving volume %q as attached", nodeName, volumeSpec.Name()) continue @@ -261,7 +261,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( check := nodeVolumeSpecs[volumeSpec] if !check { - glog.V(2).Infof("VerifyVolumesAreAttached.BulkVerifyVolumes failed 
for node %q and volume %q", + klog.V(2).Infof("VerifyVolumesAreAttached.BulkVerifyVolumes failed for node %q and volume %q", nodeName, volumeSpec.Name()) actualStateOfWorld.MarkVolumeAsDetached(volumeSpecMap[volumeSpec], nodeName) @@ -319,7 +319,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( derr.DevicePath) if addErr != nil { - glog.Errorf("AttachVolume.MarkVolumeAsAttached failed to fix dangling volume error for volume %q with %s", volumeToAttach.VolumeName, addErr) + klog.Errorf("AttachVolume.MarkVolumeAsAttached failed to fix dangling volume error for volume %q with %s", volumeToAttach.VolumeName, addErr) } } @@ -332,7 +332,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( for _, pod := range volumeToAttach.ScheduledPods { og.recorder.Eventf(pod, v1.EventTypeNormal, kevents.SuccessfulAttachVolume, simpleMsg) } - glog.Infof(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", "")) + klog.Infof(volumeToAttach.GenerateMsgDetailed("AttachVolume.Attach succeeded", "")) // Update actual state of world addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( @@ -416,7 +416,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( return volumeToDetach.GenerateError("DetachVolume.Detach failed", err) } - glog.Infof(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", "")) + klog.Infof(volumeToDetach.GenerateMsgDetailed("DetachVolume.Detach succeeded", "")) // Update actual state of world actualStateOfWorld.MarkVolumeAsDetached( @@ -494,7 +494,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( devicePath := volumeToMount.DevicePath if volumeAttacher != nil { // Wait for attachable volumes to finish attaching - glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) + klog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) devicePath, err = volumeAttacher.WaitForAttach( volumeToMount.VolumeSpec, devicePath, volumeToMount.Pod, waitForAttachTimeout) @@ -503,7 +503,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( return volumeToMount.GenerateError("MountVolume.WaitForAttach failed", err) } - glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) + klog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) } if volumeDeviceMounter != nil { @@ -524,7 +524,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( return volumeToMount.GenerateError("MountVolume.MountDevice failed", err) } - glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.MountDevice succeeded", fmt.Sprintf("device mount path %q", deviceMountPath))) + klog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.MountDevice succeeded", fmt.Sprintf("device mount path %q", deviceMountPath))) // Update actual state of world to reflect volume is globally mounted markDeviceMountedErr := actualStateOfWorld.MarkDeviceAsMounted( @@ -560,11 +560,11 @@ func (og *operationGenerator) GenerateMountVolumeFunc( } _, detailedMsg := volumeToMount.GenerateMsg("MountVolume.SetUp succeeded", "") - verbosity := glog.Level(1) + verbosity := klog.Level(1) if isRemount { - verbosity = glog.Level(4) + verbosity = klog.Level(4) } - glog.V(verbosity).Infof(detailedMsg) + klog.V(verbosity).Infof(detailedMsg) // Update actual state of world markVolMountedErr := 
actualStateOfWorld.MarkVolumeAsMounted( @@ -599,7 +599,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc( func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devicePath, deviceMountPath, pluginName string) (simpleErr, detailedErr error) { if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) { - glog.V(4).Infof("Resizing is not enabled for this volume %s", volumeToMount.VolumeName) + klog.V(4).Infof("Resizing is not enabled for this volume %s", volumeToMount.VolumeName) return nil, nil } @@ -621,11 +621,11 @@ func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devi pvSpecCap := pv.Spec.Capacity[v1.ResourceStorage] if pvcStatusCap.Cmp(pvSpecCap) < 0 { // File system resize was requested, proceed - glog.V(4).Infof(volumeToMount.GenerateMsgDetailed("MountVolume.resizeFileSystem entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) + klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("MountVolume.resizeFileSystem entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) if volumeToMount.VolumeSpec.ReadOnly { simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem failed", "requested read-only file system") - glog.Warningf(detailedMsg) + klog.Warningf(detailedMsg) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg) return nil, nil } @@ -634,7 +634,7 @@ func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, devi } simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem succeeded", "") og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg) - glog.Infof(detailedMsg) + klog.Infof(detailedMsg) // File system resize succeeded, now update the PVC's Capacity to match the PV's err = util.MarkFSResizeFinished(pvc, pv.Spec.Capacity, og.kubeClient) if err != nil { @@ -680,7 +680,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( return volumeToUnmount.GenerateError("UnmountVolume.TearDown failed", unmountErr) } - glog.Infof( + klog.Infof( "UnmountVolume.TearDown succeeded for volume %q (OuterVolumeSpecName: %q) pod %q (UID: %q). InnerVolumeSpecName %q. 
PluginName %q, VolumeGidValue %q", volumeToUnmount.VolumeName, volumeToUnmount.OuterVolumeSpecName, @@ -695,7 +695,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( volumeToUnmount.PodName, volumeToUnmount.VolumeName) if markVolMountedErr != nil { // On failure, just log and exit - glog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error()) + klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmountVolume.MarkVolumeAsUnmounted failed", markVolMountedErr).Error()) } return nil, nil @@ -765,7 +765,7 @@ func (og *operationGenerator) GenerateUnmountDeviceFunc( fmt.Errorf("the device is in use when it was no longer expected to be in use")) } - glog.Infof(deviceToDetach.GenerateMsg("UnmountDevice succeeded", "")) + klog.Infof(deviceToDetach.GenerateMsg("UnmountDevice succeeded", "")) // Update actual state of world markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted( @@ -844,7 +844,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( } if volumeAttacher != nil { // Wait for attachable volumes to finish attaching - glog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) + klog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath))) devicePath, err = volumeAttacher.WaitForAttach( volumeToMount.VolumeSpec, volumeToMount.DevicePath, volumeToMount.Pod, waitForAttachTimeout) @@ -853,7 +853,7 @@ func (og *operationGenerator) GenerateMapVolumeFunc( return volumeToMount.GenerateError("MapVolume.WaitForAttach failed", err) } - glog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) + klog.Infof(volumeToMount.GenerateMsgDetailed("MapVolume.WaitForAttach succeeded", fmt.Sprintf("DevicePath %q", devicePath))) } // A plugin doesn't have attacher also needs to map device to global map path with SetUpDevice() @@ -909,15 +909,15 @@ func (og *operationGenerator) GenerateMapVolumeFunc( // Device mapping for global map path succeeded simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MapVolume.MapDevice succeeded", fmt.Sprintf("globalMapPath %q", globalMapPath)) - verbosity := glog.Level(4) + verbosity := klog.Level(4) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg) - glog.V(verbosity).Infof(detailedMsg) + klog.V(verbosity).Infof(detailedMsg) // Device mapping for pod device map path succeeded simpleMsg, detailedMsg = volumeToMount.GenerateMsg("MapVolume.MapDevice succeeded", fmt.Sprintf("volumeMapPath %q", volumeMapPath)) - verbosity = glog.Level(1) + verbosity = klog.Level(1) og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg) - glog.V(verbosity).Infof(detailedMsg) + klog.V(verbosity).Infof(detailedMsg) // Update actual state of world markVolMountedErr := actualStateOfWorld.MarkVolumeAsMounted( @@ -992,7 +992,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( return volumeToUnmount.GenerateError("UnmapVolume.UnmapDevice on global map path failed", unmapDeviceErr) } - glog.Infof( + klog.Infof( "UnmapVolume succeeded for volume %q (OuterVolumeSpecName: %q) pod %q (UID: %q). InnerVolumeSpecName %q. 
PluginName %q, VolumeGidValue %q", volumeToUnmount.VolumeName, volumeToUnmount.OuterVolumeSpecName, @@ -1007,7 +1007,7 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( volumeToUnmount.PodName, volumeToUnmount.VolumeName) if markVolUnmountedErr != nil { // On failure, just log and exit - glog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error()) + klog.Errorf(volumeToUnmount.GenerateErrorDetailed("UnmapVolume.MarkVolumeAsUnmounted failed", markVolUnmountedErr).Error()) } return nil, nil @@ -1067,11 +1067,11 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( // The block volume is not referenced from Pods. Release file descriptor lock. // This should be done before calling TearDownDevice, because some plugins that do local detach // in TearDownDevice will fail in detaching device due to the refcnt on the loopback device. - glog.V(4).Infof("UnmapDevice: deviceToDetach.DevicePath: %v", deviceToDetach.DevicePath) + klog.V(4).Infof("UnmapDevice: deviceToDetach.DevicePath: %v", deviceToDetach.DevicePath) loopPath, err := og.blkUtil.GetLoopDevice(deviceToDetach.DevicePath) if err != nil { if err.Error() == volumepathhandler.ErrDeviceNotFound { - glog.Warningf(deviceToDetach.GenerateMsgDetailed("UnmapDevice: Couldn't find loopback device which takes file descriptor lock", fmt.Sprintf("device path: %q", deviceToDetach.DevicePath))) + klog.Warningf(deviceToDetach.GenerateMsgDetailed("UnmapDevice: Couldn't find loopback device which takes file descriptor lock", fmt.Sprintf("device path: %q", deviceToDetach.DevicePath))) } else { errInfo := "UnmapDevice.GetLoopDevice failed to get loopback device, " + fmt.Sprintf("device path: %q", deviceToDetach.DevicePath) return deviceToDetach.GenerateError(errInfo, err) @@ -1115,7 +1115,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( fmt.Errorf("the device is in use when it was no longer expected to be in use")) } - glog.Infof(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", "")) + klog.Infof(deviceToDetach.GenerateMsgDetailed("UnmapDevice succeeded", "")) // Update actual state of world markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted( @@ -1189,7 +1189,7 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( if attachedVolume.Name == volumeToMount.VolumeName { addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( v1.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath) - glog.Infof(volumeToMount.GenerateMsgDetailed("Controller attach succeeded", fmt.Sprintf("device path: %q", attachedVolume.DevicePath))) + klog.Infof(volumeToMount.GenerateMsgDetailed("Controller attach succeeded", fmt.Sprintf("device path: %q", attachedVolume.DevicePath))) if addVolumeNodeErr != nil { // On failure, return error. Caller will log and retry. return volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttached failed", addVolumeNodeErr) @@ -1216,7 +1216,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach( node, fetchErr := og.kubeClient.CoreV1().Nodes().Get(string(volumeToDetach.NodeName), metav1.GetOptions{}) if fetchErr != nil { if errors.IsNotFound(fetchErr) { - glog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. DetachVolume will skip safe to detach check", "")) + klog.Warningf(volumeToDetach.GenerateMsgDetailed("Node not found on API server. 
DetachVolume will skip safe to detach check", "")) return nil } @@ -1240,7 +1240,7 @@ func (og *operationGenerator) verifyVolumeIsSafeToDetach( } // Volume is not marked as in use by node - glog.Infof(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", "")) + klog.Infof(volumeToDetach.GenerateMsgDetailed("Verified volume is safe to detach", "")) return nil } @@ -1274,7 +1274,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( return detailedErr, detailedErr } - glog.Infof("ExpandVolume succeeded for volume %s", pvcWithResizeRequest.QualifiedName()) + klog.Infof("ExpandVolume succeeded for volume %s", pvcWithResizeRequest.QualifiedName()) newSize = updatedSize // k8s doesn't have transactions, we can't guarantee that after updating PV - updating PVC will be // successful, that is why all PVCs for which pvc.Spec.Size > pvc.Status.Size must be reprocessed @@ -1285,14 +1285,14 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( detailedErr := fmt.Errorf("Error updating PV spec capacity for volume %q with : %v", pvcWithResizeRequest.QualifiedName(), updateErr) return detailedErr, detailedErr } - glog.Infof("ExpandVolume.UpdatePV succeeded for volume %s", pvcWithResizeRequest.QualifiedName()) + klog.Infof("ExpandVolume.UpdatePV succeeded for volume %s", pvcWithResizeRequest.QualifiedName()) } // No Cloudprovider resize needed, lets mark resizing as done // Rest of the volume expand controller code will assume PVC as *not* resized until pvc.Status.Size // reflects user requested size. if !volumePlugin.RequiresFSResize() { - glog.V(4).Infof("Controller resizing done for PVC %s", pvcWithResizeRequest.QualifiedName()) + klog.V(4).Infof("Controller resizing done for PVC %s", pvcWithResizeRequest.QualifiedName()) err := resizeMap.MarkAsResized(pvcWithResizeRequest, newSize) if err != nil { @@ -1305,7 +1305,7 @@ func (og *operationGenerator) GenerateExpandVolumeFunc( err := resizeMap.MarkForFSResize(pvcWithResizeRequest) if err != nil { detailedErr := fmt.Errorf("Error updating pvc %s condition for fs resize : %v", pvcWithResizeRequest.QualifiedName(), err) - glog.Warning(detailedErr) + klog.Warning(detailedErr) return nil, nil } } @@ -1421,7 +1421,7 @@ func isDeviceOpened(deviceToDetach AttachedVolume, mounter mount.Interface) (boo (devicePathErr != nil && strings.Contains(devicePathErr.Error(), "does not exist")) { // not a device path or path doesn't exist //TODO: refer to #36092 - glog.V(3).Infof("The path isn't device path or doesn't exist. Skip checking device path: %s", deviceToDetach.DevicePath) + klog.V(3).Infof("The path isn't device path or doesn't exist. 
Skip checking device path: %s", deviceToDetach.DevicePath)
 		deviceOpened = false
 	} else if devicePathErr != nil {
 		return false, deviceToDetach.GenerateErrorDetailed("PathIsDevice failed", devicePathErr)
diff --git a/pkg/volume/util/recyclerclient/BUILD b/pkg/volume/util/recyclerclient/BUILD
index e43b35af736d4..3ea30eb449139 100644
--- a/pkg/volume/util/recyclerclient/BUILD
+++ b/pkg/volume/util/recyclerclient/BUILD
@@ -12,7 +12,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
diff --git a/pkg/volume/util/recyclerclient/recycler_client.go b/pkg/volume/util/recyclerclient/recycler_client.go
index 275f55bbe3364..c7a8f147f8782 100644
--- a/pkg/volume/util/recyclerclient/recycler_client.go
+++ b/pkg/volume/util/recyclerclient/recycler_client.go
@@ -20,13 +20,13 @@ import (
 	"fmt"
 	"sync"
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/watch"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/klog"
 )
 type RecycleEventRecorder func(eventtype, message string)
@@ -51,7 +51,7 @@ func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeC
 // same as above func comments, except 'recyclerClient' is a narrower pod API
 // interface to ease testing
 func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
-	glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)
+	klog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)
 	// Generate unique name for the recycler pod - we need to get "already
 	// exists" error when a previous controller has already started recycling
@@ -63,7 +63,7 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po
 	defer close(stopChannel)
 	podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
 	if err != nil {
-		glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
+		klog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
 		return err
 	}
@@ -84,10 +84,10 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Po
 	err = waitForPod(pod, recyclerClient, podCh)
 	// In all cases delete the recycler pod and log its result.
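That comment marks the tail of the pattern this file implements: start a watch, wait for the recycler pod to reach a terminal phase, then always delete the pod. The hunks here only re-label the logging calls, but the wait loop is worth seeing in isolation. A minimal sketch of it (waitForTerminalPhase is a hypothetical helper, not code from this patch, and it assumes only the core/v1 and watch packages already imported above):

package recycler

import (
	"errors"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// waitForTerminalPhase drains a pod watch channel until the pod reaches a
// terminal phase, mirroring the shape of waitForPod in the hunk above.
func waitForTerminalPhase(podCh <-chan watch.Event) error {
	for event := range podCh {
		pod, ok := event.Object.(*v1.Pod)
		if !ok {
			// The real waitForPod also reacts to *v1.Event objects and to
			// watch.Deleted; this sketch only tracks pod phase changes.
			continue
		}
		switch pod.Status.Phase {
		case v1.PodSucceeded:
			return nil // recycling finished; the caller still deletes the pod
		case v1.PodFailed:
			if pod.Status.Message != "" {
				return errors.New(pod.Status.Message)
			}
			return errors.New("recycler pod failed")
		}
	}
	return errors.New("watch channel closed before the pod reached a terminal phase")
}

Draining the channel with a plain range loop keeps the helper select-free and easy to test; the deletion that follows below happens unconditionally so a failed recycler is never left behind.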
- glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name) + klog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name) deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace) if deleteErr != nil { - glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err) + klog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err) } // Returning recycler error is preferred, the pod will be deleted again on @@ -117,7 +117,7 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E case *v1.Pod: // POD changed pod := event.Object.(*v1.Pod) - glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase) + klog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase) switch event.Type { case watch.Added, watch.Modified: if pod.Status.Phase == v1.PodSucceeded { @@ -142,7 +142,7 @@ func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.E case *v1.Event: // Event received podEvent := event.Object.(*v1.Event) - glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message) + klog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message) if event.Type == watch.Added { recyclerClient.Event(podEvent.Type, podEvent.Message) } diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index faf0d34b61d63..070961c2822aa 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -25,7 +25,6 @@ import ( "strings" "syscall" - "github.com/golang/glog" "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" "k8s.io/kubernetes/pkg/api/legacyscheme" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" @@ -101,7 +101,7 @@ func IsReady(dir string) bool { } if !s.Mode().IsRegular() { - glog.Errorf("ready-file is not a file: %s", readyFile) + klog.Errorf("ready-file is not a file: %s", readyFile) return false } @@ -113,14 +113,14 @@ func IsReady(dir string) bool { // created. 
func SetReady(dir string) { if err := os.MkdirAll(dir, 0750); err != nil && !os.IsExist(err) { - glog.Errorf("Can't mkdir %s: %v", dir, err) + klog.Errorf("Can't mkdir %s: %v", dir, err) return } readyFile := path.Join(dir, readyFileName) file, err := os.Create(readyFile) if err != nil { - glog.Errorf("Can't touch %s: %v", readyFile, err) + klog.Errorf("Can't touch %s: %v", readyFile, err) return } file.Close() @@ -140,7 +140,7 @@ func UnmountPath(mountPath string, mounter mount.Interface) error { func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error { pathExists, pathErr := PathExists(mountPath) if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath) + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath) return nil } corruptedMnt := IsCorruptedMnt(pathErr) @@ -171,13 +171,13 @@ func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMou } if notMnt { - glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) + klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) return os.Remove(mountPath) } } // Unmount the mount path - glog.V(4).Infof("%q is a mountpoint, unmounting", mountPath) + klog.V(4).Infof("%q is a mountpoint, unmounting", mountPath) if err := mounter.Unmount(mountPath); err != nil { return err } @@ -186,7 +186,7 @@ func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMou return mntErr } if notMnt { - glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath) + klog.V(4).Infof("%q is unmounted, deleting the directory", mountPath) return os.Remove(mountPath) } return fmt.Errorf("Failed to unmount path %v", mountPath) @@ -291,7 +291,7 @@ func checkVolumeNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]stri if pv.Spec.NodeAffinity.Required != nil { terms := pv.Spec.NodeAffinity.Required.NodeSelectorTerms - glog.V(10).Infof("Match for Required node selector terms %+v", terms) + klog.V(10).Infof("Match for Required node selector terms %+v", terms) if !v1helper.MatchNodeSelectorTerms(terms, labels.Set(nodeLabels), nil) { return fmt.Errorf("No matching NodeSelectorTerms") } @@ -607,7 +607,7 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string { zoneSlice := zones.List() zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))] - glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice) + klog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice) return zone } @@ -666,7 +666,7 @@ func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) se replicaZones.Insert(zone) } - glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q", + klog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q", pvcName, replicaZones.UnsortedList(), zoneSlice) return replicaZones } @@ -674,7 +674,7 @@ func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) se func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) { if pvcName == "" { // We should always be called with a name; this shouldn't happen - glog.Warningf("No name defined during volume create; choosing random zone") + klog.Warningf("No name defined during volume create; choosing random zone") hash = rand.Uint32() } else { @@ -710,7 +710,7 @@ func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index 
uint32) { hashString = hashString[lastDash+1:] } - glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index) + klog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index) } } @@ -726,7 +726,7 @@ func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) { // UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi // to empty_dir func UnmountViaEmptyDir(dir string, host volume.VolumeHost, volName string, volSpec volume.Spec, podUID utypes.UID) error { - glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir) + klog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir) // Wrap EmptyDir, let it do the teardown. wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID) diff --git a/pkg/volume/util/volumepathhandler/BUILD b/pkg/volume/util/volumepathhandler/BUILD index e61e3973ea54f..e344849a517cc 100644 --- a/pkg/volume/util/volumepathhandler/BUILD +++ b/pkg/volume/util/volumepathhandler/BUILD @@ -11,7 +11,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/util/volumepathhandler/volume_path_handler.go b/pkg/volume/util/volumepathhandler/volume_path_handler.go index 61680c11577ee..a7822efc3e556 100644 --- a/pkg/volume/util/volumepathhandler/volume_path_handler.go +++ b/pkg/volume/util/volumepathhandler/volume_path_handler.go @@ -23,7 +23,7 @@ import ( "path" "path/filepath" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/types" ) @@ -86,14 +86,14 @@ func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName if !filepath.IsAbs(mapPath) { return fmt.Errorf("The map path should be absolute: map path: %s", mapPath) } - glog.V(5).Infof("MapDevice: devicePath %s", devicePath) - glog.V(5).Infof("MapDevice: mapPath %s", mapPath) - glog.V(5).Infof("MapDevice: linkName %s", linkName) + klog.V(5).Infof("MapDevice: devicePath %s", devicePath) + klog.V(5).Infof("MapDevice: mapPath %s", mapPath) + klog.V(5).Infof("MapDevice: linkName %s", linkName) // Check and create mapPath _, err := os.Stat(mapPath) if err != nil && !os.IsNotExist(err) { - glog.Errorf("cannot validate map path: %s", mapPath) + klog.Errorf("cannot validate map path: %s", mapPath) return err } if err = os.MkdirAll(mapPath, 0750); err != nil { @@ -115,15 +115,15 @@ func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error { if len(mapPath) == 0 { return fmt.Errorf("Failed to unmap device from map path. 
mapPath is empty") } - glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath) - glog.V(5).Infof("UnmapDevice: linkName %s", linkName) + klog.V(5).Infof("UnmapDevice: mapPath %s", mapPath) + klog.V(5).Infof("UnmapDevice: linkName %s", linkName) // Check symbolic link exists linkPath := path.Join(mapPath, string(linkName)) if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil { return checkErr } else if !islinkExist { - glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath) + klog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath) return nil } err := os.Remove(linkPath) @@ -135,7 +135,7 @@ func (v VolumePathHandler) RemoveMapPath(mapPath string) error { if len(mapPath) == 0 { return fmt.Errorf("Failed to remove map path. mapPath is empty") } - glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath) + klog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath) err := os.RemoveAll(mapPath) if err != nil && !os.IsNotExist(err) { return err @@ -180,12 +180,12 @@ func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) if err != nil { return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err) } - glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath) + klog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath) if filepath == devPath { refs = append(refs, path.Join(mapPath, filename)) } } - glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs) + klog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs) return refs, nil } @@ -201,7 +201,7 @@ func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath strin return err } if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) { - glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) + klog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) if res, err := compareSymlinks(path, mapPath); err == nil && res { globalMapPathUUID = path } @@ -211,7 +211,7 @@ func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath strin if err != nil { return "", err } - glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) + klog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) // Return path contains global map path + {pod uuid} return globalMapPathUUID, nil } @@ -225,7 +225,7 @@ func compareSymlinks(global, pod string) (bool, error) { if err != nil { return false, err } - glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod) + klog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod) if devGlobal == devPod { return true, nil } diff --git a/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go b/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go index f9a886d7dc64c..7170edc7de007 100644 --- a/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go +++ b/pkg/volume/util/volumepathhandler/volume_path_handler_linux.go @@ -25,7 +25,7 @@ import ( "os/exec" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) // AttachFileDevice takes a path to a regular file and makes it available as an @@ -38,7 +38,7 @@ func (v VolumePathHandler) AttachFileDevice(path string) (string, error) { // If no existing loop device for the path, create one if blockDevicePath == "" { - glog.V(4).Infof("Creating device for path: %s", path) + klog.V(4).Infof("Creating device for path: 
%s", path) blockDevicePath, err = makeLoopDevice(path) if err != nil { return "", err @@ -61,7 +61,7 @@ func (v VolumePathHandler) GetLoopDevice(path string) (string, error) { cmd := exec.Command(losetupPath, args...) out, err := cmd.CombinedOutput() if err != nil { - glog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out) + klog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out) return "", err } return parseLosetupOutputForDevice(out) @@ -72,7 +72,7 @@ func makeLoopDevice(path string) (string, error) { cmd := exec.Command(losetupPath, args...) out, err := cmd.CombinedOutput() if err != nil { - glog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out) + klog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out) return "", err } return parseLosetupOutputForDevice(out) @@ -87,7 +87,7 @@ func (v VolumePathHandler) RemoveLoopDevice(device string) error { if _, err := os.Stat(device); os.IsNotExist(err) { return nil } - glog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out) + klog.V(2).Infof("Failed to remove loopback device: %s: %v %s", device, err, out) return err } return nil diff --git a/pkg/volume/volume_linux.go b/pkg/volume/volume_linux.go index ef1f45208c95f..eb44d5f162fad 100644 --- a/pkg/volume/volume_linux.go +++ b/pkg/volume/volume_linux.go @@ -24,7 +24,7 @@ import ( "os" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -63,13 +63,13 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error { } if stat == nil { - glog.Errorf("Got nil stat_t for path %v while setting ownership of volume", path) + klog.Errorf("Got nil stat_t for path %v while setting ownership of volume", path) return nil } err = os.Chown(path, int(stat.Uid), int(*fsGroup)) if err != nil { - glog.Errorf("Chown failed on %v: %v", path, err) + klog.Errorf("Chown failed on %v: %v", path, err) } mask := rwMask @@ -83,7 +83,7 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error { err = os.Chmod(path, info.Mode()|mask) if err != nil { - glog.Errorf("Chmod failed on %v: %v", path, err) + klog.Errorf("Chmod failed on %v: %v", path, err) } return nil diff --git a/pkg/volume/vsphere_volume/BUILD b/pkg/volume/vsphere_volume/BUILD index c78d097eb0462..89892c7621d51 100644 --- a/pkg/volume/vsphere_volume/BUILD +++ b/pkg/volume/vsphere_volume/BUILD @@ -31,7 +31,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -55,7 +55,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/volume/vsphere_volume/attacher.go b/pkg/volume/vsphere_volume/attacher.go index 9f292e348fab2..ca934b45abd4f 100644 --- a/pkg/volume/vsphere_volume/attacher.go +++ b/pkg/volume/vsphere_volume/attacher.go @@ -22,9 +22,9 @@ import ( "path" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" @@ 
-76,7 +76,7 @@ func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.No return "", err } - glog.V(4).Infof("vSphere: Attach disk called for node %s", nodeName) + klog.V(4).Infof("vSphere: Attach disk called for node %s", nodeName) // Keeps concurrent attach operations to same host atomic attachdetachMutex.LockKey(string(nodeName)) @@ -86,7 +86,7 @@ func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.No // succeeds in that case, so no need to do that separately. diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyName, nodeName) if err != nil { - glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.VolumePath, nodeName, err) + klog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.VolumePath, nodeName, err) return "", err } @@ -94,14 +94,14 @@ func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.No } func (attacher *vsphereVMDKAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { - glog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for vSphere", nodeName) + klog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for vSphere", nodeName) volumeNodeMap := map[types.NodeName][]*volume.Spec{ nodeName: specs, } nodeVolumesResult := make(map[*volume.Spec]bool) nodesVerificationMap, err := attacher.BulkVerifyVolumes(volumeNodeMap) if err != nil { - glog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err) + klog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err) return nodeVolumesResult, err } if result, ok := nodesVerificationMap[nodeName]; ok { @@ -119,7 +119,7 @@ func (attacher *vsphereVMDKAttacher) BulkVerifyVolumes(volumesByNode map[types.N for _, volumeSpec := range volumeSpecs { volumeSource, _, err := getVolumeSource(volumeSpec) if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err) + klog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err) continue } volPath := volumeSource.VolumePath @@ -135,7 +135,7 @@ func (attacher *vsphereVMDKAttacher) BulkVerifyVolumes(volumesByNode map[types.N } attachedResult, err := attacher.vsphereVolumes.DisksAreAttached(volumePathsByNode) if err != nil { - glog.Errorf("Error checking if volumes are attached to nodes: %+v. err: %v", volumePathsByNode, err) + klog.Errorf("Error checking if volumes are attached to nodes: %+v. err: %v", volumePathsByNode, err) return volumesAttachedCheck, err } @@ -169,14 +169,14 @@ func (attacher *vsphereVMDKAttacher) WaitForAttach(spec *volume.Spec, devicePath for { select { case <-ticker.C: - glog.V(5).Infof("Checking VMDK %q is attached", volumeSource.VolumePath) + klog.V(5).Infof("Checking VMDK %q is attached", volumeSource.VolumePath) path, err := verifyDevicePath(devicePath) if err != nil { // Log error, if any, and continue checking periodically. 
See issue #11321 - glog.Warningf("Error verifying VMDK (%q) is attached: %v", volumeSource.VolumePath, err) + klog.Warningf("Error verifying VMDK (%q) is attached: %v", volumeSource.VolumePath, err) } else if path != "" { // A device path has successfully been created for the VMDK - glog.Infof("Successfully found attached VMDK %q.", volumeSource.VolumePath) + klog.Infof("Successfully found attached VMDK %q.", volumeSource.VolumePath) return path, nil } case <-timer.C: @@ -210,7 +210,7 @@ func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath s if err != nil { if os.IsNotExist(err) { if err := os.MkdirAll(deviceMountPath, 0750); err != nil { - glog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err) + klog.Errorf("Failed to create directory at %#v. err: %s", deviceMountPath, err) return err } notMnt = true @@ -234,7 +234,7 @@ func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath s os.Remove(deviceMountPath) return err } - glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options) + klog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options) } return nil } @@ -271,21 +271,21 @@ func (detacher *vsphereVMDKDetacher) Detach(volumeName string, nodeName types.No attached, err := detacher.vsphereVolumes.DiskIsAttached(volPath, nodeName) if err != nil { // Log error and continue with detach - glog.Errorf( + klog.Errorf( "Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v", volPath, nodeName, err) } if err == nil && !attached { // Volume is already detached from node. - glog.Infof("detach operation was successful. volume %q is already detached from node %q.", volPath, nodeName) + klog.Infof("detach operation was successful. 
volume %q is already detached from node %q.", volPath, nodeName) return nil } attachdetachMutex.LockKey(string(nodeName)) defer attachdetachMutex.UnlockKey(string(nodeName)) if err := detacher.vsphereVolumes.DetachDisk(volPath, nodeName); err != nil { - glog.Errorf("Error detaching volume %q: %v", volPath, err) + klog.Errorf("Error detaching volume %q: %v", volPath, err) return err } return nil diff --git a/pkg/volume/vsphere_volume/attacher_test.go b/pkg/volume/vsphere_volume/attacher_test.go index 5922c00eef72e..08a59f593c4ee 100644 --- a/pkg/volume/vsphere_volume/attacher_test.go +++ b/pkg/volume/vsphere_volume/attacher_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" - "github.com/golang/glog" + "k8s.io/klog" ) func TestGetDeviceName_Volume(t *testing.T) { @@ -253,7 +253,7 @@ func (testcase *testcase) AttachDisk(diskName string, storagePolicyName string, return "", errors.New("Unexpected AttachDisk call: wrong nodeName") } - glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceUUID, expected.ret) + klog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceUUID, expected.ret) return expected.retDeviceUUID, expected.ret } @@ -278,7 +278,7 @@ func (testcase *testcase) DetachDisk(diskName string, nodeName types.NodeName) e return errors.New("Unexpected DetachDisk call: wrong nodeName") } - glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", diskName, nodeName, expected.ret) + klog.V(4).Infof("DetachDisk call: %s, %s, returning %v", diskName, nodeName, expected.ret) return expected.ret } @@ -303,7 +303,7 @@ func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeNam return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName") } - glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret) + klog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret) return expected.isAttached, expected.ret } diff --git a/pkg/volume/vsphere_volume/vsphere_volume.go b/pkg/volume/vsphere_volume/vsphere_volume.go index 07c9ff47d8709..7097005ef9e16 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/pkg/volume/vsphere_volume/vsphere_volume.go @@ -22,12 +22,12 @@ import ( "path" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/klog" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" @@ -146,7 +146,7 @@ func (plugin *vsphereVolumePlugin) ConstructVolumeSpec(volumeName, mountPath str return nil, err } volumePath = strings.Replace(volumePath, "\\040", " ", -1) - glog.V(5).Infof("vSphere volume path is %q", volumePath) + klog.V(5).Infof("vSphere volume path is %q", volumePath) vsphereVolume := &v1.Volume{ Name: volumeName, VolumeSource: v1.VolumeSource{ @@ -216,21 +216,21 @@ func (b *vsphereVolumeMounter) CanMount() error { // SetUp attaches the disk and bind mounts to the volume path. func (b *vsphereVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { - glog.V(5).Infof("vSphere volume setup %s to %s", b.volPath, dir) + klog.V(5).Infof("vSphere volume setup %s to %s", b.volPath, dir) // TODO: handle failed mounts here. 
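// Note: unlike glog, klog does not register its logging flags on
// flag.CommandLine from an init() function, so binaries and test
// packages that want -v / -vmodule control must wire the flags in
// explicitly. A minimal sketch for a test file like attacher_test.go
// above (the init func below is illustrative, not part of this change):
//
//	func init() {
//		klog.InitFlags(nil) // nil registers klog's flags on flag.CommandLine
//	}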
notmnt, err := b.mounter.IsLikelyNotMountPoint(dir) if err != nil && !os.IsNotExist(err) { - glog.V(4).Infof("IsLikelyNotMountPoint failed: %v", err) + klog.V(4).Infof("IsLikelyNotMountPoint failed: %v", err) return err } if !notmnt { - glog.V(4).Infof("Something is already mounted to target %s", dir) + klog.V(4).Infof("Something is already mounted to target %s", dir) return nil } if err := os.MkdirAll(dir, 0750); err != nil { - glog.V(4).Infof("Could not create directory %s: %v", dir, err) + klog.V(4).Infof("Could not create directory %s: %v", dir, err) return err } @@ -243,21 +243,21 @@ func (b *vsphereVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { if err != nil { notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notmnt { if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) + klog.Errorf("Failed to unmount: %v", mntErr) return err } notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) + klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) return err } if !notmnt { - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath()) + klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath()) return err } } @@ -265,7 +265,7 @@ func (b *vsphereVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } volume.SetVolumeOwnership(b, fsGroup) - glog.V(3).Infof("vSphere volume %s mounted to %s", b.volPath, dir) + klog.V(3).Infof("vSphere volume %s mounted to %s", b.volPath, dir) return nil } @@ -372,7 +372,7 @@ func (v *vsphereVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopol if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { volumeMode = v.options.PVC.Spec.VolumeMode if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock { - glog.V(5).Infof("vSphere block volume should not have any FSType") + klog.V(5).Infof("vSphere block volume should not have any FSType") volSpec.Fstype = "" } } diff --git a/pkg/volume/vsphere_volume/vsphere_volume_block.go b/pkg/volume/vsphere_volume/vsphere_volume_block.go index bb01da9af1925..40342b8aacdf2 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume_block.go +++ b/pkg/volume/vsphere_volume/vsphere_volume_block.go @@ -22,9 +22,9 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/mount" kstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" @@ -40,10 +40,10 @@ func (plugin *vsphereVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, vo blkUtil := volumepathhandler.NewBlockVolumePathHandler() globalMapPathUUID, err := blkUtil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID) if err != nil { - glog.Errorf("Failed to find GlobalMapPathUUID from Pod: %s with error: %+v", podUID, err) + klog.Errorf("Failed to find GlobalMapPathUUID from Pod: %s with error: %+v", podUID, err) return nil, err } - glog.V(5).Infof("globalMapPathUUID: %v", globalMapPathUUID) + klog.V(5).Infof("globalMapPathUUID: %v", globalMapPathUUID) globalMapPath := filepath.Dir(globalMapPathUUID) if len(globalMapPath) <= 1 { return nil, fmt.Errorf("failed to get volume plugin information from 
globalMapPathUUID: %v", globalMapPathUUID) @@ -88,7 +88,7 @@ func (plugin *vsphereVolumePlugin) NewBlockVolumeMapper(spec *volume.Spec, pod * func (plugin *vsphereVolumePlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.BlockVolumeMapper, error) { volumeSource, _, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Failed to get Volume source from volume Spec: %+v with error: %+v", *spec, err) + klog.Errorf("Failed to get Volume source from volume Spec: %+v with error: %+v", *spec, err) return nil, err } volPath := volumeSource.VolumePath diff --git a/pkg/volume/vsphere_volume/vsphere_volume_util.go b/pkg/volume/vsphere_volume/vsphere_volume_util.go index 0603b36e55292..a5880ea7fbf88 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume_util.go +++ b/pkg/volume/vsphere_volume/vsphere_volume_util.go @@ -23,9 +23,9 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/volume" @@ -114,10 +114,10 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec volumeOptions.Datastore = value case volume.VolumeParameterFSType: fstype = value - glog.V(4).Infof("Setting fstype as %q", fstype) + klog.V(4).Infof("Setting fstype as %q", fstype) case StoragePolicyName: volumeOptions.StoragePolicyName = value - glog.V(4).Infof("Setting StoragePolicyName as %q", volumeOptions.StoragePolicyName) + klog.V(4).Infof("Setting StoragePolicyName as %q", volumeOptions.StoragePolicyName) case HostFailuresToTolerateCapability, ForceProvisioningCapability, CacheReservationCapability, DiskStripesCapability, ObjectSpaceReservationCapability, IopsLimitCapability: @@ -137,7 +137,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec } volumeOptions.VSANStorageProfileData = "(" + volumeOptions.VSANStorageProfileData + ")" } - glog.V(4).Infof("VSANStorageProfileData in vsphere volume %q", volumeOptions.VSANStorageProfileData) + klog.V(4).Infof("VSANStorageProfileData in vsphere volume %q", volumeOptions.VSANStorageProfileData) // TODO: implement PVC.Selector parsing if v.options.PVC.Spec.Selector != nil { return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere") @@ -154,7 +154,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec StoragePolicyName: volumeOptions.StoragePolicyName, StoragePolicyID: volumeOptions.StoragePolicyID, } - glog.V(2).Infof("Successfully created vsphere volume %s", name) + klog.V(2).Infof("Successfully created vsphere volume %s", name) return volSpec, nil } @@ -166,10 +166,10 @@ func (util *VsphereDiskUtil) DeleteVolume(vd *vsphereVolumeDeleter) error { } if err = cloud.DeleteVolume(vd.volPath); err != nil { - glog.V(2).Infof("Error deleting vsphere volume %s: %v", vd.volPath, err) + klog.V(2).Infof("Error deleting vsphere volume %s: %v", vd.volPath, err) return err } - glog.V(2).Infof("Successfully deleted vsphere volume %s", vd.volPath) + klog.V(2).Infof("Successfully deleted vsphere volume %s", vd.volPath) return nil } @@ -184,7 +184,7 @@ func getVolPathfromVolumeName(deviceMountPath string) string { func getCloudProvider(cloud cloudprovider.Interface) (*vsphere.VSphere, error) { if cloud == nil { - glog.Errorf("Cloud provider not initialized properly") + klog.Errorf("Cloud 
provider not initialized properly") return nil, errors.New("Cloud provider not initialized properly") } diff --git a/pkg/windows/service/BUILD b/pkg/windows/service/BUILD index 3e9cf1d3bdc61..e64c40488b43e 100644 --- a/pkg/windows/service/BUILD +++ b/pkg/windows/service/BUILD @@ -11,9 +11,9 @@ go_library( importpath = "k8s.io/kubernetes/pkg/windows/service", deps = select({ "@io_bazel_rules_go//go/platform:windows": [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/sys/windows:go_default_library", "//vendor/golang.org/x/sys/windows/svc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], }), diff --git a/pkg/windows/service/service.go b/pkg/windows/service/service.go index acc48246f1e7b..a5bffa1822e1b 100644 --- a/pkg/windows/service/service.go +++ b/pkg/windows/service/service.go @@ -21,7 +21,7 @@ package service import ( "os" - "github.com/golang/glog" + "k8s.io/klog" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc" @@ -57,7 +57,7 @@ func InitService(serviceName string) error { if err != nil { return err } - glog.Infof("Running %s as a Windows service!", serviceName) + klog.Infof("Running %s as a Windows service!", serviceName) return nil } @@ -67,7 +67,7 @@ func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.S h.fromsvc <- nil s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)} - glog.Infof("Service running") + klog.Infof("Service running") Loop: for { select { diff --git a/plugin/pkg/admission/admit/BUILD b/plugin/pkg/admission/admit/BUILD index 7fe5dcf695cf1..bd93027e33c5e 100644 --- a/plugin/pkg/admission/admit/BUILD +++ b/plugin/pkg/admission/admit/BUILD @@ -12,7 +12,7 @@ go_library( importpath = "k8s.io/kubernetes/plugin/pkg/admission/admit", deps = [ "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/admission/admit/admission.go b/plugin/pkg/admission/admit/admission.go index 863184d32e99a..867bbcdae4dcb 100644 --- a/plugin/pkg/admission/admit/admission.go +++ b/plugin/pkg/admission/admit/admission.go @@ -19,8 +19,8 @@ package admit import ( "io" - "github.com/golang/glog" "k8s.io/apiserver/pkg/admission" + "k8s.io/klog" ) // PluginName indicates name of admission plugin. @@ -58,7 +58,7 @@ func (alwaysAdmit) Handles(operation admission.Operation) bool { // NewAlwaysAdmit creates a new always admit admission handler func NewAlwaysAdmit() admission.Interface { // DEPRECATED: AlwaysAdmit admit all admission request, it is no use. - glog.Warningf("%s admission controller is deprecated. "+ + klog.Warningf("%s admission controller is deprecated. 
"+ "Please remove this controller from your configuration files and scripts.", PluginName) return new(alwaysAdmit) } diff --git a/plugin/pkg/admission/deny/BUILD b/plugin/pkg/admission/deny/BUILD index 989df96ae8b18..cdbe0e929ad3d 100644 --- a/plugin/pkg/admission/deny/BUILD +++ b/plugin/pkg/admission/deny/BUILD @@ -12,7 +12,7 @@ go_library( importpath = "k8s.io/kubernetes/plugin/pkg/admission/deny", deps = [ "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/admission/deny/admission.go b/plugin/pkg/admission/deny/admission.go index 386cb78b1e257..bf484590cbe66 100644 --- a/plugin/pkg/admission/deny/admission.go +++ b/plugin/pkg/admission/deny/admission.go @@ -20,7 +20,7 @@ import ( "errors" "io" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apiserver/pkg/admission" ) @@ -60,7 +60,7 @@ func (alwaysDeny) Handles(operation admission.Operation) bool { // NewAlwaysDeny creates an always deny admission handler func NewAlwaysDeny() admission.Interface { // DEPRECATED: AlwaysDeny denys all admission request, it is no use. - glog.Warningf("%s admission controller is deprecated. "+ + klog.Warningf("%s admission controller is deprecated. "+ "Please remove this controller from your configuration files and scripts.", PluginName) return new(alwaysDeny) } diff --git a/plugin/pkg/admission/imagepolicy/BUILD b/plugin/pkg/admission/imagepolicy/BUILD index ea26d351e7f5b..fb8c1c55e5429 100644 --- a/plugin/pkg/admission/imagepolicy/BUILD +++ b/plugin/pkg/admission/imagepolicy/BUILD @@ -26,7 +26,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/admission/imagepolicy/admission.go b/plugin/pkg/admission/imagepolicy/admission.go index e37d272bfa1e8..cfcb70a0472e2 100644 --- a/plugin/pkg/admission/imagepolicy/admission.go +++ b/plugin/pkg/admission/imagepolicy/admission.go @@ -26,7 +26,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/imagepolicy/v1alpha1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -110,7 +110,7 @@ func (a *Plugin) filterAnnotations(allAnnotations map[string]string) map[string] // Function to call on webhook failure; behavior determined by defaultAllow flag func (a *Plugin) webhookError(pod *api.Pod, attributes admission.Attributes, err error) error { if err != nil { - glog.V(2).Infof("error contacting webhook backend: %s", err) + klog.V(2).Infof("error contacting webhook backend: %s", err) if a.defaultAllow { attributes.AddAnnotation(AuditKeyPrefix+ImagePolicyFailedOpenKeySuffix, "true") // TODO(wteiken): Remove the annotation code for the 1.13 release @@ -121,10 +121,10 @@ func (a *Plugin) webhookError(pod *api.Pod, attributes admission.Attributes, err annotations[api.ImagePolicyFailedOpenKey] = "true" pod.ObjectMeta.SetAnnotations(annotations) - glog.V(2).Infof("resource allowed in spite of webhook backend failure") + klog.V(2).Infof("resource allowed in spite of webhook backend failure") return nil } - glog.V(2).Infof("resource not allowed due to webhook backend failure ") + klog.V(2).Infof("resource not allowed due to webhook backend failure ") return admission.NewForbidden(attributes, err) } return nil @@ -194,7 
+194,7 @@ func (a *Plugin) admitPod(pod *api.Pod, attributes admission.Attributes, review for k, v := range review.Status.AuditAnnotations { if err := attributes.AddAnnotation(AuditKeyPrefix+k, v); err != nil { - glog.Warningf("failed to set admission audit annotation %s to %s: %v", AuditKeyPrefix+k, v, err) + klog.Warningf("failed to set admission audit annotation %s to %s: %v", AuditKeyPrefix+k, v, err) } } if !review.Status.Allowed { diff --git a/plugin/pkg/admission/imagepolicy/config.go b/plugin/pkg/admission/imagepolicy/config.go index df34a4906b408..cbe2ece4a314d 100644 --- a/plugin/pkg/admission/imagepolicy/config.go +++ b/plugin/pkg/admission/imagepolicy/config.go @@ -22,7 +22,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -72,13 +72,13 @@ func normalizeWebhookConfig(config *imagePolicyWebhookConfig) (err error) { func normalizeConfigDuration(name string, scale, value, min, max, defaultValue time.Duration) (time.Duration, error) { // disable with -1 sentinel if value == disableTTL { - glog.V(2).Infof("image policy webhook %s disabled", name) + klog.V(2).Infof("image policy webhook %s disabled", name) return time.Duration(0), nil } // use default with 0 sentinel if value == useDefault { - glog.V(2).Infof("image policy webhook %s using default value", name) + klog.V(2).Infof("image policy webhook %s using default value", name) return defaultValue, nil } diff --git a/plugin/pkg/admission/podnodeselector/BUILD b/plugin/pkg/admission/podnodeselector/BUILD index c7ef7642951c8..0144ccd207d1c 100644 --- a/plugin/pkg/admission/podnodeselector/BUILD +++ b/plugin/pkg/admission/podnodeselector/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/admission/podnodeselector/admission.go b/plugin/pkg/admission/podnodeselector/admission.go index 8dc1daa66c66d..1d502a0de1201 100644 --- a/plugin/pkg/admission/podnodeselector/admission.go +++ b/plugin/pkg/admission/podnodeselector/admission.go @@ -21,7 +21,7 @@ import ( "io" "reflect" - "github.com/golang/glog" + "k8s.io/klog" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -190,7 +190,7 @@ func shouldIgnore(a admission.Attributes) bool { _, ok := a.GetObject().(*api.Pod) if !ok { - glog.Errorf("expected pod but got %s", a.GetKind().Kind) + klog.Errorf("expected pod but got %s", a.GetKind().Kind) return true } diff --git a/plugin/pkg/admission/podpreset/BUILD b/plugin/pkg/admission/podpreset/BUILD index c27fcc83eb0d6..b3a3b6a4bffe3 100644 --- a/plugin/pkg/admission/podpreset/BUILD +++ b/plugin/pkg/admission/podpreset/BUILD @@ -42,7 +42,7 @@ go_library( "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/listers/settings/v1alpha1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/admission/podpreset/admission.go b/plugin/pkg/admission/podpreset/admission.go index d91bdd78896ff..e86f4511c4ddd 100644 --- a/plugin/pkg/admission/podpreset/admission.go +++ b/plugin/pkg/admission/podpreset/admission.go @@ -22,7 +22,7 @@ import ( "reflect" "strings" - "github.com/golang/glog" + "k8s.io/klog" 
settingsv1alpha1 "k8s.io/api/settings/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" @@ -108,7 +108,7 @@ func (c *podPresetPlugin) Admit(a admission.Attributes) error { // Ignore if exclusion annotation is present if podAnnotations := pod.GetAnnotations(); podAnnotations != nil { - glog.V(5).Infof("Looking at pod annotations, found: %v", podAnnotations) + klog.V(5).Infof("Looking at pod annotations, found: %v", podAnnotations) if podAnnotations[api.PodPresetOptOutAnnotationKey] == "true" { return nil } @@ -137,14 +137,14 @@ func (c *podPresetPlugin) Admit(a admission.Attributes) error { err = safeToApplyPodPresetsOnPod(pod, matchingPPs) if err != nil { // conflict, ignore the error, but raise an event - glog.Warningf("conflict occurred while applying podpresets: %s on pod: %v err: %v", + klog.Warningf("conflict occurred while applying podpresets: %s on pod: %v err: %v", strings.Join(presetNames, ","), pod.GetGenerateName(), err) return nil } applyPodPresetsOnPod(pod, matchingPPs) - glog.Infof("applied podpresets: %s successfully on Pod: %+v ", strings.Join(presetNames, ","), pod.GetGenerateName()) + klog.Infof("applied podpresets: %s successfully on Pod: %+v ", strings.Join(presetNames, ","), pod.GetGenerateName()) return nil } @@ -163,7 +163,7 @@ func filterPodPresets(list []*settingsv1alpha1.PodPreset, pod *api.Pod) ([]*sett if !selector.Matches(labels.Set(pod.Labels)) { continue } - glog.V(4).Infof("PodPreset %s matches pod %s labels", pp.GetName(), pod.GetName()) + klog.V(4).Infof("PodPreset %s matches pod %s labels", pp.GetName(), pod.GetName()) matchingPPs = append(matchingPPs, pp) } return matchingPPs, nil diff --git a/plugin/pkg/admission/podtolerationrestriction/BUILD b/plugin/pkg/admission/podtolerationrestriction/BUILD index d5652163389aa..f547df30e9de6 100644 --- a/plugin/pkg/admission/podtolerationrestriction/BUILD +++ b/plugin/pkg/admission/podtolerationrestriction/BUILD @@ -55,7 +55,7 @@ go_library( "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/admission/podtolerationrestriction/admission.go b/plugin/pkg/admission/podtolerationrestriction/admission.go index 6e75d8dc2bc2f..35823f5a05825 100644 --- a/plugin/pkg/admission/podtolerationrestriction/admission.go +++ b/plugin/pkg/admission/podtolerationrestriction/admission.go @@ -21,7 +21,7 @@ import ( "fmt" "io" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1" @@ -188,7 +188,7 @@ func shouldIgnore(a admission.Attributes) bool { obj := a.GetObject() _, ok := obj.(*api.Pod) if !ok { - glog.Errorf("expected pod but got %s", a.GetKind().Kind) + klog.Errorf("expected pod but got %s", a.GetKind().Kind) return true } diff --git a/plugin/pkg/admission/priority/BUILD b/plugin/pkg/admission/priority/BUILD index 513cc84def314..0649ae1d69721 100644 --- a/plugin/pkg/admission/priority/BUILD +++ b/plugin/pkg/admission/priority/BUILD @@ -22,7 +22,7 @@ go_test( "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git 
a/plugin/pkg/admission/priority/admission_test.go b/plugin/pkg/admission/priority/admission_test.go index ef1c43c2a114b..f3785f5763081 100644 --- a/plugin/pkg/admission/priority/admission_test.go +++ b/plugin/pkg/admission/priority/admission_test.go @@ -20,7 +20,7 @@ import ( "fmt" "testing" - "github.com/golang/glog" + "k8s.io/klog" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -138,7 +138,7 @@ func TestPriorityClassAdmission(t *testing.T) { } for _, test := range tests { - glog.V(4).Infof("starting test %q", test.name) + klog.V(4).Infof("starting test %q", test.name) ctrl := newPlugin() // Add existing priority classes. @@ -159,7 +159,7 @@ func TestPriorityClassAdmission(t *testing.T) { test.userInfo, ) err := ctrl.Validate(attrs) - glog.Infof("Got %v", err) + klog.Infof("Got %v", err) if err != nil && !test.expectError { t.Errorf("Test %q: unexpected error received: %v", test.name, err) } @@ -239,7 +239,7 @@ func TestDefaultPriority(t *testing.T) { } for _, test := range tests { - glog.V(4).Infof("starting test %q", test.name) + klog.V(4).Infof("starting test %q", test.name) ctrl := newPlugin() if err := addPriorityClasses(ctrl, test.classesBefore); err != nil { t.Errorf("Test %q: unable to add object to informer: %v", test.name, err) @@ -582,7 +582,7 @@ func TestPodAdmission(t *testing.T) { } for _, test := range tests { - glog.V(4).Infof("starting test %q", test.name) + klog.V(4).Infof("starting test %q", test.name) ctrl := newPlugin() // Add existing priority classes. @@ -604,7 +604,7 @@ func TestPodAdmission(t *testing.T) { nil, ) err := ctrl.Admit(attrs) - glog.Infof("Got %v", err) + klog.Infof("Got %v", err) if !test.expectError { if err != nil { t.Errorf("Test %q: unexpected error received: %v", test.name, err) diff --git a/plugin/pkg/admission/resourcequota/BUILD b/plugin/pkg/admission/resourcequota/BUILD index e638b0691a07b..0920987180e8d 100644 --- a/plugin/pkg/admission/resourcequota/BUILD +++ b/plugin/pkg/admission/resourcequota/BUILD @@ -44,8 +44,8 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/hashicorp/golang-lru:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/admission/resourcequota/controller.go b/plugin/pkg/admission/resourcequota/controller.go index 34c621903b9bd..13f46d1d5c965 100644 --- a/plugin/pkg/admission/resourcequota/controller.go +++ b/plugin/pkg/admission/resourcequota/controller.go @@ -23,7 +23,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -143,7 +143,7 @@ func (e *quotaEvaluator) run() { go wait.Until(e.doWork, time.Second, e.stopCh) } <-e.stopCh - glog.Infof("Shutting down quota evaluator") + klog.Infof("Shutting down quota evaluator") e.queue.ShutDown() } @@ -162,7 +162,7 @@ func (e *quotaEvaluator) doWork() { } for { if quit := workFunc(); quit { - glog.Infof("quota evaluator worker shutdown") + klog.Infof("quota evaluator worker shutdown") return } } @@ -379,7 +379,7 @@ func getMatchedLimitedScopes(evaluator quota.Evaluator, inputObject runtime.Obje for _, limitedResource := range limitedResources { matched, err := evaluator.MatchingScopes(inputObject, limitedResource.MatchScopes) if err != nil { - 
glog.Errorf("Error while matching limited Scopes: %v", err) + klog.Errorf("Error while matching limited Scopes: %v", err) return []corev1.ScopedResourceSelectorRequirement{}, err } for _, scope := range matched { @@ -450,7 +450,7 @@ func CheckRequest(quotas []corev1.ResourceQuota, a admission.Attributes, evaluat match, err := evaluator.Matches(&resourceQuota, inputObject) if err != nil { - glog.Errorf("Error occurred while matching resource quota, %v, against input object. Err: %v", resourceQuota, err) + klog.Errorf("Error occurred while matching resource quota, %v, against input object. Err: %v", resourceQuota, err) return quotas, err } if !match { @@ -605,7 +605,7 @@ func (e *quotaEvaluator) Evaluate(a admission.Attributes) error { // note, we do not need aggregate usage here, so we pass a nil informer func evaluator = generic.NewObjectCountEvaluator(gr, nil, "") e.registry.Add(evaluator) - glog.Infof("quota admission added evaluator for: %s", gr) + klog.Infof("quota admission added evaluator for: %s", gr) } // for this kind, check if the operation could mutate any quota resources // if no resources tracked by quota are impacted, then just return diff --git a/plugin/pkg/admission/security/podsecuritypolicy/BUILD b/plugin/pkg/admission/security/podsecuritypolicy/BUILD index 673c7fd666cfa..6480ddde4dd3b 100644 --- a/plugin/pkg/admission/security/podsecuritypolicy/BUILD +++ b/plugin/pkg/admission/security/podsecuritypolicy/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/admission/security/podsecuritypolicy/admission.go b/plugin/pkg/admission/security/podsecuritypolicy/admission.go index 6c4ac7e25f0a4..f980b93221265 100644 --- a/plugin/pkg/admission/security/podsecuritypolicy/admission.go +++ b/plugin/pkg/admission/security/podsecuritypolicy/admission.go @@ -22,7 +22,7 @@ import ( "sort" "strings" - "github.com/golang/glog" + "k8s.io/klog" policyv1beta1 "k8s.io/api/policy/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" @@ -132,20 +132,20 @@ func (c *PodSecurityPolicyPlugin) Admit(a admission.Attributes) error { if allowedPod != nil { *pod = *allowedPod // annotate and accept the pod - glog.V(4).Infof("pod %s (generate: %s) in namespace %s validated against provider %s", pod.Name, pod.GenerateName, a.GetNamespace(), pspName) + klog.V(4).Infof("pod %s (generate: %s) in namespace %s validated against provider %s", pod.Name, pod.GenerateName, a.GetNamespace(), pspName) if pod.ObjectMeta.Annotations == nil { pod.ObjectMeta.Annotations = map[string]string{} } pod.ObjectMeta.Annotations[psputil.ValidatedPSPAnnotation] = pspName key := auditKeyPrefix + "/" + "admit-policy" if err := a.AddAnnotation(key, pspName); err != nil { - glog.Warningf("failed to set admission audit annotation %s to %s: %v", key, pspName, err) + klog.Warningf("failed to set admission audit annotation %s to %s: %v", key, pspName, err) } return nil } // we didn't validate against any provider, reject the pod and give the errors for each attempt - glog.V(4).Infof("unable to validate pod %s (generate: %s) in namespace %s against any pod security policy: %v", pod.Name, pod.GenerateName, a.GetNamespace(), validationErrs) + klog.V(4).Infof("unable to validate pod %s (generate: %s) in 
namespace %s against any pod security policy: %v", pod.Name, pod.GenerateName, a.GetNamespace(), validationErrs) return admission.NewForbidden(a, fmt.Errorf("unable to validate against any pod security policy: %v", validationErrs)) } @@ -166,13 +166,13 @@ func (c *PodSecurityPolicyPlugin) Validate(a admission.Attributes) error { if apiequality.Semantic.DeepEqual(pod, allowedPod) { key := auditKeyPrefix + "/" + "validate-policy" if err := a.AddAnnotation(key, pspName); err != nil { - glog.Warningf("failed to set admission audit annotation %s to %s: %v", key, pspName, err) + klog.Warningf("failed to set admission audit annotation %s to %s: %v", key, pspName, err) } return nil } // we didn't validate against any provider, reject the pod and give the errors for each attempt - glog.V(4).Infof("unable to validate pod %s (generate: %s) in namespace %s against any pod security policy: %v", pod.Name, pod.GenerateName, a.GetNamespace(), validationErrs) + klog.V(4).Infof("unable to validate pod %s (generate: %s) in namespace %s against any pod security policy: %v", pod.Name, pod.GenerateName, a.GetNamespace(), validationErrs) return admission.NewForbidden(a, fmt.Errorf("unable to validate against any pod security policy: %v", validationErrs)) } @@ -207,7 +207,7 @@ func shouldIgnore(a admission.Attributes) (bool, error) { // saved in kubernetes.io/psp annotation. This psp is usually the one we are looking for. func (c *PodSecurityPolicyPlugin) computeSecurityContext(a admission.Attributes, pod *api.Pod, specMutationAllowed bool, validatedPSPHint string) (*api.Pod, string, field.ErrorList, error) { // get all constraints that are usable by the user - glog.V(4).Infof("getting pod security policies for pod %s (generate: %s)", pod.Name, pod.GenerateName) + klog.V(4).Infof("getting pod security policies for pod %s (generate: %s)", pod.Name, pod.GenerateName) var saInfo user.Info if len(pod.Spec.ServiceAccountName) > 0 { saInfo = serviceaccount.UserInfo(a.GetNamespace(), pod.Spec.ServiceAccountName, "") @@ -241,7 +241,7 @@ func (c *PodSecurityPolicyPlugin) computeSecurityContext(a admission.Attributes, providers, errs := c.createProvidersFromPolicies(policies, pod.Namespace) for _, err := range errs { - glog.V(4).Infof("provider creation error: %v", err) + klog.V(4).Infof("provider creation error: %v", err) } if len(providers) == 0 { @@ -379,7 +379,7 @@ func authorizedForPolicyInAPIGroup(info user.Info, namespace, policyName, apiGro attr := buildAttributes(info, namespace, policyName, apiGroupName) decision, reason, err := authz.Authorize(attr) if err != nil { - glog.V(5).Infof("cannot authorize for policy: %v,%v", reason, err) + klog.V(5).Infof("cannot authorize for policy: %v,%v", reason, err) } return (decision == authorizer.DecisionAllow) } diff --git a/plugin/pkg/admission/storage/persistentvolume/label/BUILD b/plugin/pkg/admission/storage/persistentvolume/label/BUILD index 5f504ab267ce0..318777c214edc 100644 --- a/plugin/pkg/admission/storage/persistentvolume/label/BUILD +++ b/plugin/pkg/admission/storage/persistentvolume/label/BUILD @@ -26,7 +26,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/admission/storage/persistentvolume/label/admission.go b/plugin/pkg/admission/storage/persistentvolume/label/admission.go 
index a1683e3d297e3..db291c86f44fa 100644 --- a/plugin/pkg/admission/storage/persistentvolume/label/admission.go +++ b/plugin/pkg/admission/storage/persistentvolume/label/admission.go @@ -22,10 +22,10 @@ import ( "io" "sync" - "github.com/golang/glog" "k8s.io/apiserver/pkg/admission" utilfeature "k8s.io/apiserver/pkg/util/feature" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" @@ -73,7 +73,7 @@ func newPersistentVolumeLabel() *persistentVolumeLabel { // DEPRECATED: cloud-controller-manager will now start NewPersistentVolumeLabelController // which does exactly what this admission controller used to do. So once GCE, AWS and AZURE can // run externally, we can remove this admission controller. - glog.Warning("PersistentVolumeLabel admission controller is deprecated. " + + klog.Warning("PersistentVolumeLabel admission controller is deprecated. " + "Please remove this controller from your configuration files and scripts.") return &persistentVolumeLabel{ Handler: admission.NewHandler(admission.Create), @@ -170,7 +170,7 @@ func (l *persistentVolumeLabel) Admit(a admission.Attributes) (err error) { volume.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]api.NodeSelectorTerm, 1) } if nodeSelectorRequirementKeysExistInNodeSelectorTerms(requirements, volume.Spec.NodeAffinity.Required.NodeSelectorTerms) { - glog.V(4).Infof("NodeSelectorRequirements for cloud labels %v conflict with existing NodeAffinity %v. Skipping addition of NodeSelectorRequirements for cloud labels.", + klog.V(4).Infof("NodeSelectorRequirements for cloud labels %v conflict with existing NodeAffinity %v. Skipping addition of NodeSelectorRequirements for cloud labels.", requirements, volume.Spec.NodeAffinity) } else { for _, req := range requirements { diff --git a/plugin/pkg/admission/storage/storageclass/setdefault/BUILD b/plugin/pkg/admission/storage/storageclass/setdefault/BUILD index ca19332f2fa41..78f92102b00b0 100644 --- a/plugin/pkg/admission/storage/storageclass/setdefault/BUILD +++ b/plugin/pkg/admission/storage/storageclass/setdefault/BUILD @@ -21,7 +21,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/admission/initializer:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -37,7 +37,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/admission/storage/storageclass/setdefault/admission.go b/plugin/pkg/admission/storage/storageclass/setdefault/admission.go index f90ac56d08017..98610ae060ef4 100644 --- a/plugin/pkg/admission/storage/storageclass/setdefault/admission.go +++ b/plugin/pkg/admission/storage/storageclass/setdefault/admission.go @@ -20,7 +20,7 @@ import ( "fmt" "io" - "github.com/golang/glog" + "k8s.io/klog" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -105,7 +105,7 @@ func (a *claimDefaulterPlugin) Admit(attr admission.Attributes) error { return nil } - glog.V(4).Infof("no storage class for claim %s (generate: %s)", pvc.Name, 
pvc.GenerateName) + klog.V(4).Infof("no storage class for claim %s (generate: %s)", pvc.Name, pvc.GenerateName) def, err := getDefaultClass(a.lister) if err != nil { @@ -116,7 +116,7 @@ func (a *claimDefaulterPlugin) Admit(attr admission.Attributes) error { return nil } - glog.V(4).Infof("defaulting storage class for claim %s (generate: %s) to %s", pvc.Name, pvc.GenerateName, def.Name) + klog.V(4).Infof("defaulting storage class for claim %s (generate: %s) to %s", pvc.Name, pvc.GenerateName, def.Name) pvc.Spec.StorageClassName = &def.Name return nil } @@ -132,7 +132,7 @@ func getDefaultClass(lister storagev1listers.StorageClassLister) (*storagev1.Sto for _, class := range list { if storageutil.IsDefaultAnnotation(class.ObjectMeta) { defaultClasses = append(defaultClasses, class) - glog.V(4).Infof("getDefaultClass added: %s", class.Name) + klog.V(4).Infof("getDefaultClass added: %s", class.Name) } } @@ -140,7 +140,7 @@ func getDefaultClass(lister storagev1listers.StorageClassLister) (*storagev1.Sto return nil, nil } if len(defaultClasses) > 1 { - glog.V(4).Infof("getDefaultClass %d defaults found", len(defaultClasses)) + klog.V(4).Infof("getDefaultClass %d defaults found", len(defaultClasses)) return nil, errors.NewInternalError(fmt.Errorf("%d default StorageClasses were found", len(defaultClasses))) } return defaultClasses[0], nil diff --git a/plugin/pkg/admission/storage/storageclass/setdefault/admission_test.go b/plugin/pkg/admission/storage/storageclass/setdefault/admission_test.go index cdd8d73eb9033..e9d5fcbaf4336 100644 --- a/plugin/pkg/admission/storage/storageclass/setdefault/admission_test.go +++ b/plugin/pkg/admission/storage/storageclass/setdefault/admission_test.go @@ -19,7 +19,7 @@ package setdefault import ( "testing" - "github.com/golang/glog" + "k8s.io/klog" storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -188,7 +188,7 @@ func TestAdmission(t *testing.T) { } for _, test := range tests { - glog.V(4).Infof("starting test %q", test.name) + klog.V(4).Infof("starting test %q", test.name) // clone the claim, it's going to be modified claim := test.claim.DeepCopy() @@ -212,7 +212,7 @@ func TestAdmission(t *testing.T) { nil, // userInfo ) err := ctrl.Admit(attrs) - glog.Infof("Got %v", err) + klog.Infof("Got %v", err) if err != nil && !test.expectError { t.Errorf("Test %q: unexpected error received: %v", test.name, err) } diff --git a/plugin/pkg/admission/storage/storageobjectinuseprotection/BUILD b/plugin/pkg/admission/storage/storageobjectinuseprotection/BUILD index 1cccceca387f2..fda54ef49831c 100644 --- a/plugin/pkg/admission/storage/storageobjectinuseprotection/BUILD +++ b/plugin/pkg/admission/storage/storageobjectinuseprotection/BUILD @@ -11,7 +11,7 @@ go_library( "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/admission/storage/storageobjectinuseprotection/admission.go b/plugin/pkg/admission/storage/storageobjectinuseprotection/admission.go index 2a8e24c6dfacb..f9d769c3266ae 100644 --- a/plugin/pkg/admission/storage/storageobjectinuseprotection/admission.go +++ b/plugin/pkg/admission/storage/storageobjectinuseprotection/admission.go @@ -19,7 +19,7 @@ package storageobjectinuseprotection import ( "io" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apiserver/pkg/admission" 
"k8s.io/apiserver/pkg/util/feature" @@ -97,7 +97,7 @@ func (c *storageProtectionPlugin) admitPV(a admission.Attributes) error { return nil } } - glog.V(4).Infof("adding PV protection finalizer to %s", pv.Name) + klog.V(4).Infof("adding PV protection finalizer to %s", pv.Name) pv.Finalizers = append(pv.Finalizers, volumeutil.PVProtectionFinalizer) return nil @@ -121,7 +121,7 @@ func (c *storageProtectionPlugin) admitPVC(a admission.Attributes) error { } } - glog.V(4).Infof("adding PVC protection finalizer to %s/%s", pvc.Namespace, pvc.Name) + klog.V(4).Infof("adding PVC protection finalizer to %s/%s", pvc.Namespace, pvc.Name) pvc.Finalizers = append(pvc.Finalizers, volumeutil.PVCProtectionFinalizer) return nil } diff --git a/plugin/pkg/auth/authenticator/token/bootstrap/BUILD b/plugin/pkg/auth/authenticator/token/bootstrap/BUILD index 15f6da5796943..4432fe3faa3d8 100644 --- a/plugin/pkg/auth/authenticator/token/bootstrap/BUILD +++ b/plugin/pkg/auth/authenticator/token/bootstrap/BUILD @@ -34,7 +34,7 @@ go_library( "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go b/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go index 93b4e1cddd82e..4459615665346 100644 --- a/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go +++ b/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go @@ -27,7 +27,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -62,7 +62,7 @@ type TokenAuthenticator struct { // func tokenErrorf(s *corev1.Secret, format string, i ...interface{}) { format = fmt.Sprintf("Bootstrap secret %s/%s matching bearer token ", s.Namespace, s.Name) + format - glog.V(3).Infof(format, i...) + klog.V(3).Infof(format, i...) } // AuthenticateToken tries to match the provided token to a bootstrap token secret @@ -102,7 +102,7 @@ func (t *TokenAuthenticator) AuthenticateToken(ctx context.Context, token string secret, err := t.lister.Get(secretName) if err != nil { if errors.IsNotFound(err) { - glog.V(3).Infof("No secret of name %s to match bootstrap bearer token", secretName) + klog.V(3).Infof("No secret of name %s to match bootstrap bearer token", secretName) return nil, false, nil } return nil, false, err @@ -170,12 +170,12 @@ func isSecretExpired(secret *corev1.Secret) bool { if len(expiration) > 0 { expTime, err2 := time.Parse(time.RFC3339, expiration) if err2 != nil { - glog.V(3).Infof("Unparseable expiration time (%s) in %s/%s Secret: %v. Treating as expired.", + klog.V(3).Infof("Unparseable expiration time (%s) in %s/%s Secret: %v. 
Treating as expired.", expiration, secret.Namespace, secret.Name, err2) return true } if time.Now().After(expTime) { - glog.V(3).Infof("Expired bootstrap token in %s/%s Secret: %v", + klog.V(3).Infof("Expired bootstrap token in %s/%s Secret: %v", secret.Namespace, secret.Name, expiration) return true } diff --git a/plugin/pkg/auth/authorizer/node/BUILD b/plugin/pkg/auth/authorizer/node/BUILD index 5faaf3d6396f9..5ae8b4a536903 100644 --- a/plugin/pkg/auth/authorizer/node/BUILD +++ b/plugin/pkg/auth/authorizer/node/BUILD @@ -60,7 +60,7 @@ go_library( "//third_party/forked/gonum/graph:go_default_library", "//third_party/forked/gonum/graph/simple:go_default_library", "//third_party/forked/gonum/graph/traverse:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/auth/authorizer/node/graph_populator.go b/plugin/pkg/auth/authorizer/node/graph_populator.go index 7bdf9b768572f..f2cc1a5135c16 100644 --- a/plugin/pkg/auth/authorizer/node/graph_populator.go +++ b/plugin/pkg/auth/authorizer/node/graph_populator.go @@ -18,7 +18,7 @@ package node import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" corev1 "k8s.io/api/core/v1" storagev1beta1 "k8s.io/api/storage/v1beta1" @@ -109,7 +109,7 @@ func (g *graphPopulator) updateNode(oldObj, obj interface{}) { if node.Spec.ConfigSource != nil { path = fmt.Sprintf("%s/%s", namespace, name) } - glog.V(4).Infof("updateNode configSource reference to %s for node %s", path, node.Name) + klog.V(4).Infof("updateNode configSource reference to %s for node %s", path, node.Name) g.graph.SetNodeConfigMap(node.Name, name, namespace) } @@ -119,7 +119,7 @@ func (g *graphPopulator) deleteNode(obj interface{}) { } node, ok := obj.(*corev1.Node) if !ok { - glog.Infof("unexpected type %T", obj) + klog.Infof("unexpected type %T", obj) return } @@ -137,17 +137,17 @@ func (g *graphPopulator) updatePod(oldObj, obj interface{}) { pod := obj.(*corev1.Pod) if len(pod.Spec.NodeName) == 0 { // No node assigned - glog.V(5).Infof("updatePod %s/%s, no node", pod.Namespace, pod.Name) + klog.V(5).Infof("updatePod %s/%s, no node", pod.Namespace, pod.Name) return } if oldPod, ok := oldObj.(*corev1.Pod); ok && oldPod != nil { if (pod.Spec.NodeName == oldPod.Spec.NodeName) && (pod.UID == oldPod.UID) { // Node and uid are unchanged, all object references in the pod spec are immutable - glog.V(5).Infof("updatePod %s/%s, node unchanged", pod.Namespace, pod.Name) + klog.V(5).Infof("updatePod %s/%s, node unchanged", pod.Namespace, pod.Name) return } } - glog.V(4).Infof("updatePod %s/%s for node %s", pod.Namespace, pod.Name, pod.Spec.NodeName) + klog.V(4).Infof("updatePod %s/%s for node %s", pod.Namespace, pod.Name, pod.Spec.NodeName) g.graph.AddPod(pod) } @@ -157,14 +157,14 @@ func (g *graphPopulator) deletePod(obj interface{}) { } pod, ok := obj.(*corev1.Pod) if !ok { - glog.Infof("unexpected type %T", obj) + klog.Infof("unexpected type %T", obj) return } if len(pod.Spec.NodeName) == 0 { - glog.V(5).Infof("deletePod %s/%s, no node", pod.Namespace, pod.Name) + klog.V(5).Infof("deletePod %s/%s, no node", pod.Namespace, pod.Name) return } - glog.V(4).Infof("deletePod %s/%s for node %s", pod.Namespace, pod.Name, pod.Spec.NodeName) + klog.V(4).Infof("deletePod %s/%s for node %s", pod.Namespace, pod.Name, pod.Spec.NodeName) g.graph.DeletePod(pod.Name, pod.Namespace) } @@ -184,7 +184,7 @@ func (g *graphPopulator) deletePV(obj interface{}) { } pv, ok := obj.(*corev1.PersistentVolume) if !ok { - 
glog.Infof("unexpected type %T", obj) + klog.Infof("unexpected type %T", obj) return } g.graph.DeletePV(pv.Name) @@ -212,7 +212,7 @@ func (g *graphPopulator) deleteVolumeAttachment(obj interface{}) { } attachment, ok := obj.(*storagev1beta1.VolumeAttachment) if !ok { - glog.Infof("unexpected type %T", obj) + klog.Infof("unexpected type %T", obj) return } g.graph.DeleteVolumeAttachment(attachment.Name) diff --git a/plugin/pkg/auth/authorizer/node/node_authorizer.go b/plugin/pkg/auth/authorizer/node/node_authorizer.go index caeb7d4c38731..757b386861197 100644 --- a/plugin/pkg/auth/authorizer/node/node_authorizer.go +++ b/plugin/pkg/auth/authorizer/node/node_authorizer.go @@ -19,7 +19,7 @@ package node import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -86,7 +86,7 @@ func (r *NodeAuthorizer) Authorize(attrs authorizer.Attributes) (authorizer.Deci } if len(nodeName) == 0 { // reject requests from unidentifiable nodes - glog.V(2).Infof("NODE DENY: unknown node for user %q", attrs.GetUser().GetName()) + klog.V(2).Infof("NODE DENY: unknown node for user %q", attrs.GetUser().GetName()) return authorizer.DecisionNoOpinion, fmt.Sprintf("unknown node for user %q", attrs.GetUser().GetName()), nil } @@ -144,12 +144,12 @@ func (r *NodeAuthorizer) authorizeStatusUpdate(nodeName string, startingType ver case "update", "patch": // ok default: - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "can only get/update/patch this type", nil } if attrs.GetSubresource() != "status" { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "can only update status subresource", nil } @@ -159,11 +159,11 @@ func (r *NodeAuthorizer) authorizeStatusUpdate(nodeName string, startingType ver // authorizeGet authorizes "get" requests to objects of the specified type if they are related to the specified node func (r *NodeAuthorizer) authorizeGet(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) { if attrs.GetVerb() != "get" { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "can only get individual resources of this type", nil } if len(attrs.GetSubresource()) > 0 { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "cannot get subresource", nil } return r.authorize(nodeName, startingType, attrs) @@ -173,15 +173,15 @@ func (r *NodeAuthorizer) authorizeGet(nodeName string, startingType vertexType, // specified types if they are related to the specified node. 
func (r *NodeAuthorizer) authorizeReadNamespacedObject(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) { if attrs.GetVerb() != "get" && attrs.GetVerb() != "list" && attrs.GetVerb() != "watch" { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "can only read resources of this type", nil } if len(attrs.GetSubresource()) > 0 { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "cannot read subresource", nil } if len(attrs.GetNamespace()) == 0 { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "can only read namespaced object of this type", nil } return r.authorize(nodeName, startingType, attrs) @@ -189,17 +189,17 @@ func (r *NodeAuthorizer) authorizeReadNamespacedObject(nodeName string, starting func (r *NodeAuthorizer) authorize(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) { if len(attrs.GetName()) == 0 { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "No Object name found", nil } ok, err := r.hasPathFrom(nodeName, startingType, attrs.GetNamespace(), attrs.GetName()) if err != nil { - glog.V(2).Infof("NODE DENY: %v", err) + klog.V(2).Infof("NODE DENY: %v", err) return authorizer.DecisionNoOpinion, "no path found to object", nil } if !ok { - glog.V(2).Infof("NODE DENY: %q %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %q %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "no path found to object", nil } return authorizer.DecisionAllow, "", nil @@ -209,22 +209,22 @@ func (r *NodeAuthorizer) authorize(nodeName string, startingType vertexType, att // subresource of pods running on a node func (r *NodeAuthorizer) authorizeCreateToken(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) { if attrs.GetVerb() != "create" || len(attrs.GetName()) == 0 { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "can only create tokens for individual service accounts", nil } if attrs.GetSubresource() != "token" { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "can only create token subresource of serviceaccount", nil } ok, err := r.hasPathFrom(nodeName, startingType, attrs.GetNamespace(), attrs.GetName()) if err != nil { - glog.V(2).Infof("NODE DENY: %v", err) + klog.V(2).Infof("NODE DENY: %v", err) return authorizer.DecisionNoOpinion, "no path found to object", nil } if !ok { - glog.V(2).Infof("NODE DENY: %q %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %q %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "no path found to object", nil } return authorizer.DecisionAllow, "", nil @@ -239,13 +239,13 @@ func (r *NodeAuthorizer) authorizeLease(nodeName string, attrs authorizer.Attrib verb != "update" && verb != "patch" && verb != "delete" { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "can only get, create, update, 
patch, or delete a node lease", nil } // the request must be against the system namespace reserved for node leases if attrs.GetNamespace() != api.NamespaceNodeLease { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, fmt.Sprintf("can only access leases in the %q system namespace", api.NamespaceNodeLease), nil } @@ -253,7 +253,7 @@ func (r *NodeAuthorizer) authorizeLease(nodeName string, attrs authorizer.Attrib // note we skip this check for create, since the authorizer doesn't know the name on create // the noderestriction admission plugin is capable of performing this check at create time if verb != "create" && attrs.GetName() != nodeName { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "can only access node lease with the same name as the requesting node", nil } @@ -269,12 +269,12 @@ func (r *NodeAuthorizer) authorizeCSINodeInfo(nodeName string, attrs authorizer. verb != "update" && verb != "patch" && verb != "delete" { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "can only get, create, update, patch, or delete a CSINodeInfo", nil } if len(attrs.GetSubresource()) > 0 { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "cannot authorize CSINodeInfo subresources", nil } @@ -282,7 +282,7 @@ func (r *NodeAuthorizer) authorizeCSINodeInfo(nodeName string, attrs authorizer. // note we skip this check for create, since the authorizer doesn't know the name on create // the noderestriction admission plugin is capable of performing this check at create time if verb != "create" && attrs.GetName() != nodeName { - glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) + klog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs) return authorizer.DecisionNoOpinion, "can only access CSINodeInfo with the same name as the requesting node", nil } diff --git a/plugin/pkg/auth/authorizer/rbac/BUILD b/plugin/pkg/auth/authorizer/rbac/BUILD index 6425852b9d820..772cc13ae3480 100644 --- a/plugin/pkg/auth/authorizer/rbac/BUILD +++ b/plugin/pkg/auth/authorizer/rbac/BUILD @@ -22,7 +22,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", "//staging/src/k8s.io/client-go/listers/rbac/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD index 55e9cd22af717..51c729978348d 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go index 
ba6fd6724e247..b20c927d0a4f0 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go @@ -19,7 +19,7 @@ package bootstrappolicy import ( "strings" - "github.com/golang/glog" + "k8s.io/klog" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,12 +32,12 @@ const saRolePrefix = "system:controller:" func addControllerRole(controllerRoles *[]rbacv1.ClusterRole, controllerRoleBindings *[]rbacv1.ClusterRoleBinding, role rbacv1.ClusterRole) { if !strings.HasPrefix(role.Name, saRolePrefix) { - glog.Fatalf(`role %q must start with %q`, role.Name, saRolePrefix) + klog.Fatalf(`role %q must start with %q`, role.Name, saRolePrefix) } for _, existingRole := range *controllerRoles { if role.Name == existingRole.Name { - glog.Fatalf("role %q was already registered", role.Name) + klog.Fatalf("role %q was already registered", role.Name) } } diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/namespace_policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/namespace_policy.go index fefe26d2ceb99..31ca13db52193 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/namespace_policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/namespace_policy.go @@ -19,7 +19,7 @@ package bootstrappolicy import ( "strings" - "github.com/golang/glog" + "k8s.io/klog" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -36,13 +36,13 @@ var ( func addNamespaceRole(namespace string, role rbacv1.Role) { if !strings.HasPrefix(namespace, "kube-") { - glog.Fatalf(`roles can only be bootstrapped into reserved namespaces starting with "kube-", not %q`, namespace) + klog.Fatalf(`roles can only be bootstrapped into reserved namespaces starting with "kube-", not %q`, namespace) } existingRoles := namespaceRoles[namespace] for _, existingRole := range existingRoles { if role.Name == existingRole.Name { - glog.Fatalf("role %q was already registered in %q", role.Name, namespace) + klog.Fatalf("role %q was already registered in %q", role.Name, namespace) } } @@ -54,13 +54,13 @@ func addNamespaceRole(namespace string, role rbacv1.Role) { func addNamespaceRoleBinding(namespace string, roleBinding rbacv1.RoleBinding) { if !strings.HasPrefix(namespace, "kube-") { - glog.Fatalf(`rolebindings can only be bootstrapped into reserved namespaces starting with "kube-", not %q`, namespace) + klog.Fatalf(`rolebindings can only be bootstrapped into reserved namespaces starting with "kube-", not %q`, namespace) } existingRoleBindings := namespaceRoleBindings[namespace] for _, existingRoleBinding := range existingRoleBindings { if roleBinding.Name == existingRoleBinding.Name { - glog.Fatalf("rolebinding %q was already registered in %q", roleBinding.Name, namespace) + klog.Fatalf("rolebinding %q was already registered in %q", roleBinding.Name, namespace) } } diff --git a/plugin/pkg/auth/authorizer/rbac/rbac.go b/plugin/pkg/auth/authorizer/rbac/rbac.go index a0f173c393bfe..5cd339675b9da 100644 --- a/plugin/pkg/auth/authorizer/rbac/rbac.go +++ b/plugin/pkg/auth/authorizer/rbac/rbac.go @@ -21,7 +21,7 @@ import ( "bytes" "fmt" - "github.com/golang/glog" + "k8s.io/klog" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/labels" @@ -81,7 +81,7 @@ func (r *RBACAuthorizer) Authorize(requestAttributes authorizer.Attributes) (aut // Build a detailed log of the denial. // Make the whole block conditional so we don't do a lot of string-building we won't use. 
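Note on the rbac.go hunk below: the guard `if glog.V(5)` becomes `if klog.V(5)` unchanged in shape, because both loggers return a boolean-like Verbose value, so the whole string-building block is skipped at run time unless verbosity is at least 5. A sketch of the idiom; buildDenialDetail is a hypothetical stand-in for the detail assembly the real block performs:

    package main

    import (
        "flag"

        "k8s.io/klog"
    )

    // buildDenialDetail is a hypothetical stand-in for the verb/resource
    // string assembly rbac.go performs inside its V(5) block.
    func buildDenialDetail() string { return "cannot list pods cluster-wide" }

    func main() {
        klog.InitFlags(nil)
        flag.Parse()
        // klog.V returns a boolean-like Verbose value, as glog.V did, so
        // the expensive formatting below runs only when -v >= 5.
        if klog.V(5) {
            klog.Infof("RBAC DENY: %s", buildDenialDetail())
        }
        klog.Flush()
    }
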
- if glog.V(5) { + if klog.V(5) { var operation string if requestAttributes.IsResourceRequest() { b := &bytes.Buffer{} @@ -115,7 +115,7 @@ func (r *RBACAuthorizer) Authorize(requestAttributes authorizer.Attributes) (aut scope = "cluster-wide" } - glog.Infof("RBAC DENY: user %q groups %q cannot %s %s", requestAttributes.GetUser().GetName(), requestAttributes.GetUser().GetGroups(), operation, scope) + klog.Infof("RBAC DENY: user %q groups %q cannot %s %s", requestAttributes.GetUser().GetName(), requestAttributes.GetUser().GetGroups(), operation, scope) } reason := "" diff --git a/staging/src/k8s.io/api/Godeps/Godeps.json b/staging/src/k8s.io/api/Godeps/Godeps.json index d718f2e428d36..65d53b1c04bfd 100644 --- a/staging/src/k8s.io/api/Godeps/Godeps.json +++ b/staging/src/k8s.io/api/Godeps/Godeps.json @@ -18,10 +18,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/protobuf/proto", "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" diff --git a/staging/src/k8s.io/apiextensions-apiserver/BUILD b/staging/src/k8s.io/apiextensions-apiserver/BUILD index dee7cb342beed..e555fb761c08f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/BUILD @@ -20,7 +20,7 @@ go_library( "//staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 5730ff31adc40..ea4cfb9d58f59 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -458,10 +458,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/groupcache/lru", "Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433" diff --git a/staging/src/k8s.io/apiextensions-apiserver/main.go b/staging/src/k8s.io/apiextensions-apiserver/main.go index 888a04b1a4613..eeac2e685a7c3 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/main.go +++ b/staging/src/k8s.io/apiextensions-apiserver/main.go @@ -20,7 +20,7 @@ import ( "flag" "os" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apiextensions-apiserver/pkg/cmd/server" genericapiserver "k8s.io/apiserver/pkg/server" @@ -35,6 +35,6 @@ func main() { cmd := server.NewServerCommand(os.Stdout, os.Stderr, stopCh) cmd.Flags().AddGoFlagSet(flag.CommandLine) if err := cmd.Execute(); err != nil { - glog.Fatal(err) + klog.Fatal(err) } } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD index 9aa35c30d466a..5da6c14cda39d 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/BUILD @@ -75,7 +75,7 @@ go_library( "//vendor/github.com/go-openapi/spec:go_default_library", "//vendor/github.com/go-openapi/strfmt:go_default_library", 
"//vendor/github.com/go-openapi/validate:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_discovery_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_discovery_controller.go index 13c465a557539..3b13e8357dba5 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_discovery_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_discovery_controller.go @@ -21,7 +21,7 @@ import ( "sort" "time" - "github.com/golang/glog" + "k8s.io/klog" autoscaling "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -198,9 +198,9 @@ func sortGroupDiscoveryByKubeAwareVersion(gd []metav1.GroupVersionForDiscovery) func (c *DiscoveryController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() - defer glog.Infof("Shutting down DiscoveryController") + defer klog.Infof("Shutting down DiscoveryController") - glog.Infof("Starting DiscoveryController") + klog.Infof("Starting DiscoveryController") if !cache.WaitForCacheSync(stopCh, c.crdsSynced) { utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) @@ -246,14 +246,14 @@ func (c *DiscoveryController) enqueue(obj *apiextensions.CustomResourceDefinitio func (c *DiscoveryController) addCustomResourceDefinition(obj interface{}) { castObj := obj.(*apiextensions.CustomResourceDefinition) - glog.V(4).Infof("Adding customresourcedefinition %s", castObj.Name) + klog.V(4).Infof("Adding customresourcedefinition %s", castObj.Name) c.enqueue(castObj) } func (c *DiscoveryController) updateCustomResourceDefinition(oldObj, newObj interface{}) { castNewObj := newObj.(*apiextensions.CustomResourceDefinition) castOldObj := oldObj.(*apiextensions.CustomResourceDefinition) - glog.V(4).Infof("Updating customresourcedefinition %s", castOldObj.Name) + klog.V(4).Infof("Updating customresourcedefinition %s", castOldObj.Name) // Enqueue both old and new object to make sure we remove and add appropriate Versions. // The working queue will resolve any duplicates and only changes will stay in the queue. 
c.enqueue(castNewObj) @@ -265,15 +265,15 @@ func (c *DiscoveryController) deleteCustomResourceDefinition(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Couldn't get object from tombstone %#v", obj) + klog.Errorf("Couldn't get object from tombstone %#v", obj) return } castObj, ok = tombstone.Obj.(*apiextensions.CustomResourceDefinition) if !ok { - glog.Errorf("Tombstone contained object that is not expected %#v", obj) + klog.Errorf("Tombstone contained object that is not expected %#v", obj) return } } - glog.V(4).Infof("Deleting customresourcedefinition %q", castObj.Name) + klog.V(4).Infof("Deleting customresourcedefinition %q", castObj.Name) c.enqueue(castObj) } diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go index b4be7b1157877..2ad59dfe9375f 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go @@ -28,7 +28,7 @@ import ( "github.com/go-openapi/spec" "github.com/go-openapi/strfmt" "github.com/go-openapi/validate" - "github.com/golang/glog" + "k8s.io/klog" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -350,11 +350,11 @@ func (r *crdHandler) updateCustomResourceDefinition(oldObj, newObj interface{}) return } if apiequality.Semantic.DeepEqual(&newCRD.Spec, oldInfo.spec) && apiequality.Semantic.DeepEqual(&newCRD.Status.AcceptedNames, oldInfo.acceptedNames) { - glog.V(6).Infof("Ignoring customresourcedefinition %s update because neither spec, nor accepted names changed", oldCRD.Name) + klog.V(6).Infof("Ignoring customresourcedefinition %s update because neither spec, nor accepted names changed", oldCRD.Name) return } - glog.V(4).Infof("Updating customresourcedefinition %s", oldCRD.Name) + klog.V(4).Infof("Updating customresourcedefinition %s", oldCRD.Name) // Copy because we cannot write to storageMap without a race // as it is used without locking elsewhere. @@ -394,7 +394,7 @@ func (r *crdHandler) removeDeadStorage() { } } if !found { - glog.V(4).Infof("Removing dead CRD storage for %s/%s", s.spec.Group, s.spec.Names.Kind) + klog.V(4).Infof("Removing dead CRD storage for %s/%s", s.spec.Group, s.spec.Names.Kind) for _, storage := range s.storages { // destroy only the main storage. Those for the subresources share cacher and etcd clients. 
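Note on the deleteCustomResourceDefinition hunk above: it follows the standard informer delete-handler shape, where the object may arrive either directly or wrapped in a cache.DeletedFinalStateUnknown tombstone when the final state was missed, and both shapes must be unwrapped before logging and enqueueing. A condensed sketch of that pattern, reusing the types from the hunk:

    package sketch

    import (
        "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
        "k8s.io/client-go/tools/cache"
        "k8s.io/klog"
    )

    // onDelete condenses the tombstone-unwrapping pattern used by
    // deleteCustomResourceDefinition above.
    func onDelete(obj interface{}, enqueue func(*apiextensions.CustomResourceDefinition)) {
        crd, ok := obj.(*apiextensions.CustomResourceDefinition)
        if !ok {
            tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
            if !ok {
                klog.Errorf("Couldn't get object from tombstone %#v", obj)
                return
            }
            crd, ok = tombstone.Obj.(*apiextensions.CustomResourceDefinition)
            if !ok {
                klog.Errorf("Tombstone contained object that is not expected %#v", obj)
                return
            }
        }
        klog.V(4).Infof("Deleting customresourcedefinition %q", crd.Name)
        enqueue(crd)
    }
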
storage.CustomResource.DestroyFunc() @@ -505,7 +505,7 @@ func (r *crdHandler) getOrCreateServingInfoFor(crd *apiextensions.CustomResource } table, err := tableconvertor.New(columns) if err != nil { - glog.V(2).Infof("The CRD for %v has an invalid printer specification, falling back to default printing: %v", kind, err) + klog.V(2).Infof("The CRD for %v has an invalid printer specification, falling back to default printing: %v", kind, err) } storages[v.Name] = customresource.NewStorage( diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/BUILD index 40c07208d8aac..03d72c68487d6 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/BUILD @@ -16,7 +16,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go index 6420c3a6f1ee7..d041485536ece 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go @@ -20,12 +20,12 @@ import ( "fmt" "time" - "github.com/golang/glog" apierrors "k8s.io/apimachinery/pkg/api/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" client "k8s.io/apiextensions-apiserver/pkg/client/clientset/internalclientset/typed/apiextensions/internalversion" @@ -70,8 +70,8 @@ func (ec *EstablishingController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer ec.queue.ShutDown() - glog.Infof("Starting EstablishingController") - defer glog.Infof("Shutting down EstablishingController") + klog.Infof("Starting EstablishingController") + defer klog.Infof("Shutting down EstablishingController") if !cache.WaitForCacheSync(stopCh, ec.crdSynced) { return diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/BUILD index ecafd6a92eefd..5acb5f3ce99c4 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/BUILD @@ -26,7 +26,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go index f881427efdf92..c2ebdcf1709e7 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go +++ 
b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go @@ -21,7 +21,7 @@ import ( "reflect" "time" - "github.com/golang/glog" + "k8s.io/klog" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -216,7 +216,7 @@ func (c *CRDFinalizer) deleteInstances(crd *apiextensions.CustomResourceDefiniti if len(listObj.(*unstructured.UnstructuredList).Items) == 0 { return true, nil } - glog.V(2).Infof("%s.%s waiting for %d items to be removed", crd.Status.AcceptedNames.Plural, crd.Spec.Group, len(listObj.(*unstructured.UnstructuredList).Items)) + klog.V(2).Infof("%s.%s waiting for %d items to be removed", crd.Status.AcceptedNames.Plural, crd.Spec.Group, len(listObj.(*unstructured.UnstructuredList).Items)) return false, nil }) if err != nil { @@ -239,8 +239,8 @@ func (c *CRDFinalizer) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() - glog.Infof("Starting CRDFinalizer") - defer glog.Infof("Shutting down CRDFinalizer") + klog.Infof("Starting CRDFinalizer") + defer klog.Infof("Shutting down CRDFinalizer") if !cache.WaitForCacheSync(stopCh, c.crdSynced) { return diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD index d75d3dbe5017b..1d6f1cbbccc54 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/BUILD @@ -37,7 +37,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go index f00def4b12424..fe5c94798470a 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -281,8 +281,8 @@ func (c *NamingConditionController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() - glog.Infof("Starting NamingConditionController") - defer glog.Infof("Shutting down NamingConditionController") + klog.Infof("Starting NamingConditionController") + defer klog.Infof("Shutting down NamingConditionController") if !cache.WaitForCacheSync(stopCh, c.crdSynced) { return @@ -331,13 +331,13 @@ func (c *NamingConditionController) enqueue(obj *apiextensions.CustomResourceDef func (c *NamingConditionController) addCustomResourceDefinition(obj interface{}) { castObj := obj.(*apiextensions.CustomResourceDefinition) - glog.V(4).Infof("Adding %s", castObj.Name) + klog.V(4).Infof("Adding %s", castObj.Name) c.enqueue(castObj) } func (c *NamingConditionController) updateCustomResourceDefinition(obj, _ interface{}) { castObj := obj.(*apiextensions.CustomResourceDefinition) - glog.V(4).Infof("Updating %s", castObj.Name) + klog.V(4).Infof("Updating %s", castObj.Name) c.enqueue(castObj) } @@ -346,16 +346,16 @@ func (c *NamingConditionController) 
deleteCustomResourceDefinition(obj interface if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Couldn't get object from tombstone %#v", obj) + klog.Errorf("Couldn't get object from tombstone %#v", obj) return } castObj, ok = tombstone.Obj.(*apiextensions.CustomResourceDefinition) if !ok { - glog.Errorf("Tombstone contained object that is not expected %#v", obj) + klog.Errorf("Tombstone contained object that is not expected %#v", obj) return } } - glog.V(4).Infof("Deleting %q", castObj.Name) + klog.V(4).Infof("Deleting %q", castObj.Name) c.enqueue(castObj) } diff --git a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json index 014dafb36a1da..7bf5daca9bc3d 100644 --- a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json +++ b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json @@ -34,10 +34,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/groupcache/lru", "Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433" diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD b/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD index 938ba9b5b73f0..4ced3ce566318 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/BUILD @@ -49,7 +49,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/api/meta/meta.go b/staging/src/k8s.io/apimachinery/pkg/api/meta/meta.go index 854bd30fa3da1..6fe7458f6c4ff 100644 --- a/staging/src/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/staging/src/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -20,7 +20,7 @@ import ( "fmt" "reflect" - "github.com/golang/glog" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" @@ -607,7 +607,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { var ret []metav1.OwnerReference s := a.ownerReferences if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) + klog.Errorf("expect %v to be a pointer to slice", s) return ret } s = s.Elem() @@ -615,7 +615,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1) for i := 0; i < s.Len(); i++ { if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { - glog.Errorf("extractFromOwnerReference failed: %v", err) + klog.Errorf("extractFromOwnerReference failed: %v", err) return ret } } @@ -625,13 +625,13 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { s := a.ownerReferences if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) + klog.Errorf("expect %v to be a pointer to slice", s) } s = s.Elem() newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) for i := 0; i < len(references); i++ { if err := 
setOwnerReference(newReferences.Index(i), &references[i]); err != nil { - glog.Errorf("setOwnerReference failed: %v", err) + klog.Errorf("setOwnerReference failed: %v", err) return } } diff --git a/staging/src/k8s.io/apimachinery/pkg/labels/BUILD b/staging/src/k8s.io/apimachinery/pkg/labels/BUILD index 14666aec8ec51..eeffddf89e524 100644 --- a/staging/src/k8s.io/apimachinery/pkg/labels/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/labels/BUILD @@ -33,7 +33,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/labels/selector.go b/staging/src/k8s.io/apimachinery/pkg/labels/selector.go index 374d2ef1377a8..f5a0888932f26 100644 --- a/staging/src/k8s.io/apimachinery/pkg/labels/selector.go +++ b/staging/src/k8s.io/apimachinery/pkg/labels/selector.go @@ -23,10 +23,10 @@ import ( "strconv" "strings" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog" ) // Requirements is AND of all requirements. @@ -211,13 +211,13 @@ func (r *Requirement) Matches(ls Labels) bool { } lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) return false } // There should be only one strValue in r.strValues, and can be converted to a integer. if len(r.strValues) != 1 { - glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) return false } @@ -225,7 +225,7 @@ func (r *Requirement) Matches(ls Labels) bool { for i := range r.strValues { rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) return false } } diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD index 3c07e159539d6..6b52ceff0fef8 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD @@ -67,7 +67,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go b/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go index 291d7a4e888cb..dff56e03401a6 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/json" 
utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "github.com/golang/glog" + "k8s.io/klog" ) // UnstructuredConverter is an interface for converting between interface{} @@ -133,10 +133,10 @@ func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj i newObj := reflect.New(t.Elem()).Interface() newErr := fromUnstructuredViaJSON(u, newObj) if (err != nil) != (newErr != nil) { - glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) + klog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) } if err == nil && !c.comparison.DeepEqual(obj, newObj) { - glog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) + klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) } } return err @@ -424,10 +424,10 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte newUnstr := map[string]interface{}{} newErr := toUnstructuredViaJSON(obj, &newUnstr) if (err != nil) != (newErr != nil) { - glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) + klog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) } if err == nil && !c.comparison.DeepEqual(u, newUnstr) { - glog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) + klog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) } } if err != nil { diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD index 9721de40006b8..2c7d03c9560ea 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/BUILD @@ -39,7 +39,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/third_party/forked/golang/netutil:go_default_library", "//vendor/github.com/docker/spdystream:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go index 3dc8e23ae1418..9d222faa898fa 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go @@ -23,8 +23,8 @@ import ( "time" "github.com/docker/spdystream" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog" ) // connection maintains state about a spdystream.Connection and its associated @@ -128,7 +128,7 @@ func (c *connection) newSpdyStream(stream *spdystream.Stream) { err := c.newStreamHandler(stream, replySent) rejectStream := (err != nil) if rejectStream { - glog.Warningf("Stream rejected: %v", err) + klog.Warningf("Stream rejected: %v", err) stream.Reset() return } diff --git a/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD index 865691051a810..50bb5fa4c70aa 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/intstr/BUILD @@ -23,8 +23,8 @@ go_library( importpath = "k8s.io/apimachinery/pkg/util/intstr", deps = [ "//vendor/github.com/gogo/protobuf/proto:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", + 
"//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 642b83cec2173..5b26ed262631b 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,8 +25,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" "github.com/google/gofuzz" + "k8s.io/klog" ) // IntOrString is a type that can hold an int32 or a string. When used in @@ -58,7 +58,7 @@ const ( // TODO: convert to (val int32) func FromInt(val int) IntOrString { if val > math.MaxInt32 || val < math.MinInt32 { - glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) } return IntOrString{Type: Int, IntVal: int32(val)} } diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/net/BUILD index d38670a8c87a9..6f4542e72c1dd 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/BUILD @@ -37,8 +37,8 @@ go_library( importpath = "k8s.io/apimachinery/pkg/util/net", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/http.go b/staging/src/k8s.io/apimachinery/pkg/util/net/http.go index 7c2a5e6286d1c..155667cdfc7f7 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/http.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/http.go @@ -31,8 +31,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" "golang.org/x/net/http2" + "k8s.io/klog" ) // JoinPreservingTrailingSlash does a path.Join of the specified elements, @@ -107,10 +107,10 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { t = SetOldTransportDefaults(t) // Allow clients to disable http2 if needed. if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { - glog.Infof("HTTP2 has been explicitly disabled") + klog.Infof("HTTP2 has been explicitly disabled") } else { if err := http2.ConfigureTransport(t); err != nil { - glog.Warningf("Transport failed http2 configuration: %v", err) + klog.Warningf("Transport failed http2 configuration: %v", err) } } return t @@ -368,7 +368,7 @@ redirectLoop: resp, err := http.ReadResponse(respReader, nil) if err != nil { // Unable to read the backend response; let the client handle it. 
- glog.Warningf("Error reading backend response: %v", err) + klog.Warningf("Error reading backend response: %v", err) break redirectLoop } diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/interface.go b/staging/src/k8s.io/apimachinery/pkg/util/net/interface.go index 0ab9b36080b5c..daf5d24964559 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/interface.go @@ -26,7 +26,7 @@ import ( "strings" - "github.com/golang/glog" + "k8s.io/klog" ) type AddressFamily uint @@ -193,7 +193,7 @@ func isInterfaceUp(intf *net.Interface) bool { return false } if intf.Flags&net.FlagUp != 0 { - glog.V(4).Infof("Interface %v is up", intf.Name) + klog.V(4).Infof("Interface %v is up", intf.Name) return true } return false @@ -208,20 +208,20 @@ func isLoopbackOrPointToPoint(intf *net.Interface) bool { func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) { if len(addrs) > 0 { for i := range addrs { - glog.V(4).Infof("Checking addr %s.", addrs[i].String()) + klog.V(4).Infof("Checking addr %s.", addrs[i].String()) ip, _, err := net.ParseCIDR(addrs[i].String()) if err != nil { return nil, err } if memberOf(ip, family) { if ip.IsGlobalUnicast() { - glog.V(4).Infof("IP found %v", ip) + klog.V(4).Infof("IP found %v", ip) return ip, nil } else { - glog.V(4).Infof("Non-global unicast address found %v", ip) + klog.V(4).Infof("Non-global unicast address found %v", ip) } } else { - glog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) + klog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) } } @@ -241,13 +241,13 @@ func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInte if err != nil { return nil, err } - glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) + klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) matchingIP, err := getMatchingGlobalIP(addrs, forFamily) if err != nil { return nil, err } if matchingIP != nil { - glog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) + klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) return matchingIP, nil } } @@ -275,14 +275,14 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, fmt.Errorf("no interfaces found on host.") } for _, family := range []AddressFamily{familyIPv4, familyIPv6} { - glog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) + klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) for _, intf := range intfs { if !isInterfaceUp(&intf) { - glog.V(4).Infof("Skipping: down interface %q", intf.Name) + klog.V(4).Infof("Skipping: down interface %q", intf.Name) continue } if isLoopbackOrPointToPoint(&intf) { - glog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) + klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) continue } addrs, err := nw.Addrs(&intf) @@ -290,7 +290,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, err } if len(addrs) == 0 { - glog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) + klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) continue } for _, addr := range addrs { @@ -299,15 +299,15 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) 
} if !memberOf(ip, family) { - glog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) + klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) continue } // TODO: Decide if should open up to allow IPv6 LLAs in future. if !ip.IsGlobalUnicast() { - glog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) + klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) continue } - glog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) + klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) return ip, nil } } @@ -381,23 +381,23 @@ func getAllDefaultRoutes() ([]Route, error) { // an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP. func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) { for _, family := range []AddressFamily{familyIPv4, familyIPv6} { - glog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) + klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) for _, route := range routes { if route.Family != family { continue } - glog.V(4).Infof("Default route transits interface %q", route.Interface) + klog.V(4).Infof("Default route transits interface %q", route.Interface) finalIP, err := getIPFromInterface(route.Interface, family, nw) if err != nil { return nil, err } if finalIP != nil { - glog.V(4).Infof("Found active IP %v ", finalIP) + klog.V(4).Infof("Found active IP %v ", finalIP) return finalIP, nil } } } - glog.V(4).Infof("No active IP found by looking at default routes") + klog.V(4).Infof("No active IP found by looking at default routes") return nil, fmt.Errorf("unable to select an IP from default routes.") } diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/proxy/BUILD index bd1b0cc23d064..6e9d6bababfca 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/BUILD @@ -41,10 +41,10 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/third_party/forked/golang/netutil:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/mxk/go-flowrate/flowrate:go_default_library", "//vendor/golang.org/x/net/html:go_default_library", "//vendor/golang.org/x/net/html/atom:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go index 37a5be487c0b9..a59b24c8dc3ec 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go @@ -24,7 +24,7 @@ import ( "net/http" "net/url" - "github.com/golang/glog" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/third_party/forked/golang/netutil" @@ -35,7 +35,7 @@ func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (ne dialer, err := utilnet.DialerFor(transport) if err != nil { - glog.V(5).Infof("Unable to unwrap transport %T to get dialer: %v", transport, err) + klog.V(5).Infof("Unable to unwrap transport %T to get dialer: %v", transport, err) } switch url.Scheme { @@ -52,7 +52,7 @@ func DialURL(ctx context.Context, url *url.URL, transport 
http.RoundTripper) (ne var err error tlsConfig, err = utilnet.TLSClientConfig(transport) if err != nil { - glog.V(5).Infof("Unable to unwrap transport %T to get at TLS config: %v", transport, err) + klog.V(5).Infof("Unable to unwrap transport %T to get at TLS config: %v", transport, err) } if dialer != nil { @@ -64,7 +64,7 @@ func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (ne } if tlsConfig == nil { // tls.Client requires non-nil config - glog.Warningf("using custom dialer with no TLSClientConfig. Defaulting to InsecureSkipVerify") + klog.Warningf("using custom dialer with no TLSClientConfig. Defaulting to InsecureSkipVerify") // tls.Handshake() requires ServerName or InsecureSkipVerify tlsConfig = &tls.Config{ InsecureSkipVerify: true, diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/transport.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/transport.go index 6c34ab5241de9..3c8cf6da73772 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/transport.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/transport.go @@ -27,9 +27,9 @@ import ( "path" "strings" - "github.com/golang/glog" "golang.org/x/net/html" "golang.org/x/net/html/atom" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/sets" @@ -236,7 +236,7 @@ func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*ht // This is fine default: // Some encoding we don't understand-- don't try to parse this - glog.Errorf("Proxy encountered encoding %v for text/html; can't understand this so not fixing links.", encoding) + klog.Errorf("Proxy encountered encoding %v for text/html; can't understand this so not fixing links.", encoding) return resp, nil } @@ -245,7 +245,7 @@ func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*ht } err := rewriteHTML(reader, writer, urlRewriter) if err != nil { - glog.Errorf("Failed to rewrite URLs: %v", err) + klog.Errorf("Failed to rewrite URLs: %v", err) return resp, err } diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go index 269c53310469a..596b1888975c7 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go @@ -34,8 +34,8 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "github.com/golang/glog" "github.com/mxk/go-flowrate/flowrate" + "k8s.io/klog" ) // UpgradeRequestRoundTripper provides an additional method to decorate a request @@ -235,7 +235,7 @@ func (h *UpgradeAwareHandler) ServeHTTP(w http.ResponseWriter, req *http.Request // tryUpgrade returns true if the request was handled. func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Request) bool { if !httpstream.IsUpgradeRequest(req) { - glog.V(6).Infof("Request was not an upgrade") + klog.V(6).Infof("Request was not an upgrade") return false } @@ -257,15 +257,15 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques // handles this in the non-upgrade path. 
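Note on tryUpgrade, continued below: after hijacking the client connection it pumps bytes in both directions and returns as soon as either direction finishes; the "use of closed network connection" check suppresses the expected error from the surviving copy. A condensed sketch of that duplex copy:

    package sketch

    import (
        "io"
        "net"
        "strings"

        "k8s.io/klog"
    )

    // proxyDuplex condenses the two-goroutine copy loop that tryUpgrade
    // runs between the hijacked client connection and the backend.
    func proxyDuplex(client, backend net.Conn) {
        done := make(chan struct{}, 2)
        pump := func(dst, src net.Conn, what string) {
            if _, err := io.Copy(dst, src); err != nil &&
                !strings.Contains(err.Error(), "use of closed network connection") {
                klog.Errorf("Error proxying data %s: %v", what, err)
            }
            done <- struct{}{}
        }
        go pump(backend, client, "from client to backend")
        go pump(client, backend, "from backend to client")
        <-done // return when either direction completes, as the handler does
    }
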
utilnet.AppendForwardedForHeader(clone) if h.InterceptRedirects { - glog.V(6).Infof("Connecting to backend proxy (intercepting redirects) %s\n Headers: %v", &location, clone.Header) + klog.V(6).Infof("Connecting to backend proxy (intercepting redirects) %s\n Headers: %v", &location, clone.Header) backendConn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, &location, clone.Header, req.Body, utilnet.DialerFunc(h.DialForUpgrade), h.RequireSameHostRedirects) } else { - glog.V(6).Infof("Connecting to backend proxy (direct dial) %s\n Headers: %v", &location, clone.Header) + klog.V(6).Infof("Connecting to backend proxy (direct dial) %s\n Headers: %v", &location, clone.Header) clone.URL = &location backendConn, err = h.DialForUpgrade(clone) } if err != nil { - glog.V(6).Infof("Proxy connection error: %v", err) + klog.V(6).Infof("Proxy connection error: %v", err) h.Responder.Error(w, req, err) return true } @@ -275,13 +275,13 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques // hijacking should be the last step in the upgrade. requestHijacker, ok := w.(http.Hijacker) if !ok { - glog.V(6).Infof("Unable to hijack response writer: %T", w) + klog.V(6).Infof("Unable to hijack response writer: %T", w) h.Responder.Error(w, req, fmt.Errorf("request connection cannot be hijacked: %T", w)) return true } requestHijackedConn, _, err := requestHijacker.Hijack() if err != nil { - glog.V(6).Infof("Unable to hijack response: %v", err) + klog.V(6).Infof("Unable to hijack response: %v", err) h.Responder.Error(w, req, fmt.Errorf("error hijacking connection: %v", err)) return true } @@ -289,7 +289,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques // Forward raw response bytes back to client. if len(rawResponse) > 0 { - glog.V(6).Infof("Writing %d bytes to hijacked connection", len(rawResponse)) + klog.V(6).Infof("Writing %d bytes to hijacked connection", len(rawResponse)) if _, err = requestHijackedConn.Write(rawResponse); err != nil { utilruntime.HandleError(fmt.Errorf("Error proxying response from backend to client: %v", err)) } @@ -311,7 +311,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques } _, err := io.Copy(writer, requestHijackedConn) if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - glog.Errorf("Error proxying data from client to backend: %v", err) + klog.Errorf("Error proxying data from client to backend: %v", err) } close(writerComplete) }() @@ -325,7 +325,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques } _, err := io.Copy(requestHijackedConn, reader) if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - glog.Errorf("Error proxying data from backend to client: %v", err) + klog.Errorf("Error proxying data from backend to client: %v", err) } close(readerComplete) }() @@ -336,7 +336,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques case <-writerComplete: case <-readerComplete: } - glog.V(6).Infof("Disconnecting from backend proxy %s\n Headers: %v", &location, clone.Header) + klog.V(6).Infof("Disconnecting from backend proxy %s\n Headers: %v", &location, clone.Header) return true } diff --git a/staging/src/k8s.io/apimachinery/pkg/util/runtime/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/runtime/BUILD index 6bdeeb3d246d3..cf8f214167300 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/runtime/BUILD +++ 
b/staging/src/k8s.io/apimachinery/pkg/util/runtime/BUILD @@ -17,7 +17,7 @@ go_library( srcs = ["runtime.go"], importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/runtime", importpath = "k8s.io/apimachinery/pkg/util/runtime", - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) filegroup( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/staging/src/k8s.io/apimachinery/pkg/util/runtime/runtime.go index da32fe12f33b8..3512e68e16c55 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -63,7 +63,7 @@ func HandleCrash(additionalHandlers ...func(interface{})) { // logPanic logs the caller tree when a panic occurs. func logPanic(r interface{}) { callers := getCallers(r) - glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) + klog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) } func getCallers(r interface{}) string { @@ -111,7 +111,7 @@ func HandleError(err error) { // logError prints an error with the call stack of the location it was reported func logError(err error) { - glog.ErrorDepth(2, err) + klog.ErrorDepth(2, err) } type rudimentaryErrorBackoff struct { diff --git a/staging/src/k8s.io/apimachinery/pkg/util/yaml/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/yaml/BUILD index 8941af581bee3..ad28913e08234 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/yaml/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/yaml/BUILD @@ -18,7 +18,7 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/yaml", importpath = "k8s.io/apimachinery/pkg/util/yaml", deps = [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 41ad0431b44b9..63d735a804cf8 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -26,7 +26,7 @@ import ( "strings" "unicode" - "github.com/golang/glog" + "k8s.io/klog" "sigs.k8s.io/yaml" ) @@ -217,11 +217,11 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if d.decoder == nil { buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) if isJSON { - glog.V(4).Infof("decoding stream as JSON") + klog.V(4).Infof("decoding stream as JSON") d.decoder = json.NewDecoder(buffer) d.rawData = origData } else { - glog.V(4).Infof("decoding stream as YAML") + klog.V(4).Infof("decoding stream as YAML") d.decoder = NewYAMLToJSONDecoder(buffer) } } @@ -230,7 +230,7 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if syntax, ok := err.(*json.SyntaxError); ok { data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) if readErr != nil { - glog.V(4).Infof("reading stream failed: %v", readErr) + klog.V(4).Infof("reading stream failed: %v", readErr) } js := string(data) diff --git a/staging/src/k8s.io/apimachinery/pkg/watch/BUILD b/staging/src/k8s.io/apimachinery/pkg/watch/BUILD index 49ebc15246e48..5b9e1880993ea 100644 --- a/staging/src/k8s.io/apimachinery/pkg/watch/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/watch/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/staging/src/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 93bb1cdf7f6bb..d61cf5a2e58b6 100644 --- a/staging/src/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/staging/src/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -20,10 +20,10 @@ import ( "io" "sync" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/klog" ) // Decoder allows StreamWatcher to watch any stream for which a Decoder can be written. @@ -100,13 +100,13 @@ func (sw *StreamWatcher) receive() { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) + klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) default: msg := "Unable to decode an event from the watch stream: %v" if net.IsProbableEOF(err) { - glog.V(5).Infof(msg, err) + klog.V(5).Infof(msg, err) } else { - glog.Errorf(msg, err) + klog.Errorf(msg, err) } } return diff --git a/staging/src/k8s.io/apimachinery/pkg/watch/watch.go b/staging/src/k8s.io/apimachinery/pkg/watch/watch.go index a627d1d572c3d..be9c90c03d108 100644 --- a/staging/src/k8s.io/apimachinery/pkg/watch/watch.go +++ b/staging/src/k8s.io/apimachinery/pkg/watch/watch.go @@ -20,7 +20,7 @@ import ( "fmt" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" ) @@ -106,7 +106,7 @@ func (f *FakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - glog.V(4).Infof("Stopping fake watcher.") + klog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } @@ -173,7 +173,7 @@ func (f *RaceFreeFakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - glog.V(4).Infof("Stopping fake watcher.") + klog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index fc86f1df5c070..9e706bf87eae8 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -422,10 +422,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/groupcache/lru", "Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433" diff --git a/staging/src/k8s.io/apiserver/pkg/admission/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/BUILD index 58e3144a7abff..e45f33e1ecfa7 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/BUILD @@ -59,7 +59,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library", "//staging/src/k8s.io/apiserver/pkg/audit:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/config.go b/staging/src/k8s.io/apiserver/pkg/admission/config.go 
index a382135975a5d..ffda2f3262c88 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/config.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/config.go @@ -25,7 +25,7 @@ import ( "path" "path/filepath" - "github.com/golang/glog" + "k8s.io/klog" "sigs.k8s.io/yaml" "k8s.io/apimachinery/pkg/runtime" @@ -146,7 +146,7 @@ func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfi if pluginCfg.Path != "" { content, err := ioutil.ReadFile(pluginCfg.Path) if err != nil { - glog.Fatalf("Couldn't open admission plugin configuration %s: %#v", pluginCfg.Path, err) + klog.Fatalf("Couldn't open admission plugin configuration %s: %#v", pluginCfg.Path, err) return nil, err } return bytes.NewBuffer(content), nil diff --git a/staging/src/k8s.io/apiserver/pkg/admission/configuration/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/configuration/BUILD index 598d6d404c091..10136848ecced 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/configuration/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/configuration/BUILD @@ -51,7 +51,7 @@ go_library( "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/listers/admissionregistration/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/configuration/initializer_manager.go b/staging/src/k8s.io/apiserver/pkg/admission/configuration/initializer_manager.go index 986524b5ba2e2..f2b7e909942e1 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/configuration/initializer_manager.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/configuration/initializer_manager.go @@ -21,7 +21,7 @@ import ( "reflect" "sort" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/admissionregistration/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" @@ -42,7 +42,7 @@ func NewInitializerConfigurationManager(c InitializerConfigurationLister) *Initi list, err := c.List(metav1.ListOptions{}) if err != nil { if errors.IsNotFound(err) || errors.IsForbidden(err) { - glog.V(5).Infof("Initializers are disabled due to an error: %v", err) + klog.V(5).Infof("Initializers are disabled due to an error: %v", err) return nil, ErrDisabled } return nil, err diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD index 05ac8fe0fc40b..ad3b9b5f58b1b 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/BUILD @@ -27,7 +27,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/features:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go index 9ebd2db3f7b6d..d4d184a57478a 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization/initialization.go @@ -21,7 +21,7 @@ import ( "io" "strings" - "github.com/golang/glog" + "k8s.io/klog" 
"k8s.io/api/admissionregistration/v1alpha1" "k8s.io/api/core/v1" @@ -86,9 +86,9 @@ func (i *initializer) ValidateInitialization() error { if !utilfeature.DefaultFeatureGate.Enabled(features.Initializers) { if err := utilfeature.DefaultFeatureGate.Set(string(features.Initializers) + "=true"); err != nil { - glog.Errorf("error enabling Initializers feature as part of admission plugin setup: %v", err) + klog.Errorf("error enabling Initializers feature as part of admission plugin setup: %v", err) } else { - glog.Infof("enabled Initializers feature as part of admission plugin setup") + klog.Infof("enabled Initializers feature as part of admission plugin setup") } } @@ -170,7 +170,7 @@ func (i *initializer) Admit(a admission.Attributes) (err error) { } existing := accessor.GetInitializers() if existing != nil { - glog.V(5).Infof("Admin bypassing initialization for %s", a.GetResource()) + klog.V(5).Infof("Admin bypassing initialization for %s", a.GetResource()) // it must be possible for some users to bypass initialization - for now, check the initialize operation if err := i.canInitialize(a, "create with initializers denied"); err != nil { @@ -182,7 +182,7 @@ func (i *initializer) Admit(a admission.Attributes) (err error) { return nil } } else { - glog.V(5).Infof("Checking initialization for %s", a.GetResource()) + klog.V(5).Infof("Checking initialization for %s", a.GetResource()) config, err := i.readConfig(a) if err != nil { @@ -205,11 +205,11 @@ func (i *initializer) Admit(a admission.Attributes) (err error) { names := findInitializers(config, a.GetResource()) if len(names) == 0 { - glog.V(5).Infof("No initializers needed") + klog.V(5).Infof("No initializers needed") return nil } - glog.V(5).Infof("Found initializers for %s: %v", a.GetResource(), names) + klog.V(5).Infof("Found initializers for %s: %v", a.GetResource(), names) accessor.SetInitializers(newInitializers(names)) } @@ -241,7 +241,7 @@ func (i *initializer) Admit(a admission.Attributes) (err error) { return nil } - glog.V(5).Infof("Modifying uninitialized resource %s", a.GetResource()) + klog.V(5).Infof("Modifying uninitialized resource %s", a.GetResource()) // because we are called before validation, we need to ensure the update transition is valid. 
if errs := validation.ValidateInitializersUpdate(updated, existing, initializerFieldPath); len(errs) > 0 { diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD index 731684adfcc22..5e605f4af59ab 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/BUILD @@ -24,7 +24,7 @@ go_library( "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go index ab01c0aaa7726..d7bb0215b981e 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle/admission.go @@ -21,7 +21,7 @@ import ( "io" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -139,7 +139,7 @@ func (l *Lifecycle) Admit(a admission.Attributes) error { exists = true } if exists { - glog.V(4).Infof("found %s in cache after waiting", a.GetNamespace()) + klog.V(4).Infof("found %s in cache after waiting", a.GetNamespace()) } } @@ -160,7 +160,7 @@ func (l *Lifecycle) Admit(a admission.Attributes) error { case err != nil: return errors.NewInternalError(err) } - glog.V(4).Infof("found %s via storage lookup", a.GetNamespace()) + klog.V(4).Infof("found %s via storage lookup", a.GetNamespace()) } // ensure that we're not trying to create objects in terminating namespaces diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD index a1693818ad625..da393cf2283f6 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/BUILD @@ -27,7 +27,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/util:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go index 4f95a6adf9ada..d646bacb535d1 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go @@ -24,7 +24,7 @@ import ( "time" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" + "k8s.io/klog" admissionv1beta1 "k8s.io/api/admission/v1beta1" "k8s.io/api/admissionregistration/v1beta1" @@ -65,11 +65,11 @@ func (a *mutatingDispatcher) Dispatch(ctx context.Context, attr *generic.Version ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == v1beta1.Ignore if callErr, ok := err.(*webhook.ErrCallingWebhook); ok { if ignoreClientCallFailures { - 
glog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) + klog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) utilruntime.HandleError(callErr) continue } - glog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) + klog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) } return apierrors.NewInternalError(err) } @@ -110,7 +110,7 @@ func (a *mutatingDispatcher) callAttrMutatingHook(ctx context.Context, h *v1beta for k, v := range response.Response.AuditAnnotations { key := h.Name + "/" + k if err := attr.AddAnnotation(key, v); err != nil { - glog.Warningf("Failed to set admission audit annotation %s to %s for mutating webhook %s: %v", key, v, h.Name, err) + klog.Warningf("Failed to set admission audit annotation %s to %s for mutating webhook %s: %v", key, v, h.Name, err) } } diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD index e180eeb76d4a4..ea9f489f8ec1c 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/request:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/util:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go index 42e4262d090c2..166e21adcdf8f 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" admissionv1beta1 "k8s.io/api/admission/v1beta1" "k8s.io/api/admissionregistration/v1beta1" @@ -64,17 +64,17 @@ func (d *validatingDispatcher) Dispatch(ctx context.Context, attr *generic.Versi ignoreClientCallFailures := hook.FailurePolicy != nil && *hook.FailurePolicy == v1beta1.Ignore if callErr, ok := err.(*webhook.ErrCallingWebhook); ok { if ignoreClientCallFailures { - glog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) + klog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr) utilruntime.HandleError(callErr) return } - glog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) + klog.Warningf("Failed calling webhook, failing closed %v: %v", hook.Name, err) errCh <- apierrors.NewInternalError(err) return } - glog.Warningf("rejected by webhook %q: %#v", hook.Name, err) + klog.Warningf("rejected by webhook %q: %#v", hook.Name, err) errCh <- err }(relevantHooks[i]) } @@ -124,7 +124,7 @@ func (d *validatingDispatcher) callHook(ctx context.Context, h *v1beta1.Webhook, for k, v := range response.Response.AuditAnnotations { key := h.Name + "/" + k if err := attr.AddAnnotation(key, v); err != nil { - glog.Warningf("Failed to set admission audit annotation %s to %s for validating webhook %s: %v", key, v, h.Name, err) + klog.Warningf("Failed to set admission audit annotation %s to %s for validating webhook %s: %v", key, v, h.Name, err) } } if 
response.Response.Allowed { diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugins.go b/staging/src/k8s.io/apiserver/pkg/admission/plugins.go index c17d62cd4e67b..bdf087e564f4b 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugins.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugins.go @@ -26,7 +26,7 @@ import ( "strings" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // Factory is a function that returns an Interface for admission decisions. @@ -75,13 +75,13 @@ func (ps *Plugins) Register(name string, plugin Factory) { if ps.registry != nil { _, found := ps.registry[name] if found { - glog.Fatalf("Admission plugin %q was registered twice", name) + klog.Fatalf("Admission plugin %q was registered twice", name) } } else { ps.registry = map[string]Factory{} } - glog.V(1).Infof("Registered admission plugin %q", name) + klog.V(1).Infof("Registered admission plugin %q", name) ps.registry[name] = plugin } @@ -155,10 +155,10 @@ func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigPro } } if len(mutationPlugins) != 0 { - glog.Infof("Loaded %d mutating admission controller(s) successfully in the following order: %s.", len(mutationPlugins), strings.Join(mutationPlugins, ",")) + klog.Infof("Loaded %d mutating admission controller(s) successfully in the following order: %s.", len(mutationPlugins), strings.Join(mutationPlugins, ",")) } if len(validationPlugins) != 0 { - glog.Infof("Loaded %d validating admission controller(s) successfully in the following order: %s.", len(validationPlugins), strings.Join(validationPlugins, ",")) + klog.Infof("Loaded %d validating admission controller(s) successfully in the following order: %s.", len(validationPlugins), strings.Join(validationPlugins, ",")) } return chainAdmissionHandler(handlers), nil } @@ -166,7 +166,7 @@ func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigPro // InitPlugin creates an instance of the named interface. 
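For context on the Register/Fatalf pattern in plugins.go above: duplicate registration is treated as a programmer error, so the process is killed rather than limping on with an ambiguous registry. A simplified sketch of the idea, with a stripped-down Factory type standing in for the real one (which takes an io.Reader of plugin configuration):

package main

import (
	"sync"

	"k8s.io/klog"
)

// Factory is a stripped-down stand-in for the real admission.Factory.
type Factory func() (interface{}, error)

// Plugins keeps a named registry of factories.
type Plugins struct {
	lock     sync.Mutex
	registry map[string]Factory
}

// Register treats duplicate names as a programmer error: klog.Fatalf logs
// the message and terminates the process.
func (ps *Plugins) Register(name string, plugin Factory) {
	ps.lock.Lock()
	defer ps.lock.Unlock()
	if ps.registry == nil {
		ps.registry = map[string]Factory{}
	}
	if _, found := ps.registry[name]; found {
		klog.Fatalf("Admission plugin %q was registered twice", name)
	}
	klog.V(1).Infof("Registered admission plugin %q", name)
	ps.registry[name] = plugin
}

func main() {
	ps := &Plugins{}
	ps.Register("NamespaceLifecycle", func() (interface{}, error) { return nil, nil })
}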
func (ps *Plugins) InitPlugin(name string, config io.Reader, pluginInitializer PluginInitializer) (Interface, error) { if name == "" { - glog.Info("No admission plugin specified.") + klog.Info("No admission plugin specified.") return nil, nil } diff --git a/staging/src/k8s.io/apiserver/pkg/audit/BUILD b/staging/src/k8s.io/apiserver/pkg/audit/BUILD index a52f1fba8621b..f71da4d424366 100644 --- a/staging/src/k8s.io/apiserver/pkg/audit/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/audit/BUILD @@ -33,9 +33,9 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/audit/metrics.go b/staging/src/k8s.io/apiserver/pkg/audit/metrics.go index 10280e0d88e91..46b480eeaf544 100644 --- a/staging/src/k8s.io/apiserver/pkg/audit/metrics.go +++ b/staging/src/k8s.io/apiserver/pkg/audit/metrics.go @@ -19,9 +19,9 @@ package audit import ( "fmt" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/klog" ) const ( @@ -83,5 +83,5 @@ func HandlePluginError(plugin string, err error, impacted ...*auditinternal.Even for _, ev := range impacted { msg = msg + EventString(ev) + "\n" } - glog.Error(msg) + klog.Error(msg) } diff --git a/staging/src/k8s.io/apiserver/pkg/audit/policy/BUILD b/staging/src/k8s.io/apiserver/pkg/audit/policy/BUILD index 8b9f926279d8f..eb5905be6532e 100644 --- a/staging/src/k8s.io/apiserver/pkg/audit/policy/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/audit/policy/BUILD @@ -55,7 +55,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/apis/audit/validation:go_default_library", "//staging/src/k8s.io/apiserver/pkg/audit:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/audit/policy/reader.go b/staging/src/k8s.io/apiserver/pkg/audit/policy/reader.go index e78f86e8c44de..3d669fe699dc1 100644 --- a/staging/src/k8s.io/apiserver/pkg/audit/policy/reader.go +++ b/staging/src/k8s.io/apiserver/pkg/audit/policy/reader.go @@ -28,7 +28,7 @@ import ( "k8s.io/apiserver/pkg/apis/audit/validation" "k8s.io/apiserver/pkg/audit" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -85,6 +85,6 @@ func LoadPolicyFromBytes(policyDef []byte) (*auditinternal.Policy, error) { if policyCnt == 0 { return nil, fmt.Errorf("loaded illegal policy with 0 rules") } - glog.V(4).Infof("Loaded %d audit policy rules", policyCnt) + klog.V(4).Infof("Loaded %d audit policy rules", policyCnt) return policy, nil } diff --git a/staging/src/k8s.io/apiserver/pkg/audit/request.go b/staging/src/k8s.io/apiserver/pkg/audit/request.go index 3aafde910f9d6..d4b12770eab4e 100644 --- a/staging/src/k8s.io/apiserver/pkg/audit/request.go +++ b/staging/src/k8s.io/apiserver/pkg/audit/request.go @@ -22,8 +22,8 @@ import ( "net/http" "time" - "github.com/golang/glog" "github.com/pborman/uuid" + "k8s.io/klog" "reflect" @@ -152,7 +152,7 @@ func LogRequestObject(ae *auditinternal.Event, obj runtime.Object, gvr 
schema.Gr ae.RequestObject, err = encodeObject(obj, gvr.GroupVersion(), s) if err != nil { // TODO(audit): add error slice to audit event struct - glog.Warningf("Auditing failed of %v request: %v", reflect.TypeOf(obj).Name(), err) + klog.Warningf("Auditing failed of %v request: %v", reflect.TypeOf(obj).Name(), err) return } } @@ -191,7 +191,7 @@ func LogResponseObject(ae *auditinternal.Event, obj runtime.Object, gv schema.Gr var err error ae.ResponseObject, err = encodeObject(obj, gv, s) if err != nil { - glog.Warningf("Audit failed for %q response: %v", reflect.TypeOf(obj).Name(), err) + klog.Warningf("Audit failed for %q response: %v", reflect.TypeOf(obj).Name(), err) } } @@ -223,7 +223,7 @@ func LogAnnotation(ae *auditinternal.Event, key, value string) { ae.Annotations = make(map[string]string) } if v, ok := ae.Annotations[key]; ok && v != value { - glog.Warningf("Failed to set annotations[%q] to %q for audit:%q, it has already been set to %q", key, value, ae.AuditID, ae.Annotations[key]) + klog.Warningf("Failed to set annotations[%q] to %q for audit:%q, it has already been set to %q", key, value, ae.AuditID, ae.Annotations[key]) return } ae.Annotations[key] = value diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD b/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD index 172e0fedf6cbf..b43d1ff609c00 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile/BUILD @@ -21,7 +21,7 @@ go_library( deps = [ "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go b/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go index 4735357f9a896..69568f17dd25a 100644 --- a/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go +++ b/staging/src/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go @@ -24,9 +24,9 @@ import ( "os" "strings" - "github.com/golang/glog" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/klog" ) type TokenAuthenticator struct { @@ -67,7 +67,7 @@ func NewCSV(path string) (*TokenAuthenticator, error) { recordNum++ if record[0] == "" { - glog.Warningf("empty token has been found in token file '%s', record number '%d'", path, recordNum) + klog.Warningf("empty token has been found in token file '%s', record number '%d'", path, recordNum) continue } @@ -76,7 +76,7 @@ func NewCSV(path string) (*TokenAuthenticator, error) { UID: record[2], } if _, exist := tokens[record[0]]; exist { - glog.Warningf("duplicate token has been found in token file '%s', record number '%d'", path, recordNum) + klog.Warningf("duplicate token has been found in token file '%s', record number '%d'", path, recordNum) } tokens[record[0]] = obj diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/BUILD index 5ceffa3df4d56..0fc8e02381b63 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/BUILD @@ -68,8 +68,8 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/endpoints/handlers/responsewriters:go_default_library", 
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go index 70c14e088a281..d9f70efac264a 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go @@ -21,8 +21,8 @@ import ( "net/http" "strings" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" + "k8s.io/klog" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -52,7 +52,7 @@ func init() { // is invoked to serve the request. func WithAuthentication(handler http.Handler, auth authenticator.Request, failed http.Handler, apiAuds authenticator.Audiences) http.Handler { if auth == nil { - glog.Warningf("Authentication is disabled") + klog.Warningf("Authentication is disabled") return handler } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { @@ -62,7 +62,7 @@ func WithAuthentication(handler http.Handler, auth authenticator.Request, failed resp, ok, err := auth.AuthenticateRequest(req) if err != nil || !ok { if err != nil { - glog.Errorf("Unable to authenticate the request due to an error: %v", err) + klog.Errorf("Unable to authenticate the request due to an error: %v", err) } failed.ServeHTTP(w, req) return diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authorization.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authorization.go index 4c9f140ca30df..c6ab15b3d672a 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authorization.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authorization.go @@ -21,7 +21,7 @@ import ( "errors" "net/http" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/audit" @@ -44,7 +44,7 @@ const ( // WithAuthorizationCheck passes all authorized requests on to handler, and returns a forbidden error otherwise. func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler { if a == nil { - glog.Warningf("Authorization is disabled") + klog.Warningf("Authorization is disabled") return handler } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { @@ -70,7 +70,7 @@ func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime. 
return } - glog.V(4).Infof("Forbidden: %#v, Reason: %q", req.RequestURI, reason) + klog.V(4).Infof("Forbidden: %#v, Reason: %q", req.RequestURI, reason) audit.LogAnnotation(ae, decisionAnnotationKey, decisionForbid) audit.LogAnnotation(ae, reasonAnnotationKey, reason) responsewriters.Forbidden(ctx, attributes, w, req, reason, s) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go index 726cbe4d565f0..d017f2bf68752 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -23,7 +23,7 @@ import ( "net/url" "strings" - "github.com/golang/glog" + "k8s.io/klog" authenticationv1 "k8s.io/api/authentication/v1" "k8s.io/api/core/v1" @@ -42,7 +42,7 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { impersonationRequests, err := buildImpersonationRequests(req.Header) if err != nil { - glog.V(4).Infof("%v", err) + klog.V(4).Infof("%v", err) responsewriters.InternalError(w, req, err) return } @@ -102,14 +102,14 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. userExtra[extraKey] = append(userExtra[extraKey], extraValue) default: - glog.V(4).Infof("unknown impersonation request type: %v", impersonationRequest) + klog.V(4).Infof("unknown impersonation request type: %v", impersonationRequest) responsewriters.Forbidden(ctx, actingAsAttributes, w, req, fmt.Sprintf("unknown impersonation request type: %v", impersonationRequest), s) return } decision, reason, err := a.Authorize(actingAsAttributes) if err != nil || decision != authorizer.DecisionAllow { - glog.V(4).Infof("Forbidden: %#v, Reason: %s, Error: %v", req.RequestURI, reason, err) + klog.V(4).Infof("Forbidden: %#v, Reason: %s, Error: %v", req.RequestURI, reason, err) responsewriters.Forbidden(ctx, actingAsAttributes, w, req, reason, s) return } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD index fede49cf3371b..734fc9004b229 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/BUILD @@ -85,8 +85,8 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/trace:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/wsstream:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/websocket:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/get.go index 8526f8066ec8b..0f1c59946a387 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/get.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -25,7 +25,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -244,7 +244,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope RequestScope, forceWatch if timeout == 0 && minRequestTimeout > 0 { timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0)) } - glog.V(3).Infof("Starting watch for %s, rv=%s 
labels=%s fields=%s timeout=%s", req.URL.Path, opts.ResourceVersion, opts.LabelSelector, opts.FieldSelector, timeout) + klog.V(3).Infof("Starting watch for %s, rv=%s labels=%s fields=%s timeout=%s", req.URL.Path, opts.ResourceVersion, opts.LabelSelector, opts.FieldSelector, timeout) watcher, err := rw.Watch(ctx, &opts) if err != nil { diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go index b3504df3f2372..7205bf8efdca4 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest.go @@ -25,7 +25,7 @@ import ( "net/url" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -284,7 +284,7 @@ func setListSelfLink(obj runtime.Object, ctx context.Context, req *http.Request, return 0, err } if err := namer.SetSelfLink(obj, uri); err != nil { - glog.V(4).Infof("Unable to set self link on object: %v", err) + klog.V(4).Infof("Unable to set self link on object: %v", err) } requestInfo, ok := request.RequestInfoFrom(ctx) if !ok { @@ -327,7 +327,7 @@ func parseTimeout(str string) time.Duration { if err == nil { return timeout } - glog.Errorf("Failed to parse %q: %v", str, err) + klog.Errorf("Failed to parse %q: %v", str, err) } return 30 * time.Second } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD b/staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD index 5e84006a1cf28..388c1846e3a29 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/request/BUILD @@ -36,7 +36,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go index 1520bb3c9e515..cc8ae39fa2c5c 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -27,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - "github.com/golang/glog" + "k8s.io/klog" ) // LongRunningRequestCheck is a predicate which is true for long-running http requests. @@ -210,7 +210,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er opts := metainternalversion.ListOptions{} if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { // An error in parsing request will result in default to "list" and not setting "name" field. - glog.Errorf("Couldn't parse request %#v: %v", req.URL.Query(), err) + klog.Errorf("Couldn't parse request %#v: %v", req.URL.Query(), err) // Reset opts to not rely on partial results from parsing. // However, if watch is set, let's report it. 
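The ListResource hunk above logs the watch start after computing a jittered timeout: when the client does not request one, the server picks a value uniformly in [minRequestTimeout, 2*minRequestTimeout) so that watches opened together do not all expire together. A runnable sketch of that calculation (the helper name is ours, not the patch's):

package main

import (
	"flag"
	"math/rand"
	"time"

	"k8s.io/klog"
)

// watchTimeout mirrors the calculation in ListResource: a zero requested
// timeout means the client left the choice to the server.
func watchTimeout(requested, minRequestTimeout time.Duration) time.Duration {
	timeout := requested
	if timeout == 0 && minRequestTimeout > 0 {
		// rand.Float64() is in [0, 1), so the result lands in
		// [minRequestTimeout, 2*minRequestTimeout).
		timeout = time.Duration(float64(minRequestTimeout) * (rand.Float64() + 1.0))
	}
	return timeout
}

func main() {
	klog.InitFlags(nil)
	flag.Parse() // try: -logtostderr=true -v=3

	t := watchTimeout(0, 30*time.Minute)
	klog.V(3).Infof("Starting watch, timeout=%s", t)
}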
opts = metainternalversion.ListOptions{} diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/BUILD b/staging/src/k8s.io/apiserver/pkg/registry/generic/BUILD index 87b78db56c5ed..9f23d1223a96e 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/storage:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/BUILD b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/BUILD index 8281a8e5a1b70..69db106208528 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/BUILD @@ -89,7 +89,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend/factory:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/dryrun:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go index fc93cc4d25b60..4552475070521 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go @@ -19,7 +19,7 @@ package registry import ( "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" @@ -43,10 +43,10 @@ func StorageWithCacher(capacity int) generic.StorageDecorator { s, d := generic.NewRawStorage(storageConfig) if capacity == 0 { - glog.V(5).Infof("Storage caching is disabled for %T", objectType) + klog.V(5).Infof("Storage caching is disabled for %T", objectType) return s, d } - glog.V(5).Infof("Storage caching is enabled for %T with capacity %v", objectType, capacity) + klog.V(5).Infof("Storage caching is enabled for %T with capacity %v", objectType, capacity) // TODO: we would change this later to make storage always have cacher and hide low level KV layer inside. // Currently it has two layers of same storage interface -- cacher and low level kv. diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go index 615637d8dee3b..2dcf99eae5275 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go @@ -47,7 +47,7 @@ import ( "k8s.io/apiserver/pkg/storage/etcd/metrics" "k8s.io/apiserver/pkg/util/dryrun" - "github.com/golang/glog" + "k8s.io/klog" ) // ObjectFunc is a function to act on a given object. An error may be returned @@ -501,7 +501,7 @@ func (e *Store) shouldDeleteForFailedInitialization(ctx context.Context, obj run // Used for objects that are either been finalized or have never initialized. 
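The registry hunks above also show the verbosity conventions these messages follow: roughly V(2) for infrequent steady-state notices (the object-count monitor), V(4)-V(5) for debugging detail, and V(6) for per-request tracing of the delete paths. As a quick illustration of how -v reveals them (message text borrowed from the hunks, values are placeholders):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse() // try: -logtostderr=true -v=6 to see all three lines

	klog.V(2).Infof("Monitoring %v count at /%v", "pods", "registry/pods")
	klog.V(4).Infof("Delete %s in DeleteCollection failed: %v", "mypod", "conflict")
	klog.V(6).Infof("going to delete %s from registry, triggered by update", "mypod")
}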
func (e *Store) deleteWithoutFinalizers(ctx context.Context, name, key string, obj runtime.Object, preconditions *storage.Preconditions, dryRun bool) (runtime.Object, bool, error) { out := e.NewFunc() - glog.V(6).Infof("going to delete %s from registry, triggered by update", name) + klog.V(6).Infof("going to delete %s from registry, triggered by update", name) if err := e.Storage.Delete(ctx, key, out, preconditions, dryRun); err != nil { // Deletion is racy, i.e., there could be multiple update // requests to remove all finalizers from the object, so we @@ -909,7 +909,7 @@ func (e *Store) updateForGracefulDeletionAndFinalizers(ctx context.Context, name if !graceful { // set the DeleteGracePeriods to 0 if the object has pendingFinalizers but not supporting graceful deletion if pendingFinalizers { - glog.V(6).Infof("update the DeletionTimestamp to \"now\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers", name) + klog.V(6).Infof("update the DeletionTimestamp to \"now\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers", name) err = markAsDeleting(existing) if err != nil { return nil, err @@ -1017,7 +1017,7 @@ func (e *Store) Delete(ctx context.Context, name string, options *metav1.DeleteO } // delete immediately, or no graceful deletion supported - glog.V(6).Infof("going to delete %s from registry: ", name) + klog.V(6).Infof("going to delete %s from registry: ", name) out = e.NewFunc() if err := e.Storage.Delete(ctx, key, out, &preconditions, dryrun.IsDryRun(options.DryRun)); err != nil { // Please refer to the place where we set ignoreNotFound for the reason @@ -1103,7 +1103,7 @@ func (e *Store) DeleteCollection(ctx context.Context, options *metav1.DeleteOpti return } if _, _, err := e.Delete(ctx, accessor.GetName(), options); err != nil && !kubeerr.IsNotFound(err) { - glog.V(4).Infof("Delete %s in DeleteCollection failed: %v", accessor.GetName(), err) + klog.V(4).Infof("Delete %s in DeleteCollection failed: %v", accessor.GetName(), err) errs <- err return } @@ -1246,7 +1246,7 @@ func (e *Store) Export(ctx context.Context, name string, opts metav1.ExportOptio if accessor, err := meta.Accessor(obj); err == nil { exportObjectMeta(accessor, opts.Exact) } else { - glog.V(4).Infof("Object of type %v does not have ObjectMeta: %v", reflect.TypeOf(obj), err) + klog.V(4).Infof("Object of type %v does not have ObjectMeta: %v", reflect.TypeOf(obj), err) } if e.ExportStrategy != nil { @@ -1411,12 +1411,12 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error { func (e *Store) startObservingCount(period time.Duration) func() { prefix := e.KeyRootFunc(genericapirequest.NewContext()) resourceName := e.DefaultQualifiedResource.String() - glog.V(2).Infof("Monitoring %v count at /%v", resourceName, prefix) + klog.V(2).Infof("Monitoring %v count at /%v", resourceName, prefix) stopCh := make(chan struct{}) go wait.JitterUntil(func() { count, err := e.Storage.Count(prefix) if err != nil { - glog.V(5).Infof("Failed to update storage count metric: %v", err) + klog.V(5).Infof("Failed to update storage count metric: %v", err) metrics.UpdateObjectCount(resourceName, -1) } else { metrics.UpdateObjectCount(resourceName, count) diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go b/staging/src/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go index 94a4794422c05..858ad922af4b6 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go +++ 
b/staging/src/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go @@ -17,11 +17,11 @@ limitations under the License. package generic import ( - "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/apiserver/pkg/storage/storagebackend/factory" + "k8s.io/klog" ) // StorageDecorator is a function signature for producing a storage.Interface @@ -54,7 +54,7 @@ func UndecoratedStorage( func NewRawStorage(config *storagebackend.Config) (storage.Interface, factory.DestroyFunc) { s, d, err := factory.Create(*config) if err != nil { - glog.Fatalf("Unable to create storage backend: config (%v), err (%v)", config, err) + klog.Fatalf("Unable to create storage backend: config (%v), err (%v)", config, err) } return s, d } diff --git a/staging/src/k8s.io/apiserver/pkg/server/BUILD b/staging/src/k8s.io/apiserver/pkg/server/BUILD index c71377a41ed24..5dc323de407e8 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/BUILD @@ -110,9 +110,9 @@ go_library( "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/server/config.go b/staging/src/k8s.io/apiserver/pkg/server/config.go index 10621f84f60b1..4d6f90891fc79 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -30,8 +30,8 @@ import ( "github.com/emicklei/go-restful-swagger12" "github.com/go-openapi/spec" - "github.com/golang/glog" "github.com/pborman/uuid" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -358,11 +358,11 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo // if there is no port, and we listen on one securely, use that one if _, _, err := net.SplitHostPort(c.ExternalAddress); err != nil { if c.SecureServing == nil { - glog.Fatalf("cannot derive external address port without listening on a secure port.") + klog.Fatalf("cannot derive external address port without listening on a secure port.") } _, port, err := c.SecureServing.HostPort() if err != nil { - glog.Fatalf("cannot derive external address from the secure port: %v", err) + klog.Fatalf("cannot derive external address from the secure port: %v", err) } c.ExternalAddress = net.JoinHostPort(c.ExternalAddress, strconv.Itoa(port)) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go b/staging/src/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go index 6cf6c1a64fcbb..a78250edae9bf 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go +++ b/staging/src/k8s.io/apiserver/pkg/server/deprecated_insecure_serving.go @@ -21,7 +21,7 @@ import ( "net/http" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" @@ -46,9 +46,9 @@ func (s *DeprecatedInsecureServingInfo) Serve(handler http.Handler, shutdownTime } if len(s.Name) > 0 { - glog.Infof("Serving %s insecurely on %s", s.Name, s.Listener.Addr()) 
+ klog.Infof("Serving %s insecurely on %s", s.Name, s.Listener.Addr()) } else { - glog.Infof("Serving insecurely on %s", s.Listener.Addr()) + klog.Infof("Serving insecurely on %s", s.Listener.Addr()) } return RunServer(insecureServer, s.Listener, shutdownTimeout, stopCh) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/BUILD b/staging/src/k8s.io/apiserver/pkg/server/filters/BUILD index 900c24b656ba1..b75b3de98241a 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/BUILD @@ -54,7 +54,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/cors.go b/staging/src/k8s.io/apiserver/pkg/server/filters/cors.go index 2c6e66ed6ba6d..96ff58dc7c889 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/cors.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/cors.go @@ -21,7 +21,7 @@ import ( "regexp" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) // TODO: use restful.CrossOriginResourceSharing @@ -79,7 +79,7 @@ func WithCORS(handler http.Handler, allowedOriginPatterns []string, allowedMetho func allowedOriginRegexps(allowedOrigins []string) []*regexp.Regexp { res, err := compileRegexps(allowedOrigins) if err != nil { - glog.Fatalf("Invalid CORS allowed origin, --cors-allowed-origins flag was set to %v - %v", strings.Join(allowedOrigins, ","), err) + klog.Fatalf("Invalid CORS allowed origin, --cors-allowed-origins flag was set to %v - %v", strings.Join(allowedOrigins, ","), err) } return res } diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go index 4f56e48a03a46..8818cb5633f38 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go @@ -28,7 +28,7 @@ import ( "k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -47,7 +47,7 @@ var nonMutatingRequestVerbs = sets.NewString("get", "list", "watch") func handleError(w http.ResponseWriter, r *http.Request, err error) { errorMsg := fmt.Sprintf("Internal Server Error: %#v", r.RequestURI) http.Error(w, errorMsg, http.StatusInternalServerError) - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) } // requestWatermark is used to trak maximal usage of inflight requests. diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/wrap.go b/staging/src/k8s.io/apiserver/pkg/server/filters/wrap.go index 38742ffd9a212..46842c69737cf 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/filters/wrap.go +++ b/staging/src/k8s.io/apiserver/pkg/server/filters/wrap.go @@ -20,7 +20,7 @@ import ( "net/http" "runtime/debug" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/server/httplog" @@ -31,7 +31,7 @@ func WithPanicRecovery(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { defer runtime.HandleCrash(func(err interface{}) { http.Error(w, "This request caused apiserver to panic. 
Look in the logs for details.", http.StatusInternalServerError) - glog.Errorf("apiserver panic'd on %v %v: %v\n%s\n", req.Method, req.RequestURI, err, debug.Stack()) + klog.Errorf("apiserver panic'd on %v %v: %v\n%s\n", req.Method, req.RequestURI, err, debug.Stack()) }) logger := httplog.NewLogged(req, &w) diff --git a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go index b6e500c612312..7b334e1e900f9 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/staging/src/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -25,7 +25,7 @@ import ( systemd "github.com/coreos/go-systemd/daemon" "github.com/emicklei/go-restful-swagger12" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -312,7 +312,7 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) error { s.RunPostStartHooks(stopCh) if _, err := systemd.SdNotify(true, "READY=1\n"); err != nil { - glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err) + klog.Errorf("Unable to send systemd daemon successful start message: %v\n", err) } return nil @@ -322,7 +322,7 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}) error { func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo) error { for _, groupVersion := range apiGroupInfo.PrioritizedVersions { if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 { - glog.Warningf("Skipping API %v because it has no resources.", groupVersion) + klog.Warningf("Skipping API %v because it has no resources.", groupVersion) continue } diff --git a/staging/src/k8s.io/apiserver/pkg/server/handler.go b/staging/src/k8s.io/apiserver/pkg/server/handler.go index e4e7d9aee06b6..0277bac7788fd 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/handler.go +++ b/staging/src/k8s.io/apiserver/pkg/server/handler.go @@ -25,7 +25,7 @@ import ( "strings" "github.com/emicklei/go-restful" - "github.com/golang/glog" + "k8s.io/klog" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" @@ -130,7 +130,7 @@ func (d director) ServeHTTP(w http.ResponseWriter, req *http.Request) { // normally these are passed to the nonGoRestfulMux, but if discovery is enabled, it will go directly. 
// We can't rely on a prefix match since /apis matches everything (see the big comment on Director above) if path == "/apis" || path == "/apis/" { - glog.V(5).Infof("%v: %v %q satisfied by gorestful with webservice %v", d.name, req.Method, path, ws.RootPath()) + klog.V(5).Infof("%v: %v %q satisfied by gorestful with webservice %v", d.name, req.Method, path, ws.RootPath()) // don't use servemux here because gorestful servemuxes get messed up when removing webservices // TODO fix gorestful, remove TPRs, or stop using gorestful d.goRestfulContainer.Dispatch(w, req) @@ -140,7 +140,7 @@ func (d director) ServeHTTP(w http.ResponseWriter, req *http.Request) { case strings.HasPrefix(path, ws.RootPath()): // ensure an exact match or a path boundary match if len(path) == len(ws.RootPath()) || path[len(ws.RootPath())] == '/' { - glog.V(5).Infof("%v: %v %q satisfied by gorestful with webservice %v", d.name, req.Method, path, ws.RootPath()) + klog.V(5).Infof("%v: %v %q satisfied by gorestful with webservice %v", d.name, req.Method, path, ws.RootPath()) // don't use servemux here because gorestful servemuxes get messed up when removing webservices // TODO fix gorestful, remove TPRs, or stop using gorestful d.goRestfulContainer.Dispatch(w, req) @@ -150,7 +150,7 @@ func (d director) ServeHTTP(w http.ResponseWriter, req *http.Request) { } // if we didn't find a match, then we just skip gorestful altogether - glog.V(5).Infof("%v: %v %q satisfied by nonGoRestful", d.name, req.Method, path) + klog.V(5).Infof("%v: %v %q satisfied by nonGoRestful", d.name, req.Method, path) d.nonGoRestfulMux.ServeHTTP(w, req) } @@ -165,7 +165,7 @@ func logStackOnRecover(s runtime.NegotiatedSerializer, panicReason interface{}, } buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line)) } - glog.Errorln(buffer.String()) + klog.Errorln(buffer.String()) headers := http.Header{} if ct := w.Header().Get("Content-Type"); len(ct) > 0 { diff --git a/staging/src/k8s.io/apiserver/pkg/server/healthz/BUILD b/staging/src/k8s.io/apiserver/pkg/server/healthz/BUILD index aab24c34ef12d..f47cf546a945b 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/healthz/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/healthz/BUILD @@ -22,7 +22,7 @@ go_library( importpath = "k8s.io/apiserver/pkg/server/healthz", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/server/healthz/healthz.go b/staging/src/k8s.io/apiserver/pkg/server/healthz/healthz.go index 122dd0b6eedb2..7e2bc36c5270c 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/healthz/healthz.go +++ b/staging/src/k8s.io/apiserver/pkg/server/healthz/healthz.go @@ -25,7 +25,7 @@ import ( "sync/atomic" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/wait" ) @@ -76,7 +76,7 @@ func (l *log) Check(_ *http.Request) error { l.startOnce.Do(func() { l.lastVerified.Store(time.Now()) go wait.Forever(func() { - glog.Flush() + klog.Flush() l.lastVerified.Store(time.Now()) }, time.Minute) }) @@ -108,11 +108,11 @@ func InstallHandler(mux mux, checks ...HealthzChecker) { // result in a panic. func InstallPathHandler(mux mux, path string, checks ...HealthzChecker) { if len(checks) == 0 { - glog.V(5).Info("No default health checks specified. Installing the ping handler.") + klog.V(5).Info("No default health checks specified. 
Installing the ping handler.") checks = []HealthzChecker{PingHealthz} } - glog.V(5).Info("Installing healthz checkers:", formatQuoted(checkerNames(checks...)...)) + klog.V(5).Info("Installing healthz checkers:", formatQuoted(checkerNames(checks...)...)) mux.Handle(path, handleRootHealthz(checks...)) for _, check := range checks { @@ -150,7 +150,7 @@ func handleRootHealthz(checks ...HealthzChecker) http.HandlerFunc { if err := check.Check(r); err != nil { // don't include the error since this endpoint is public. If someone wants more detail // they should have explicit permission to the detailed checks. - glog.V(6).Infof("healthz check %v failed: %v", check.Name(), err) + klog.V(6).Infof("healthz check %v failed: %v", check.Name(), err) fmt.Fprintf(&verboseOut, "[-]%v failed: reason withheld\n", check.Name()) failed = true } else { diff --git a/staging/src/k8s.io/apiserver/pkg/server/hooks.go b/staging/src/k8s.io/apiserver/pkg/server/hooks.go index ccf8ee17ad02d..921255218bcc5 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/hooks.go +++ b/staging/src/k8s.io/apiserver/pkg/server/hooks.go @@ -21,7 +21,7 @@ import ( "fmt" "net/http" - "github.com/golang/glog" + "k8s.io/klog" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -101,7 +101,7 @@ func (s *GenericAPIServer) AddPostStartHook(name string, hook PostStartHookFunc) // AddPostStartHookOrDie allows you to add a PostStartHook, but dies on failure func (s *GenericAPIServer) AddPostStartHookOrDie(name string, hook PostStartHookFunc) { if err := s.AddPostStartHook(name, hook); err != nil { - glog.Fatalf("Error registering PostStartHook %q: %v", name, err) + klog.Fatalf("Error registering PostStartHook %q: %v", name, err) } } @@ -132,7 +132,7 @@ func (s *GenericAPIServer) AddPreShutdownHook(name string, hook PreShutdownHookF // AddPreShutdownHookOrDie allows you to add a PostStartHook, but dies on failure func (s *GenericAPIServer) AddPreShutdownHookOrDie(name string, hook PreShutdownHookFunc) { if err := s.AddPreShutdownHook(name, hook); err != nil { - glog.Fatalf("Error registering PreShutdownHook %q: %v", name, err) + klog.Fatalf("Error registering PreShutdownHook %q: %v", name, err) } } @@ -185,7 +185,7 @@ func runPostStartHook(name string, entry postStartHookEntry, context PostStartHo }() // if the hook intentionally wants to kill server, let it. 
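hooks.go above uses the common OrDie convention: the fallible variant returns an error, and the OrDie wrapper turns that error into klog.Fatalf, which logs at fatal severity and exits the process. A simplified, self-contained sketch of the shape — the names here are stand-ins, not the apiserver's types:

package main

import (
	"fmt"

	"k8s.io/klog"
)

// hookRegistry is a stand-in for the GenericAPIServer hook machinery.
type hookRegistry struct {
	hooks map[string]func() error
}

// addHook is the fallible variant: callers who can recover get an error.
func (r *hookRegistry) addHook(name string, hook func() error) error {
	if _, exists := r.hooks[name]; exists {
		return fmt.Errorf("unable to add %q: hook is already registered", name)
	}
	r.hooks[name] = hook
	return nil
}

// addHookOrDie is for startup wiring, where failure means the binary is
// misconfigured and should not come up at all.
func (r *hookRegistry) addHookOrDie(name string, hook func() error) {
	if err := r.addHook(name, hook); err != nil {
		klog.Fatalf("Error registering hook %q: %v", name, err)
	}
}

func main() {
	r := &hookRegistry{hooks: map[string]func() error{}}
	r.addHookOrDie("start-informers", func() error { return nil })
}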
if err != nil { - glog.Fatalf("PostStartHook %q failed: %v", name, err) + klog.Fatalf("PostStartHook %q failed: %v", name, err) } close(entry.done) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/httplog/BUILD b/staging/src/k8s.io/apiserver/pkg/server/httplog/BUILD index 22d299ba555e4..9626af306f24b 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/httplog/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/httplog/BUILD @@ -20,7 +20,7 @@ go_library( ], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/server/httplog", importpath = "k8s.io/apiserver/pkg/server/httplog", - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) filegroup( diff --git a/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog.go b/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog.go index f8a8a5307aaab..dcdba69225d82 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog.go +++ b/staging/src/k8s.io/apiserver/pkg/server/httplog/httplog.go @@ -24,7 +24,7 @@ import ( "runtime" "time" - "github.com/golang/glog" + "k8s.io/klog" ) // StacktracePred returns true if a stacktrace should be logged for this status. @@ -61,7 +61,7 @@ type passthroughLogger struct{} // Addf logs info immediately. func (passthroughLogger) Addf(format string, data ...interface{}) { - glog.V(2).Info(fmt.Sprintf(format, data...)) + klog.V(2).Info(fmt.Sprintf(format, data...)) } // DefaultStacktracePred is the default implementation of StacktracePred. @@ -143,11 +143,11 @@ func (rl *respLogger) Addf(format string, data ...interface{}) { // Log is intended to be called once at the end of your request handler, via defer func (rl *respLogger) Log() { latency := time.Since(rl.startTime) - if glog.V(3) { + if klog.V(3) { if !rl.hijacked { - glog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) %v%v%v [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.status, rl.statusStack, rl.addedInfo, rl.req.UserAgent(), rl.req.RemoteAddr)) + klog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) %v%v%v [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.status, rl.statusStack, rl.addedInfo, rl.req.UserAgent(), rl.req.RemoteAddr)) } else { - glog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) hijacked [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.req.UserAgent(), rl.req.RemoteAddr)) + klog.InfoDepth(1, fmt.Sprintf("%s %s: (%v) hijacked [%s %s]", rl.req.Method, rl.req.RequestURI, latency, rl.req.UserAgent(), rl.req.RemoteAddr)) } } } @@ -173,8 +173,8 @@ func (rl *respLogger) Write(b []byte) (int, error) { func (rl *respLogger) Flush() { if flusher, ok := rl.w.(http.Flusher); ok { flusher.Flush() - } else if glog.V(2) { - glog.InfoDepth(1, fmt.Sprintf("Unable to convert %+v into http.Flusher", rl.w)) + } else if klog.V(2) { + klog.InfoDepth(1, fmt.Sprintf("Unable to convert %+v into http.Flusher", rl.w)) } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/mux/BUILD b/staging/src/k8s.io/apiserver/pkg/server/mux/BUILD index e60551c82db0f..dd150e0fc697c 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/mux/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/mux/BUILD @@ -24,7 +24,7 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/server/mux/pathrecorder.go 
b/staging/src/k8s.io/apiserver/pkg/server/mux/pathrecorder.go index 2f0eb7aa5b224..16857cc8a6b84 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/mux/pathrecorder.go +++ b/staging/src/k8s.io/apiserver/pkg/server/mux/pathrecorder.go @@ -25,7 +25,7 @@ import ( "sync" "sync/atomic" - "github.com/golang/glog" + "k8s.io/klog" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" @@ -237,20 +237,20 @@ func (m *PathRecorderMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { // ServeHTTP makes it an http.Handler func (h *pathHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if exactHandler, ok := h.pathToHandler[r.URL.Path]; ok { - glog.V(5).Infof("%v: %q satisfied by exact match", h.muxName, r.URL.Path) + klog.V(5).Infof("%v: %q satisfied by exact match", h.muxName, r.URL.Path) exactHandler.ServeHTTP(w, r) return } for _, prefixHandler := range h.prefixHandlers { if strings.HasPrefix(r.URL.Path, prefixHandler.prefix) { - glog.V(5).Infof("%v: %q satisfied by prefix %v", h.muxName, r.URL.Path, prefixHandler.prefix) + klog.V(5).Infof("%v: %q satisfied by prefix %v", h.muxName, r.URL.Path, prefixHandler.prefix) prefixHandler.handler.ServeHTTP(w, r) return } } - glog.V(5).Infof("%v: %q satisfied by NotFoundHandler", h.muxName, r.URL.Path) + klog.V(5).Infof("%v: %q satisfied by NotFoundHandler", h.muxName, r.URL.Path) h.notFoundHandler.ServeHTTP(w, r) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/BUILD b/staging/src/k8s.io/apiserver/pkg/server/options/BUILD index 7516c6e75ba7e..b59af5ada4c9e 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/options/BUILD @@ -70,10 +70,10 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/gopkg.in/natefinch/lumberjack.v2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/audit.go b/staging/src/k8s.io/apiserver/pkg/server/options/audit.go index 1e9efe7fc3933..fe31e5eb9c0b9 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/audit.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/audit.go @@ -23,9 +23,9 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/spf13/pflag" "gopkg.in/natefinch/lumberjack.v2" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime/schema" auditv1 "k8s.io/apiserver/pkg/apis/audit/v1" @@ -272,7 +272,7 @@ func (o *AuditOptions) ApplyTo(c *server.Config) error { } if c.AuditBackend != nil && c.AuditPolicyChecker == nil { - glog.V(2).Info("No audit policy file provided for AdvancedAuditing, no events will be recorded.") + klog.V(2).Info("No audit policy file provided for AdvancedAuditing, no events will be recorded.") } return nil } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go index 043a934153812..04331794e5422 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go @@ -22,8 +22,8 @@ import ( "io/ioutil" "time" - "github.com/golang/glog" 
"github.com/spf13/pflag" + "k8s.io/klog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -232,10 +232,10 @@ func (s *DelegatingAuthenticationOptions) lookupMissingConfigInCluster(client ku } if client == nil { if len(s.ClientCert.ClientCA) == 0 { - glog.Warningf("No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/%s in %s, so client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + klog.Warningf("No authentication-kubeconfig provided in order to lookup client-ca-file in configmap/%s in %s, so client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) } if len(s.RequestHeader.ClientCAFile) == 0 { - glog.Warningf("No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/%s in %s, so request-header client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + klog.Warningf("No authentication-kubeconfig provided in order to lookup requestheader-client-ca-file in configmap/%s in %s, so request-header client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) } return nil } @@ -245,7 +245,7 @@ func (s *DelegatingAuthenticationOptions) lookupMissingConfigInCluster(client ku case errors.IsNotFound(err): // ignore, authConfigMap is nil now case errors.IsForbidden(err): - glog.Warningf("Unable to get configmap/%s in %s. Usually fixed by "+ + klog.Warningf("Unable to get configmap/%s in %s. Usually fixed by "+ "'kubectl create rolebinding -n %s ROLE_NAME --role=%s --serviceaccount=YOUR_NS:YOUR_SA'", authenticationConfigMapName, authenticationConfigMapNamespace, authenticationConfigMapNamespace, authenticationRoleName) return err @@ -264,7 +264,7 @@ func (s *DelegatingAuthenticationOptions) lookupMissingConfigInCluster(client ku } } if len(s.ClientCert.ClientCA) == 0 { - glog.Warningf("Cluster doesn't provide client-ca-file in configmap/%s in %s, so client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + klog.Warningf("Cluster doesn't provide client-ca-file in configmap/%s in %s, so client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) } } @@ -279,7 +279,7 @@ func (s *DelegatingAuthenticationOptions) lookupMissingConfigInCluster(client ku } } if len(s.RequestHeader.ClientCAFile) == 0 { - glog.Warningf("Cluster doesn't provide requestheader-client-ca-file in configmap/%s in %s, so request-header client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) + klog.Warningf("Cluster doesn't provide requestheader-client-ca-file in configmap/%s in %s, so request-header client certificate authentication won't work.", authenticationConfigMapName, authenticationConfigMapNamespace) } } @@ -370,7 +370,7 @@ func (s *DelegatingAuthenticationOptions) getClient() (kubernetes.Interface, err clientConfig, err = rest.InClusterConfig() if err != nil && s.RemoteKubeConfigFileOptional { if err != rest.ErrNotInCluster { - glog.Warningf("failed to read in-cluster kubeconfig for delegated authentication: %v", err) + klog.Warningf("failed to read in-cluster kubeconfig for delegated authentication: %v", err) } return nil, nil } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go b/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go 
index c7a0d6b41d24b..5d81d9e86604d 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/authorization/authorizerfactory" @@ -143,7 +143,7 @@ func (s *DelegatingAuthorizationOptions) toAuthorizer(client kubernetes.Interfac } if client == nil { - glog.Warningf("No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.") + klog.Warningf("No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.") } else { cfg := authorizerfactory.DelegatingAuthorizerConfig{ SubjectAccessReviewClient: client.AuthorizationV1beta1().SubjectAccessReviews(), @@ -174,7 +174,7 @@ func (s *DelegatingAuthorizationOptions) getClient() (kubernetes.Interface, erro clientConfig, err = rest.InClusterConfig() if err != nil && s.RemoteKubeConfigFileOptional { if err != rest.ErrNotInCluster { - glog.Warningf("failed to read in-cluster kubeconfig for delegated authorization: %v", err) + klog.Warningf("failed to read in-cluster kubeconfig for delegated authorization: %v", err) } return nil, nil } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/serving.go b/staging/src/k8s.io/apiserver/pkg/server/options/serving.go index 998c7ce358a68..939e05741da4f 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/serving.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/serving.go @@ -24,8 +24,8 @@ import ( "strconv" "strings" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apiserver/pkg/server" @@ -308,14 +308,14 @@ func (s *SecureServingOptions) MaybeDefaultWithSelfSignedCerts(publicAddress str if err := certutil.WriteKey(keyCert.KeyFile, key); err != nil { return err } - glog.Infof("Generated self-signed cert (%s, %s)", keyCert.CertFile, keyCert.KeyFile) + klog.Infof("Generated self-signed cert (%s, %s)", keyCert.CertFile, keyCert.KeyFile) } else { tlsCert, err := tls.X509KeyPair(cert, key) if err != nil { return fmt.Errorf("unable to generate self signed cert: %v", err) } s.ServerCert.GeneratedCert = &tlsCert - glog.Infof("Generated self-signed cert in-memory") + klog.Infof("Generated self-signed cert in-memory") } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/routes/BUILD b/staging/src/k8s.io/apiserver/pkg/server/routes/BUILD index f4d56f9e30404..0b81332a3a458 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/routes/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/routes/BUILD @@ -32,8 +32,8 @@ go_library( "//vendor/github.com/elazarl/go-bindata-assetfs:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/github.com/emicklei/go-restful-swagger12:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/handler:go_default_library", ], diff --git a/staging/src/k8s.io/apiserver/pkg/server/routes/flags.go b/staging/src/k8s.io/apiserver/pkg/server/routes/flags.go index d40f11499b3fe..a03b80d3ce79f 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/routes/flags.go +++ 
b/staging/src/k8s.io/apiserver/pkg/server/routes/flags.go @@ -24,7 +24,7 @@ import ( "path" "sync" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apiserver/pkg/server/mux" ) @@ -57,7 +57,7 @@ func (f DebugFlags) Index(w http.ResponseWriter, r *http.Request) { lock.RLock() defer lock.RUnlock() if err := indexTmpl.Execute(w, registeredFlags); err != nil { - glog.Error(err) + klog.Error(err) } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/routes/openapi.go b/staging/src/k8s.io/apiserver/pkg/server/routes/openapi.go index 06c723d37531e..934bbf84a0429 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/routes/openapi.go +++ b/staging/src/k8s.io/apiserver/pkg/server/routes/openapi.go @@ -18,7 +18,7 @@ package routes import ( restful "github.com/emicklei/go-restful" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apiserver/pkg/server/mux" "k8s.io/kube-openapi/pkg/common" @@ -37,10 +37,10 @@ func (oa OpenAPI) Install(c *restful.Container, mux *mux.PathRecorderMux) { // are tracked at: https://docs.google.com/document/d/19lEqE9lc4yHJ3WJAJxS_G7TcORIJXGHyq3wpwcH28nU. _, err := handler.BuildAndRegisterOpenAPIService("/swagger.json", c.RegisteredWebServices(), oa.Config, mux) if err != nil { - glog.Fatalf("Failed to register open api spec for root: %v", err) + klog.Fatalf("Failed to register open api spec for root: %v", err) } _, err = handler.BuildAndRegisterOpenAPIVersionedService("/openapi/v2", c.RegisteredWebServices(), oa.Config, mux) if err != nil { - glog.Fatalf("Failed to register versioned open api spec for root: %v", err) + klog.Fatalf("Failed to register versioned open api spec for root: %v", err) } } diff --git a/staging/src/k8s.io/apiserver/pkg/server/secure_serving.go b/staging/src/k8s.io/apiserver/pkg/server/secure_serving.go index 67a45d5779f29..08006c965502b 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/secure_serving.go +++ b/staging/src/k8s.io/apiserver/pkg/server/secure_serving.go @@ -26,8 +26,8 @@ import ( "strings" "time" - "github.com/golang/glog" "golang.org/x/net/http2" + "k8s.io/klog" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/validation" @@ -113,7 +113,7 @@ func (s *SecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Dur return fmt.Errorf("error configuring http2: %v", err) } - glog.Infof("Serving securely on %s", secureServer.Addr) + klog.Infof("Serving securely on %s", secureServer.Addr) return RunServer(secureServer, s.Listener, shutdownTimeout, stopCh) } @@ -153,7 +153,7 @@ func RunServer( msg := fmt.Sprintf("Stopped listening on %s", ln.Addr().String()) select { case <-stopCh: - glog.Info(msg) + klog.Info(msg) default: panic(fmt.Sprintf("%s due to error: %v", msg, err)) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/storage/BUILD b/staging/src/k8s.io/apiserver/pkg/server/storage/BUILD index 8995549c5f8b5..612eefe7b08e2 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/storage/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/server/storage/BUILD @@ -45,7 +45,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/value:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/server/storage/storage_factory.go b/staging/src/k8s.io/apiserver/pkg/server/storage/storage_factory.go index 50c068254631f..a87ce4a5efb1f 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/server/storage/storage_factory.go +++ b/staging/src/k8s.io/apiserver/pkg/server/storage/storage_factory.go @@ -22,7 +22,7 @@ import ( "io/ioutil" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -282,7 +282,7 @@ func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource) (* if err != nil { return nil, err } - glog.V(3).Infof("storing %v in %v, reading as %v from %#v", groupResource, codecConfig.StorageVersion, codecConfig.MemoryVersion, codecConfig.Config) + klog.V(3).Infof("storing %v in %v, reading as %v from %#v", groupResource, codecConfig.StorageVersion, codecConfig.MemoryVersion, codecConfig.Config) return &storageConfig, nil } @@ -302,14 +302,14 @@ func (s *DefaultStorageFactory) Backends() []Backend { if len(s.StorageConfig.CertFile) > 0 && len(s.StorageConfig.KeyFile) > 0 { cert, err := tls.LoadX509KeyPair(s.StorageConfig.CertFile, s.StorageConfig.KeyFile) if err != nil { - glog.Errorf("failed to load key pair while getting backends: %s", err) + klog.Errorf("failed to load key pair while getting backends: %s", err) } else { tlsConfig.Certificates = []tls.Certificate{cert} } } if len(s.StorageConfig.CAFile) > 0 { if caCert, err := ioutil.ReadFile(s.StorageConfig.CAFile); err != nil { - glog.Errorf("failed to read ca file while getting backends: %s", err) + klog.Errorf("failed to read ca file while getting backends: %s", err) } else { caPool := x509.NewCertPool() caPool.AppendCertsFromPEM(caCert) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/cacher/BUILD index 9d4b3fe24c7f5..c041b9e81ba6d 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/trace:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go index f73634cd9513b..f5e2c4d0eeb68 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -122,7 +122,7 @@ func (i *indexedWatchers) deleteWatcher(number int, value string, supported bool func (i *indexedWatchers) terminateAll(objectType reflect.Type) { if len(i.allWatchers) > 0 || len(i.valueWatchers) > 0 { - glog.Warningf("Terminating all watchers from cacher %v", objectType) + klog.Warningf("Terminating all watchers from cacher %v", objectType) } i.allWatchers.terminateAll() for index, watchers := range i.valueWatchers { @@ -269,7 +269,7 @@ func (c *Cacher) startCaching(stopChannel <-chan struct{}) { // Also note that startCaching is called in a loop, so there's no need // to have another loop here. 
if err := c.reflector.ListAndWatch(stopChannel); err != nil { - glog.Errorf("unexpected ListAndWatch error: %v", err) + klog.Errorf("unexpected ListAndWatch error: %v", err) } } @@ -547,7 +547,7 @@ func (c *Cacher) GuaranteedUpdate( // Ignore the suggestion and try to pass down the current version of the object // read from cache. if elem, exists, err := c.watchCache.GetByKey(key); err != nil { - glog.Errorf("GetByKey returned error: %v", err) + klog.Errorf("GetByKey returned error: %v", err) } else if exists { currObj := elem.(*storeElement).Object.DeepCopyObject() return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj) @@ -590,7 +590,7 @@ func (c *Cacher) triggerValues(event *watchCacheEvent) ([]string, bool) { func (c *Cacher) processEvent(event *watchCacheEvent) { if curLen := int64(len(c.incoming)); c.incomingHWM.Update(curLen) { // Monitor if this gets backed up, and how much. - glog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen) + klog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen) } c.incoming <- *event } @@ -679,7 +679,7 @@ func forgetWatcher(c *Cacher, index int, triggerValue string, triggerSupported b // false is currently passed only if we are forcing watcher to close due // to its unresponsiveness and blocking other watchers. // TODO: Get this information in cleaner way. - glog.V(1).Infof("Forcing watcher close due to unresponsiveness: %v", c.objectType.String()) + klog.V(1).Infof("Forcing watcher close due to unresponsiveness: %v", c.objectType.String()) } // It's possible that the watcher is already not in the structure (e.g. in case of // simultaneous Stop() and terminateAllWatchers(), but it doesn't break anything. 
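Reviewer note (not part of the patch): the cacher.go hunks above are representative of the whole migration. k8s.io/klog began as a fork of github.com/golang/glog, so every call site keeps its shape and only the import path and identifier change. Both idioms in this patch survive the rename: the chained form klog.V(n).Infof(...) and the guard form if klog.V(n) { ... }, where V returns a bool-like Verbose value that is true when the configured verbosity is at least n. A minimal standalone sketch of both forms; the package name, messages, and arguments are illustrative only:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	klog.InitFlags(nil) // registers -v and friends; nil targets flag.CommandLine
	flag.Parse()
	defer klog.Flush()

	// Chained form, as in cacher.go above: the message is built only
	// when the configured verbosity is at least 1.
	klog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", "example", 12)

	// Guard form, as in httplog.go earlier in the patch: because V
	// returns a bool-like Verbose, callers can skip expensive argument
	// construction entirely when the level is disabled.
	if klog.V(3) {
		klog.InfoDepth(1, "verbose diagnostics built only at -v>=3")
	}

	klog.Errorf("unexpected ListAndWatch error: %v", "example error")
}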
@@ -942,7 +942,7 @@ func (c *cacheWatcher) process(initEvents []*watchCacheEvent, resourceVersion ui if len(initEvents) > 0 { objType = reflect.TypeOf(initEvents[0].Object).String() } - glog.V(2).Infof("processing %d initEvents of %s took %v", len(initEvents), objType, processingTime) + klog.V(2).Infof("processing %d initEvents of %s took %v", len(initEvents), objType, processingTime) } defer close(c.result) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD index f5b0c4c260395..0bbe6add4ec34 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/BUILD @@ -24,7 +24,7 @@ go_library( "//vendor/github.com/coreos/etcd/pkg/testutil:go_default_library", "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", "//vendor/github.com/coreos/etcd/pkg/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go index d8e1f20f84c0f..493abaa2feb50 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go @@ -43,7 +43,7 @@ import ( "github.com/coreos/etcd/pkg/testutil" "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" - "github.com/golang/glog" + "k8s.io/klog" ) // EtcdTestServer encapsulates the datastructures needed to start local instance for testing @@ -220,7 +220,7 @@ func (m *EtcdTestServer) waitUntilUp() error { for start := time.Now(); time.Since(start) < wait.ForeverTestTimeout; time.Sleep(10 * time.Millisecond) { members, err := membersAPI.List(context.TODO()) if err != nil { - glog.Errorf("Error when getting etcd cluster members") + klog.Errorf("Error when getting etcd cluster members") continue } if len(members) == 1 && len(members[0].ClientURLs) > 0 { diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD index 1e683a48d7f28..1b177714b272e 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/BUILD @@ -66,7 +66,7 @@ go_library( "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes:go_default_library", "//vendor/github.com/coreos/etcd/mvcc/mvccpb:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/compact.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/compact.go index bdcd5bcb60db4..d4524f4922111 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/compact.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/compact.go @@ -23,7 +23,7 @@ import ( "time" "github.com/coreos/etcd/clientv3" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -51,7 +51,7 @@ func StartCompactor(ctx context.Context, client *clientv3.Client, compactInterva // Currently we rely on endpoints to differentiate clusters. 
for _, ep := range client.Endpoints() { if _, ok := endpointsMap[ep]; ok { - glog.V(4).Infof("compactor already exists for endpoints %v", client.Endpoints()) + klog.V(4).Infof("compactor already exists for endpoints %v", client.Endpoints()) return } } @@ -121,7 +121,7 @@ func compactor(ctx context.Context, client *clientv3.Client, interval time.Durat compactTime, rev, err = compact(ctx, client, compactTime, rev) if err != nil { - glog.Errorf("etcd: endpoint (%v) compact failed: %v", client.Endpoints(), err) + klog.Errorf("etcd: endpoint (%v) compact failed: %v", client.Endpoints(), err) continue } } @@ -157,6 +157,6 @@ func compact(ctx context.Context, client *clientv3.Client, t, rev int64) (int64, if _, err = client.Compact(ctx, rev); err != nil { return curTime, curRev, err } - glog.V(4).Infof("etcd: compacted rev (%d), endpoints (%v)", rev, client.Endpoints()) + klog.V(4).Infof("etcd: compacted rev (%d), endpoints (%v)", rev, client.Endpoints()) return curTime, curRev, nil } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go index 43fa584296e20..129b593f53b66 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -29,7 +29,7 @@ import ( "time" "github.com/coreos/etcd/clientv3" - "github.com/golang/glog" + "k8s.io/klog" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -236,7 +236,7 @@ func (s *store) conditionalDelete(ctx context.Context, key string, out runtime.O } if !txnResp.Succeeded { getResp = (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange()) - glog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key) + klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key) continue } return decode(s.codec, s.versioner, origState.data, out, origState.rev) @@ -352,7 +352,7 @@ func (s *store) GuaranteedUpdate( trace.Step("Transaction committed") if !txnResp.Succeeded { getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange()) - glog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key) + klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key) origState, err = s.getState(getResp, key, v, ignoreNotFound) if err != nil { return err diff --git a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index c1216d5884cdd..d450038eff7a8 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -32,7 +32,7 @@ import ( "k8s.io/apiserver/pkg/storage/value" "github.com/coreos/etcd/clientv3" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -191,7 +191,7 @@ func (wc *watchChan) sync() error { func (wc *watchChan) startWatching(watchClosedCh chan struct{}) { if wc.initialRev == 0 { if err := wc.sync(); err != nil { - glog.Errorf("failed to sync with latest state: %v", err) + klog.Errorf("failed to sync with latest state: %v", err) wc.sendError(err) return } @@ -205,7 +205,7 @@ func (wc *watchChan) startWatching(watchClosedCh chan struct{}) { if wres.Err() != nil { err := wres.Err() // If there is an error on server (e.g. compaction), the channel will return it before closed. 
- glog.Errorf("watch chan error: %v", err) + klog.Errorf("watch chan error: %v", err) wc.sendError(err) return } @@ -232,7 +232,7 @@ func (wc *watchChan) processEvent(wg *sync.WaitGroup) { continue } if len(wc.resultChan) == outgoingBufSize { - glog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+ + klog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+ "Probably caused by slow dispatching events to watchers", outgoingBufSize) } // If user couldn't receive results fast enough, we also block incoming events from watcher. @@ -265,7 +265,7 @@ func (wc *watchChan) acceptAll() bool { func (wc *watchChan) transform(e *event) (res *watch.Event) { curObj, oldObj, err := wc.prepareObjs(e) if err != nil { - glog.Errorf("failed to prepare current and previous objects: %v", err) + klog.Errorf("failed to prepare current and previous objects: %v", err) wc.sendError(err) return nil } @@ -339,7 +339,7 @@ func (wc *watchChan) sendError(err error) { func (wc *watchChan) sendEvent(e *event) { if len(wc.incomingEventChan) == incomingBufSize { - glog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+ + klog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+ "Probably caused by slow decoding, user not receiving fast, or other processing logic", incomingBufSize) } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/BUILD b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/BUILD index 8beca7163d1ae..bd42719d88d54 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/BUILD @@ -17,10 +17,10 @@ go_library( deps = [ "//staging/src/k8s.io/apiserver/pkg/storage/value:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/hashicorp/golang-lru:go_default_library", "//vendor/golang.org/x/crypto/cryptobyte:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go index a39ceeca0da19..818da7dec5893 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go @@ -26,7 +26,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "google.golang.org/grpc" @@ -54,7 +54,7 @@ type gRPCService struct { // NewGRPCService returns an envelope.Service which use gRPC to communicate the remote KMS provider. 
func NewGRPCService(endpoint string, callTimeout time.Duration) (Service, error) { - glog.V(4).Infof("Configure KMS provider with endpoint: %s", endpoint) + klog.V(4).Infof("Configure KMS provider with endpoint: %s", endpoint) addr, err := parseEndpoint(endpoint) if err != nil { @@ -68,7 +68,7 @@ func NewGRPCService(endpoint string, callTimeout time.Duration) (Service, error) // timeout - is ignored since we are connecting in a non-blocking configuration c, err := net.DialTimeout(unixProtocol, addr, 0) if err != nil { - glog.Errorf("failed to create connection to unix socket: %s, error: %v", addr, err) + klog.Errorf("failed to create connection to unix socket: %s, error: %v", addr, err) } return c, err })) @@ -129,7 +129,7 @@ func (g *gRPCService) checkAPIVersion(ctx context.Context) error { } g.versionChecked = true - glog.V(4).Infof("Version of KMS provider is %s", response.Version) + klog.V(4).Infof("Version of KMS provider is %s", response.Version) return nil } diff --git a/staging/src/k8s.io/apiserver/pkg/util/feature/BUILD b/staging/src/k8s.io/apiserver/pkg/util/feature/BUILD index 31274c71d8613..2b0386ab76bbe 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/feature/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/util/feature/BUILD @@ -22,8 +22,8 @@ go_library( importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/util/feature", importpath = "k8s.io/apiserver/pkg/util/feature", deps = [ - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go index 8847c1fb62546..a83dafd56abe8 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go +++ b/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go @@ -24,8 +24,8 @@ import ( "sync" "sync/atomic" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" ) type Feature string @@ -193,9 +193,9 @@ func (f *featureGate) SetFromMap(m map[string]bool) error { } if featureSpec.PreRelease == Deprecated { - glog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v) + klog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v) } else if featureSpec.PreRelease == GA { - glog.Warningf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v) + klog.Warningf("Setting GA feature gate %s=%t. 
It will be removed in a future release.", k, v) } } @@ -203,7 +203,7 @@ func (f *featureGate) SetFromMap(m map[string]bool) error { f.known.Store(known) f.enabled.Store(enabled) - glog.V(1).Infof("feature gates: %v", f.enabled) + klog.V(1).Infof("feature gates: %v", f.enabled) return nil } diff --git a/staging/src/k8s.io/apiserver/pkg/util/flag/BUILD b/staging/src/k8s.io/apiserver/pkg/util/flag/BUILD index 6370f398c2247..7841681a86e58 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flag/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/util/flag/BUILD @@ -42,8 +42,8 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/github.com/docker/docker/pkg/term:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/util/flag/flags.go b/staging/src/k8s.io/apiserver/pkg/util/flag/flags.go index 55a3ed34a8e70..d0fff8db2e49e 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flag/flags.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flag/flags.go @@ -20,8 +20,8 @@ import ( goflag "flag" "strings" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" ) // WordSepNormalizeFunc changes all flags that contain "_" separators @@ -36,7 +36,7 @@ func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { if strings.Contains(name, "_") { nname := strings.Replace(name, "_", "-", -1) - glog.Warningf("%s is DEPRECATED and will be removed in a future version. Use %s instead.", name, nname) + klog.Warningf("%s is DEPRECATED and will be removed in a future version. Use %s instead.", name, nname) return pflag.NormalizedName(nname) } @@ -49,6 +49,6 @@ func InitFlags() { pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) pflag.Parse() pflag.VisitAll(func(flag *pflag.Flag) { - glog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) + klog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value) }) } diff --git a/staging/src/k8s.io/apiserver/pkg/util/logs/BUILD b/staging/src/k8s.io/apiserver/pkg/util/logs/BUILD index 1e5cf49750a09..d51cc64392616 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/logs/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/util/logs/BUILD @@ -12,8 +12,8 @@ go_library( importpath = "k8s.io/apiserver/pkg/util/logs", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/util/logs/logs.go b/staging/src/k8s.io/apiserver/pkg/util/logs/logs.go index a62c06094dd77..3ffe9eeb29b7c 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/logs/logs.go +++ b/staging/src/k8s.io/apiserver/pkg/util/logs/logs.go @@ -22,9 +22,9 @@ import ( "log" "time" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" ) const logFlushFreqFlagName = "log-flush-frequency" @@ -33,6 +33,7 @@ var logFlushFreq = pflag.Duration(logFlushFreqFlagName, 5*time.Second, "Maximum // TODO(thockin): This is temporary until we agree on log dirs and put those into each cmd. 
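Reviewer note (not part of the patch): the util/logs hunks just below do three things worth calling out. First, unlike glog, klog does not register its command-line flags as an import side effect, so init() now calls klog.InitFlags(flag.CommandLine) explicitly before forcing logtostderr. Second, GlogWriter is renamed KlogWriter but keeps its one-method io.Writer role, redirecting the standard library's log package into klog via InfoDepth. Third, flushing still runs on a timer, now through klog.Flush. A standalone sketch of the same wiring, under the assumption that klog v1's InitFlags(flagset *flag.FlagSet) falls back to flag.CommandLine when given nil:

package main

import (
	"flag"
	"log"

	"k8s.io/klog"
)

// klogWriter adapts the stdlib logger to klog, mirroring KlogWriter below.
type klogWriter struct{}

func (klogWriter) Write(data []byte) (int, error) {
	// Depth 1 attributes the record to log's caller, not to Write itself.
	klog.InfoDepth(1, string(data))
	return len(data), nil
}

func main() {
	klog.InitFlags(nil) // registers -v, -logtostderr, etc. on flag.CommandLine
	flag.Set("logtostderr", "true")
	flag.Parse()

	log.SetOutput(klogWriter{})
	log.SetFlags(0) // klog adds its own header; drop the stdlib one

	log.Print("routed through klog") // appears with a klog header
	klog.Flush()
}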
func init() { + klog.InitFlags(flag.CommandLine) flag.Set("logtostderr", "true") } @@ -42,38 +43,38 @@ func AddFlags(fs *pflag.FlagSet) { fs.AddFlag(pflag.Lookup(logFlushFreqFlagName)) } -// GlogWriter serves as a bridge between the standard log package and the glog package. -type GlogWriter struct{} +// KlogWriter serves as a bridge between the standard log package and the klog package. +type KlogWriter struct{} // Write implements the io.Writer interface. -func (writer GlogWriter) Write(data []byte) (n int, err error) { - glog.InfoDepth(1, string(data)) +func (writer KlogWriter) Write(data []byte) (n int, err error) { + klog.InfoDepth(1, string(data)) return len(data), nil } // InitLogs initializes logs the way we want for kubernetes. func InitLogs() { - log.SetOutput(GlogWriter{}) + log.SetOutput(KlogWriter{}) log.SetFlags(0) // The default glog flush interval is 5 seconds. - go wait.Forever(glog.Flush, *logFlushFreq) + go wait.Forever(klog.Flush, *logFlushFreq) } // FlushLogs flushes logs immediately. func FlushLogs() { - glog.Flush() + klog.Flush() } -// NewLogger creates a new log.Logger which sends logs to glog.Info. +// NewLogger creates a new log.Logger which sends logs to klog.Info. func NewLogger(prefix string) *log.Logger { - return log.New(GlogWriter{}, prefix, 0) + return log.New(KlogWriter{}, prefix, 0) } // GlogSetter is a setter to set glog level. func GlogSetter(val string) (string, error) { - var level glog.Level + var level klog.Level if err := level.Set(val); err != nil { - return "", fmt.Errorf("failed set glog.logging.verbosity %s: %v", val, err) + return "", fmt.Errorf("failed set klog.logging.verbosity %s: %v", val, err) } - return fmt.Sprintf("successfully set glog.logging.verbosity to %s", val), nil + return fmt.Sprintf("successfully set klog.logging.verbosity to %s", val), nil } diff --git a/staging/src/k8s.io/apiserver/pkg/util/trace/BUILD b/staging/src/k8s.io/apiserver/pkg/util/trace/BUILD index 2e94b1d0d4242..4e93ff5b498d9 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/trace/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/util/trace/BUILD @@ -10,7 +10,7 @@ go_library( srcs = ["trace.go"], importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/util/trace", importpath = "k8s.io/apiserver/pkg/util/trace", - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) filegroup( diff --git a/staging/src/k8s.io/apiserver/pkg/util/trace/trace.go b/staging/src/k8s.io/apiserver/pkg/util/trace/trace.go index b2f31c5275d1f..9049a17d0deeb 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/trace/trace.go +++ b/staging/src/k8s.io/apiserver/pkg/util/trace/trace.go @@ -22,7 +22,7 @@ import ( "math/rand" "time" - "github.com/golang/glog" + "k8s.io/klog" ) type traceStep struct { @@ -63,17 +63,17 @@ func (t *Trace) logWithStepThreshold(stepThreshold time.Duration) { lastStepTime := t.startTime for _, step := range t.steps { stepDuration := step.stepTime.Sub(lastStepTime) - if stepThreshold == 0 || stepDuration > stepThreshold || glog.V(4) { + if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] %v\n", tracenum, step.stepTime.Sub(t.startTime), stepDuration, step.msg)) } lastStepTime = step.stepTime } stepDuration := endTime.Sub(lastStepTime) - if stepThreshold == 0 || stepDuration > stepThreshold || glog.V(4) { + if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] END\n",
tracenum, endTime.Sub(t.startTime), stepDuration)) } - glog.Info(buffer.String()) + klog.Info(buffer.String()) } func (t *Trace) LogIfLong(threshold time.Duration) { diff --git a/staging/src/k8s.io/apiserver/pkg/util/wsstream/BUILD b/staging/src/k8s.io/apiserver/pkg/util/wsstream/BUILD index 269af15e5ce8d..5367ab2f5c6dc 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/wsstream/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/util/wsstream/BUILD @@ -27,8 +27,8 @@ go_library( importpath = "k8s.io/apiserver/pkg/util/wsstream", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/websocket:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/util/wsstream/conn.go b/staging/src/k8s.io/apiserver/pkg/util/wsstream/conn.go index 05faf48eba184..2d1a79021363d 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/wsstream/conn.go +++ b/staging/src/k8s.io/apiserver/pkg/util/wsstream/conn.go @@ -25,8 +25,8 @@ import ( "strings" "time" - "github.com/golang/glog" "golang.org/x/net/websocket" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/runtime" ) @@ -251,7 +251,7 @@ func (conn *Conn) handle(ws *websocket.Conn) { var data []byte if err := websocket.Message.Receive(ws, &data); err != nil { if err != io.EOF { - glog.Errorf("Error on socket receive: %v", err) + klog.Errorf("Error on socket receive: %v", err) } break } @@ -264,11 +264,11 @@ func (conn *Conn) handle(ws *websocket.Conn) { } data = data[1:] if int(channel) >= len(conn.channels) { - glog.V(6).Infof("Frame is targeted for a reader %d that is not valid, possible protocol error", channel) + klog.V(6).Infof("Frame is targeted for a reader %d that is not valid, possible protocol error", channel) continue } if _, err := conn.channels[channel].DataFromSocket(data); err != nil { - glog.Errorf("Unable to write frame to %d: %v\n%s", channel, err, string(data)) + klog.Errorf("Unable to write frame to %d: %v\n%s", channel, err, string(data)) continue } } diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/BUILD b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/BUILD index bb770700ada51..fc5981bd6b58e 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/BUILD +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/BUILD @@ -21,7 +21,7 @@ go_library( deps = [ "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/passwordfile.go b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/passwordfile.go index 8f291d191cc74..3c54ed78e13ca 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/passwordfile.go +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/passwordfile.go @@ -24,7 +24,7 @@ import ( "os" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/authentication/user" @@ -72,7 +72,7 @@ func NewCSV(path string) (*PasswordAuthenticator, error) { } recordNum++ if _, exist := users[obj.info.Name]; exist { - 
glog.Warningf("duplicate username '%s' has been found in password file '%s', record number '%d'", obj.info.Name, path, recordNum) + klog.Warningf("duplicate username '%s' has been found in password file '%s', record number '%d'", obj.info.Name, path, recordNum) } users[obj.info.Name] = obj } diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/BUILD b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/BUILD index 3863bb823fd5a..e0cba0dd20e42 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/BUILD +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/BUILD @@ -15,8 +15,8 @@ go_test( deps = [ "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//vendor/github.com/coreos/go-oidc:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/gopkg.in/square/go-jose.v2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -32,7 +32,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//vendor/github.com/coreos/go-oidc:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go index f53fc2ddd4c7c..8442e4256d15e 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go @@ -43,7 +43,7 @@ import ( "time" oidc "github.com/coreos/go-oidc" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" @@ -147,10 +147,10 @@ func newAsyncIDTokenVerifier(ctx context.Context, c *oidc.Config, iss string) *a // Polls indefinitely in an attempt to initialize the distributed claims // verifier, or until context canceled. initFn := func() (done bool, err error) { - glog.V(4).Infof("oidc authenticator: attempting init: iss=%v", iss) + klog.V(4).Infof("oidc authenticator: attempting init: iss=%v", iss) v, err := initVerifier(ctx, c, iss) if err != nil { - glog.Errorf("oidc authenticator: async token verifier for issuer: %q: %v", iss, err) + klog.Errorf("oidc authenticator: async token verifier for issuer: %q: %v", iss, err) return false, nil } t.m.Lock() @@ -221,7 +221,7 @@ func New(opts Options) (*Authenticator, error) { go wait.PollUntil(time.Second*10, func() (done bool, err error) { provider, err := oidc.NewProvider(ctx, a.issuerURL) if err != nil { - glog.Errorf("oidc authenticator: initializing plugin: %v", err) + klog.Errorf("oidc authenticator: initializing plugin: %v", err) return false, nil } @@ -279,7 +279,7 @@ func newAuthenticator(opts Options, initVerifier func(ctx context.Context, a *Au return nil, fmt.Errorf("Failed to read the CA file: %v", err) } } else { - glog.Info("OIDC: No x509 certificates provided, will use host's root CA set") + klog.Info("OIDC: No x509 certificates provided, will use host's root CA set") } // Copied from http.DefaultTransport. 
diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc_test.go b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc_test.go index 0ceb72e83b245..4e93a74e99d42 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc_test.go +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc_test.go @@ -36,9 +36,9 @@ import ( "time" oidc "github.com/coreos/go-oidc" - "github.com/golang/glog" jose "gopkg.in/square/go-jose.v2" "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/klog" ) // utilities for loading JOSE keys. @@ -148,7 +148,7 @@ func replace(tmpl string, v interface{}) string { buf := bytes.NewBuffer(nil) t.Execute(buf, &v) ret := buf.String() - glog.V(4).Infof("Replaced: %v into: %v", tmpl, ret) + klog.V(4).Infof("Replaced: %v into: %v", tmpl, ret) return ret } @@ -158,7 +158,7 @@ func replace(tmpl string, v interface{}) string { // responses that the server will return for each claim it is given. func newClaimServer(t *testing.T, keys jose.JSONWebKeySet, signer jose.Signer, claimToResponseMap map[string]string, openIDConfig *string) *httptest.Server { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - glog.V(5).Infof("request: %+v", *r) + klog.V(5).Infof("request: %+v", *r) switch r.URL.Path { case "/.testing/keys": w.Header().Set("Content-Type", "application/json") @@ -166,12 +166,12 @@ func newClaimServer(t *testing.T, keys jose.JSONWebKeySet, signer jose.Signer, c if err != nil { t.Fatalf("unexpected error while marshaling keys: %v", err) } - glog.V(5).Infof("%v: returning: %+v", r.URL, string(keyBytes)) + klog.V(5).Infof("%v: returning: %+v", r.URL, string(keyBytes)) w.Write(keyBytes) case "/.well-known/openid-configuration": w.Header().Set("Content-Type", "application/json") - glog.V(5).Infof("%v: returning: %+v", r.URL, *openIDConfig) + klog.V(5).Infof("%v: returning: %+v", r.URL, *openIDConfig) w.Write([]byte(*openIDConfig)) // These claims are tested in the unit tests. 
case "/groups": @@ -200,7 +200,7 @@ func newClaimServer(t *testing.T, keys jose.JSONWebKeySet, signer jose.Signer, c fmt.Fprintf(w, "unexpected URL: %v", r.URL) } })) - glog.V(4).Infof("Serving OIDC at: %v", ts.URL) + klog.V(4).Infof("Serving OIDC at: %v", ts.URL) return ts } diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD index fc926d1f75c71..eb13b669bdf3c 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/BUILD @@ -37,7 +37,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authentication/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go index f462276f5bbb7..3f1e1f92ed738 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go @@ -21,7 +21,7 @@ import ( "context" "time" - "github.com/golang/glog" + "k8s.io/klog" authentication "k8s.io/api/authentication/v1beta1" "k8s.io/apimachinery/pkg/runtime" @@ -84,7 +84,7 @@ func (w *WebhookTokenAuthenticator) AuthenticateToken(ctx context.Context, token }) if err != nil { // An error here indicates bad configuration or an outage. Log for debugging. - glog.Errorf("Failed to make webhook authenticator request: %v", err) + klog.Errorf("Failed to make webhook authenticator request: %v", err) return nil, false, err } r.Status = result.Status diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD b/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD index 59a23decbbe62..270aed93282c4 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/BUILD @@ -38,7 +38,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go b/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go index 03b7bda32fe5c..e05ef503f22a0 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -22,7 +22,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" authorization "k8s.io/api/authorization/v1beta1" "k8s.io/apimachinery/pkg/runtime" @@ -189,7 +189,7 @@ func (w *WebhookAuthorizer) Authorize(attr authorizer.Attributes) (decision auth }) if err != nil { // An error here indicates bad configuration or an outage. Log for debugging. 
- glog.Errorf("Failed to make webhook authorizer request: %v", err) + klog.Errorf("Failed to make webhook authorizer request: %v", err) return w.decisionOnError, "", err } r.Status = result.Status diff --git a/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json b/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json index c34141413743a..98d650950db5e 100644 --- a/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json +++ b/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json @@ -22,10 +22,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/protobuf/proto", "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" diff --git a/staging/src/k8s.io/client-go/Godeps/Godeps.json b/staging/src/k8s.io/client-go/Godeps/Godeps.json index 8656daeaefcea..1a3087f39af85 100644 --- a/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ -66,10 +66,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/groupcache/lru", "Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433" diff --git a/staging/src/k8s.io/client-go/discovery/BUILD b/staging/src/k8s.io/client-go/discovery/BUILD index 1ddb339a4ff2f..64e75dc511ca1 100644 --- a/staging/src/k8s.io/client-go/discovery/BUILD +++ b/staging/src/k8s.io/client-go/discovery/BUILD @@ -28,12 +28,12 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/protobuf/proto:go_default_library", "//vendor/github.com/googleapis/gnostic/OpenAPIv2:go_default_library", "//vendor/github.com/gregjones/httpcache:go_default_library", "//vendor/github.com/gregjones/httpcache/diskcache:go_default_library", "//vendor/github.com/peterbourgon/diskv:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/discovery/cached_discovery.go b/staging/src/k8s.io/client-go/discovery/cached_discovery.go index 90cd342017a97..df69d6a1930bd 100644 --- a/staging/src/k8s.io/client-go/discovery/cached_discovery.go +++ b/staging/src/k8s.io/client-go/discovery/cached_discovery.go @@ -25,8 +25,8 @@ import ( "sync" "time" - "github.com/golang/glog" "github.com/googleapis/gnostic/OpenAPIv2" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -67,23 +67,23 @@ func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion stri if err == nil { cachedResources := &metav1.APIResourceList{} if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { - glog.V(10).Infof("returning cached discovery info from %v", filename) + klog.V(10).Infof("returning cached discovery info from %v", filename) return cachedResources, nil } } liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) if err != nil { - glog.V(3).Infof("skipped caching discovery info due to %v", err) + klog.V(3).Infof("skipped caching discovery info due to %v", err) return liveResources, err } if liveResources == nil || 
len(liveResources.APIResources) == 0 { - glog.V(3).Infof("skipped caching discovery info, no resources found") + klog.V(3).Infof("skipped caching discovery info, no resources found") return liveResources, err } if err := d.writeCachedFile(filename, liveResources); err != nil { - glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) + klog.V(1).Infof("failed to write cache to %v due to %v", filename, err) } return liveResources, nil @@ -103,23 +103,23 @@ func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { if err == nil { cachedGroups := &metav1.APIGroupList{} if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { - glog.V(10).Infof("returning cached discovery info from %v", filename) + klog.V(10).Infof("returning cached discovery info from %v", filename) return cachedGroups, nil } } liveGroups, err := d.delegate.ServerGroups() if err != nil { - glog.V(3).Infof("skipped caching discovery info due to %v", err) + klog.V(3).Infof("skipped caching discovery info due to %v", err) return liveGroups, err } if liveGroups == nil || len(liveGroups.Groups) == 0 { - glog.V(3).Infof("skipped caching discovery info, no groups found") + klog.V(3).Infof("skipped caching discovery info, no groups found") return liveGroups, err } if err := d.writeCachedFile(filename, liveGroups); err != nil { - glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) + klog.V(1).Infof("failed to write cache to %v due to %v", filename, err) } return liveGroups, nil diff --git a/staging/src/k8s.io/client-go/discovery/round_tripper.go b/staging/src/k8s.io/client-go/discovery/round_tripper.go index 75b7f52097711..4e2bc24e774d2 100644 --- a/staging/src/k8s.io/client-go/discovery/round_tripper.go +++ b/staging/src/k8s.io/client-go/discovery/round_tripper.go @@ -20,10 +20,10 @@ import ( "net/http" "path/filepath" - "github.com/golang/glog" "github.com/gregjones/httpcache" "github.com/gregjones/httpcache/diskcache" "github.com/peterbourgon/diskv" + "k8s.io/klog" ) type cacheRoundTripper struct { @@ -55,7 +55,7 @@ func (rt *cacheRoundTripper) CancelRequest(req *http.Request) { if cr, ok := rt.rt.Transport.(canceler); ok { cr.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) + klog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) } } diff --git a/staging/src/k8s.io/client-go/examples/workqueue/BUILD b/staging/src/k8s.io/client-go/examples/workqueue/BUILD index 0bf338e895839..e993d2c478bdd 100644 --- a/staging/src/k8s.io/client-go/examples/workqueue/BUILD +++ b/staging/src/k8s.io/client-go/examples/workqueue/BUILD @@ -26,7 +26,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/examples/workqueue/main.go b/staging/src/k8s.io/client-go/examples/workqueue/main.go index 6768f5d91b248..c306aaae00c30 100644 --- a/staging/src/k8s.io/client-go/examples/workqueue/main.go +++ b/staging/src/k8s.io/client-go/examples/workqueue/main.go @@ -21,7 +21,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -72,7 +72,7 @@ func (c *Controller) processNextItem() bool { func (c *Controller) 
syncToStdout(key string) error { obj, exists, err := c.indexer.GetByKey(key) if err != nil { - glog.Errorf("Fetching object with key %s from store failed with %v", key, err) + klog.Errorf("Fetching object with key %s from store failed with %v", key, err) return err } @@ -99,7 +99,7 @@ func (c *Controller) handleErr(err error, key interface{}) { // This controller retries 5 times if something goes wrong. After that, it stops trying. if c.queue.NumRequeues(key) < 5 { - glog.Infof("Error syncing pod %v: %v", key, err) + klog.Infof("Error syncing pod %v: %v", key, err) // Re-enqueue the key rate limited. Based on the rate limiter on the // queue and the re-enqueue history, the key will be processed later again. @@ -110,7 +110,7 @@ func (c *Controller) handleErr(err error, key interface{}) { c.queue.Forget(key) // Report to an external entity that, even after several retries, we could not successfully process this key runtime.HandleError(err) - glog.Infof("Dropping pod %q out of the queue: %v", key, err) + klog.Infof("Dropping pod %q out of the queue: %v", key, err) } func (c *Controller) Run(threadiness int, stopCh chan struct{}) { @@ -118,7 +118,7 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) { // Let the workers stop when we are done defer c.queue.ShutDown() - glog.Info("Starting Pod controller") + klog.Info("Starting Pod controller") go c.informer.Run(stopCh) @@ -133,7 +133,7 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) { } <-stopCh - glog.Info("Stopping Pod controller") + klog.Info("Stopping Pod controller") } func (c *Controller) runWorker() { @@ -152,13 +152,13 @@ func main() { // creates the connection config, err := clientcmd.BuildConfigFromFlags(master, kubeconfig) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } // creates the clientset clientset, err := kubernetes.NewForConfig(config) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } // create the pod watcher diff --git a/staging/src/k8s.io/client-go/listers/policy/v1beta1/BUILD b/staging/src/k8s.io/client-go/listers/policy/v1beta1/BUILD index 5c140cb3df006..640f0435cf59a 100644 --- a/staging/src/k8s.io/client-go/listers/policy/v1beta1/BUILD +++ b/staging/src/k8s.io/client-go/listers/policy/v1beta1/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go b/staging/src/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go index c0ab9d3ed4ccb..d07d11a98dfc3 100644 --- a/staging/src/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go +++ b/staging/src/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go @@ -19,11 +19,11 @@ package v1beta1 import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" policy "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/klog" ) // PodDisruptionBudgetListerExpansion allows custom methods to be added to @@ -54,7 +54,7 @@ func (s *podDisruptionBudgetLister) GetPodPodDisruptionBudgets(pod *v1.Pod) ([]* pdb := list[i] selector, err = metav1.LabelSelectorAsSelector(pdb.Spec.Selector) if err != nil { - glog.Warningf("invalid selector: %v", err) + 
klog.Warningf("invalid selector: %v", err) // TODO(mml): add an event to the PDB continue } diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD index 264deb2383cf4..6a28475a76dec 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/BUILD @@ -24,7 +24,7 @@ go_library( "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go index 9858963e38116..d42449fc2575a 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go @@ -27,7 +27,7 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/net" restclient "k8s.io/client-go/rest" @@ -50,7 +50,7 @@ const ( func init() { if err := restclient.RegisterAuthProviderPlugin("azure", newAzureAuthProvider); err != nil { - glog.Fatalf("Failed to register azure auth plugin: %v", err) + klog.Fatalf("Failed to register azure auth plugin: %v", err) } } @@ -124,7 +124,7 @@ func (r *azureRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) token, err := r.tokenSource.Token() if err != nil { - glog.Errorf("Failed to acquire a token: %v", err) + klog.Errorf("Failed to acquire a token: %v", err) return nil, fmt.Errorf("acquiring a token for authorization header: %v", err) } diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD index d2459367275d9..69415598332b7 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/BUILD @@ -18,8 +18,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/transport:go_default_library", "//staging/src/k8s.io/client-go/util/connrotation:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index cae9d0d618e78..4d72526583ef6 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -31,7 +31,6 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/crypto/ssh/terminal" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -44,6 +43,7 @@ import ( "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/transport" "k8s.io/client-go/util/connrotation" + "k8s.io/klog" ) const execInfoEnv = "KUBERNETES_EXEC_INFO" @@ -228,7 +228,7 @@ func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { Code: int32(res.StatusCode), } if err := 
r.a.maybeRefreshCreds(creds, resp); err != nil { - glog.Errorf("refreshing credentials: %v", err) + klog.Errorf("refreshing credentials: %v", err) } } return res, nil diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD index 0f87724c3b7fb..1254922771802 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD @@ -23,9 +23,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/util/jsonpath:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/golang.org/x/oauth2/google:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go index 2bc6c4474bbaf..e44c2adabb3a2 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go @@ -27,18 +27,18 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/yaml" restclient "k8s.io/client-go/rest" "k8s.io/client-go/util/jsonpath" + "k8s.io/klog" ) func init() { if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil { - glog.Fatalf("Failed to register gcp auth plugin: %v", err) + klog.Fatalf("Failed to register gcp auth plugin: %v", err) } } @@ -223,7 +223,7 @@ func (t *cachedTokenSource) Token() (*oauth2.Token, error) { cache := t.update(tok) if t.persister != nil { if err := t.persister.Persist(cache); err != nil { - glog.V(4).Infof("Failed to persist token: %v", err) + klog.V(4).Infof("Failed to persist token: %v", err) } } return tok, nil @@ -329,7 +329,7 @@ func (c *commandTokenSource) parseTokenCmdOutput(output []byte) (*oauth2.Token, } var expiry time.Time if t, err := time.Parse(c.timeFmt, expiryStr); err != nil { - glog.V(4).Infof("Failed to parse token expiry from %s (fmt=%s): %v", expiryStr, c.timeFmt, err) + klog.V(4).Infof("Failed to parse token expiry from %s (fmt=%s): %v", expiryStr, c.timeFmt, err) } else { expiry = t } @@ -373,7 +373,7 @@ func (t *conditionalTransport) RoundTrip(req *http.Request) (*http.Response, err } if res.StatusCode == 401 { - glog.V(4).Infof("The credentials that were supplied are invalid for the target cluster") + klog.V(4).Infof("The credentials that were supplied are invalid for the target cluster") t.persister.Persist(t.resetCache) } diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD index 92674d54135e3..f87a2d6589ad2 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/BUILD @@ -20,8 +20,8 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go 
b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go index 9c3ea0ab8d53f..1383a97c62eb1 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go @@ -28,10 +28,10 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/oauth2" "k8s.io/apimachinery/pkg/util/net" restclient "k8s.io/client-go/rest" + "k8s.io/klog" ) const ( @@ -49,7 +49,7 @@ const ( func init() { if err := restclient.RegisterAuthProviderPlugin("oidc", newOIDCAuthProvider); err != nil { - glog.Fatalf("Failed to register oidc auth plugin: %v", err) + klog.Fatalf("Failed to register oidc auth plugin: %v", err) } } @@ -124,7 +124,7 @@ func newOIDCAuthProvider(_ string, cfg map[string]string, persister restclient.A } if len(cfg[cfgExtraScopes]) > 0 { - glog.V(2).Infof("%s auth provider field depricated, refresh request don't send scopes", + klog.V(2).Infof("%s auth provider field deprecated, refresh requests don't send scopes", cfgExtraScopes) } diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD index 15552999ba7ca..488af00d32b0f 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/BUILD @@ -20,9 +20,9 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/gophercloud/gophercloud:go_default_library", "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go index e6d7f04934aa6..fab5104ef61cc 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go @@ -22,9 +22,9 @@ import ( "sync" "time" - "github.com/golang/glog" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/openstack" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/net" restclient "k8s.io/client-go/rest" @@ -32,7 +32,7 @@ import ( func init() { if err := restclient.RegisterAuthProviderPlugin("openstack", newOpenstackAuthProvider); err != nil { - glog.Fatalf("Failed to register openstack auth plugin: %s", err) + klog.Fatalf("Failed to register openstack auth plugin: %s", err) } } @@ -62,7 +62,7 @@ func (t *tokenGetter) Token() (string, error) { var err error if t.authOpt == nil { // reads the config from the environment - glog.V(4).Info("reading openstack config from the environment variables") + klog.V(4).Info("reading openstack config from the environment variables") options, err = openstack.AuthOptionsFromEnv() if err != nil { return "", fmt.Errorf("failed to read openstack env vars: %s", err) @@ -126,7 +126,7 @@ func (t *tokenRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) if err == nil { req.Header.Set("Authorization", "Bearer "+token) } else { - glog.V(4).Infof("failed to get token: %s", err) + klog.V(4).Infof("failed to get token: %s", err) } return t.RoundTripper.RoundTrip(req) @@ -140,7 +140,7 @@ func newOpenstackAuthProvider(_ string, config map[string]string, persister rest var ttlDuration time.Duration var err
error - glog.Warningf("WARNING: in-tree openstack auth plugin is now deprecated. please use the \"client-keystone-auth\" kubectl/client-go credential plugin instead") + klog.Warningf("WARNING: in-tree openstack auth plugin is now deprecated. please use the \"client-keystone-auth\" kubectl/client-go credential plugin instead") ttl, found := config["ttl"] if !found { ttlDuration = DefaultTTLDuration diff --git a/staging/src/k8s.io/client-go/rest/BUILD b/staging/src/k8s.io/client-go/rest/BUILD index 0d95e60cad395..70920303e4e4b 100644 --- a/staging/src/k8s.io/client-go/rest/BUILD +++ b/staging/src/k8s.io/client-go/rest/BUILD @@ -39,10 +39,10 @@ go_test( "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//staging/src/k8s.io/client-go/util/testing:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/google/gofuzz:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -79,9 +79,9 @@ go_library( "//staging/src/k8s.io/client-go/transport:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/net/http2:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/rest/config.go b/staging/src/k8s.io/client-go/rest/config.go index d5ef84065ccd7..438eb3bedac0a 100644 --- a/staging/src/k8s.io/client-go/rest/config.go +++ b/staging/src/k8s.io/client-go/rest/config.go @@ -29,7 +29,6 @@ import ( "strings" "time" - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -37,6 +36,7 @@ import ( clientcmdapi "k8s.io/client-go/tools/clientcmd/api" certutil "k8s.io/client-go/util/cert" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" ) const ( @@ -331,7 +331,7 @@ func InClusterConfig() (*Config, error) { tlsClientConfig := TLSClientConfig{} if _, err := certutil.NewPool(rootCAFile); err != nil { - glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) + klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) } else { tlsClientConfig.CAFile = rootCAFile } diff --git a/staging/src/k8s.io/client-go/rest/plugin.go b/staging/src/k8s.io/client-go/rest/plugin.go index cf8fbabfdf1cf..83ef5ae320fea 100644 --- a/staging/src/k8s.io/client-go/rest/plugin.go +++ b/staging/src/k8s.io/client-go/rest/plugin.go @@ -21,7 +21,7 @@ import ( "net/http" "sync" - "github.com/golang/glog" + "k8s.io/klog" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) @@ -57,7 +57,7 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error { if _, found := plugins[name]; found { return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) } - glog.V(4).Infof("Registered Auth Provider Plugin %q", name) + klog.V(4).Infof("Registered Auth Provider Plugin %q", name) plugins[name] = plugin return nil } diff --git a/staging/src/k8s.io/client-go/rest/request.go b/staging/src/k8s.io/client-go/rest/request.go index 9bb311448ab41..64901fba20d88 100644 --- a/staging/src/k8s.io/client-go/rest/request.go +++ 
b/staging/src/k8s.io/client-go/rest/request.go @@ -32,7 +32,6 @@ import ( "strings" "time" - "github.com/golang/glog" "golang.org/x/net/http2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,6 +43,7 @@ import ( restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" ) var ( @@ -114,7 +114,7 @@ type Request struct { // NewRequest creates a new request helper object for accessing runtime.Objects on a server. func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request { if backoff == nil { - glog.V(2).Infof("Not implementing request backoff strategy.") + klog.V(2).Infof("Not implementing request backoff strategy.") backoff = &NoBackoff{} } @@ -527,7 +527,7 @@ func (r *Request) tryThrottle() { r.throttle.Accept() } if latency := time.Since(now); latency > longThrottleLatency { - glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) + klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) } } @@ -683,7 +683,7 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { }() if r.err != nil { - glog.V(4).Infof("Error in request: %v", r.err) + klog.V(4).Infof("Error in request: %v", r.err) return r.err } @@ -770,13 +770,13 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { if seeker, ok := r.body.(io.Seeker); ok && r.body != nil { _, err := seeker.Seek(0, 0) if err != nil { - glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) + klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) fn(req, resp) return true } } - glog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) + klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) return false } @@ -844,13 +844,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu // 2. Apiserver sends back the headers and then part of the body // 3. Apiserver closes connection. // 4. client-go should catch this and return an error. - glog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) + klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err) return Result{ err: streamErr, } default: - glog.Errorf("Unexpected error when reading response body: %#v", err) + klog.Errorf("Unexpected error when reading response body: %#v", err) unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. 
Please retry.", err) return Result{ err: unexpectedErr, @@ -914,11 +914,11 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu func truncateBody(body string) string { max := 0 switch { - case bool(glog.V(10)): + case bool(klog.V(10)): return body - case bool(glog.V(9)): + case bool(klog.V(9)): max = 10240 - case bool(glog.V(8)): + case bool(klog.V(8)): max = 1024 } @@ -933,13 +933,13 @@ func truncateBody(body string) string { // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine // whether the body is printable. func glogBody(prefix string, body []byte) { - if glog.V(8) { + if klog.V(8) { if bytes.IndexFunc(body, func(r rune) bool { return r < 0x0a }) != -1 { - glog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) + klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) } else { - glog.Infof("%s: %s", prefix, truncateBody(string(body))) + klog.Infof("%s: %s", prefix, truncateBody(string(body))) } } } @@ -1141,7 +1141,7 @@ func (r Result) Error() error { // to be backwards compatible with old servers that do not return a version, default to "v1" out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil) if err != nil { - glog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) + klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) return r.err } switch t := out.(type) { diff --git a/staging/src/k8s.io/client-go/rest/request_test.go b/staging/src/k8s.io/client-go/rest/request_test.go index 2660c0be5d451..a415f60ae799d 100755 --- a/staging/src/k8s.io/client-go/rest/request_test.go +++ b/staging/src/k8s.io/client-go/rest/request_test.go @@ -35,7 +35,7 @@ import ( "testing" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" @@ -1855,6 +1855,10 @@ func buildString(length int) string { return string(s) } +func init() { + klog.InitFlags(nil) +} + func TestTruncateBody(t *testing.T) { tests := []struct { body string @@ -1904,7 +1908,7 @@ func TestTruncateBody(t *testing.T) { }, } - l := flag.Lookup("v").Value.(flag.Getter).Get().(glog.Level) + l := flag.Lookup("v").Value.(flag.Getter).Get().(klog.Level) for _, test := range tests { flag.Set("v", test.level) got := truncateBody(test.body) diff --git a/staging/src/k8s.io/client-go/rest/token_source.go b/staging/src/k8s.io/client-go/rest/token_source.go index e0a6eb7d9f197..c251b5eb0bb63 100644 --- a/staging/src/k8s.io/client-go/rest/token_source.go +++ b/staging/src/k8s.io/client-go/rest/token_source.go @@ -24,8 +24,8 @@ import ( "sync" "time" - "github.com/golang/glog" "golang.org/x/oauth2" + "k8s.io/klog" ) // TokenSourceWrapTransport returns a WrapTransport that injects bearer tokens @@ -131,7 +131,7 @@ func (ts *cachingTokenSource) Token() (*oauth2.Token, error) { if ts.tok == nil { return nil, err } - glog.Errorf("Unable to rotate token: %v", err) + klog.Errorf("Unable to rotate token: %v", err) return ts.tok, nil } diff --git a/staging/src/k8s.io/client-go/rest/urlbackoff.go b/staging/src/k8s.io/client-go/rest/urlbackoff.go index eff848abc12b6..d00e42f86671f 100644 --- a/staging/src/k8s.io/client-go/rest/urlbackoff.go +++ b/staging/src/k8s.io/client-go/rest/urlbackoff.go @@ -20,9 +20,9 @@ import ( "net/url" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" ) // Set of resp. Codes that we backoff for. 
@@ -64,7 +64,7 @@ func (n *NoBackoff) Sleep(d time.Duration) { // Disable makes the backoff trivial, i.e., sets it to zero. This might be used // by tests which want to run 1000s of mock requests without slowing down. func (b *URLBackoff) Disable() { - glog.V(4).Infof("Disabling backoff strategy") + klog.V(4).Infof("Disabling backoff strategy") b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second) } @@ -76,7 +76,7 @@ func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string { // in the future. host, err := url.Parse(rawurl.String()) if err != nil { - glog.V(4).Infof("Error extracting url: %v", rawurl) + klog.V(4).Infof("Error extracting url: %v", rawurl) panic("bad url!") } return host.Host @@ -89,7 +89,7 @@ func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode i b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now()) return } else if responseCode >= 300 || err != nil { - glog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) + klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) } //If we got this far, there is no backoff required for this URL anymore. diff --git a/staging/src/k8s.io/client-go/restmapper/BUILD b/staging/src/k8s.io/client-go/restmapper/BUILD index d13169643cec9..ed8006f031807 100644 --- a/staging/src/k8s.io/client-go/restmapper/BUILD +++ b/staging/src/k8s.io/client-go/restmapper/BUILD @@ -15,7 +15,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/discovery:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/restmapper/discovery.go b/staging/src/k8s.io/client-go/restmapper/discovery.go index aa158626af4fb..84491f4c5d16c 100644 --- a/staging/src/k8s.io/client-go/restmapper/discovery.go +++ b/staging/src/k8s.io/client-go/restmapper/discovery.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" - "github.com/golang/glog" + "k8s.io/klog" ) // APIGroupResources is an API group with a mapping of versions to @@ -212,7 +212,7 @@ func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { // Reset resets the internally cached Discovery information and will // cause the next mapping request to re-discover. func (d *DeferredDiscoveryRESTMapper) Reset() { - glog.V(5).Info("Invalidating discovery information") + klog.V(5).Info("Invalidating discovery information") d.initMu.Lock() defer d.initMu.Unlock() diff --git a/staging/src/k8s.io/client-go/restmapper/shortcut.go b/staging/src/k8s.io/client-go/restmapper/shortcut.go index d9f4be0b6b113..6f3c9d9306917 100644 --- a/staging/src/k8s.io/client-go/restmapper/shortcut.go +++ b/staging/src/k8s.io/client-go/restmapper/shortcut.go @@ -19,7 +19,7 @@ package restmapper import ( "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -86,12 +86,12 @@ func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []re // This can return an error *and* the results it was able to find. We don't need to fail on the error. 
apiResList, err := e.discoveryClient.ServerResources() if err != nil { - glog.V(1).Infof("Error loading discovery information: %v", err) + klog.V(1).Infof("Error loading discovery information: %v", err) } for _, apiResources := range apiResList { gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) if err != nil { - glog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) + klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) continue } for _, apiRes := range apiResources.APIResources { diff --git a/staging/src/k8s.io/client-go/tools/cache/BUILD b/staging/src/k8s.io/client-go/tools/cache/BUILD index 0240fa6f7002f..8b8b912585597 100644 --- a/staging/src/k8s.io/client-go/tools/cache/BUILD +++ b/staging/src/k8s.io/client-go/tools/cache/BUILD @@ -83,7 +83,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/pager:go_default_library", "//staging/src/k8s.io/client-go/util/buffer:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/tools/cache/delta_fifo.go b/staging/src/k8s.io/client-go/tools/cache/delta_fifo.go index 45c3b500d427e..ded637ac45022 100644 --- a/staging/src/k8s.io/client-go/tools/cache/delta_fifo.go +++ b/staging/src/k8s.io/client-go/tools/cache/delta_fifo.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - "github.com/golang/glog" + "k8s.io/klog" ) // NewDeltaFIFO returns a Store which can be used process changes to items. @@ -506,10 +506,10 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { deletedObj, exists, err := f.knownObjects.GetByKey(k) if err != nil { deletedObj = nil - glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) + klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k) } else if !exists { deletedObj = nil - glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) + klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) } queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { @@ -553,10 +553,10 @@ func (f *DeltaFIFO) syncKey(key string) error { func (f *DeltaFIFO) syncKeyLocked(key string) error { obj, exists, err := f.knownObjects.GetByKey(key) if err != nil { - glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) + klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) return nil } else if !exists { - glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) + klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) return nil } diff --git a/staging/src/k8s.io/client-go/tools/cache/expiration_cache.go b/staging/src/k8s.io/client-go/tools/cache/expiration_cache.go index d284453ec4335..b38fe70b95669 100644 --- a/staging/src/k8s.io/client-go/tools/cache/expiration_cache.go +++ b/staging/src/k8s.io/client-go/tools/cache/expiration_cache.go @@ -20,8 +20,8 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/clock" + 
"k8s.io/klog" ) // ExpirationCache implements the store interface @@ -95,7 +95,7 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { return nil, false } if c.expirationPolicy.IsExpired(timestampedItem) { - glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj) + klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj) c.cacheStorage.Delete(key) return nil, false } diff --git a/staging/src/k8s.io/client-go/tools/cache/listers.go b/staging/src/k8s.io/client-go/tools/cache/listers.go index 27d51a6b38797..ce377329c7f1c 100644 --- a/staging/src/k8s.io/client-go/tools/cache/listers.go +++ b/staging/src/k8s.io/client-go/tools/cache/listers.go @@ -17,7 +17,7 @@ limitations under the License. package cache import ( - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -60,7 +60,7 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace}) if err != nil { // Ignore error; do slow search without index. - glog.Warningf("can not retrieve list of objects using index : %v", err) + klog.Warningf("can not retrieve list of objects using index : %v", err) for _, m := range indexer.List() { metadata, err := meta.Accessor(m) if err != nil { diff --git a/staging/src/k8s.io/client-go/tools/cache/mutation_cache.go b/staging/src/k8s.io/client-go/tools/cache/mutation_cache.go index cbb6434ebde2a..4c6686e918c14 100644 --- a/staging/src/k8s.io/client-go/tools/cache/mutation_cache.go +++ b/staging/src/k8s.io/client-go/tools/cache/mutation_cache.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -156,7 +156,7 @@ func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, er } elements, err := fn(updated) if err != nil { - glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err) + klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err) continue } for _, inIndex := range elements { diff --git a/staging/src/k8s.io/client-go/tools/cache/mutation_detector.go b/staging/src/k8s.io/client-go/tools/cache/mutation_detector.go index e2aa448484087..adb5b8be8af46 100644 --- a/staging/src/k8s.io/client-go/tools/cache/mutation_detector.go +++ b/staging/src/k8s.io/client-go/tools/cache/mutation_detector.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/diff" @@ -45,7 +45,7 @@ func NewCacheMutationDetector(name string) CacheMutationDetector { if !mutationDetectionEnabled { return dummyMutationDetector{} } - glog.Warningln("Mutation detector is enabled, this will result in memory leakage.") + klog.Warningln("Mutation detector is enabled, this will result in memory leakage.") return &defaultCacheMutationDetector{name: name, period: 1 * time.Second} } diff --git a/staging/src/k8s.io/client-go/tools/cache/reflector.go b/staging/src/k8s.io/client-go/tools/cache/reflector.go index 9ee7efcbbd822..12e2a33422127 100644 --- a/staging/src/k8s.io/client-go/tools/cache/reflector.go +++ b/staging/src/k8s.io/client-go/tools/cache/reflector.go @@ -31,7 +31,6 @@ import ( "syscall" "time" - "github.com/golang/glog" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" @@ -41,6 +40,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog" ) // Reflector watches a specified resource and causes all changes to be reflected in the given store. @@ -128,7 +128,7 @@ var internalPackages = []string{"client-go/tools/cache/"} // Run starts a watch and handles watch events. Will restart the watch if it is closed. // Run will exit when stopCh is closed. func (r *Reflector) Run(stopCh <-chan struct{}) { - glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) + klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name) wait.Until(func() { if err := r.ListAndWatch(stopCh); err != nil { utilruntime.HandleError(err) @@ -166,7 +166,7 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { // and then use the resource version to watch. // It returns error if ListAndWatch didn't even try to initialize watch. func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { - glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) + klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name) var resourceVersion string // Explicitly set "0" as resource version - it's fine for the List() @@ -212,7 +212,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { return } if r.ShouldResync == nil || r.ShouldResync() { - glog.V(4).Infof("%s: forcing resync", r.name) + klog.V(4).Infof("%s: forcing resync", r.name) if err := r.store.Resync(); err != nil { resyncerrc <- err return @@ -246,7 +246,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err) + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err) default: utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err)) } @@ -267,7 +267,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil { if err != errorStopRequested { - glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) + klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err) } return nil } @@ -354,7 +354,7 @@ loop: r.metrics.numberOfShortWatches.Inc() return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name) } - glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) + klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount) return nil } diff --git a/staging/src/k8s.io/client-go/tools/cache/shared_informer.go b/staging/src/k8s.io/client-go/tools/cache/shared_informer.go index f29a4b3312694..e91fc9e95583a 100644 --- a/staging/src/k8s.io/client-go/tools/cache/shared_informer.go +++ b/staging/src/k8s.io/client-go/tools/cache/shared_informer.go @@ -28,7 +28,7 @@ import ( "k8s.io/client-go/util/buffer" "k8s.io/client-go/util/retry" - "github.com/golang/glog" + "k8s.io/klog" ) // SharedInformer has a shared data cache and is capable of distributing notifications for changes @@ -116,11 +116,11 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool }, 
stopCh) if err != nil { - glog.V(2).Infof("stop requested") + klog.V(2).Infof("stop requested") return false } - glog.V(4).Infof("caches populated") + klog.V(4).Infof("caches populated") return true } @@ -279,11 +279,11 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration { return desired } if check == 0 { - glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) + klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) return 0 } if desired < check { - glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) + klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) return check } return desired @@ -296,19 +296,19 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv defer s.startedLock.Unlock() if s.stopped { - glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) + klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler) return } if resyncPeriod > 0 { if resyncPeriod < minimumResyncPeriod { - glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) + klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) resyncPeriod = minimumResyncPeriod } if resyncPeriod < s.resyncCheckPeriod { if s.started { - glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. 
Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) resyncPeriod = s.resyncCheckPeriod } else { // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/BUILD b/staging/src/k8s.io/client-go/tools/clientcmd/BUILD index d92f2003c38e0..8746085e1a119 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/BUILD +++ b/staging/src/k8s.io/client-go/tools/clientcmd/BUILD @@ -54,10 +54,10 @@ go_library( "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd/api/latest:go_default_library", "//staging/src/k8s.io/client-go/util/homedir:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/imdario/mergo:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/golang.org/x/crypto/ssh/terminal:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go index e5dc921ff9a8e..dea229c918251 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go @@ -24,8 +24,8 @@ import ( "os" "strings" - "github.com/golang/glog" "github.com/imdario/mergo" + "k8s.io/klog" restclient "k8s.io/client-go/rest" clientauth "k8s.io/client-go/tools/auth" @@ -545,12 +545,12 @@ func (config *inClusterClientConfig) Possible() bool { // to the default config. func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) { if kubeconfigPath == "" && masterUrl == "" { - glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.") + klog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. 
This might not work.") kubeconfig, err := restclient.InClusterConfig() if err == nil { return kubeconfig, nil } - glog.Warning("error creating inClusterConfig, falling back to default config: ", err) + klog.Warning("error creating inClusterConfig, falling back to default config: ", err) } return NewNonInteractiveDeferredLoadingClientConfig( &ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/config.go b/staging/src/k8s.io/client-go/tools/clientcmd/config.go index 9495849b09284..b8cc39688219f 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/config.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/config.go @@ -24,7 +24,7 @@ import ( "reflect" "sort" - "github.com/golang/glog" + "k8s.io/klog" restclient "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -483,7 +483,7 @@ func getConfigFromFile(filename string) (*clientcmdapi.Config, error) { func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { config, err := getConfigFromFile(filename) if err != nil { - glog.FatalDepth(1, err) + klog.FatalDepth(1, err) } return config diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/loader.go b/staging/src/k8s.io/client-go/tools/clientcmd/loader.go index 6038c8d457a19..7e928a918563e 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/loader.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/loader.go @@ -27,8 +27,8 @@ import ( goruntime "runtime" "strings" - "github.com/golang/glog" "github.com/imdario/mergo" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -356,7 +356,7 @@ func LoadFromFile(filename string) (*clientcmdapi.Config, error) { if err != nil { return nil, err } - glog.V(6).Infoln("Config loaded from file", filename) + klog.V(6).Infoln("Config loaded from file", filename) // set LocationOfOrigin on every Cluster, User, and Context for key, obj := range config.AuthInfos { diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/staging/src/k8s.io/client-go/tools/clientcmd/merged_client_builder.go index 05038133b6b84..76380db82ab95 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/merged_client_builder.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -20,7 +20,7 @@ import ( "io" "sync" - "github.com/golang/glog" + "k8s.io/klog" restclient "k8s.io/client-go/rest" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -119,7 +119,7 @@ func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, e // check for in-cluster configuration and use it if config.icc.Possible() { - glog.V(4).Infof("Using in-cluster configuration") + klog.V(4).Infof("Using in-cluster configuration") return config.icc.ClientConfig() } @@ -156,7 +156,7 @@ func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) { } } - glog.V(4).Infof("Using in-cluster namespace") + klog.V(4).Infof("Using in-cluster namespace") // allow the namespace from the service account token directory to be used. 
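	// Taken together with BuildConfigFromFlags above, the in-cluster config is
	// the first fallback when neither --master nor --kubeconfig is given, ahead
	// of the default loading rules. A minimal sketch of a caller relying on
	// that behavior (illustrative only; not part of this patch):
	//
	//	config, err := clientcmd.BuildConfigFromFlags("", "")
	//	if err != nil {
	//		klog.Fatal(err)
	//	}
	//	clientset, err := kubernetes.NewForConfig(config)
	//	if err != nil {
	//		klog.Fatal(err)
	//	}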
return config.icc.Namespace() diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/BUILD b/staging/src/k8s.io/client-go/tools/leaderelection/BUILD index 8d53bfe508afe..6716f87e25330 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/BUILD +++ b/staging/src/k8s.io/client-go/tools/leaderelection/BUILD @@ -17,7 +17,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go b/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go index be52e85a048eb..1bd6167b6ea29 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ b/staging/src/k8s.io/client-go/tools/leaderelection/leaderelection.go @@ -60,7 +60,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" rl "k8s.io/client-go/tools/leaderelection/resourcelock" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -184,16 +184,16 @@ func (le *LeaderElector) acquire(ctx context.Context) bool { defer cancel() succeeded := false desc := le.config.Lock.Describe() - glog.Infof("attempting to acquire leader lease %v...", desc) + klog.Infof("attempting to acquire leader lease %v...", desc) wait.JitterUntil(func() { succeeded = le.tryAcquireOrRenew() le.maybeReportTransition() if !succeeded { - glog.V(4).Infof("failed to acquire lease %v", desc) + klog.V(4).Infof("failed to acquire lease %v", desc) return } le.config.Lock.RecordEvent("became leader") - glog.Infof("successfully acquired lease %v", desc) + klog.Infof("successfully acquired lease %v", desc) cancel() }, le.config.RetryPeriod, JitterFactor, true, ctx.Done()) return succeeded @@ -224,11 +224,11 @@ func (le *LeaderElector) renew(ctx context.Context) { le.maybeReportTransition() desc := le.config.Lock.Describe() if err == nil { - glog.V(5).Infof("successfully renewed lease %v", desc) + klog.V(5).Infof("successfully renewed lease %v", desc) return } le.config.Lock.RecordEvent("stopped leading") - glog.Infof("failed to renew lease %v: %v", desc, err) + klog.Infof("failed to renew lease %v: %v", desc, err) cancel() }, le.config.RetryPeriod, ctx.Done()) } @@ -249,11 +249,11 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { oldLeaderElectionRecord, err := le.config.Lock.Get() if err != nil { if !errors.IsNotFound(err) { - glog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err) + klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err) return false } if err = le.config.Lock.Create(leaderElectionRecord); err != nil { - glog.Errorf("error initially creating leader election record: %v", err) + klog.Errorf("error initially creating leader election record: %v", err) return false } le.observedRecord = leaderElectionRecord @@ -268,7 +268,7 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { } if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && !le.IsLeader() { - glog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) + klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) return false } @@ -283,7 +283,7 @@ func (le *LeaderElector) tryAcquireOrRenew() bool { // update the lock itself if err = 
le.config.Lock.Update(leaderElectionRecord); err != nil { - glog.Errorf("Failed to update lock: %v", err) + klog.Errorf("Failed to update lock: %v", err) return false } le.observedRecord = leaderElectionRecord diff --git a/staging/src/k8s.io/client-go/tools/record/BUILD b/staging/src/k8s.io/client-go/tools/record/BUILD index fc1eaf2e6715c..2a8546138e30f 100644 --- a/staging/src/k8s.io/client-go/tools/record/BUILD +++ b/staging/src/k8s.io/client-go/tools/record/BUILD @@ -50,8 +50,8 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/reference:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/groupcache/lru:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/tools/record/event.go b/staging/src/k8s.io/client-go/tools/record/event.go index 168dfa80c56ab..2ee69589c6d25 100644 --- a/staging/src/k8s.io/client-go/tools/record/event.go +++ b/staging/src/k8s.io/client-go/tools/record/event.go @@ -33,7 +33,7 @@ import ( "net/http" - "github.com/golang/glog" + "k8s.io/klog" ) const maxTriesPerEvent = 12 @@ -144,7 +144,7 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela } tries++ if tries >= maxTriesPerEvent { - glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) break } // Randomize the first sleep so that various clients won't all be @@ -194,13 +194,13 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv switch err.(type) { case *restclient.RequestConstructionError: // We will construct the request the same next time, so don't keep trying. - glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) return true case *errors.StatusError: if errors.IsAlreadyExists(err) { - glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) } else { - glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) } return true case *errors.UnexpectedObjectError: @@ -209,7 +209,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv default: // This case includes actual http transport errors. Go ahead and retry. } - glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) return false } @@ -256,12 +256,12 @@ type recorderImpl struct { func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) { ref, err := ref.GetReference(recorder.scheme, object) if err != nil { - glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) return } if !validateEventType(eventtype) { - glog.Errorf("Unsupported event type: '%v'", eventtype) + klog.Errorf("Unsupported event type: '%v'", eventtype) return } diff --git a/staging/src/k8s.io/client-go/tools/remotecommand/BUILD b/staging/src/k8s.io/client-go/tools/remotecommand/BUILD index 9e0aac987705a..efeffeab72643 100644 --- a/staging/src/k8s.io/client-go/tools/remotecommand/BUILD +++ b/staging/src/k8s.io/client-go/tools/remotecommand/BUILD @@ -43,7 +43,7 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/transport/spdy:go_default_library", "//staging/src/k8s.io/client-go/util/exec:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/tools/remotecommand/remotecommand.go b/staging/src/k8s.io/client-go/tools/remotecommand/remotecommand.go index d2b29861e6eeb..892d8d105dc31 100644 --- a/staging/src/k8s.io/client-go/tools/remotecommand/remotecommand.go +++ b/staging/src/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -22,7 +22,7 @@ import ( "net/http" "net/url" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/apimachinery/pkg/util/remotecommand" @@ -132,7 +132,7 @@ func (e *streamExecutor) Stream(options StreamOptions) error { case remotecommand.StreamProtocolV2Name: streamer = newStreamProtocolV2(options) case "": - glog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) + klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) fallthrough case remotecommand.StreamProtocolV1Name: streamer = newStreamProtocolV1(options) diff --git a/staging/src/k8s.io/client-go/tools/remotecommand/v1.go b/staging/src/k8s.io/client-go/tools/remotecommand/v1.go index 92dad727f301f..81cc93ec0550c 100644 --- a/staging/src/k8s.io/client-go/tools/remotecommand/v1.go +++ b/staging/src/k8s.io/client-go/tools/remotecommand/v1.go @@ -22,9 +22,9 @@ import ( "io/ioutil" "net/http" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog" ) // streamProtocolV1 implements the first version of the streaming exec & attach @@ -53,10 +53,10 @@ func (p *streamProtocolV1) stream(conn streamCreator) error { errorChan := make(chan error) cp := func(s string, dst io.Writer, src io.Reader) { - glog.V(6).Infof("Copying %s", s) - defer glog.V(6).Infof("Done copying %s", s) + klog.V(6).Infof("Copying %s", s) + defer klog.V(6).Infof("Done copying %s", s) if _, err := io.Copy(dst, src); err != nil && err != io.EOF { - glog.Errorf("Error copying %s: %v", s, err) + klog.Errorf("Error copying %s: %v", s, err) } if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr { doneChan <- struct{}{} diff --git a/staging/src/k8s.io/client-go/tools/watch/BUILD b/staging/src/k8s.io/client-go/tools/watch/BUILD index d1994ebbbef7b..9f7a97cd4aa14 100644 --- a/staging/src/k8s.io/client-go/tools/watch/BUILD +++ b/staging/src/k8s.io/client-go/tools/watch/BUILD @@ -16,7 +16,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + 
"//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/tools/watch/until.go b/staging/src/k8s.io/client-go/tools/watch/until.go index 9335788439767..aa4bbc21169fa 100644 --- a/staging/src/k8s.io/client-go/tools/watch/until.go +++ b/staging/src/k8s.io/client-go/tools/watch/until.go @@ -22,13 +22,13 @@ import ( "fmt" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/cache" + "k8s.io/klog" ) // PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet, @@ -135,7 +135,7 @@ func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime. func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { if timeout < 0 { // This should be handled in validation - glog.Errorf("Timeout for context shall not be negative!") + klog.Errorf("Timeout for context shall not be negative!") timeout = 0 } diff --git a/staging/src/k8s.io/client-go/transport/BUILD b/staging/src/k8s.io/client-go/transport/BUILD index 05b0e604ac288..dc1800681d3fb 100644 --- a/staging/src/k8s.io/client-go/transport/BUILD +++ b/staging/src/k8s.io/client-go/transport/BUILD @@ -28,7 +28,7 @@ go_library( importpath = "k8s.io/client-go/transport", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/transport/round_trippers.go b/staging/src/k8s.io/client-go/transport/round_trippers.go index 0ebcbbc803736..da417cf96ea08 100644 --- a/staging/src/k8s.io/client-go/transport/round_trippers.go +++ b/staging/src/k8s.io/client-go/transport/round_trippers.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" ) @@ -62,13 +62,13 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip // DebugWrappers wraps a round tripper and logs based on the current log level. 
func DebugWrappers(rt http.RoundTripper) http.RoundTripper { switch { - case bool(glog.V(9)): + case bool(klog.V(9)): rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) - case bool(glog.V(8)): + case bool(klog.V(8)): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) - case bool(glog.V(7)): + case bool(klog.V(7)): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) - case bool(glog.V(6)): + case bool(klog.V(6)): rt = newDebuggingRoundTripper(rt, debugURLTiming) } @@ -138,7 +138,7 @@ func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) + klog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -166,7 +166,7 @@ func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) + klog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -197,7 +197,7 @@ func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) + klog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -257,7 +257,7 @@ func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.delegate.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.delegate) + klog.Errorf("CancelRequest not implemented by %T", rt.delegate) } } @@ -288,7 +288,7 @@ func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt) + klog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -372,7 +372,7 @@ func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { canceler.CancelRequest(req) } else { - glog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) + klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) } } @@ -380,17 +380,17 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e reqInfo := newRequestInfo(req) if rt.levels[debugJustURL] { - glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) + klog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) } if rt.levels[debugCurlCommand] { - glog.Infof("%s", reqInfo.toCurl()) + klog.Infof("%s", reqInfo.toCurl()) } if rt.levels[debugRequestHeaders] { - glog.Infof("Request Headers:") + klog.Infof("Request Headers:") for key, values := range reqInfo.RequestHeaders { for _, value := range values { - glog.Infof(" %s: %s", key, value) + klog.Infof(" %s: %s", key, value) } } } @@ -402,16 +402,16 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e reqInfo.complete(response, err) if rt.levels[debugURLTiming] { - glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, 
reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } if rt.levels[debugResponseStatus] { - glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } if rt.levels[debugResponseHeaders] { - glog.Infof("Response Headers:") + klog.Infof("Response Headers:") for key, values := range reqInfo.ResponseHeaders { for _, value := range values { - glog.Infof(" %s: %s", key, value) + klog.Infof(" %s: %s", key, value) } } } diff --git a/staging/src/k8s.io/client-go/util/certificate/BUILD b/staging/src/k8s.io/client-go/util/certificate/BUILD index 609fb2c28340b..f204883af919f 100644 --- a/staging/src/k8s.io/client-go/util/certificate/BUILD +++ b/staging/src/k8s.io/client-go/util/certificate/BUILD @@ -45,7 +45,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", "//staging/src/k8s.io/client-go/util/certificate/csr:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/util/certificate/certificate_manager.go b/staging/src/k8s.io/client-go/util/certificate/certificate_manager.go index fbdf4ec7f36ee..ed74559e203c3 100644 --- a/staging/src/k8s.io/client-go/util/certificate/certificate_manager.go +++ b/staging/src/k8s.io/client-go/util/certificate/certificate_manager.go @@ -28,7 +28,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" certificates "k8s.io/api/certificates/v1beta1" "k8s.io/apimachinery/pkg/api/errors" @@ -227,17 +227,17 @@ func (m *manager) Start() { // signing API, so don't start the certificate manager if we don't have a // client. 
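[Editor's sketch] The manager.Start() hunks that follow keep the certificate rotation loop intact and only relabel its logging. Condensed as a standalone sketch of the wait pattern, with an illustrative function name and channel (not part of this patch):

package main

import (
	"time"

	"k8s.io/klog"
)

// waitForRotation mirrors the loop body from manager.Start() below:
// sleep until the rotation deadline, but wake early if the certificate
// template changes.
func waitForRotation(deadline time.Time, templateChanged <-chan struct{}) {
	if sleepInterval := deadline.Sub(time.Now()); sleepInterval > 0 {
		klog.V(2).Infof("Waiting %v for next certificate rotation", sleepInterval)
		timer := time.NewTimer(sleepInterval)
		defer timer.Stop()

		select {
		case <-timer.C:
			// Deadline reached; the real manager proceeds to rotate certs.
		case <-templateChanged:
			klog.V(2).Infof("Certificate template changed, rotating")
		}
	}
}

func main() {
	waitForRotation(time.Now().Add(100*time.Millisecond), make(chan struct{}))
}
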
if m.certSigningRequestClient == nil { - glog.V(2).Infof("Certificate rotation is not enabled, no connection to the apiserver.") + klog.V(2).Infof("Certificate rotation is not enabled, no connection to the apiserver.") return } - glog.V(2).Infof("Certificate rotation is enabled.") + klog.V(2).Infof("Certificate rotation is enabled.") templateChanged := make(chan struct{}) go wait.Forever(func() { deadline := m.nextRotationDeadline() if sleepInterval := deadline.Sub(time.Now()); sleepInterval > 0 { - glog.V(2).Infof("Waiting %v for next certificate rotation", sleepInterval) + klog.V(2).Infof("Waiting %v for next certificate rotation", sleepInterval) timer := time.NewTimer(sleepInterval) defer timer.Stop() @@ -250,7 +250,7 @@ func (m *manager) Start() { // if the template now matches what we last requested, restart the rotation deadline loop return } - glog.V(2).Infof("Certificate template changed, rotating") + klog.V(2).Infof("Certificate template changed, rotating") } } @@ -321,7 +321,7 @@ func getCurrentCertificateOrBootstrap( if _, err := store.Update(bootstrapCertificatePEM, bootstrapKeyPEM); err != nil { utilruntime.HandleError(fmt.Errorf("Unable to set the cert/key pair to the bootstrap certificate: %v", err)) } else { - glog.V(4).Infof("Updated the store to contain the initial bootstrap certificate") + klog.V(4).Infof("Updated the store to contain the initial bootstrap certificate") } return &bootstrapCert, true, nil @@ -333,7 +333,7 @@ func getCurrentCertificateOrBootstrap( // This method also keeps track of "server health" by interpreting the responses it gets // from the server on the various calls it makes. func (m *manager) rotateCerts() (bool, error) { - glog.V(2).Infof("Rotating certificates") + klog.V(2).Infof("Rotating certificates") template, csrPEM, keyPEM, privateKey, err := m.generateCSR() if err != nil { @@ -403,7 +403,7 @@ func (m *manager) certSatisfiesTemplateLocked() bool { if template := m.getTemplate(); template != nil { if template.Subject.CommonName != m.cert.Leaf.Subject.CommonName { - glog.V(2).Infof("Current certificate CN (%s) does not match requested CN (%s)", m.cert.Leaf.Subject.CommonName, template.Subject.CommonName) + klog.V(2).Infof("Current certificate CN (%s) does not match requested CN (%s)", m.cert.Leaf.Subject.CommonName, template.Subject.CommonName) return false } @@ -411,7 +411,7 @@ func (m *manager) certSatisfiesTemplateLocked() bool { desiredDNSNames := sets.NewString(template.DNSNames...) missingDNSNames := desiredDNSNames.Difference(currentDNSNames) if len(missingDNSNames) > 0 { - glog.V(2).Infof("Current certificate is missing requested DNS names %v", missingDNSNames.List()) + klog.V(2).Infof("Current certificate is missing requested DNS names %v", missingDNSNames.List()) return false } @@ -425,7 +425,7 @@ func (m *manager) certSatisfiesTemplateLocked() bool { } missingIPs := desiredIPs.Difference(currentIPs) if len(missingIPs) > 0 { - glog.V(2).Infof("Current certificate is missing requested IP addresses %v", missingIPs.List()) + klog.V(2).Infof("Current certificate is missing requested IP addresses %v", missingIPs.List()) return false } @@ -433,7 +433,7 @@ func (m *manager) certSatisfiesTemplateLocked() bool { desiredOrgs := sets.NewString(template.Subject.Organization...) 
missingOrgs := desiredOrgs.Difference(currentOrgs) if len(missingOrgs) > 0 { - glog.V(2).Infof("Current certificate is missing requested orgs %v", missingOrgs.List()) + klog.V(2).Infof("Current certificate is missing requested orgs %v", missingOrgs.List()) return false } } @@ -468,7 +468,7 @@ func (m *manager) nextRotationDeadline() time.Time { totalDuration := float64(notAfter.Sub(m.cert.Leaf.NotBefore)) deadline := m.cert.Leaf.NotBefore.Add(jitteryDuration(totalDuration)) - glog.V(2).Infof("Certificate expiration is %v, rotation deadline is %v", notAfter, deadline) + klog.V(2).Infof("Certificate expiration is %v, rotation deadline is %v", notAfter, deadline) if m.certificateExpiration != nil { m.certificateExpiration.Set(float64(notAfter.Unix())) } diff --git a/staging/src/k8s.io/client-go/util/certificate/certificate_store.go b/staging/src/k8s.io/client-go/util/certificate/certificate_store.go index 81a9e7647c2b6..d266661542108 100644 --- a/staging/src/k8s.io/client-go/util/certificate/certificate_store.go +++ b/staging/src/k8s.io/client-go/util/certificate/certificate_store.go @@ -26,7 +26,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -127,7 +127,7 @@ func (s *fileStore) Current() (*tls.Certificate, error) { if pairFileExists, err := fileExists(pairFile); err != nil { return nil, err } else if pairFileExists { - glog.Infof("Loading cert/key pair from %q.", pairFile) + klog.Infof("Loading cert/key pair from %q.", pairFile) return loadFile(pairFile) } @@ -140,7 +140,7 @@ func (s *fileStore) Current() (*tls.Certificate, error) { return nil, err } if certFileExists && keyFileExists { - glog.Infof("Loading cert/key pair from (%q, %q).", s.certFile, s.keyFile) + klog.Infof("Loading cert/key pair from (%q, %q).", s.certFile, s.keyFile) return loadX509KeyPair(s.certFile, s.keyFile) } @@ -155,7 +155,7 @@ func (s *fileStore) Current() (*tls.Certificate, error) { return nil, err } if certFileExists && keyFileExists { - glog.Infof("Loading cert/key pair from (%q, %q).", c, k) + klog.Infof("Loading cert/key pair from (%q, %q).", c, k) return loadX509KeyPair(c, k) } diff --git a/staging/src/k8s.io/client-go/util/certificate/csr/BUILD b/staging/src/k8s.io/client-go/util/certificate/csr/BUILD index e160947dbc2e6..feb484afebde9 100644 --- a/staging/src/k8s.io/client-go/util/certificate/csr/BUILD +++ b/staging/src/k8s.io/client-go/util/certificate/csr/BUILD @@ -24,7 +24,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/watch:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/client-go/util/certificate/csr/csr.go b/staging/src/k8s.io/client-go/util/certificate/csr/csr.go index 4a53352fee022..04a4a03dc263c 100644 --- a/staging/src/k8s.io/client-go/util/certificate/csr/csr.go +++ b/staging/src/k8s.io/client-go/util/certificate/csr/csr.go @@ -27,7 +27,7 @@ import ( "reflect" "time" - "github.com/golang/glog" + "k8s.io/klog" certificates "k8s.io/api/certificates/v1beta1" "k8s.io/apimachinery/pkg/api/errors" @@ -104,7 +104,7 @@ func RequestCertificate(client certificatesclient.CertificateSigningRequestInter switch { case err == nil: case errors.IsAlreadyExists(err) && len(name) > 0: - glog.Infof("csr for this node already exists, reusing") + klog.Infof("csr for this node already exists, reusing") req, err = client.Get(name, 
metav1.GetOptions{}) if err != nil { return nil, formatError("cannot retrieve certificate signing request: %v", err) @@ -112,7 +112,7 @@ func RequestCertificate(client certificatesclient.CertificateSigningRequestInter if err := ensureCompatible(req, csr, privateKey); err != nil { return nil, fmt.Errorf("retrieved csr is not compatible: %v", err) } - glog.Infof("csr for this node is still valid") + klog.Infof("csr for this node is still valid") default: return nil, formatError("cannot create certificate signing request: %v", err) } diff --git a/staging/src/k8s.io/cloud-provider/BUILD b/staging/src/k8s.io/cloud-provider/BUILD index bbabcabc33ad4..4c0c21dcbe841 100644 --- a/staging/src/k8s.io/cloud-provider/BUILD +++ b/staging/src/k8s.io/cloud-provider/BUILD @@ -20,7 +20,7 @@ go_library( "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/cloud-provider/Godeps/Godeps.json b/staging/src/k8s.io/cloud-provider/Godeps/Godeps.json index 57a4b722de49e..bda2ced916441 100644 --- a/staging/src/k8s.io/cloud-provider/Godeps/Godeps.json +++ b/staging/src/k8s.io/cloud-provider/Godeps/Godeps.json @@ -18,10 +18,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/protobuf/proto", "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" diff --git a/staging/src/k8s.io/cloud-provider/plugins.go b/staging/src/k8s.io/cloud-provider/plugins.go index b9ec938d3862f..9fc6aff8cd255 100644 --- a/staging/src/k8s.io/cloud-provider/plugins.go +++ b/staging/src/k8s.io/cloud-provider/plugins.go @@ -22,7 +22,7 @@ import ( "os" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) // Factory is a function that returns a cloudprovider.Interface. @@ -59,9 +59,9 @@ func RegisterCloudProvider(name string, cloud Factory) { providersMutex.Lock() defer providersMutex.Unlock() if _, found := providers[name]; found { - glog.Fatalf("Cloud provider %q was registered twice", name) + klog.Fatalf("Cloud provider %q was registered twice", name) } - glog.V(1).Infof("Registered cloud provider %q", name) + klog.V(1).Infof("Registered cloud provider %q", name) providers[name] = cloud } @@ -100,12 +100,12 @@ func InitCloudProvider(name string, configFilePath string) (Interface, error) { var err error if name == "" { - glog.Info("No cloud provider specified.") + klog.Info("No cloud provider specified.") return nil, nil } if IsExternal(name) { - glog.Info("External cloud provider specified") + klog.Info("External cloud provider specified") return nil, nil } @@ -115,7 +115,7 @@ func InitCloudProvider(name string, configFilePath string) (Interface, error) { if provider.external { detail = fmt.Sprintf("Please use 'external' cloud provider for %s: %s", name, provider.detail) } - glog.Warningf("WARNING: %s built-in cloud provider is now deprecated. %s", name, detail) + klog.Warningf("WARNING: %s built-in cloud provider is now deprecated. 
%s", name, detail) break } @@ -125,7 +125,7 @@ func InitCloudProvider(name string, configFilePath string) (Interface, error) { var config *os.File config, err = os.Open(configFilePath) if err != nil { - glog.Fatalf("Couldn't open cloud provider configuration %s: %#v", + klog.Fatalf("Couldn't open cloud provider configuration %s: %#v", configFilePath, err) } diff --git a/staging/src/k8s.io/cluster-bootstrap/Godeps/Godeps.json b/staging/src/k8s.io/cluster-bootstrap/Godeps/Godeps.json index 28d4545947a00..4fd105c86ac86 100644 --- a/staging/src/k8s.io/cluster-bootstrap/Godeps/Godeps.json +++ b/staging/src/k8s.io/cluster-bootstrap/Godeps/Godeps.json @@ -14,10 +14,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" diff --git a/staging/src/k8s.io/code-generator/Godeps/Godeps.json b/staging/src/k8s.io/code-generator/Godeps/Godeps.json index cc809b55f8918..c9b2ce06a102f 100644 --- a/staging/src/k8s.io/code-generator/Godeps/Godeps.json +++ b/staging/src/k8s.io/code-generator/Godeps/Godeps.json @@ -106,10 +106,6 @@ "ImportPath": "github.com/gogo/protobuf/vanity/command", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/spf13/pflag", "Rev": "583c0c0531f06d5278b7d917446061adc344b5cd" diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/client-gen/BUILD index 8785ee09a7f96..5df0eb366d64c 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/BUILD @@ -20,9 +20,9 @@ go_library( "//staging/src/k8s.io/code-generator/cmd/client-gen/args:go_default_library", "//staging/src/k8s.io/code-generator/cmd/client-gen/generators:go_default_library", "//staging/src/k8s.io/code-generator/pkg/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/BUILD b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/BUILD index 3bae7189e15c2..cf386272f1aad 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/BUILD @@ -24,11 +24,11 @@ go_library( "//staging/src/k8s.io/code-generator/cmd/client-gen/generators/util:go_default_library", "//staging/src/k8s.io/code-generator/cmd/client-gen/path:go_default_library", "//staging/src/k8s.io/code-generator/cmd/client-gen/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go index bf5368781716b..ee6ebbcf0937a 100644 --- 
a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go @@ -32,7 +32,7 @@ import ( "k8s.io/gengo/namer" "k8s.io/gengo/types" - "github.com/golang/glog" + "k8s.io/klog" ) // NameSystems returns the name system used by the generators in this package. @@ -318,12 +318,12 @@ func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.Cust func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - glog.Fatalf("Failed loading boilerplate: %v", err) + klog.Fatalf("Failed loading boilerplate: %v", err) } customArgs, ok := arguments.CustomArgs.(*clientgenargs.CustomArgs) if !ok { - glog.Fatalf("cannot convert arguments.CustomArgs to clientgenargs.CustomArgs") + klog.Fatalf("cannot convert arguments.CustomArgs to clientgenargs.CustomArgs") } includedTypesOverrides := customArgs.IncludedTypesOverrides diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go index 22c28e35f8f0b..6e0d187f5cb67 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/main.go @@ -21,9 +21,9 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/gengo/args" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/client-gen/args" "k8s.io/code-generator/cmd/client-gen/generators" @@ -31,6 +31,7 @@ import ( ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -52,7 +53,7 @@ func main() { } if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } if err := genericArgs.Execute( @@ -60,6 +61,6 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } } diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD index 1991d17530015..fd488f18c2503 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/BUILD @@ -20,9 +20,9 @@ go_library( "//staging/src/k8s.io/code-generator/cmd/conversion-gen/args:go_default_library", "//staging/src/k8s.io/code-generator/cmd/conversion-gen/generators:go_default_library", "//staging/src/k8s.io/code-generator/pkg/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD b/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD index 1088b39fcdced..2b650f2405eca 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/BUILD @@ -12,11 +12,11 @@ go_library( importpath = "k8s.io/code-generator/cmd/conversion-gen/generators", deps = [ "//staging/src/k8s.io/code-generator/cmd/conversion-gen/args:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", 
"//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go b/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go index 422237e11772e..775972d12318a 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go @@ -29,7 +29,7 @@ import ( "k8s.io/gengo/namer" "k8s.io/gengo/types" - "github.com/golang/glog" + "k8s.io/klog" conversionargs "k8s.io/code-generator/cmd/conversion-gen/args" ) @@ -124,10 +124,10 @@ type conversionFuncMap map[conversionPair]*types.Type // Returns all manually-defined conversion functions in the package. func getManualConversionFunctions(context *generator.Context, pkg *types.Package, manualMap conversionFuncMap) { if pkg == nil { - glog.Warningf("Skipping nil package passed to getManualConversionFunctions") + klog.Warningf("Skipping nil package passed to getManualConversionFunctions") return } - glog.V(5).Infof("Scanning for conversion functions in %v", pkg.Name) + klog.V(5).Infof("Scanning for conversion functions in %v", pkg.Name) scopeName := types.Ref(conversionPackagePath, "Scope").Name errorName := types.Ref("", "error").Name @@ -136,34 +136,34 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package for _, f := range pkg.Functions { if f.Underlying == nil || f.Underlying.Kind != types.Func { - glog.Errorf("Malformed function: %#v", f) + klog.Errorf("Malformed function: %#v", f) continue } if f.Underlying.Signature == nil { - glog.Errorf("Function without signature: %#v", f) + klog.Errorf("Function without signature: %#v", f) continue } - glog.V(8).Infof("Considering function %s", f.Name) + klog.V(8).Infof("Considering function %s", f.Name) signature := f.Underlying.Signature // Check whether the function is conversion function. // Note that all of them have signature: // func Convert_inType_To_outType(inType, outType, conversion.Scope) error if signature.Receiver != nil { - glog.V(8).Infof("%s has a receiver", f.Name) + klog.V(8).Infof("%s has a receiver", f.Name) continue } if len(signature.Parameters) != 3 || signature.Parameters[2].Name != scopeName { - glog.V(8).Infof("%s has wrong parameters", f.Name) + klog.V(8).Infof("%s has wrong parameters", f.Name) continue } if len(signature.Results) != 1 || signature.Results[0].Name != errorName { - glog.V(8).Infof("%s has wrong results", f.Name) + klog.V(8).Infof("%s has wrong results", f.Name) continue } inType := signature.Parameters[0] outType := signature.Parameters[1] if inType.Kind != types.Pointer || outType.Kind != types.Pointer { - glog.V(8).Infof("%s has wrong parameter types", f.Name) + klog.V(8).Infof("%s has wrong parameter types", f.Name) continue } // Now check if the name satisfies the convention. @@ -171,7 +171,7 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package args := argsFromType(inType.Elem, outType.Elem) sw.Do("Convert_$.inType|public$_To_$.outType|public$", args) if f.Name.Name == buffer.String() { - glog.V(4).Infof("Found conversion function %s", f.Name) + klog.V(4).Infof("Found conversion function %s", f.Name) key := conversionPair{inType.Elem, outType.Elem} // We might scan the same package twice, and that's OK. 
if v, ok := manualMap[key]; ok && v != nil && v.Name.Package != pkg.Path { @@ -181,9 +181,9 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package } else { // prevent user error when they don't get the correct conversion signature if strings.HasPrefix(f.Name.Name, "Convert_") { - glog.Errorf("Rename function %s %s -> %s to match expected conversion signature", f.Name.Package, f.Name.Name, buffer.String()) + klog.Errorf("Rename function %s %s -> %s to match expected conversion signature", f.Name.Package, f.Name.Name, buffer.String()) } - glog.V(8).Infof("%s has wrong name", f.Name) + klog.V(8).Infof("%s has wrong name", f.Name) } buffer.Reset() } @@ -192,7 +192,7 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - glog.Fatalf("Failed loading boilerplate: %v", err) + klog.Fatalf("Failed loading boilerplate: %v", err) } packages := generator.Packages{} @@ -220,7 +220,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat } processed[i] = true - glog.V(5).Infof("considering pkg %q", i) + klog.V(5).Infof("considering pkg %q", i) pkg := context.Universe[i] // typesPkg is where the versioned types are defined. Sometimes it is // different from pkg. For example, kubernetes core/v1 types are defined @@ -239,9 +239,9 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // in their doc.go file. peerPkgs := extractTag(pkg.Comments) if peerPkgs != nil { - glog.V(5).Infof(" tags: %q", peerPkgs) + klog.V(5).Infof(" tags: %q", peerPkgs) } else { - glog.V(5).Infof(" no tag") + klog.V(5).Infof(" no tag") continue } skipUnsafe := false @@ -255,14 +255,14 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat externalTypesValues := extractExternalTypesTag(pkg.Comments) if externalTypesValues != nil { if len(externalTypesValues) != 1 { - glog.Fatalf(" expect only one value for %q tag, got: %q", externalTypesTagName, externalTypesValues) + klog.Fatalf(" expect only one value for %q tag, got: %q", externalTypesTagName, externalTypesValues) } externalTypes := externalTypesValues[0] - glog.V(5).Infof(" external types tags: %q", externalTypes) + klog.V(5).Infof(" external types tags: %q", externalTypes) var err error typesPkg, err = context.AddDirectory(externalTypes) if err != nil { - glog.Fatalf("cannot import package %s", externalTypes) + klog.Fatalf("cannot import package %s", externalTypes) } // update context.Order to the latest context.Universe orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} @@ -291,7 +291,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat context.AddDir(pp) p := context.Universe[pp] if nil == p { - glog.Fatalf("failed to find pkg: %s", pp) + klog.Fatalf("failed to find pkg: %s", pp) } getManualConversionFunctions(context, p, manualConversions) } @@ -335,7 +335,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // from being a candidate for unsafe conversion for k, v := range manualConversions { if isCopyOnly(v.CommentLines) { - glog.V(5).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name) + klog.V(5).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name) continue } // this type should be excluded from all equivalence, because the 
converter must be called. @@ -518,9 +518,9 @@ func (g *genConversion) convertibleOnlyWithinPackage(inType, outType *types.Type tagvals := extractTag(t.CommentLines) if tagvals != nil { if tagvals[0] != "false" { - glog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tagvals[0]) + klog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tagvals[0]) } - glog.V(5).Infof("type %v requests no conversion generation, skipping", t) + klog.V(5).Infof("type %v requests no conversion generation, skipping", t) return false } // TODO: Consider generating functions for other kinds too. @@ -582,10 +582,10 @@ func (g *genConversion) preexists(inType, outType *types.Type) (*types.Type, boo } func (g *genConversion) Init(c *generator.Context, w io.Writer) error { - if glog.V(5) { + if klog.V(5) { if m, ok := g.useUnsafe.(equalMemoryTypes); ok { var result []string - glog.Infof("All objects without identical memory layout:") + klog.Infof("All objects without identical memory layout:") for k, v := range m { if v { continue @@ -594,7 +594,7 @@ func (g *genConversion) Init(c *generator.Context, w io.Writer) error { } sort.Strings(result) for _, s := range result { - glog.Infof(s) + klog.Infof(s) } } } @@ -643,7 +643,7 @@ func (g *genConversion) Init(c *generator.Context, w io.Writer) error { } func (g *genConversion) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { - glog.V(5).Infof("generating for type %v", t) + klog.V(5).Infof("generating for type %v", t) peerType := getPeerTypeFor(c, t, g.peerPackages) sw := generator.NewSnippetWriter(w, c, "$", "$") g.generateConversion(t, peerType, sw) @@ -664,10 +664,10 @@ func (g *genConversion) generateConversion(inType, outType *types.Type, sw *gene // There is a public manual Conversion method: use it. } else if skipped := g.skippedFields[inType]; len(skipped) != 0 { // The inType had some fields we could not generate. - glog.Errorf("Warning: could not find nor generate a final Conversion function for %v -> %v", inType, outType) - glog.Errorf(" the following fields need manual conversion:") + klog.Errorf("Warning: could not find nor generate a final Conversion function for %v -> %v", inType, outType) + klog.Errorf(" the following fields need manual conversion:") for _, f := range skipped { - glog.Errorf(" - %v", f) + klog.Errorf(" - %v", f) } } else { // Emit a public conversion function. @@ -682,7 +682,7 @@ func (g *genConversion) generateConversion(inType, outType *types.Type, sw *gene // at any nesting level. This makes the autogenerator easy to understand, and // the compiler shouldn't care. func (g *genConversion) generateFor(inType, outType *types.Type, sw *generator.SnippetWriter) { - glog.V(5).Infof("generating %v -> %v", inType, outType) + klog.V(5).Infof("generating %v -> %v", inType, outType) var f func(*types.Type, *types.Type, *generator.SnippetWriter) switch inType.Kind { @@ -853,7 +853,7 @@ func (g *genConversion) doStruct(inType, outType *types.Type, sw *generator.Snip sw.Do("}\n", nil) continue } - glog.V(5).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name) + klog.V(5).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name) } // If we can't auto-convert, punt before we emit any code. 
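[Editor's sketch] Note the `if klog.V(5)` guard retained in genConversion.Init above: as in glog, V(n) is usable directly as a bool, so the expensive dump of non-identical memory layouts is built only when that verbosity is enabled. A self-contained sketch of the same guard (the map contents are illustrative):

package main

import (
	"flag"
	"sort"

	"k8s.io/klog"
)

func dumpNonIdenticalLayouts(layouts map[string]bool) {
	// The slice build and sort below are only paid for when -v >= 5.
	if klog.V(5) {
		var result []string
		for name, identical := range layouts {
			if identical {
				continue
			}
			result = append(result, name)
		}
		sort.Strings(result)
		klog.Infof("All objects without identical memory layout:")
		for _, s := range result {
			klog.Infof("  %s", s)
		}
	}
}

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	dumpNonIdenticalLayouts(map[string]bool{"v1.Pod -> core.Pod": false})
}
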
diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go b/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go index f2b91cc2e29ad..698baa7db7a7f 100644 --- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/main.go @@ -38,9 +38,9 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/gengo/args" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/conversion-gen/args" "k8s.io/code-generator/cmd/conversion-gen/generators" @@ -48,6 +48,7 @@ import ( ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -61,7 +62,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } // Run it. @@ -70,7 +71,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD index e6841050a0699..a1a48d4d91628 100644 --- a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/BUILD @@ -19,10 +19,10 @@ go_library( deps = [ "//staging/src/k8s.io/code-generator/cmd/deepcopy-gen/args:go_default_library", "//staging/src/k8s.io/code-generator/pkg/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/deepcopy-gen/generators:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go index cce65b772f810..96fb298734150 100644 --- a/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/deepcopy-gen/main.go @@ -46,16 +46,17 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/gengo/args" "k8s.io/gengo/examples/deepcopy-gen/generators" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/deepcopy-gen/args" "k8s.io/code-generator/pkg/util" ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -69,7 +70,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } // Run it. 
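[Editor's sketch] The main() changes here repeat across every generator: since klog exposes an explicit InitFlags() instead of registering flags in init(), each command now calls klog.InitFlags(nil) first, merges the standard Go flag set into pflag, and reports Validate/Execute failures via klog.Fatalf. A distilled sketch of that shared prologue:

package main

import (
	"flag"

	"github.com/spf13/pflag"
	"k8s.io/klog"
)

func main() {
	// Register -v, -logtostderr, etc. on the standard flag set.
	klog.InitFlags(nil)

	// Fold the standard flag set into pflag so both kinds of flags parse
	// together, mirroring the generator mains in this patch.
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
	pflag.Parse()

	klog.V(2).Info("Completed successfully.")
}
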
@@ -78,7 +79,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD index 056cd14add8e9..87a8e852926c7 100644 --- a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/BUILD @@ -19,10 +19,10 @@ go_library( deps = [ "//staging/src/k8s.io/code-generator/cmd/defaulter-gen/args:go_default_library", "//staging/src/k8s.io/code-generator/pkg/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/defaulter-gen/generators:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go index 9d33f700b33c5..40bb875e52a4f 100644 --- a/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/defaulter-gen/main.go @@ -45,16 +45,17 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/gengo/args" "k8s.io/gengo/examples/defaulter-gen/generators" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/defaulter-gen/args" "k8s.io/code-generator/pkg/util" ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -68,7 +69,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } // Run it. 
@@ -77,7 +78,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD index e90c1f35e3d75..a2718d402b8ef 100644 --- a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/BUILD @@ -22,13 +22,13 @@ go_library( deps = [ "//staging/src/k8s.io/code-generator/pkg/util:go_default_library", "//staging/src/k8s.io/code-generator/third_party/forked/golang/reflect:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/parser:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go index 49e8297dbd045..1a9803dc88d99 100644 --- a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go +++ b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/generator.go @@ -25,7 +25,7 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/gengo/generator" "k8s.io/gengo/namer" @@ -85,7 +85,7 @@ func (g *genProtoIDL) Filter(c *generator.Context, t *types.Type) bool { // Type specified "true". return true } - glog.Fatalf(`Comment tag "protobuf" must be true or false, found: %q`, tagVals[0]) + klog.Fatalf(`Comment tag "protobuf" must be true or false, found: %q`, tagVals[0]) } if !g.generateAll { // We're not generating everything. diff --git a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go index 2dff5b9229d86..8e2a1917d041e 100644 --- a/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go +++ b/staging/src/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/tags.go @@ -17,8 +17,8 @@ limitations under the License. 
package protobuf import ( - "github.com/golang/glog" "k8s.io/gengo/types" + "k8s.io/klog" ) // extractBoolTagOrDie gets the comment-tags for the key and asserts that, if @@ -27,7 +27,7 @@ import ( func extractBoolTagOrDie(key string, lines []string) bool { val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return val } diff --git a/staging/src/k8s.io/code-generator/cmd/import-boss/BUILD b/staging/src/k8s.io/code-generator/cmd/import-boss/BUILD index 1b73d4d96648e..6fec2c33388ab 100644 --- a/staging/src/k8s.io/code-generator/cmd/import-boss/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/import-boss/BUILD @@ -18,9 +18,9 @@ go_library( importpath = "k8s.io/code-generator/cmd/import-boss", deps = [ "//staging/src/k8s.io/code-generator/pkg/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/import-boss/generators:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/import-boss/main.go b/staging/src/k8s.io/code-generator/cmd/import-boss/main.go index d998994415da9..c0f10c3a49c16 100644 --- a/staging/src/k8s.io/code-generator/cmd/import-boss/main.go +++ b/staging/src/k8s.io/code-generator/cmd/import-boss/main.go @@ -63,10 +63,11 @@ import ( "k8s.io/gengo/args" "k8s.io/gengo/examples/import-boss/generators" - "github.com/golang/glog" + "k8s.io/klog" ) func main() { + klog.InitFlags(nil) arguments := args.Default() // Override defaults. @@ -82,8 +83,8 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Errorf("Error: %v", err) + klog.Errorf("Error: %v", err) os.Exit(1) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD index d27d696d89736..5a11f5433fdd2 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/BUILD @@ -20,9 +20,9 @@ go_library( "//staging/src/k8s.io/code-generator/cmd/informer-gen/args:go_default_library", "//staging/src/k8s.io/code-generator/cmd/informer-gen/generators:go_default_library", "//staging/src/k8s.io/code-generator/pkg/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD index cead45f7aeed4..9902386d81b01 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/BUILD @@ -24,11 +24,11 @@ go_library( "//staging/src/k8s.io/code-generator/cmd/client-gen/generators/util:go_default_library", "//staging/src/k8s.io/code-generator/cmd/client-gen/types:go_default_library", "//staging/src/k8s.io/code-generator/cmd/informer-gen/args:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", + 
"//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/factory.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/factory.go index 5c557db7393f6..62ae109a4a24d 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/factory.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/factory.go @@ -25,7 +25,7 @@ import ( "k8s.io/gengo/namer" "k8s.io/gengo/types" - "github.com/golang/glog" + "k8s.io/klog" ) // factoryGenerator produces a file of listers for a given GroupVersion and @@ -65,7 +65,7 @@ func (g *factoryGenerator) Imports(c *generator.Context) (imports []string) { func (g *factoryGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "{{", "}}") - glog.V(5).Infof("processing type %v", t) + klog.V(5).Infof("processing type %v", t) gvInterfaces := make(map[string]*types.Type) gvNewFuncs := make(map[string]*types.Type) diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go index 92cde1486c884..fc0668c5bed29 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/factoryinterface.go @@ -23,7 +23,7 @@ import ( "k8s.io/gengo/namer" "k8s.io/gengo/types" - "github.com/golang/glog" + "k8s.io/klog" ) // factoryInterfaceGenerator produces a file of interfaces used to break a dependency cycle for @@ -60,7 +60,7 @@ func (g *factoryInterfaceGenerator) Imports(c *generator.Context) (imports []str func (g *factoryInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "{{", "}}") - glog.V(5).Infof("processing type %v", t) + klog.V(5).Infof("processing type %v", t) m := map[string]interface{}{ "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/informer.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/informer.go index 88cc08df52fcb..9204d6215ac8b 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/informer.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/informer.go @@ -28,7 +28,7 @@ import ( "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "github.com/golang/glog" + "k8s.io/klog" ) // informerGenerator produces a file of listers for a given GroupVersion and @@ -66,7 +66,7 @@ func (g *informerGenerator) Imports(c *generator.Context) (imports []string) { func (g *informerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "$", "$") - glog.V(5).Infof("processing type %v", t) + klog.V(5).Infof("processing type %v", t) listerPackage := fmt.Sprintf("%s/%s/%s", g.listersPackage, g.groupPkgName, strings.ToLower(g.groupVersion.Version.NonEmpty())) clientSetInterface := c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"}) diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go index 642f9a466ef92..cfb91cebac639 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go +++ 
b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go @@ -22,11 +22,11 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" "k8s.io/gengo/args" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/types" + "k8s.io/klog" "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" @@ -102,12 +102,12 @@ func vendorless(p string) string { func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - glog.Fatalf("Failed loading boilerplate: %v", err) + klog.Fatalf("Failed loading boilerplate: %v", err) } customArgs, ok := arguments.CustomArgs.(*informergenargs.CustomArgs) if !ok { - glog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs) + klog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs) } internalVersionPackagePath := filepath.Join(arguments.OutputPackagePath) @@ -128,7 +128,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat objectMeta, internal, err := objectMetaForPackage(p) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } if objectMeta == nil { // no types in this package had genclient @@ -141,7 +141,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat if internal { lastSlash := strings.LastIndex(p.Path, "/") if lastSlash == -1 { - glog.Fatalf("error constructing internal group version for package %q", p.Path) + klog.Fatalf("error constructing internal group version for package %q", p.Path) } gv.Group = clientgentypes.Group(p.Path[lastSlash+1:]) targetGroupVersions = internalGroupVersions diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/tags.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/tags.go index afa287815204a..d25d5b6304905 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/tags.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/tags.go @@ -17,8 +17,8 @@ limitations under the License. package generators import ( - "github.com/golang/glog" "k8s.io/gengo/types" + "k8s.io/klog" ) // extractBoolTagOrDie gets the comment-tags for the key and asserts that, if @@ -27,7 +27,7 @@ import ( func extractBoolTagOrDie(key string, lines []string) bool { val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return val } diff --git a/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go b/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go index bfe826080cc99..14f3e923e6cc0 100644 --- a/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/informer-gen/main.go @@ -20,16 +20,17 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/code-generator/cmd/informer-gen/generators" "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/informer-gen/args" ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -47,7 +48,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } // Run it. 
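[Editor's sketch] The tags.go helpers duplicated across the generators above now die via klog.Fatal when a comment tag fails to parse as a bool. A runnable sketch of what extractBoolTagOrDie wraps, using gengo's tag API as shown in the hunk (the tag name is illustrative):

package main

import (
	"fmt"

	"k8s.io/gengo/types"
)

func main() {
	lines := []string{"+genclient:nonNamespaced=true"}

	// ExtractSingleBoolCommentTag returns the tag's value, or the given
	// default when the tag is absent; a malformed value is an error, which
	// the generators above turn into klog.Fatal(err).
	val, err := types.ExtractSingleBoolCommentTag("+", "genclient:nonNamespaced", false, lines)
	if err != nil {
		panic(err)
	}
	fmt.Println(val) // true
}
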
@@ -56,7 +57,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD index fec8ca914d2e3..e2286b8a49e15 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/BUILD @@ -20,9 +20,9 @@ go_library( "//staging/src/k8s.io/code-generator/cmd/lister-gen/args:go_default_library", "//staging/src/k8s.io/code-generator/cmd/lister-gen/generators:go_default_library", "//staging/src/k8s.io/code-generator/pkg/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/BUILD b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/BUILD index 75212ca1da97b..ec1a2ec626ee7 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/BUILD @@ -17,11 +17,11 @@ go_library( deps = [ "//staging/src/k8s.io/code-generator/cmd/client-gen/generators/util:go_default_library", "//staging/src/k8s.io/code-generator/cmd/client-gen/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/lister.go b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/lister.go index cde6e2f770a9c..c8ed5ad4d3be6 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/lister.go +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/lister.go @@ -30,7 +30,7 @@ import ( "k8s.io/code-generator/cmd/client-gen/generators/util" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" - "github.com/golang/glog" + "k8s.io/klog" ) // NameSystems returns the name system used by the generators in this package. 
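[Editor's sketch] Both informer-gen above and lister-gen in the hunks that follow derive an internal group name from the package path, failing via klog.Fatalf when the path has no slash. That derivation, isolated with an illustrative path:

package main

import (
	"fmt"
	"strings"

	"k8s.io/klog"
)

func main() {
	p := "k8s.io/kubernetes/pkg/apis/apps"

	lastSlash := strings.LastIndex(p, "/")
	if lastSlash == -1 {
		klog.Fatalf("error constructing internal group version for package %q", p)
	}
	fmt.Println(p[lastSlash+1:]) // apps
}
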
@@ -66,7 +66,7 @@ func DefaultNameSystem() string { func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - glog.Fatalf("Failed loading boilerplate: %v", err) + klog.Fatalf("Failed loading boilerplate: %v", err) } var packageList generator.Packages @@ -75,7 +75,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat objectMeta, internal, err := objectMetaForPackage(p) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } if objectMeta == nil { // no types in this package had genclient @@ -88,7 +88,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat if internal { lastSlash := strings.LastIndex(p.Path, "/") if lastSlash == -1 { - glog.Fatalf("error constructing internal group version for package %q", p.Path) + klog.Fatalf("error constructing internal group version for package %q", p.Path) } gv.Group = clientgentypes.Group(p.Path[lastSlash+1:]) internalGVPkg = p.Path @@ -223,7 +223,7 @@ func (g *listerGenerator) Imports(c *generator.Context) (imports []string) { func (g *listerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { sw := generator.NewSnippetWriter(w, c, "$", "$") - glog.V(5).Infof("processing type %v", t) + klog.V(5).Infof("processing type %v", t) m := map[string]interface{}{ "Resource": c.Universe.Function(types.Name{Package: t.Name.Package, Name: "Resource"}), "type": t, diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/tags.go b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/tags.go index afa287815204a..d25d5b6304905 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/tags.go +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/generators/tags.go @@ -17,8 +17,8 @@ limitations under the License. package generators import ( - "github.com/golang/glog" "k8s.io/gengo/types" + "k8s.io/klog" ) // extractBoolTagOrDie gets the comment-tags for the key and asserts that, if @@ -27,7 +27,7 @@ import ( func extractBoolTagOrDie(key string, lines []string) bool { val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return val } diff --git a/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go b/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go index d5ff8e46ee035..aca16b2bda39e 100644 --- a/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/lister-gen/main.go @@ -20,16 +20,17 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" "k8s.io/code-generator/cmd/lister-gen/generators" "k8s.io/code-generator/pkg/util" "k8s.io/gengo/args" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/lister-gen/args" ) func main() { + klog.InitFlags(nil) genericArgs, customArgs := generatorargs.NewDefaults() // Override defaults. @@ -44,7 +45,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } // Run it. 
@@ -53,7 +54,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/staging/src/k8s.io/code-generator/cmd/register-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/register-gen/BUILD index bdf68063d9f73..a784d2a4bb154 100644 --- a/staging/src/k8s.io/code-generator/cmd/register-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/register-gen/BUILD @@ -10,9 +10,9 @@ go_library( "//staging/src/k8s.io/code-generator/cmd/register-gen/args:go_default_library", "//staging/src/k8s.io/code-generator/cmd/register-gen/generators:go_default_library", "//staging/src/k8s.io/code-generator/pkg/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/register-gen/generators/BUILD b/staging/src/k8s.io/code-generator/cmd/register-gen/generators/BUILD index 810ae444f1478..f88391aa5ce32 100644 --- a/staging/src/k8s.io/code-generator/cmd/register-gen/generators/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/register-gen/generators/BUILD @@ -11,11 +11,11 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/code-generator/cmd/client-gen/types:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/generator:go_default_library", "//vendor/k8s.io/gengo/namer:go_default_library", "//vendor/k8s.io/gengo/types:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/register-gen/generators/packages.go b/staging/src/k8s.io/code-generator/cmd/register-gen/generators/packages.go index ca13ca85798c7..5186e421f2ea1 100644 --- a/staging/src/k8s.io/code-generator/cmd/register-gen/generators/packages.go +++ b/staging/src/k8s.io/code-generator/cmd/register-gen/generators/packages.go @@ -22,7 +22,7 @@ import ( "path" "strings" - "github.com/golang/glog" + "k8s.io/klog" clientgentypes "k8s.io/code-generator/cmd/client-gen/types" "k8s.io/gengo/args" @@ -46,7 +46,7 @@ func DefaultNameSystem() string { func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages { boilerplate, err := arguments.LoadGoBoilerplate() if err != nil { - glog.Fatalf("Failed loading boilerplate: %v", err) + klog.Fatalf("Failed loading boilerplate: %v", err) } packages := generator.Packages{} @@ -54,27 +54,27 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat pkg := context.Universe.Package(inputDir) internal, err := isInternal(pkg) if err != nil { - glog.V(5).Infof("skipping the generation of %s file, due to err %v", arguments.OutputFileBaseName, err) + klog.V(5).Infof("skipping the generation of %s file, due to err %v", arguments.OutputFileBaseName, err) continue } if internal { - glog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", arguments.OutputFileBaseName, pkg.Name) + klog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", arguments.OutputFileBaseName, pkg.Name) continue } 
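[Editor's sketch] The register-gen hunks continuing below also honor a doc.go override: a "+groupName=" comment tag replaces the group inferred from the package path. A sketch of that extraction using gengo's comment-tag API as it appears in the hunk (the group value is illustrative):

package main

import (
	"fmt"

	"k8s.io/gengo/types"
)

func main() {
	// register-gen reads doc comments such as:
	//   // +groupName=example.k8s.io
	doc := []string{"+groupName=example.k8s.io"}

	if override := types.ExtractCommentTags("+", doc)["groupName"]; override != nil {
		fmt.Println("overriding the group name with =", override[0])
	}
}
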
registerFileName := "register.go" searchPath := path.Join(args.DefaultSourceTree(), inputDir, registerFileName) if _, err := os.Stat(path.Join(searchPath)); err == nil { - glog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", arguments.OutputFileBaseName, registerFileName, searchPath) + klog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", arguments.OutputFileBaseName, registerFileName, searchPath) continue } else if err != nil && !os.IsNotExist(err) { - glog.Fatalf("an error %v has occurred while checking if %s exists", err, registerFileName) + klog.Fatalf("an error %v has occurred while checking if %s exists", err, registerFileName) } gv := clientgentypes.GroupVersion{} { pathParts := strings.Split(pkg.Path, "/") if len(pathParts) < 2 { - glog.Errorf("the path of the package must contain the group name and the version, path = %s", pkg.Path) + klog.Errorf("the path of the package must contain the group name and the version, path = %s", pkg.Path) continue } gv.Group = clientgentypes.Group(pathParts[len(pathParts)-2]) @@ -84,14 +84,14 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat // extract the fully qualified API group name from it and overwrite the group inferred from the package path if override := types.ExtractCommentTags("+", pkg.DocComments)["groupName"]; override != nil { groupName := override[0] - glog.V(5).Infof("overriding the group name with = %s", groupName) + klog.V(5).Infof("overriding the group name with = %s", groupName) gv.Group = clientgentypes.Group(groupName) } } typesToRegister := []*types.Type{} for _, t := range pkg.Types { - glog.V(5).Infof("considering type = %s", t.Name.String()) + klog.V(5).Infof("considering type = %s", t.Name.String()) for _, typeMember := range t.Members { if typeMember.Name == "TypeMeta" && typeMember.Embedded == true { typesToRegister = append(typesToRegister, t) diff --git a/staging/src/k8s.io/code-generator/cmd/register-gen/main.go b/staging/src/k8s.io/code-generator/cmd/register-gen/main.go index db02a4af4b5a5..30a175d8d6201 100644 --- a/staging/src/k8s.io/code-generator/cmd/register-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/register-gen/main.go @@ -20,8 +20,8 @@ import ( "flag" "path/filepath" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" generatorargs "k8s.io/code-generator/cmd/register-gen/args" "k8s.io/code-generator/cmd/register-gen/generators" @@ -30,6 +30,7 @@ import ( ) func main() { + klog.InitFlags(nil) genericArgs := generatorargs.NewDefaults() genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath()) genericArgs.AddFlags(pflag.CommandLine) @@ -38,7 +39,7 @@ func main() { pflag.Parse() if err := generatorargs.Validate(genericArgs); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } if err := genericArgs.Execute( @@ -46,7 +47,7 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Fatalf("Error: %v", err) + klog.Fatalf("Error: %v", err) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/staging/src/k8s.io/code-generator/cmd/set-gen/BUILD b/staging/src/k8s.io/code-generator/cmd/set-gen/BUILD index 188f8e1ffe1f1..2b2702964067b 100644 --- a/staging/src/k8s.io/code-generator/cmd/set-gen/BUILD +++ b/staging/src/k8s.io/code-generator/cmd/set-gen/BUILD @@ -22,9 +22,9 @@ go_library( importpath = 
"k8s.io/code-generator/cmd/set-gen", deps = [ "//staging/src/k8s.io/code-generator/pkg/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/gengo/args:go_default_library", "//vendor/k8s.io/gengo/examples/set-gen/generators:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/code-generator/cmd/set-gen/main.go b/staging/src/k8s.io/code-generator/cmd/set-gen/main.go index cf8f01d89e514..45694d4f33056 100644 --- a/staging/src/k8s.io/code-generator/cmd/set-gen/main.go +++ b/staging/src/k8s.io/code-generator/cmd/set-gen/main.go @@ -32,10 +32,11 @@ import ( "k8s.io/gengo/args" "k8s.io/gengo/examples/set-gen/generators" - "github.com/golang/glog" + "k8s.io/klog" ) func main() { + klog.InitFlags(nil) arguments := args.Default() // Override defaults. @@ -48,8 +49,8 @@ func main() { generators.DefaultNameSystem(), generators.Packages, ); err != nil { - glog.Errorf("Error: %v", err) + klog.Errorf("Error: %v", err) os.Exit(1) } - glog.V(2).Info("Completed successfully.") + klog.V(2).Info("Completed successfully.") } diff --git a/staging/src/k8s.io/csi-api/Godeps/Godeps.json b/staging/src/k8s.io/csi-api/Godeps/Godeps.json index 201028f302879..98cefa2d132c9 100644 --- a/staging/src/k8s.io/csi-api/Godeps/Godeps.json +++ b/staging/src/k8s.io/csi-api/Godeps/Godeps.json @@ -22,10 +22,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/protobuf/proto", "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" diff --git a/staging/src/k8s.io/kube-aggregator/BUILD b/staging/src/k8s.io/kube-aggregator/BUILD index 95cbea4fb90f0..99bf2540e290c 100644 --- a/staging/src/k8s.io/kube-aggregator/BUILD +++ b/staging/src/k8s.io/kube-aggregator/BUILD @@ -28,7 +28,7 @@ go_library( "//staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/cmd/server:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index f94fd9da8f559..1e9f56ee37406 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -142,10 +142,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/protobuf/proto", "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" diff --git a/staging/src/k8s.io/kube-aggregator/main.go b/staging/src/k8s.io/kube-aggregator/main.go index b0d76a545f8d3..659cb0753eddc 100644 --- a/staging/src/k8s.io/kube-aggregator/main.go +++ b/staging/src/k8s.io/kube-aggregator/main.go @@ -20,7 +20,7 @@ import ( "flag" "os" - "github.com/golang/glog" + "k8s.io/klog" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/util/logs" @@ -43,6 +43,6 @@ func main() { cmd := server.NewCommandStartAggregator(options, stopCh) cmd.Flags().AddGoFlagSet(flag.CommandLine) if err := cmd.Execute(); err != nil { - 
glog.Fatal(err) + klog.Fatal(err) } } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD index 9070e6a08942e..69af808983058 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/BUILD @@ -79,7 +79,7 @@ go_library( "//staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/controllers/status:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/registry/apiservice/rest:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go index 8eec79589cec3..f0b61a67c3064 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go @@ -20,7 +20,7 @@ import ( "fmt" "time" - "github.com/golang/glog" + "k8s.io/klog" apierrors "k8s.io/apimachinery/pkg/api/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -87,8 +87,8 @@ func (c *APIServiceRegistrationController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() - glog.Infof("Starting APIServiceRegistrationController") - defer glog.Infof("Shutting down APIServiceRegistrationController") + klog.Infof("Starting APIServiceRegistrationController") + defer klog.Infof("Shutting down APIServiceRegistrationController") if !controllers.WaitForCacheSync("APIServiceRegistrationController", stopCh, c.apiServiceSynced) { return @@ -129,7 +129,7 @@ func (c *APIServiceRegistrationController) processNextWorkItem() bool { func (c *APIServiceRegistrationController) enqueue(obj *apiregistration.APIService) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { - glog.Errorf("Couldn't get key for object %#v: %v", obj, err) + klog.Errorf("Couldn't get key for object %#v: %v", obj, err) return } @@ -138,13 +138,13 @@ func (c *APIServiceRegistrationController) enqueue(obj *apiregistration.APIServi func (c *APIServiceRegistrationController) addAPIService(obj interface{}) { castObj := obj.(*apiregistration.APIService) - glog.V(4).Infof("Adding %s", castObj.Name) + klog.V(4).Infof("Adding %s", castObj.Name) c.enqueue(castObj) } func (c *APIServiceRegistrationController) updateAPIService(obj, _ interface{}) { castObj := obj.(*apiregistration.APIService) - glog.V(4).Infof("Updating %s", castObj.Name) + klog.V(4).Infof("Updating %s", castObj.Name) c.enqueue(castObj) } @@ -153,15 +153,15 @@ func (c *APIServiceRegistrationController) deleteAPIService(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Couldn't get object from tombstone %#v", obj) + klog.Errorf("Couldn't get object from tombstone %#v", obj) return } castObj, ok = tombstone.Obj.(*apiregistration.APIService) if !ok { - glog.Errorf("Tombstone contained object that is not expected %#v", obj) + klog.Errorf("Tombstone contained object that is not expected %#v", obj) return } } - glog.V(4).Infof("Deleting %q", castObj.Name) + klog.V(4).Infof("Deleting %q", castObj.Name) c.enqueue(castObj) } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go index 
991d318e13a20..e8976f07eff0f 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go @@ -22,7 +22,7 @@ import ( "net/url" "sync/atomic" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/httpstream" @@ -112,7 +112,7 @@ func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { location.Scheme = "https" rloc, err := r.serviceResolver.ResolveEndpoint(handlingInfo.serviceNamespace, handlingInfo.serviceName) if err != nil { - glog.Errorf("error resolving %s/%s: %v", handlingInfo.serviceNamespace, handlingInfo.serviceName, err) + klog.Errorf("error resolving %s/%s: %v", handlingInfo.serviceNamespace, handlingInfo.serviceName, err) http.Error(w, "service unavailable", http.StatusServiceUnavailable) return } @@ -213,7 +213,7 @@ func (r *proxyHandler) updateAPIService(apiService *apiregistrationapi.APIServic } newInfo.proxyRoundTripper, newInfo.transportBuildingError = restclient.TransportFor(newInfo.restConfig) if newInfo.transportBuildingError != nil { - glog.Warning(newInfo.transportBuildingError.Error()) + klog.Warning(newInfo.transportBuildingError.Error()) } r.handlingInfo.Store(newInfo) } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/controllers/BUILD index f8a315bf3ff21..f5946513bd637 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/BUILD +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/BUILD @@ -13,7 +13,7 @@ go_library( deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD index cd0d786325798..db971bda52d37 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/BUILD @@ -38,7 +38,7 @@ go_library( "//staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/controllers:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go index c6051136b1250..3815c7d46353d 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" @@ -111,12 +111,12 @@ func NewAutoRegisterController(apiServiceInformer informers.APIServiceInformer, if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.V(2).Infof("Couldn't get object from tombstone %#v", obj) + klog.V(2).Infof("Couldn't get object from tombstone %#v", obj) 
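The delete handlers converted in these kube-aggregator controllers all share the same tombstone-unwrapping shape; only the logging call changes in this patch. A condensed sketch of that shared pattern, with exampleController and its enqueue method standing in for each controller's own (assumed imports: k8s.io/client-go/tools/cache, k8s.io/klog):

    // deleteHandler tolerates the informer delivering a tombstone when the
    // object's final state was not observed before it was deleted.
    func (c *exampleController) deleteHandler(obj interface{}) {
        svc, ok := obj.(*apiregistration.APIService)
        if !ok {
            tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
            if !ok {
                klog.Errorf("Couldn't get object from tombstone %#v", obj)
                return
            }
            svc, ok = tombstone.Obj.(*apiregistration.APIService)
            if !ok {
                klog.Errorf("Tombstone contained object that is not expected %#v", obj)
                return
            }
        }
        klog.V(4).Infof("Deleting %q", svc.Name)
        c.enqueue(svc)
    }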
return } cast, ok = tombstone.Obj.(*apiregistration.APIService) if !ok { - glog.V(2).Infof("Tombstone contained unexpected object: %#v", obj) + klog.V(2).Infof("Tombstone contained unexpected object: %#v", obj) return } } @@ -133,8 +133,8 @@ func (c *autoRegisterController) Run(threadiness int, stopCh <-chan struct{}) { // make sure the work queue is shutdown which will trigger workers to end defer c.queue.ShutDown() - glog.Infof("Starting autoregister controller") - defer glog.Infof("Shutting down autoregister controller") + klog.Infof("Starting autoregister controller") + defer klog.Infof("Shutting down autoregister controller") // wait for your secondary caches to fill before starting your work if !controllers.WaitForCacheSync("autoregister", stopCh, c.apiServiceSynced) { diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/cache.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/cache.go index bd366ec5b871b..5a53b90629fb8 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/cache.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/cache.go @@ -19,7 +19,7 @@ package controllers import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" @@ -29,13 +29,13 @@ import ( // indicating that the controller identified by controllerName is waiting for syncs, followed by // either a successful or failed sync. func WaitForCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) bool { - glog.Infof("Waiting for caches to sync for %s controller", controllerName) + klog.Infof("Waiting for caches to sync for %s controller", controllerName) if !cache.WaitForCacheSync(stopCh, cacheSyncs...) { utilruntime.HandleError(fmt.Errorf("Unable to sync caches for %s controller", controllerName)) return false } - glog.Infof("Caches are synced for %s controller", controllerName) + klog.Infof("Caches are synced for %s controller", controllerName) return true } diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/BUILD index a489947c87d6f..9a4bef259a2b7 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/BUILD +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/BUILD @@ -20,7 +20,7 @@ go_library( "//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library", "//vendor/github.com/emicklei/go-restful:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/aggregator:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/builder:go_default_library", "//vendor/k8s.io/kube-openapi/pkg/common:go_default_library", diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go index c38350ddbbefb..49d190e902083 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go @@ -22,7 +22,7 @@ import ( "time" "github.com/go-openapi/spec" - "github.com/golang/glog" + "k8s.io/klog" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -81,8 +81,8 @@ func (c *AggregationController) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer 
c.queue.ShutDown() - glog.Infof("Starting OpenAPI AggregationController") - defer glog.Infof("Shutting down OpenAPI AggregationController") + klog.Infof("Starting OpenAPI AggregationController") + defer klog.Infof("Shutting down OpenAPI AggregationController") go wait.Until(c.runWorker, time.Second, stopCh) @@ -102,7 +102,7 @@ func (c *AggregationController) processNextWorkItem() bool { return false } - glog.Infof("OpenAPI AggregationController: Processing item %s", key) + klog.Infof("OpenAPI AggregationController: Processing item %s", key) action, err := c.syncHandler(key.(string)) if err == nil { @@ -113,13 +113,13 @@ func (c *AggregationController) processNextWorkItem() bool { switch action { case syncRequeue: - glog.Infof("OpenAPI AggregationController: action for item %s: Requeue.", key) + klog.Infof("OpenAPI AggregationController: action for item %s: Requeue.", key) c.queue.AddAfter(key, successfulUpdateDelay) case syncRequeueRateLimited: - glog.Infof("OpenAPI AggregationController: action for item %s: Rate Limited Requeue.", key) + klog.Infof("OpenAPI AggregationController: action for item %s: Rate Limited Requeue.", key) c.queue.AddRateLimited(key) case syncNothing: - glog.Infof("OpenAPI AggregationController: action for item %s: Nothing (removed from the queue).", key) + klog.Infof("OpenAPI AggregationController: action for item %s: Nothing (removed from the queue).", key) } return true diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/BUILD b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/BUILD index 5dafa2e178dee..3810051bc50d9 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/BUILD +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/BUILD @@ -29,7 +29,7 @@ go_library( "//staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/client/listers/apiregistration/internalversion:go_default_library", "//staging/src/k8s.io/kube-aggregator/pkg/controllers:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go index 3268f0c701e21..66a4d0f2f75d2 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go @@ -23,7 +23,7 @@ import ( "net/url" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -280,8 +280,8 @@ func (c *AvailableConditionController) Run(threadiness int, stopCh <-chan struct defer utilruntime.HandleCrash() defer c.queue.ShutDown() - glog.Infof("Starting AvailableConditionController") - defer glog.Infof("Shutting down AvailableConditionController") + klog.Infof("Starting AvailableConditionController") + defer klog.Infof("Shutting down AvailableConditionController") if !controllers.WaitForCacheSync("AvailableConditionController", stopCh, c.apiServiceSynced, c.servicesSynced, c.endpointsSynced) { return @@ -322,7 +322,7 @@ func (c *AvailableConditionController) processNextWorkItem() bool { func (c *AvailableConditionController) enqueue(obj *apiregistration.APIService) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { - glog.Errorf("Couldn't get 
key for object %#v: %v", obj, err) + klog.Errorf("Couldn't get key for object %#v: %v", obj, err) return } @@ -331,13 +331,13 @@ func (c *AvailableConditionController) enqueue(obj *apiregistration.APIService) func (c *AvailableConditionController) addAPIService(obj interface{}) { castObj := obj.(*apiregistration.APIService) - glog.V(4).Infof("Adding %s", castObj.Name) + klog.V(4).Infof("Adding %s", castObj.Name) c.enqueue(castObj) } func (c *AvailableConditionController) updateAPIService(obj, _ interface{}) { castObj := obj.(*apiregistration.APIService) - glog.V(4).Infof("Updating %s", castObj.Name) + klog.V(4).Infof("Updating %s", castObj.Name) c.enqueue(castObj) } @@ -346,16 +346,16 @@ func (c *AvailableConditionController) deleteAPIService(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Couldn't get object from tombstone %#v", obj) + klog.Errorf("Couldn't get object from tombstone %#v", obj) return } castObj, ok = tombstone.Obj.(*apiregistration.APIService) if !ok { - glog.Errorf("Tombstone contained object that is not expected %#v", obj) + klog.Errorf("Tombstone contained object that is not expected %#v", obj) return } } - glog.V(4).Infof("Deleting %q", castObj.Name) + klog.V(4).Infof("Deleting %q", castObj.Name) c.enqueue(castObj) } @@ -400,12 +400,12 @@ func (c *AvailableConditionController) deleteService(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Couldn't get object from tombstone %#v", obj) + klog.Errorf("Couldn't get object from tombstone %#v", obj) return } castObj, ok = tombstone.Obj.(*v1.Service) if !ok { - glog.Errorf("Tombstone contained object that is not expected %#v", obj) + klog.Errorf("Tombstone contained object that is not expected %#v", obj) return } } @@ -431,12 +431,12 @@ func (c *AvailableConditionController) deleteEndpoints(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Errorf("Couldn't get object from tombstone %#v", obj) + klog.Errorf("Couldn't get object from tombstone %#v", obj) return } castObj, ok = tombstone.Obj.(*v1.Endpoints) if !ok { - glog.Errorf("Tombstone contained object that is not expected %#v", obj) + klog.Errorf("Tombstone contained object that is not expected %#v", obj) return } } diff --git a/staging/src/k8s.io/kube-controller-manager/Godeps/Godeps.json b/staging/src/k8s.io/kube-controller-manager/Godeps/Godeps.json index 276b04f99a566..3cd957766b9e0 100644 --- a/staging/src/k8s.io/kube-controller-manager/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-controller-manager/Godeps/Godeps.json @@ -14,10 +14,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" diff --git a/staging/src/k8s.io/kube-proxy/Godeps/Godeps.json b/staging/src/k8s.io/kube-proxy/Godeps/Godeps.json index 9ac52dd018042..bafb875bd5d56 100644 --- a/staging/src/k8s.io/kube-proxy/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-proxy/Godeps/Godeps.json @@ -14,10 +14,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": 
"44d81051d367757e1c7c6a5a86423ece9afcf63c" diff --git a/staging/src/k8s.io/kube-scheduler/Godeps/Godeps.json b/staging/src/k8s.io/kube-scheduler/Godeps/Godeps.json index 6bdc98a3127b2..473c4b735e3d3 100644 --- a/staging/src/k8s.io/kube-scheduler/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-scheduler/Godeps/Godeps.json @@ -14,10 +14,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" diff --git a/staging/src/k8s.io/kubelet/Godeps/Godeps.json b/staging/src/k8s.io/kubelet/Godeps/Godeps.json index 8929144b6d2d0..a44c90f50dc68 100644 --- a/staging/src/k8s.io/kubelet/Godeps/Godeps.json +++ b/staging/src/k8s.io/kubelet/Godeps/Godeps.json @@ -14,10 +14,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/google/gofuzz", "Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c" diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json index 429dd3dd90390..76c6bb1d467d7 100644 --- a/staging/src/k8s.io/metrics/Godeps/Godeps.json +++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json @@ -22,10 +22,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/protobuf/proto", "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" diff --git a/staging/src/k8s.io/sample-apiserver/BUILD b/staging/src/k8s.io/sample-apiserver/BUILD index 6e0d566284425..c2dcbc9644214 100644 --- a/staging/src/k8s.io/sample-apiserver/BUILD +++ b/staging/src/k8s.io/sample-apiserver/BUILD @@ -20,7 +20,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/server:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library", "//staging/src/k8s.io/sample-apiserver/pkg/cmd/server:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index 68eac2dcf994e..1c59b70b0c0c3 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -134,10 +134,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/protobuf/proto", "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" diff --git a/staging/src/k8s.io/sample-apiserver/main.go b/staging/src/k8s.io/sample-apiserver/main.go index 8e1769a36ff14..abede29b096eb 100644 --- a/staging/src/k8s.io/sample-apiserver/main.go +++ b/staging/src/k8s.io/sample-apiserver/main.go @@ -20,7 +20,7 @@ import ( "flag" "os" - "github.com/golang/glog" + "k8s.io/klog" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/util/logs" @@ -36,6 +36,6 @@ func main() { cmd := server.NewCommandStartWardleServer(options, stopCh) cmd.Flags().AddGoFlagSet(flag.CommandLine) if err := 
cmd.Execute(); err != nil { - glog.Fatal(err) + klog.Fatal(err) } } diff --git a/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json b/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json index d3fdaebbac283..22fae7f059603 100644 --- a/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json @@ -18,10 +18,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/protobuf/proto", "Rev": "b4deda0973fb4c70b50d226b1af49f3da59f5265" diff --git a/staging/src/k8s.io/sample-controller/BUILD b/staging/src/k8s.io/sample-controller/BUILD index 53d8671e0eae0..a95c03ffb1e10 100644 --- a/staging/src/k8s.io/sample-controller/BUILD +++ b/staging/src/k8s.io/sample-controller/BUILD @@ -34,7 +34,7 @@ go_library( "//staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1:go_default_library", "//staging/src/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1:go_default_library", "//staging/src/k8s.io/sample-controller/pkg/signals:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index 9bcc0cbefb682..9bf0f1f04d4df 100644 --- a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -22,10 +22,6 @@ "ImportPath": "github.com/gogo/protobuf/sortkeys", "Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02" }, - { - "ImportPath": "github.com/golang/glog", - "Rev": "44145f04b68cf362d9c4df2182967c2275eaefed" - }, { "ImportPath": "github.com/golang/groupcache/lru", "Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433" diff --git a/staging/src/k8s.io/sample-controller/controller.go b/staging/src/k8s.io/sample-controller/controller.go index ab8468a0a28d8..e9d1d8389ee65 100644 --- a/staging/src/k8s.io/sample-controller/controller.go +++ b/staging/src/k8s.io/sample-controller/controller.go @@ -20,7 +20,6 @@ import ( "fmt" "time" - "github.com/golang/glog" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -37,6 +36,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1" clientset "k8s.io/sample-controller/pkg/client/clientset/versioned" @@ -96,9 +96,9 @@ func NewController( // Add sample-controller types to the default Kubernetes Scheme so Events can be // logged for sample-controller types. 
utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme)) - glog.V(4).Info("Creating event broadcaster") + klog.V(4).Info("Creating event broadcaster") eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) @@ -113,7 +113,7 @@ func NewController( recorder: recorder, } - glog.Info("Setting up event handlers") + klog.Info("Setting up event handlers") // Set up an event handler for when Foo resources change fooInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: controller.enqueueFoo, @@ -154,23 +154,23 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { defer c.workqueue.ShutDown() // Start the informer factories to begin populating the informer caches - glog.Info("Starting Foo controller") + klog.Info("Starting Foo controller") // Wait for the caches to be synced before starting workers - glog.Info("Waiting for informer caches to sync") + klog.Info("Waiting for informer caches to sync") if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.foosSynced); !ok { return fmt.Errorf("failed to wait for caches to sync") } - glog.Info("Starting workers") + klog.Info("Starting workers") // Launch two workers to process Foo resources for i := 0; i < threadiness; i++ { go wait.Until(c.runWorker, time.Second, stopCh) } - glog.Info("Started workers") + klog.Info("Started workers") <-stopCh - glog.Info("Shutting down workers") + klog.Info("Shutting down workers") return nil } @@ -226,7 +226,7 @@ func (c *Controller) processNextWorkItem() bool { // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. c.workqueue.Forget(obj) - glog.Infof("Successfully synced '%s'", key) + klog.Infof("Successfully synced '%s'", key) return nil }(obj) @@ -297,7 +297,7 @@ func (c *Controller) syncHandler(key string) error { // number does not equal the current desired replicas on the Deployment, we // should update the Deployment resource. if foo.Spec.Replicas != nil && *foo.Spec.Replicas != *deployment.Spec.Replicas { - glog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas) + klog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas) deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Update(newDeployment(foo)) } @@ -365,9 +365,9 @@ func (c *Controller) handleObject(obj interface{}) { runtime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type")) return } - glog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName()) + klog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName()) } - glog.V(4).Infof("Processing object: %s", object.GetName()) + klog.V(4).Infof("Processing object: %s", object.GetName()) if ownerRef := metav1.GetControllerOf(object); ownerRef != nil { // If this object is not owned by a Foo, we should not do anything more // with it. 
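The StartLogging swap a few hunks up works because record.EventBroadcaster.StartLogging accepts any func(format string, args ...interface{}), and klog.Infof has exactly that shape, so it drops in for glog.Infof unchanged. Condensed from the hunk above (kubeclientset and controllerAgentName are the surrounding file's own names):

    eventBroadcaster := record.NewBroadcaster()
    // klog.Infof matches StartLogging's func(string, ...interface{}) parameter,
    // so recorded events are mirrored into the component's own log.
    eventBroadcaster.StartLogging(klog.Infof)
    eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
        Interface: kubeclientset.CoreV1().Events(""),
    })
    recorder := eventBroadcaster.NewRecorder(scheme.Scheme,
        corev1.EventSource{Component: controllerAgentName})

The recorder is then handed to the Controller struct, as in the surrounding file.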
@@ -377,7 +377,7 @@ func (c *Controller) handleObject(obj interface{}) { foo, err := c.foosLister.Foos(object.GetNamespace()).Get(ownerRef.Name) if err != nil { - glog.V(4).Infof("ignoring orphaned object '%s' of foo '%s'", object.GetSelfLink(), ownerRef.Name) + klog.V(4).Infof("ignoring orphaned object '%s' of foo '%s'", object.GetSelfLink(), ownerRef.Name) return } diff --git a/staging/src/k8s.io/sample-controller/main.go b/staging/src/k8s.io/sample-controller/main.go index c9d3193df99fa..d8167715602b0 100644 --- a/staging/src/k8s.io/sample-controller/main.go +++ b/staging/src/k8s.io/sample-controller/main.go @@ -20,10 +20,10 @@ import ( "flag" "time" - "github.com/golang/glog" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" @@ -45,17 +45,17 @@ func main() { cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) if err != nil { - glog.Fatalf("Error building kubeconfig: %s", err.Error()) + klog.Fatalf("Error building kubeconfig: %s", err.Error()) } kubeClient, err := kubernetes.NewForConfig(cfg) if err != nil { - glog.Fatalf("Error building kubernetes clientset: %s", err.Error()) + klog.Fatalf("Error building kubernetes clientset: %s", err.Error()) } exampleClient, err := clientset.NewForConfig(cfg) if err != nil { - glog.Fatalf("Error building example clientset: %s", err.Error()) + klog.Fatalf("Error building example clientset: %s", err.Error()) } kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30) @@ -71,7 +71,7 @@ func main() { exampleInformerFactory.Start(stopCh) if err = controller.Run(2, stopCh); err != nil { - glog.Fatalf("Error running controller: %s", err.Error()) + klog.Fatalf("Error running controller: %s", err.Error()) } } diff --git a/test/e2e/BUILD b/test/e2e/BUILD index a7f1864dc93cf..51b9455a58063 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -68,11 +68,11 @@ go_library( "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/manifest:go_default_library", "//test/utils:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/ginkgo/config:go_default_library", "//vendor/github.com/onsi/ginkgo/reporters:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e/autoscaling/BUILD b/test/e2e/autoscaling/BUILD index df5694b1e23d2..3cc66811f4218 100644 --- a/test/e2e/autoscaling/BUILD +++ b/test/e2e/autoscaling/BUILD @@ -44,11 +44,11 @@ go_library( "//test/e2e/scheduling:go_default_library", "//test/utils:go_default_library", "//test/utils/image:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/golang.org/x/oauth2/google:go_default_library", "//vendor/google.golang.org/api/monitoring/v3:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/test/e2e/autoscaling/cluster_autoscaler_scalability.go index 5ac3f01fb8c92..8832cf503f650 100644 --- a/test/e2e/autoscaling/cluster_autoscaler_scalability.go +++ b/test/e2e/autoscaling/cluster_autoscaler_scalability.go @@ -33,9 +33,9 @@ 
import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - "github.com/golang/glog" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "k8s.io/klog" ) const ( @@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun } break } - glog.Infof("Made nodes schedulable again in %v", time.Since(s).String()) + klog.Infof("Made nodes schedulable again in %v", time.Since(s).String()) }) It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() { @@ -170,7 +170,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun replicas1 := additionalNodes1 * replicasPerNode replicas2 := additionalNodes2 * replicasPerNode - glog.Infof("cores per node: %v", coresPerNode) + klog.Infof("cores per node: %v", coresPerNode) // saturate cluster initialReplicas := nodeCount @@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun defer reservationCleanup() framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c)) - glog.Infof("Reserved successfully") + klog.Infof("Reserved successfully") // configure pending pods & expected scale up #1 rcConfig := reserveMemoryRCConfig(f, "extra-pod-1", replicas1, additionalNodes1*perNodeReservation, largeScaleUpTimeout) @@ -191,7 +191,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun testCleanup1 := simpleScaleUpTestWithTolerance(f, config, tolerateUnreadyNodes, tolerateUnreadyPods) defer testCleanup1() - glog.Infof("Scaled up once") + klog.Infof("Scaled up once") // configure pending pods & expected scale up #2 rcConfig2 := reserveMemoryRCConfig(f, "extra-pod-2", replicas2, additionalNodes2*perNodeReservation, largeScaleUpTimeout) @@ -204,7 +204,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun testCleanup2 := simpleScaleUpTestWithTolerance(f, config2, tolerateUnreadyNodes, tolerateUnreadyPods) defer testCleanup2() - glog.Infof("Scaled up twice") + klog.Infof("Scaled up twice") }) It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() { @@ -327,7 +327,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun By("Checking if the number of nodes is as expected") nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - glog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes) + klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes) Expect(len(nodes.Items)).Should(Equal(totalNodes)) }) @@ -390,7 +390,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC } else { framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout)) } - glog.Infof("cluster is increased") + klog.Infof("cluster is increased") if tolerateMissingPodCount > 0 { framework.ExpectNoError(waitForCaPodsReadyInNamespace(f, f.ClientSet, tolerateMissingPodCount)) } else { @@ -527,5 +527,5 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist func timeTrack(start time.Time, name string) { elapsed := time.Since(start) - glog.Infof("%s took %s", name, elapsed) + klog.Infof("%s took %s", name, elapsed) } diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index e9e0e542f083e..c84d515a02bbb 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -48,9 +48,9 @@ 
import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - "github.com/golang/glog" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "k8s.io/klog" ) const ( @@ -161,7 +161,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } break } - glog.Infof("Made nodes schedulable again in %v", time.Since(s).String()) + klog.Infof("Made nodes schedulable again in %v", time.Since(s).String()) }) It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() { @@ -352,7 +352,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { status, err = getScaleUpStatus(c) framework.ExpectNoError(err) if status.target != target { - glog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target) + klog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target) } Expect(status.timestamp.Add(freshStatusLimit).Before(time.Now())).Should(Equal(false)) Expect(status.status).Should(Equal(caNoScaleUpStatus)) @@ -372,7 +372,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { // We wait for nodes to become schedulable to make sure the new nodes // will be returned by getPoolNodes below. framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout)) - glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).") + klog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).") By("Getting memory available on new nodes, so we can account for it when creating RC") nodes := getPoolNodes(f, extraPoolName) @@ -508,7 +508,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels)) defer func() { framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod") - glog.Infof("RC and pods not using volume deleted") + klog.Infof("RC and pods not using volume deleted") }() By("waiting for all pods before triggering scale up") @@ -582,14 +582,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { newNodesSet.Delete(nodes...) 
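The elapsed-time log lines in these scalability tests come from the small timeTrack helper shown above in cluster_autoscaler_scalability.go. Pairing it with defer, as sketched here, is one idiomatic way to use it; the wrapping function is illustrative, not from this patch (assumed imports: time, k8s.io/klog):

    func timeTrack(start time.Time, name string) {
        elapsed := time.Since(start)
        klog.Infof("%s took %s", name, elapsed)
    }

    func makeNodesSchedulableAgain() {
        // defer evaluates time.Now() immediately and logs when the function returns.
        defer timeTrack(time.Now(), "Making nodes schedulable again")
        // ... restore taints and labels here ...
    }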
if len(newNodesSet) > 1 { By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet)) - glog.Infof("Usually only 1 new node is expected, investigating") - glog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json")) + klog.Infof("Usually only 1 new node is expected, investigating") + klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json")) if output, err := exec.Command("gcloud", "compute", "instances", "list", "--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil { - glog.Infof("Gcloud compute instances list: %s", output) + klog.Infof("Gcloud compute instances list: %s", output) } else { - glog.Errorf("Failed to get instances list: %v", err) + klog.Errorf("Failed to get instances list: %v", err) } for newNode := range newNodesSet { @@ -597,9 +597,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { newNode, "--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil { - glog.Infof("Gcloud compute instances describe: %s", output) + klog.Infof("Gcloud compute instances describe: %s", output) } else { - glog.Errorf("Failed to get instances describe: %v", err) + klog.Errorf("Failed to get instances describe: %v", err) } } @@ -614,7 +614,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { if err == nil && node != nil { registeredNodes.Insert(nodeName) } else { - glog.Errorf("Failed to get node %v: %v", nodeName, err) + klog.Errorf("Failed to get node %v: %v", nodeName, err) } } By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List())) @@ -976,7 +976,7 @@ func installNvidiaDriversDaemonSet() { } func execCmd(args ...string) *exec.Cmd { - glog.Infof("Executing: %s", strings.Join(args, " ")) + klog.Infof("Executing: %s", strings.Join(args, " ")) return exec.Command(args[0], args[1:]...) 
} @@ -1108,7 +1108,7 @@ func isRegionalCluster() bool { } func enableAutoscaler(nodePool string, minCount, maxCount int) error { - glog.Infof("Using gcloud to enable autoscaling for pool %s", nodePool) + klog.Infof("Using gcloud to enable autoscaling for pool %s", nodePool) args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster, "--enable-autoscaling", @@ -1118,10 +1118,10 @@ func enableAutoscaler(nodePool string, minCount, maxCount int) error { output, err := execCmd(getGcloudCommand(args)...).CombinedOutput() if err != nil { - glog.Errorf("Failed config update result: %s", output) + klog.Errorf("Failed config update result: %s", output) return fmt.Errorf("Failed to enable autoscaling: %v", err) } - glog.Infof("Config update result: %s", output) + klog.Infof("Config update result: %s", output) var finalErr error for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) { @@ -1135,17 +1135,17 @@ func enableAutoscaler(nodePool string, minCount, maxCount int) error { } func disableAutoscaler(nodePool string, minCount, maxCount int) error { - glog.Infof("Using gcloud to disable autoscaling for pool %s", nodePool) + klog.Infof("Using gcloud to disable autoscaling for pool %s", nodePool) args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster, "--no-enable-autoscaling", "--node-pool=" + nodePool} output, err := execCmd(getGcloudCommand(args)...).CombinedOutput() if err != nil { - glog.Errorf("Failed config update result: %s", output) + klog.Errorf("Failed config update result: %s", output) return fmt.Errorf("Failed to disable autoscaling: %v", err) } - glog.Infof("Config update result: %s", output) + klog.Infof("Config update result: %s", output) var finalErr error for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) { @@ -1183,7 +1183,7 @@ func addNodePool(name string, machineType string, numNodes int) { "--num-nodes=" + strconv.Itoa(numNodes), "--cluster=" + framework.TestContext.CloudConfig.Cluster} output, err := execCmd(getGcloudCommand(args)...).CombinedOutput() - glog.Infof("Creating node-pool %s: %s", name, output) + klog.Infof("Creating node-pool %s: %s", name, output) framework.ExpectNoError(err, string(output)) } @@ -1193,12 +1193,12 @@ func addGpuNodePool(name string, gpuType string, gpuCount int, numNodes int) { "--num-nodes=" + strconv.Itoa(numNodes), "--cluster=" + framework.TestContext.CloudConfig.Cluster} output, err := execCmd(getGcloudCommand(args)...).CombinedOutput() - glog.Infof("Creating node-pool %s: %s", name, output) + klog.Infof("Creating node-pool %s: %s", name, output) framework.ExpectNoError(err, string(output)) } func deleteNodePool(name string) { - glog.Infof("Deleting node pool %s", name) + klog.Infof("Deleting node pool %s", name) args := []string{"container", "node-pools", "delete", name, "--quiet", "--cluster=" + framework.TestContext.CloudConfig.Cluster} err := wait.ExponentialBackoff( @@ -1206,10 +1206,10 @@ func deleteNodePool(name string) { func() (bool, error) { output, err := execCmd(getGcloudCommand(args)...).CombinedOutput() if err != nil { - glog.Warningf("Error deleting nodegroup - error:%v, output: %s", err, output) + klog.Warningf("Error deleting nodegroup - error:%v, output: %s", err, output) return false, nil } - glog.Infof("Node-pool deletion output: %s", output) + klog.Infof("Node-pool deletion output: %s", output) return true, nil }) framework.ExpectNoError(err) 
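Two patterns from the hunks above are worth seeing together: execCmd centralizes "log the command line, then build the exec.Cmd", and deleteNodePool wraps it in wait.ExponentialBackoff, where returning (false, nil) from the condition means retry after the next backoff step and (true, nil) means done. A condensed sketch of the two pieces, with an illustrative wrapper name and backoff values (assumed imports: os/exec, strings, time, k8s.io/apimachinery/pkg/util/wait, k8s.io/klog):

    func execCmd(args ...string) *exec.Cmd {
        klog.Infof("Executing: %s", strings.Join(args, " "))
        return exec.Command(args[0], args[1:]...)
    }

    func deleteNodePoolSketch(args []string) error {
        backoff := wait.Backoff{
            Duration: time.Minute, // delay between attempts
            Factor:   1.5,         // grow the delay by this factor each step
            Steps:    3,           // attempts before giving up
        }
        return wait.ExponentialBackoff(backoff, func() (bool, error) {
            output, err := execCmd(args...).CombinedOutput()
            if err != nil {
                klog.Warningf("Error deleting nodegroup - error:%v, output: %s", err, output)
                return false, nil // transient failure: retry after the next step
            }
            klog.Infof("Node-pool deletion output: %s", output)
            return true, nil // success: stop retrying
        })
    }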
@@ -1235,7 +1235,7 @@ func getPoolInitialSize(poolName string) int { "--cluster=" + framework.TestContext.CloudConfig.Cluster, "--format=value(initialNodeCount)"} output, err := execCmd(getGcloudCommand(args)...).CombinedOutput() - glog.Infof("Node-pool initial size: %s", output) + klog.Infof("Node-pool initial size: %s", output) framework.ExpectNoError(err, string(output)) fields := strings.Fields(string(output)) Expect(len(fields)).Should(Equal(1)) @@ -1302,7 +1302,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e for start := time.Now(); time.Since(start) < rcCreationRetryTimeout; time.Sleep(rcCreationRetryDelay) { err := framework.RunRC(*config) if err != nil && strings.Contains(err.Error(), "Error creating replication controller") { - glog.Warningf("Failed to create memory reservation: %v", err) + klog.Warningf("Failed to create memory reservation: %v", err) continue } if expectRunning { @@ -1346,7 +1346,7 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { - glog.Warningf("Failed to list nodes: %v", err) + klog.Warningf("Failed to list nodes: %v", err) continue } numNodes := len(nodes.Items) @@ -1358,10 +1358,10 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) numReady := len(nodes.Items) if numNodes == numReady+expectedUnready && sizeFunc(numNodes) { - glog.Infof("Cluster has reached the desired size") + klog.Infof("Cluster has reached the desired size") return nil } - glog.Infof("Waiting for cluster with func, current size %d, not ready nodes %d", numNodes, numNodes-numReady) + klog.Infof("Waiting for cluster with func, current size %d, not ready nodes %d", numNodes, numNodes-numReady) } return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout) } @@ -1384,21 +1384,21 @@ func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface // Failed pods in this context generally mean that they have been // double scheduled onto a node, but then failed a constraint check. if pod.Status.Phase == v1.PodFailed { - glog.Warningf("Pod has failed: %v", pod) + klog.Warningf("Pod has failed: %v", pod) } if !ready && pod.Status.Phase != v1.PodFailed { notready = append(notready, pod.Name) } } if len(notready) <= tolerateUnreadyCount { - glog.Infof("sufficient number of pods ready. Tolerating %d unready", tolerateUnreadyCount) + klog.Infof("sufficient number of pods ready. Tolerating %d unready", tolerateUnreadyCount) return nil } - glog.Infof("Too many pods are not ready yet: %v", notready) + klog.Infof("Too many pods are not ready yet: %v", notready) } - glog.Info("Timeout on waiting for pods being ready") - glog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces")) - glog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json")) + klog.Info("Timeout on waiting for pods being ready") + klog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces")) + klog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json")) // Some pods are still not running. 
return fmt.Errorf("Too many pods are still not running: %v", notready) @@ -1413,11 +1413,11 @@ func getAnyNode(c clientset.Interface) *v1.Node { "spec.unschedulable": "false", }.AsSelector().String()}) if err != nil { - glog.Errorf("Failed to get node list: %v", err) + klog.Errorf("Failed to get node list: %v", err) return nil } if len(nodes.Items) == 0 { - glog.Errorf("No nodes") + klog.Errorf("No nodes") return nil } return &nodes.Items[0] @@ -1476,7 +1476,7 @@ func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error { if !errors.IsConflict(err) { return err } - glog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j) + klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j) } return fmt.Errorf("Failed to taint node in allowed number of retries") } @@ -1517,7 +1517,7 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd if !errors.IsConflict(err) { return err } - glog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j) + klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j) } return fmt.Errorf("Failed to remove taint from node in allowed number of retries") } @@ -1696,7 +1696,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa if !errors.IsConflict(err) { return err } - glog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j) + klog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j) rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{}) if err != nil { return err @@ -1747,7 +1747,7 @@ func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[strin } resized := setMigSizes(newSizes) if resized { - glog.Warning("Unexpected node group size while waiting for cluster resize. Setting size to target again.") + klog.Warning("Unexpected node group size while waiting for cluster resize. 
Setting size to target again.") } return false } @@ -1852,7 +1852,7 @@ func getScaleUpStatus(c clientset.Interface) (*scaleUpStatus, error) { } result.target += newTarget } - glog.Infof("Cluster-Autoscaler scale-up status: %v (%v, %v)", result.status, result.ready, result.target) + klog.Infof("Cluster-Autoscaler scale-up status: %v (%v, %v)", result.status, result.ready, result.target) return &result, nil } @@ -1890,7 +1890,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) { err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{}) if err != nil { // log error, but attempt to remove other pdbs - glog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err) + klog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err) finalErr = err } } @@ -1943,7 +1943,7 @@ func createPriorityClasses(f *framework.Framework) func() { for className, priority := range priorityClasses { _, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority}) if err != nil { - glog.Errorf("Error creating priority class: %v", err) + klog.Errorf("Error creating priority class: %v", err) } Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true)) } @@ -1952,7 +1952,7 @@ func createPriorityClasses(f *framework.Framework) func() { for className := range priorityClasses { err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(className, nil) if err != nil { - glog.Errorf("Error deleting priority class: %v", err) + klog.Errorf("Error deleting priority class: %v", err) } } } diff --git a/test/e2e/common/BUILD b/test/e2e/common/BUILD index 87fc58f4c1a01..040e119a2162c 100644 --- a/test/e2e/common/BUILD +++ b/test/e2e/common/BUILD @@ -80,11 +80,11 @@ go_library( "//test/e2e/framework:go_default_library", "//test/utils:go_default_library", "//test/utils/image:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/github.com/onsi/gomega/types:go_default_library", "//vendor/golang.org/x/net/websocket:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e/common/kubelet_etc_hosts.go b/test/e2e/common/kubelet_etc_hosts.go index d4e267b7b5ad6..64d4f6c2b97e8 100644 --- a/test/e2e/common/kubelet_etc_hosts.go +++ b/test/e2e/common/kubelet_etc_hosts.go @@ -20,10 +20,10 @@ import ( "strings" "time" - "github.com/golang/glog" . 
"github.com/onsi/ginkgo" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -126,7 +126,7 @@ func assertManagedStatus( } } - glog.Warningf( + klog.Warningf( "For pod: %s, name: %s, expected %t, (/etc/hosts was %q), (/etc/hosts-original was %q), retryCount: %d", podName, name, expectedIsManaged, etcHostsContent, etcHostsOriginalContent, retryCount) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 1b06acf97c125..d05483d20c431 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -24,11 +24,11 @@ import ( "testing" "time" - "github.com/golang/glog" "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/reporters" "github.com/onsi/gomega" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimeutils "k8s.io/apimachinery/pkg/util/runtime" @@ -74,7 +74,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { c, err := framework.LoadClientset() if err != nil { - glog.Fatal("Error loading client: ", err) + klog.Fatal("Error loading client: ", err) } // Delete any namespaces except those created by the system. This ensures no @@ -89,7 +89,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { if err != nil { framework.Failf("Error deleting orphaned namespaces: %v", err) } - glog.Infof("Waiting for deletion of the following namespaces: %v", deleted) + klog.Infof("Waiting for deletion of the following namespaces: %v", deleted) if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil { framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err) } @@ -216,12 +216,12 @@ func RunE2ETests(t *testing.T) { // TODO: we should probably only be trying to create this directory once // rather than once-per-Ginkgo-node. 
if err := os.MkdirAll(framework.TestContext.ReportDir, 0755); err != nil { - glog.Errorf("Failed creating report directory: %v", err) + klog.Errorf("Failed creating report directory: %v", err) } else { r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode)))) } } - glog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode) + klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode) ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r) } diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index 002477db7d648..6a1be3930038f 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -134,7 +134,6 @@ go_library( "//test/e2e/perftype:go_default_library", "//test/utils:go_default_library", "//test/utils/image:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/ginkgo/config:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", @@ -144,6 +143,7 @@ go_library( "//vendor/github.com/prometheus/common/model:go_default_library", "//vendor/golang.org/x/crypto/ssh:go_default_library", "//vendor/golang.org/x/net/websocket:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], ) diff --git a/test/e2e/framework/authorizer_util.go b/test/e2e/framework/authorizer_util.go index c77d98e893ee6..cd35526700256 100644 --- a/test/e2e/framework/authorizer_util.go +++ b/test/e2e/framework/authorizer_util.go @@ -17,7 +17,7 @@ limitations under the License. package framework import ( - "github.com/golang/glog" + "k8s.io/klog" "sync" "time" @@ -62,7 +62,7 @@ func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviews // GKE doesn't enable the SAR endpoint. Without this endpoint, we cannot determine if the policy engine // has adjusted as expected. In this case, simply wait one second and hope it's up to date if apierrors.IsNotFound(err) { - glog.Info("SubjectAccessReview endpoint is missing") + klog.Info("SubjectAccessReview endpoint is missing") time.Sleep(1 * time.Second) return true, nil } @@ -94,7 +94,7 @@ func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns st // if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled. if err != nil { - glog.Errorf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects) + klog.Errorf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects) } } @@ -124,7 +124,7 @@ func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string // if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled. if err != nil { - glog.Errorf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects) + klog.Errorf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects) } } diff --git a/test/e2e/framework/gpu_util.go b/test/e2e/framework/gpu_util.go index 80da1aff16c79..ff652e8d353ee 100644 --- a/test/e2e/framework/gpu_util.go +++ b/test/e2e/framework/gpu_util.go @@ -21,8 +21,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" - "github.com/golang/glog" . 
"github.com/onsi/gomega" + "k8s.io/klog" ) const ( @@ -71,15 +71,15 @@ func NVIDIADevicePlugin() *v1.Pod { func GetGPUDevicePluginImage() string { ds, err := DsFromManifest(GPUDevicePluginDSYAML) if err != nil { - glog.Errorf("Failed to parse the device plugin image: %v", err) + klog.Errorf("Failed to parse the device plugin image: %v", err) return "" } if ds == nil { - glog.Errorf("Failed to parse the device plugin image: the extracted DaemonSet is nil") + klog.Errorf("Failed to parse the device plugin image: the extracted DaemonSet is nil") return "" } if len(ds.Spec.Template.Spec.Containers) < 1 { - glog.Errorf("Failed to parse the device plugin image: cannot extract the container from YAML") + klog.Errorf("Failed to parse the device plugin image: cannot extract the container from YAML") return "" } return ds.Spec.Template.Spec.Containers[0].Image diff --git a/test/e2e/framework/ingress/BUILD b/test/e2e/framework/ingress/BUILD index c5c46a7601117..f991757f1c16c 100644 --- a/test/e2e/framework/ingress/BUILD +++ b/test/e2e/framework/ingress/BUILD @@ -20,10 +20,10 @@ go_library( "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/manifest:go_default_library", "//test/utils:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/google.golang.org/api/compute/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index 219b718dd2364..0cb68dc4e9a6a 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -34,7 +34,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" compute "google.golang.org/api/compute/v1" "k8s.io/api/core/v1" @@ -115,11 +115,11 @@ type TestLogger interface { type GLogger struct{} func (l *GLogger) Infof(format string, args ...interface{}) { - glog.Infof(format, args...) + klog.Infof(format, args...) } func (l *GLogger) Errorf(format string, args ...interface{}) { - glog.Errorf(format, args...) + klog.Errorf(format, args...) } type E2ELogger struct{} diff --git a/test/e2e/framework/metrics/BUILD b/test/e2e/framework/metrics/BUILD index 429acdc538d0d..8bf48c6a93b38 100644 --- a/test/e2e/framework/metrics/BUILD +++ b/test/e2e/framework/metrics/BUILD @@ -24,9 +24,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/prometheus/common/expfmt:go_default_library", "//vendor/github.com/prometheus/common/model:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e/framework/metrics/generic_metrics.go b/test/e2e/framework/metrics/generic_metrics.go index 7355877905289..7257c9120f74d 100644 --- a/test/e2e/framework/metrics/generic_metrics.go +++ b/test/e2e/framework/metrics/generic_metrics.go @@ -22,9 +22,9 @@ import ( "reflect" "strings" - "github.com/golang/glog" "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" + "k8s.io/klog" ) type Metrics map[string]model.Samples @@ -88,7 +88,7 @@ func parseMetrics(data string, output *Metrics) error { // Expected loop termination condition. return nil } - glog.Warningf("Invalid Decode. 
Skipping.") + klog.Warningf("Invalid Decode. Skipping.") continue } for _, metric := range v { diff --git a/test/e2e/framework/metrics/metrics_grabber.go b/test/e2e/framework/metrics/metrics_grabber.go index a9851b2cc8c9a..667ecc29d6501 100644 --- a/test/e2e/framework/metrics/metrics_grabber.go +++ b/test/e2e/framework/metrics/metrics_grabber.go @@ -27,7 +27,7 @@ import ( "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/util/system" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -62,7 +62,7 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b return nil, err } if len(nodeList.Items) < 1 { - glog.Warning("Can't find any Nodes in the API server to grab metrics from") + klog.Warning("Can't find any Nodes in the API server to grab metrics from") } for _, node := range nodeList.Items { if system.IsMasterNode(node.Name) { @@ -76,9 +76,9 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b controllers = false clusterAutoscaler = ec != nil if clusterAutoscaler { - glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager is disabled.") + klog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager is disabled.") } else { - glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.") + klog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.") } } diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index be50ec1c6bee5..7ac659465a64e 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -23,13 +23,13 @@ import ( "os" "time" - "github.com/golang/glog" "github.com/onsi/ginkgo/config" "github.com/pkg/errors" utilflag "k8s.io/apiserver/pkg/util/flag" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/klog" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" ) @@ -355,11 +355,11 @@ func AfterReadingAllFlags(t *TestContextType) { kubeConfig := createKubeConfig(clusterConfig) clientcmd.WriteToFile(*kubeConfig, tempFile.Name()) t.KubeConfig = tempFile.Name() - glog.Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name()) + klog.Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name()) } } if len(t.KubeConfig) == 0 { - glog.Warningf("Unable to find in-cluster config, using default host : %s", defaultHost) + klog.Warningf("Unable to find in-cluster config, using default host : %s", defaultHost) t.Host = defaultHost } } @@ -382,7 +382,7 @@ func AfterReadingAllFlags(t *TestContextType) { // TODO (https://github.com/kubernetes/kubernetes/issues/70200): // - remove the fallback for unknown providers // - proper error message instead of Failf (which panics) - glog.Warningf("Unknown provider %q, proceeding as for --provider=skeleton.", TestContext.Provider) + klog.Warningf("Unknown provider %q, proceeding as for --provider=skeleton.", TestContext.Provider) TestContext.CloudConfig.Provider, err = SetupProviderConfig("skeleton") if err != nil { Failf("Failed to setup fallback skeleton provider config: %v", err) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index d9e64d7c4e9a9..a6e3aa207e923 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -41,9 +41,9 @@ 
import ( "text/tabwriter" "time" - "github.com/golang/glog" "golang.org/x/crypto/ssh" "golang.org/x/net/websocket" + "k8s.io/klog" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -4865,7 +4865,7 @@ func ListNamespaceEvents(c clientset.Interface, ns string) error { return err } for _, event := range ls.Items { - glog.Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) + klog.Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message) } return nil } @@ -4904,7 +4904,7 @@ func (p *E2ETestNodePreparer) PrepareNodes() error { sum += v.Count for ; index < sum; index++ { if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil { - glog.Errorf("Aborting node preparation: %v", err) + klog.Errorf("Aborting node preparation: %v", err) return err } p.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy @@ -4922,7 +4922,7 @@ func (p *E2ETestNodePreparer) CleanupNodes() error { strategy, found := p.nodeToAppliedStrategy[name] if found { if err = testutils.DoCleanupNode(p.client, name, strategy); err != nil { - glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err) + klog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err) encounteredError = err } } diff --git a/test/e2e/generated/BUILD b/test/e2e/generated/BUILD index 250b668226ffa..c5d8f822a811f 100644 --- a/test/e2e/generated/BUILD +++ b/test/e2e/generated/BUILD @@ -15,7 +15,7 @@ go_library( ], importpath = "k8s.io/kubernetes/test/e2e/generated", deps = [ - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e/generated/gobindata_util.go b/test/e2e/generated/gobindata_util.go index 62e031d980204..1bb2ed553d658 100644 --- a/test/e2e/generated/gobindata_util.go +++ b/test/e2e/generated/gobindata_util.go @@ -16,7 +16,7 @@ limitations under the License. package generated -import "github.com/golang/glog" +import "k8s.io/klog" /* ReadOrDie reads a file from gobindata. @@ -27,8 +27,8 @@ func ReadOrDie(filePath string) []byte { fileBytes, err := Asset(filePath) if err != nil { gobindataMsg := "An error occurred, possibly gobindata doesn't know about the file you're opening. For questions on maintaining gobindata, contact the sig-testing group." - glog.Infof("Available gobindata files: %v ", AssetNames()) - glog.Fatalf("Failed opening %v , with error %v. %v.", filePath, err, gobindataMsg) + klog.Infof("Available gobindata files: %v ", AssetNames()) + klog.Fatalf("Failed opening %v , with error %v. 
%v.", filePath, err, gobindataMsg) } return fileBytes } diff --git a/test/e2e/network/scale/localrun/BUILD b/test/e2e/network/scale/localrun/BUILD index e2bffae875741..5066276eee644 100644 --- a/test/e2e/network/scale/localrun/BUILD +++ b/test/e2e/network/scale/localrun/BUILD @@ -15,7 +15,7 @@ go_library( "//test/e2e/framework/ingress:go_default_library", "//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/network/scale:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e/network/scale/localrun/ingress_scale.go b/test/e2e/network/scale/localrun/ingress_scale.go index 0fc38675eb5bb..2cb237ec51bfa 100644 --- a/test/e2e/network/scale/localrun/ingress_scale.go +++ b/test/e2e/network/scale/localrun/ingress_scale.go @@ -24,7 +24,7 @@ import ( "sort" "strconv" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -91,19 +91,19 @@ func main() { registerFlags() flag.Parse() if err := verifyFlags(); err != nil { - glog.Errorf("Failed to verify flags: %v", err) + klog.Errorf("Failed to verify flags: %v", err) os.Exit(1) } // Initializing a k8s client. config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) if err != nil { - glog.Errorf("Failed to build kubeconfig: %v", err) + klog.Errorf("Failed to build kubeconfig: %v", err) os.Exit(1) } cs, err := clientset.NewForConfig(config) if err != nil { - glog.Errorf("Failed to create kubeclient: %v", err) + klog.Errorf("Failed to create kubeclient: %v", err) os.Exit(1) } @@ -116,7 +116,7 @@ func main() { AlphaFeatureGate: gceAlphaFeatureGate, }) if err != nil { - glog.Errorf("Error building GCE provider: %v", err) + klog.Errorf("Error building GCE provider: %v", err) os.Exit(1) } cloudConfig.Provider = gce.NewProvider(gceCloud) @@ -124,7 +124,7 @@ func main() { testSuccessFlag := true defer func() { if !testSuccessFlag { - glog.Errorf("Ingress scale test failed.") + klog.Errorf("Ingress scale test failed.") os.Exit(1) } }() @@ -134,17 +134,17 @@ func main() { Name: testNamespace, }, } - glog.Infof("Creating namespace %s...", ns.Name) + klog.Infof("Creating namespace %s...", ns.Name) if _, err := cs.CoreV1().Namespaces().Create(ns); err != nil { - glog.Errorf("Failed to create namespace %s: %v", ns.Name, err) + klog.Errorf("Failed to create namespace %s: %v", ns.Name, err) testSuccessFlag = false return } if cleanup { defer func() { - glog.Infof("Deleting namespace %s...", ns.Name) + klog.Infof("Deleting namespace %s...", ns.Name) if err := cs.CoreV1().Namespaces().Delete(ns.Name, nil); err != nil { - glog.Errorf("Failed to delete namespace %s: %v", ns.Name, err) + klog.Errorf("Failed to delete namespace %s: %v", ns.Name, err) testSuccessFlag = false } }() @@ -164,20 +164,20 @@ func main() { if cleanup { defer func() { if errs := f.CleanupScaleTest(); len(errs) != 0 { - glog.Errorf("Failed to cleanup scale test: %v", errs) + klog.Errorf("Failed to cleanup scale test: %v", errs) testSuccessFlag = false } }() } err = f.PrepareScaleTest() if err != nil { - glog.Errorf("Failed to prepare scale test: %v", err) + klog.Errorf("Failed to prepare scale test: %v", err) testSuccessFlag = false return } if errs := f.RunScaleTest(); len(errs) != 0 { - glog.Errorf("Failed while running scale test: %v", errs) + klog.Errorf("Failed while running scale test: %v", errs) testSuccessFlag = false } } diff --git a/test/e2e/storage/vsphere/BUILD b/test/e2e/storage/vsphere/BUILD index 
8d0e22c106dee..3c23b80976ff9 100644 --- a/test/e2e/storage/vsphere/BUILD +++ b/test/e2e/storage/vsphere/BUILD @@ -53,7 +53,6 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/storage/utils:go_default_library", "//test/utils/image:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/github.com/vmware/govmomi:go_default_library", @@ -65,6 +64,7 @@ go_library( "//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library", "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e/storage/vsphere/connection.go b/test/e2e/storage/vsphere/connection.go index 60e8e40ac0982..bb212ae50f804 100644 --- a/test/e2e/storage/vsphere/connection.go +++ b/test/e2e/storage/vsphere/connection.go @@ -22,10 +22,10 @@ import ( neturl "net/url" "sync" - "github.com/golang/glog" "github.com/vmware/govmomi" "github.com/vmware/govmomi/session" "github.com/vmware/govmomi/vim25" + "k8s.io/klog" ) const ( @@ -46,7 +46,7 @@ func Connect(ctx context.Context, vs *VSphere) error { if vs.Client == nil { vs.Client, err = NewClient(ctx, vs) if err != nil { - glog.Errorf("Failed to create govmomi client. err: %+v", err) + klog.Errorf("Failed to create govmomi client. err: %+v", err) return err } return nil @@ -54,17 +54,17 @@ func Connect(ctx context.Context, vs *VSphere) error { manager := session.NewManager(vs.Client.Client) userSession, err := manager.UserSession(ctx) if err != nil { - glog.Errorf("Error while obtaining user session. err: %+v", err) + klog.Errorf("Error while obtaining user session. err: %+v", err) return err } if userSession != nil { return nil } - glog.Warningf("Creating new client session since the existing session is not valid or not authenticated") + klog.Warningf("Creating new client session since the existing session is not valid or not authenticated") vs.Client.Logout(ctx) vs.Client, err = NewClient(ctx, vs) if err != nil { - glog.Errorf("Failed to create govmomi client. err: %+v", err) + klog.Errorf("Failed to create govmomi client. err: %+v", err) return err } return nil @@ -74,13 +74,13 @@ func Connect(ctx context.Context, vs *VSphere) error { func NewClient(ctx context.Context, vs *VSphere) (*govmomi.Client, error) { url, err := neturl.Parse(fmt.Sprintf("https://%s:%s/sdk", vs.Config.Hostname, vs.Config.Port)) if err != nil { - glog.Errorf("Failed to parse URL: %s. err: %+v", url, err) + klog.Errorf("Failed to parse URL: %s. err: %+v", url, err) return nil, err } url.User = neturl.UserPassword(vs.Config.Username, vs.Config.Password) client, err := govmomi.NewClient(ctx, url, true) if err != nil { - glog.Errorf("Failed to create new client. err: %+v", err) + klog.Errorf("Failed to create new client. err: %+v", err) return nil, err } if vs.Config.RoundTripperCount == 0 { diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go index 99e61e268cff3..443c3366d8100 100644 --- a/test/e2e/storage/vsphere/vsphere_utils.go +++ b/test/e2e/storage/vsphere/vsphere_utils.go @@ -24,13 +24,13 @@ import ( "strings" "time" - "github.com/golang/glog" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/mo" vim25types "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog" "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" @@ -478,7 +478,7 @@ func getVirtualDiskPage83Data(ctx context.Context, dc *object.Datacenter, diskPa diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc) if err != nil { - glog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err) + klog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err) return "", err } diskUUID = formatVirtualDiskUUID(diskUUID) diff --git a/test/e2e_kubeadm/runner/local/BUILD b/test/e2e_kubeadm/runner/local/BUILD index 412adbe48e3dd..c0fbdc31655b5 100644 --- a/test/e2e_kubeadm/runner/local/BUILD +++ b/test/e2e_kubeadm/runner/local/BUILD @@ -7,7 +7,7 @@ go_library( visibility = ["//visibility:private"], deps = [ "//test/utils:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e_kubeadm/runner/local/run_local.go b/test/e2e_kubeadm/runner/local/run_local.go index 9ccdfcb7309e0..4adcba10948b2 100644 --- a/test/e2e_kubeadm/runner/local/run_local.go +++ b/test/e2e_kubeadm/runner/local/run_local.go @@ -25,7 +25,7 @@ import ( "runtime" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/test/utils" ) @@ -49,25 +49,25 @@ func main() { if *build { if err := bazelBuild(); err != nil { - glog.Exitf("couldn't build with bazel: %v", err) + klog.Exitf("couldn't build with bazel: %v", err) } } ginkgo, err := getBazelGinkgo() if err != nil { - glog.Fatalf("Failed to get ginkgo binary: %v", err) + klog.Fatalf("Failed to get ginkgo binary: %v", err) } test, err := getBazelTestBin() if err != nil { - glog.Fatalf("Failed to get test file: %v", err) + klog.Fatalf("Failed to get test file: %v", err) } args := append(strings.Split(*ginkgoFlags, " "), test, "--") args = append(args, strings.Split(*testFlags, " ")...) 
if err := execCommand(ginkgo, args...); err != nil { - glog.Exitf("Test failed: %v", err) + klog.Exitf("Test failed: %v", err) } } diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index 2670233e2b3fa..6f7856eb4a59d 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -46,10 +46,10 @@ go_library( "//vendor/github.com/coreos/go-systemd/util:go_default_library", "//vendor/github.com/docker/docker/api/types:go_default_library", "//vendor/github.com/docker/docker/client:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/github.com/prometheus/common/model:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//pkg/api/v1/node:go_default_library", @@ -149,12 +149,12 @@ go_test( "//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/coreos/go-systemd/util:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/github.com/onsi/gomega/gstruct:go_default_library", "//vendor/github.com/onsi/gomega/types:go_default_library", "//vendor/github.com/prometheus/common/model:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:linux": [ "//cmd/kubeadm/app/util/system:go_default_library", diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index 0bf75efe64a54..bda5fe1b73f66 100644 --- a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -37,9 +37,9 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "github.com/davecgh/go-spew/spew" - "github.com/golang/glog" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "k8s.io/klog" ) var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() { @@ -132,14 +132,14 @@ func loadTestProfiles() error { // apparmor_parser does not always return an error code, so consider any stderr output an error.
if err != nil || stderr.Len() > 0 { if stderr.Len() > 0 { - glog.Warning(stderr.String()) + klog.Warning(stderr.String()) } if len(out) > 0 { - glog.Infof("apparmor_parser: %s", out) + klog.Infof("apparmor_parser: %s", out) } return fmt.Errorf("failed to load profiles: %v", err) } - glog.V(2).Infof("Loaded profiles: %v", out) + klog.V(2).Infof("Loaded profiles: %v", out) return nil } @@ -211,7 +211,7 @@ func isAppArmorEnabled() bool { if len(matches) == 2 { version, err := strconv.Atoi(matches[1]) if err != nil { - glog.Errorf("Error parsing GCI version from NodeName %q: %v", framework.TestContext.NodeName, err) + klog.Errorf("Error parsing GCI version from NodeName %q: %v", framework.TestContext.NodeName, err) return false } return version >= 54 diff --git a/test/e2e_node/builder/BUILD b/test/e2e_node/builder/BUILD index eeda57b85d757..5dd457b59467a 100644 --- a/test/e2e_node/builder/BUILD +++ b/test/e2e_node/builder/BUILD @@ -11,7 +11,7 @@ go_library( importpath = "k8s.io/kubernetes/test/e2e_node/builder", deps = [ "//test/utils:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e_node/builder/build.go b/test/e2e_node/builder/build.go index 87ea32a87c7cb..797de1dc53840 100644 --- a/test/e2e_node/builder/build.go +++ b/test/e2e_node/builder/build.go @@ -24,7 +24,7 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/test/utils" ) @@ -38,7 +38,7 @@ var buildTargets = []string{ } func BuildGo() error { - glog.Infof("Building k8s binaries...") + klog.Infof("Building k8s binaries...") k8sRoot, err := utils.GetK8sRootDir() if err != nil { return fmt.Errorf("failed to locate kubernetes root directory %v.", err) @@ -90,7 +90,7 @@ func getK8sBin(bin string) (string, error) { func GetKubeletServerBin() string { bin, err := getK8sBin("kubelet") if err != nil { - glog.Fatalf("Could not locate kubelet binary %v.", err) + klog.Fatalf("Could not locate kubelet binary %v.", err) } return bin } diff --git a/test/e2e_node/e2e_node_suite_test.go b/test/e2e_node/e2e_node_suite_test.go index acf3b1fa33865..41357cc407df4 100644 --- a/test/e2e_node/e2e_node_suite_test.go +++ b/test/e2e_node/e2e_node_suite_test.go @@ -44,13 +44,13 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e_node/services" - "github.com/golang/glog" "github.com/kardianos/osext" . "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/config" morereporters "github.com/onsi/ginkgo/reporters" . "github.com/onsi/gomega" "github.com/spf13/pflag" + "k8s.io/klog" ) var e2es *services.E2EServices @@ -103,7 +103,7 @@ func TestE2eNode(t *testing.T) { var err error spec, err = loadSystemSpecFromFile(*systemSpecFile) if err != nil { - glog.Exitf("Failed to load system spec: %v", err) + klog.Exitf("Failed to load system spec: %v", err) } } if framework.TestContext.NodeConformance { @@ -112,11 +112,11 @@ func TestE2eNode(t *testing.T) { // TODO(random-liu): Consider to chroot the whole test process to make writing // test easier. 
if err := syscall.Chroot(rootfs); err != nil { - glog.Exitf("chroot %q failed: %v", rootfs, err) + klog.Exitf("chroot %q failed: %v", rootfs, err) } } if _, err := system.ValidateSpec(*spec, framework.TestContext.ContainerRuntime); err != nil { - glog.Exitf("system validation failed: %v", err) + klog.Exitf("system validation failed: %v", err) } return } @@ -127,7 +127,7 @@ func TestE2eNode(t *testing.T) { if reportDir != "" { // Create the directory if it doesn't already exist if err := os.MkdirAll(reportDir, 0755); err != nil { - glog.Errorf("Failed creating report directory: %v", err) + klog.Errorf("Failed creating report directory: %v", err) } else { // Configure a junit reporter to write to the directory junitFile := fmt.Sprintf("junit_%s_%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode) @@ -146,7 +146,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { // Pre-pull the images tests depend on so we can fail immediately if there is an image pull issue // This helps with debugging test flakes since it is hard to tell when a test failure is due to image pulling. if framework.TestContext.PrepullImages { - glog.Infof("Pre-pulling images so that they are cached for the tests.") + klog.Infof("Pre-pulling images so that they are cached for the tests.") err := PrePullAllImages() Expect(err).ShouldNot(HaveOccurred()) } @@ -161,12 +161,12 @@ var _ = SynchronizedBeforeSuite(func() []byte { // If the services are expected to keep running after the test, they should not monitor the test process. e2es = services.NewE2EServices(*stopServices) Expect(e2es.Start()).To(Succeed(), "should be able to start node services.") - glog.Infof("Node services started. Running tests...") + klog.Infof("Node services started. Running tests...") } else { - glog.Infof("Running tests without starting services.") + klog.Infof("Running tests without starting services.") } - glog.Infof("Wait for the node to be ready") + klog.Infof("Wait for the node to be ready") waitForNodeReady() // Reference common test to make the import valid. @@ -182,12 +182,12 @@ var _ = SynchronizedBeforeSuite(func() []byte { var _ = SynchronizedAfterSuite(func() {}, func() { if e2es != nil { if *startServices && *stopServices { - glog.Infof("Stopping node services...") + klog.Infof("Stopping node services...") e2es.Stop() } } - glog.Infof("Tests Finished") + klog.Infof("Tests Finished") }) // validateSystem runs system validation in a separate process and returns error if validation fails. @@ -210,13 +210,13 @@ func maskLocksmithdOnCoreos() { data, err := ioutil.ReadFile("/etc/os-release") if err != nil { // Not all distros contain this file.
- glog.Infof("Could not read /etc/os-release: %v", err) + klog.Infof("Could not read /etc/os-release: %v", err) return } if bytes.Contains(data, []byte("ID=coreos")) { output, err := exec.Command("systemctl", "mask", "--now", "locksmithd").CombinedOutput() Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("should be able to mask locksmithd - output: %q", string(output))) - glog.Infof("Locksmithd is masked successfully") + klog.Infof("Locksmithd is masked successfully") } } diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go index e7bc3f9baee8a..dcf62ef2790d7 100644 --- a/test/e2e_node/image_list.go +++ b/test/e2e_node/image_list.go @@ -22,7 +22,7 @@ import ( "os/user" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/util/sets" internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri" @@ -128,7 +128,7 @@ func PrePullAllImages() error { return err } images := framework.ImageWhiteList.List() - glog.V(4).Infof("Pre-pulling images with %s %+v", puller.Name(), images) + klog.V(4).Infof("Pre-pulling images with %s %+v", puller.Name(), images) for _, image := range images { var ( err error @@ -141,11 +141,11 @@ func PrePullAllImages() error { if output, err = puller.Pull(image); err == nil { break } - glog.Warningf("Failed to pull %s as user %q, retrying in %s (%d of %d): %v", + klog.Warningf("Failed to pull %s as user %q, retrying in %s (%d of %d): %v", image, usr.Username, imagePullRetryDelay.String(), i+1, maxImagePullRetries, err) } if err != nil { - glog.Warningf("Could not pre-pull image %s %v output: %s", image, err, output) + klog.Warningf("Could not pre-pull image %s %v output: %s", image, err, output) return err } } diff --git a/test/e2e_node/pods_container_manager_test.go b/test/e2e_node/pods_container_manager_test.go index 128860b17c053..f7ba0048a3a03 100644 --- a/test/e2e_node/pods_container_manager_test.go +++ b/test/e2e_node/pods_container_manager_test.go @@ -27,9 +27,9 @@ import ( "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" - "github.com/golang/glog" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "k8s.io/klog" ) // getResourceList returns a ResourceList with the @@ -71,7 +71,7 @@ func makePodToVerifyCgroups(cgroupNames []string) *v1.Pod { cgroupName := cm.NewCgroupName(rootCgroupName, cgroupComponents...) 
cgroupFsNames = append(cgroupFsNames, toCgroupFsName(cgroupName)) } - glog.Infof("expecting %v cgroups to be found", cgroupFsNames) + klog.Infof("expecting %v cgroups to be found", cgroupFsNames) // build the pod command to either verify cgroups exist command := "" for _, cgroupFsName := range cgroupFsNames { diff --git a/test/e2e_node/remote/BUILD b/test/e2e_node/remote/BUILD index 9a6dcdd50e03d..3952cd69cac9e 100644 --- a/test/e2e_node/remote/BUILD +++ b/test/e2e_node/remote/BUILD @@ -21,7 +21,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//test/e2e_node/builder:go_default_library", "//test/utils:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e_node/remote/cadvisor_e2e.go b/test/e2e_node/remote/cadvisor_e2e.go index 8bdb567d031a5..6beb2c7c44e7e 100644 --- a/test/e2e_node/remote/cadvisor_e2e.go +++ b/test/e2e_node/remote/cadvisor_e2e.go @@ -22,7 +22,7 @@ import ( "os/exec" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/test/utils" ) @@ -67,7 +67,7 @@ func (n *CAdvisorE2ERemote) RunTest(host, workspace, results, imageDesc, junitFi // Kill any running node processes cleanupNodeProcesses(host) - glog.V(2).Infof("Starting tests on %q", host) + klog.V(2).Infof("Starting tests on %q", host) return SSH(host, "sh", "-c", getSSHCommand(" && ", fmt.Sprintf("cd %s/cadvisor", workspace), fmt.Sprintf("timeout -k 30s %fs ./build/integration.sh ../results/cadvisor.log", diff --git a/test/e2e_node/remote/node_conformance.go b/test/e2e_node/remote/node_conformance.go index 9c78ae30887c3..39ef80f5e4b27 100644 --- a/test/e2e_node/remote/node_conformance.go +++ b/test/e2e_node/remote/node_conformance.go @@ -25,7 +25,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/test/e2e_node/builder" "k8s.io/kubernetes/test/utils" @@ -206,13 +206,13 @@ func launchKubelet(host, workspace, results, testArgs string) error { ), } } - glog.V(2).Infof("Launch kubelet with command: %v", cmd) + klog.V(2).Infof("Launch kubelet with command: %v", cmd) output, err := SSH(host, cmd...) if err != nil { return fmt.Errorf("failed to launch kubelet with command %v: error - %v output - %q", cmd, err, output) } - glog.Info("Successfully launch kubelet") + klog.Info("Successfully launch kubelet") return nil } @@ -221,12 +221,12 @@ const kubeletStopGracePeriod = 10 * time.Second // stopKubelet stops kubelet launcher and kubelet gracefully. func stopKubelet(host, workspace string) error { - glog.Info("Gracefully stop kubelet launcher") + klog.Info("Gracefully stop kubelet launcher") if output, err := SSH(host, "pkill", conformanceTestBinary); err != nil { return fmt.Errorf("failed to gracefully stop kubelet launcher: error - %v output - %q", err, output) } - glog.Info("Wait for kubelet launcher to stop") + klog.Info("Wait for kubelet launcher to stop") stopped := false for start := time.Now(); time.Since(start) < kubeletStopGracePeriod; time.Sleep(time.Second) { // Check whether the process is still running. 
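The stopKubelet hunks above and below follow the usual stop-poll-escalate shutdown shape: request a graceful stop, poll until the process is gone or the grace period lapses, then fall back to SIGKILL. A minimal, self-contained sketch of that idiom (stop, kill, and gone are hypothetical stand-ins for the SSH/pkill helpers this file uses):

	package main

	import (
		"fmt"
		"time"
	)

	// stopWithGrace asks a process to stop, waits up to grace for it to exit,
	// and escalates to a forcible kill if it is still running at the deadline.
	func stopWithGrace(stop, kill func() error, gone func() bool, grace time.Duration) error {
		if err := stop(); err != nil {
			return fmt.Errorf("graceful stop failed: %v", err)
		}
		for start := time.Now(); time.Since(start) < grace; time.Sleep(time.Second) {
			if gone() {
				return nil // exited within the grace period
			}
		}
		return kill() // grace period lapsed; force it
	}

	func main() {
		exitAt := time.Now().Add(2 * time.Second)
		err := stopWithGrace(
			func() error { return nil },                         // pretend the graceful signal was delivered
			func() error { fmt.Println("SIGKILL"); return nil }, // forcible fallback
			func() bool { return time.Now().After(exitAt) },     // the "process" exits after 2s
			10*time.Second,
		)
		fmt.Println("stopped:", err)
	}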
@@ -242,13 +242,13 @@ func stopKubelet(host, workspace string) error { } } if !stopped { - glog.Info("Forcibly stop kubelet") + klog.Info("Forcibly stop kubelet") if output, err := SSH(host, "pkill", "-SIGKILL", conformanceTestBinary); err != nil { return fmt.Errorf("failed to forcibly stop kubelet: error - %v output - %q", err, output) } } - glog.Info("Successfully stop kubelet") + klog.Info("Successfully stop kubelet") // Clean up the pod manifest path podManifestPath := getPodPath(workspace) if output, err := SSH(host, "rm", "-f", filepath.Join(workspace, podManifestPath)); err != nil { @@ -286,12 +286,12 @@ func (c *ConformanceRemote) RunTest(host, workspace, results, imageDesc, junitFi defer func() { if err := stopKubelet(host, workspace); err != nil { // Only log an error if we fail to stop kubelet, because it is not critical. - glog.Errorf("failed to stop kubelet: %v", err) + klog.Errorf("failed to stop kubelet: %v", err) } }() // Run the tests - glog.V(2).Infof("Starting tests on %q", host) + klog.V(2).Infof("Starting tests on %q", host) podManifestPath := getPodPath(workspace) cmd := fmt.Sprintf("'timeout -k 30s %fs docker run --rm --privileged=true --net=host -v /:/rootfs -v %s:%s -v %s:/var/result -e TEST_ARGS=--report-prefix=%s %s'", timeout.Seconds(), podManifestPath, podManifestPath, results, junitFilePrefix, getConformanceTestImageName(systemSpecName)) diff --git a/test/e2e_node/remote/node_e2e.go b/test/e2e_node/remote/node_e2e.go index 082437975eac6..4a45594bfcf56 100644 --- a/test/e2e_node/remote/node_e2e.go +++ b/test/e2e_node/remote/node_e2e.go @@ -24,7 +24,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/test/e2e_node/builder" "k8s.io/kubernetes/test/utils" @@ -108,7 +108,7 @@ func tarAddFile(tar, source, dest string) error { // prependCOSMounterFlag prepends the flag for setting the GCI mounter path to // args and returns the result.
func prependCOSMounterFlag(args, host, workspace string) (string, error) { - glog.V(2).Infof("GCI/COS node and GCI/COS mounter both detected, modifying --experimental-mounter-path accordingly") + klog.V(2).Infof("GCI/COS node and GCI/COS mounter both detected, modifying --experimental-mounter-path accordingly") mounterPath := filepath.Join(workspace, "mounter") args = fmt.Sprintf("--kubelet-flags=--experimental-mounter-path=%s ", mounterPath) + args return args, nil @@ -164,7 +164,7 @@ func (n *NodeE2ERemote) RunTest(host, workspace, results, imageDesc, junitFilePr } // Run the tests - glog.V(2).Infof("Starting tests on %q", host) + klog.V(2).Infof("Starting tests on %q", host) cmd := getSSHCommand(" && ", fmt.Sprintf("cd %s", workspace), fmt.Sprintf("timeout -k 30s %fs ./ginkgo %s ./e2e_node.test -- --system-spec-name=%s --system-spec-file=%s --logtostderr --v 4 --node-name=%s --report-dir=%s --report-prefix=%s --image-description=\"%s\" %s", diff --git a/test/e2e_node/remote/remote.go b/test/e2e_node/remote/remote.go index 746899f8b57b7..1a0ff30290c3d 100644 --- a/test/e2e_node/remote/remote.go +++ b/test/e2e_node/remote/remote.go @@ -27,8 +27,8 @@ import ( "strings" "time" - "github.com/golang/glog" utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" ) var testTimeoutSeconds = flag.Duration("test-timeout", 45*time.Minute, "How long (in golang duration format) to wait for ginkgo tests to complete.") @@ -37,7 +37,7 @@ var resultsDir = flag.String("results-dir", "/tmp/", "Directory to scp test resu const archiveName = "e2e_node_test.tar.gz" func CreateTestArchive(suite TestSuite, systemSpecName string) (string, error) { - glog.V(2).Infof("Building archive...") + klog.V(2).Infof("Building archive...") tardir, err := ioutil.TempDir("", "node-e2e-archive") if err != nil { return "", fmt.Errorf("failed to create temporary directory %v.", err) } @@ -67,7 +67,7 @@ func CreateTestArchive(suite TestSuite, systemSpecName string) (string, error) { // TODO(random-liu): junitFilePrefix is not actually a prefix, the file name is junit-junitFilePrefix.xml. Change the variable name. func RunRemote(suite TestSuite, archive string, host string, cleanup bool, imageDesc, junitFilePrefix string, testArgs string, ginkgoArgs string, systemSpecName string) (string, bool, error) { // Create the temp staging directory - glog.V(2).Infof("Staging test binaries on %q", host) + klog.V(2).Infof("Staging test binaries on %q", host) workspace := newWorkspaceDir() // Do not sudo here, so that we can use scp to copy test archive to the directory. if output, err := SSHNoSudo(host, "mkdir", workspace); err != nil { @@ -78,7 +78,7 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, image defer func() { output, err := SSH(host, "rm", "-rf", workspace) if err != nil { - glog.Errorf("failed to cleanup workspace %q on host %q: %v. Output:\n%s", workspace, host, err, output) + klog.Errorf("failed to cleanup workspace %q on host %q: %v. Output:\n%s", workspace, host, err, output) } }() } @@ -94,7 +94,7 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, image fmt.Sprintf("cd %s", workspace), fmt.Sprintf("tar -xzvf ./%s", archiveName), ) - glog.V(2).Infof("Extracting tar on %q", host) + klog.V(2).Infof("Extracting tar on %q", host) // Do not use sudo here, because `sudo tar -x` will recover the file ownership inside the tar ball, but // we want the extracted files to be owned by the current user.
if output, err := SSHNoSudo(host, "sh", "-c", cmd); err != nil { @@ -109,7 +109,7 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, image return "", false, fmt.Errorf("failed to create test result directory %q on host %q: %v output: %q", resultDir, host, err, output) } - glog.V(2).Infof("Running test on %q", host) + klog.V(2).Infof("Running test on %q", host) output, err := suite.RunTest(host, workspace, resultDir, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName, *testTimeoutSeconds) aggErrs := []error{} @@ -119,7 +119,7 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, image collectSystemLog(host) } - glog.V(2).Infof("Copying test artifacts from %q", host) + klog.V(2).Infof("Copying test artifacts from %q", host) scpErr := getTestArtifacts(host, workspace) if scpErr != nil { aggErrs = append(aggErrs, scpErr) @@ -194,17 +194,17 @@ func collectSystemLog(host string) { logPath = fmt.Sprintf("/tmp/%s-%s", getTimestamp(), logName) destPath = fmt.Sprintf("%s/%s-%s", *resultsDir, host, logName) ) - glog.V(2).Infof("Test failed unexpectedly. Attempting to retrieve system logs (only works for nodes with journald)") + klog.V(2).Infof("Test failed unexpectedly. Attempting to retrieve system logs (only works for nodes with journald)") // Try getting the system logs from journald and store it to a file. // Don't reuse the original test directory on the remote host because // it could've been removed if the node was rebooted. if output, err := SSH(host, "sh", "-c", fmt.Sprintf("'journalctl --system --all > %s'", logPath)); err == nil { - glog.V(2).Infof("Got the system logs from journald; copying it back...") + klog.V(2).Infof("Got the system logs from journald; copying it back...") if output, err := runSSHCommand("scp", fmt.Sprintf("%s:%s", GetHostnameOrIp(host), logPath), destPath); err != nil { - glog.V(2).Infof("Failed to copy the log: err: %v, output: %q", err, output) + klog.V(2).Infof("Failed to copy the log: err: %v, output: %q", err, output) } } else { - glog.V(2).Infof("Failed to run journalctl (normal if it doesn't exist on the node): %v, output: %q", err, output) + klog.V(2).Infof("Failed to run journalctl (normal if it doesn't exist on the node): %v, output: %q", err, output) } } diff --git a/test/e2e_node/remote/ssh.go b/test/e2e_node/remote/ssh.go index fe82a66463749..4a4f0d82bd0cc 100644 --- a/test/e2e_node/remote/ssh.go +++ b/test/e2e_node/remote/ssh.go @@ -24,7 +24,7 @@ import ( "strings", "sync" - "github.com/golang/glog" + "k8s.io/klog" ) var sshOptions = flag.String("ssh-options", "", "Commandline options passed to ssh.") @@ -38,7 +38,7 @@ var sshDefaultKeyMap map[string]string func init() { usr, err := user.Current() if err != nil { - glog.Fatal(err) + klog.Fatal(err) } sshOptionsMap = map[string]string{ "gce": "-o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o CheckHostIP=no -o StrictHostKeyChecking=no -o ServerAliveInterval=30 -o LogLevel=ERROR", } diff --git a/test/e2e_node/remote/utils.go b/test/e2e_node/remote/utils.go index 28ab74cf52a1f..fab3fbeb10fdd 100644 --- a/test/e2e_node/remote/utils.go +++ b/test/e2e_node/remote/utils.go @@ -21,7 +21,7 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" + "k8s.io/klog" ) // utils.go contains functions used across test suites. @@ -52,7 +52,7 @@ const cniConfig = `{ // Install the cni plugin and add basic bridge configuration to the // configuration directory.
func setupCNI(host, workspace string) error { - glog.V(2).Infof("Install CNI on %q", host) + klog.V(2).Infof("Install CNI on %q", host) cniPath := filepath.Join(workspace, cniDirectory) cmd := getSSHCommand(" ; ", fmt.Sprintf("mkdir -p %s", cniPath), @@ -65,7 +65,7 @@ // The added CNI network config is not needed for kubenet. It is only // used when testing the CNI network plugin, but is added in both cases // for consistency and simplicity. - glog.V(2).Infof("Adding CNI configuration on %q", host) + klog.V(2).Infof("Adding CNI configuration on %q", host) cniConfigPath := filepath.Join(workspace, cniConfDirectory) cmd = getSSHCommand(" ; ", fmt.Sprintf("mkdir -p %s", cniConfigPath), @@ -79,7 +79,7 @@ // configureFirewall configures iptables firewall rules. func configureFirewall(host string) error { - glog.V(2).Infof("Configure iptables firewall rules on %q", host) + klog.V(2).Infof("Configure iptables firewall rules on %q", host) // TODO: consider calling bootstrap script to configure host based on OS output, err := SSH(host, "iptables", "-L", "INPUT") if err != nil { @@ -114,7 +114,7 @@ func configureFirewall(host string) error { // cleanupNodeProcesses kills all running node processes that may conflict with the test. func cleanupNodeProcesses(host string) { - glog.V(2).Infof("Killing any existing node processes on %q", host) + klog.V(2).Infof("Killing any existing node processes on %q", host) cmd := getSSHCommand(" ; ", "pkill kubelet", "pkill kube-apiserver", diff --git a/test/e2e_node/runner/local/BUILD b/test/e2e_node/runner/local/BUILD index 8bd0e3b8d1e77..50d8969a89d9f 100644 --- a/test/e2e_node/runner/local/BUILD +++ b/test/e2e_node/runner/local/BUILD @@ -18,7 +18,7 @@ go_library( deps = [ "//test/e2e_node/builder:go_default_library", "//test/utils:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e_node/runner/local/run_local.go b/test/e2e_node/runner/local/run_local.go index 9b0119992949c..4a69a997b6202 100644 --- a/test/e2e_node/runner/local/run_local.go +++ b/test/e2e_node/runner/local/run_local.go @@ -27,7 +27,7 @@ import ( "k8s.io/kubernetes/test/e2e_node/builder" "k8s.io/kubernetes/test/utils" - "github.com/golang/glog" + "k8s.io/klog" ) var buildDependencies = flag.Bool("build-dependencies", true, "If true, build all dependencies.") @@ -40,21 +40,22 @@ const ( ) func main() { + klog.InitFlags(nil) flag.Parse() // Build dependencies - ginkgo, kubelet and apiserver.
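	// The klog.InitFlags(nil) call above shows the initialization pattern this
	// patch applies in each runner main(): unlike glog, klog does not register
	// its flags in an init() function, so -v, -logtostderr and friends must be
	// registered explicitly before flag.Parse() runs. Passing nil is assumed
	// to register them on the default flag.CommandLine FlagSet.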
if *buildDependencies { if err := builder.BuildGo(); err != nil { - glog.Fatalf("Failed to build the dependencies: %v", err) + klog.Fatalf("Failed to build the dependencies: %v", err) } } // Run node e2e test outputDir, err := utils.GetK8sBuildOutputDir() if err != nil { - glog.Fatalf("Failed to get build output directory: %v", err) + klog.Fatalf("Failed to get build output directory: %v", err) } - glog.Infof("Got build output dir: %v", outputDir) + klog.Infof("Got build output dir: %v", outputDir) ginkgo := filepath.Join(outputDir, "ginkgo") test := filepath.Join(outputDir, "e2e_node.test") @@ -62,19 +63,19 @@ func main() { if *systemSpecName != "" { rootDir, err := utils.GetK8sRootDir() if err != nil { - glog.Fatalf("Failed to get k8s root directory: %v", err) + klog.Fatalf("Failed to get k8s root directory: %v", err) } systemSpecFile := filepath.Join(rootDir, systemSpecPath, *systemSpecName+".yaml") args = append(args, fmt.Sprintf("--system-spec-name=%s --system-spec-file=%s", *systemSpecName, systemSpecFile)) } if err := runCommand(ginkgo, args...); err != nil { - glog.Exitf("Test failed: %v", err) + klog.Exitf("Test failed: %v", err) } return } func runCommand(name string, args ...string) error { - glog.Infof("Running command: %v %v", name, strings.Join(args, " ")) + klog.Infof("Running command: %v %v", name, strings.Join(args, " ")) cmd := exec.Command("sudo", "sh", "-c", strings.Join(append([]string{name}, args...), " ")) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/test/e2e_node/runner/remote/BUILD b/test/e2e_node/runner/remote/BUILD index d324fe8a6de55..88c58616d3f6b 100644 --- a/test/e2e_node/runner/remote/BUILD +++ b/test/e2e_node/runner/remote/BUILD @@ -17,11 +17,11 @@ go_library( importpath = "k8s.io/kubernetes/test/e2e_node/runner/remote", deps = [ "//test/e2e_node/remote:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", "//vendor/golang.org/x/oauth2/google:go_default_library", "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", + "//vendor/k8s.io/klog:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", ], ) diff --git a/test/e2e_node/runner/remote/run_remote.go b/test/e2e_node/runner/remote/run_remote.go index 8c60551e32ee0..c2131da22c78f 100644 --- a/test/e2e_node/runner/remote/run_remote.go +++ b/test/e2e_node/runner/remote/run_remote.go @@ -37,11 +37,11 @@ import ( "k8s.io/kubernetes/test/e2e_node/remote" - "github.com/golang/glog" "github.com/pborman/uuid" "golang.org/x/oauth2" "golang.org/x/oauth2/google" compute "google.golang.org/api/compute/v0.beta" + "k8s.io/klog" "sigs.k8s.io/yaml" ) @@ -174,6 +174,7 @@ type internalGCEImage struct { } func main() { + klog.InitFlags(nil) flag.Parse() switch *testSuite { case "conformance": @@ -185,7 +186,7 @@ func main() { // Use node e2e suite by default if no subcommand is specified. 
suite = remote.InitNodeE2ERemote() default: - glog.Fatalf("--test-suite must be one of default or conformance") + klog.Fatalf("--test-suite must be one of default or conformance") } rand.Seed(time.Now().UnixNano()) @@ -196,12 +197,12 @@ func main() { } if *hosts == "" && *imageConfigFile == "" && *images == "" { - glog.Fatalf("Must specify one of --image-config-file, --hosts, --images.") + klog.Fatalf("Must specify one of --image-config-file, --hosts, --images.") } var err error computeService, err = getComputeClient() if err != nil { - glog.Fatalf("Unable to create gcloud compute service using defaults. Make sure you are authenticated. %v", err) + klog.Fatalf("Unable to create gcloud compute service using defaults. Make sure you are authenticated. %v", err) } gceImages := &internalImageConfig{ @@ -216,12 +217,12 @@ func main() { // parse images imageConfigData, err := ioutil.ReadFile(configPath) if err != nil { - glog.Fatalf("Could not read image config file provided: %v", err) + klog.Fatalf("Could not read image config file provided: %v", err) } externalImageConfig := ImageConfig{Images: make(map[string]GCEImage)} err = yaml.Unmarshal(imageConfigData, &externalImageConfig) if err != nil { - glog.Fatalf("Could not parse image config file: %v", err) + klog.Fatalf("Could not parse image config file: %v", err) } for shortName, imageConfig := range externalImageConfig.Images { var images []string @@ -230,7 +231,7 @@ func main() { isRegex = true images, err = getGCEImages(imageConfig.ImageRegex, imageConfig.Project, imageConfig.PreviousImages) if err != nil { - glog.Fatalf("Could not retrieve list of images based on image prefix %q: %v", imageConfig.ImageRegex, err) + klog.Fatalf("Could not retrieve list of images based on image prefix %q: %v", imageConfig.ImageRegex, err) } } else { images = []string{imageConfig.Image} @@ -265,7 +266,7 @@ func main() { // convenience; merge in with config file if *images != "" { if *imageProject == "" { - glog.Fatal("Must specify --image-project if you specify --images") + klog.Fatal("Must specify --image-project if you specify --images") } cliImages := strings.Split(*images, ",") for _, img := range cliImages { @@ -279,16 +280,16 @@ func main() { } if len(gceImages.images) != 0 && *zone == "" { - glog.Fatal("Must specify --zone flag") + klog.Fatal("Must specify --zone flag") } for shortName, image := range gceImages.images { if image.project == "" { - glog.Fatalf("Invalid config for %v; must specify a project", shortName) + klog.Fatalf("Invalid config for %v; must specify a project", shortName) } } if len(gceImages.images) != 0 { if *project == "" { - glog.Fatal("Must specify --project flag to launch images into") + klog.Fatal("Must specify --project flag to launch images into") } } if *instanceNamePrefix == "" { @@ -394,9 +395,9 @@ func getImageMetadata(input string) *compute.Metadata { if input == "" { return nil } - glog.V(3).Infof("parsing instance metadata: %q", input) + klog.V(3).Infof("parsing instance metadata: %q", input) raw := parseInstanceMetadata(input) - glog.V(4).Infof("parsed instance metadata: %v", raw) + klog.V(4).Infof("parsed instance metadata: %v", raw) metadataItems := []*compute.MetadataItems{} for k, v := range raw { val := v @@ -482,7 +483,7 @@ func getGCEImages(imageRegex, project string, previousImages int) ([]string, err creationTime: creationTime, name: instance.Name, } - glog.V(4).Infof("Found image %q based on regex %q in project %q", io.string(), imageRegex, project) + klog.V(4).Infof("Found image %q based on regex %q in 
project %q", io.string(), imageRegex, project) imageObjs = append(imageObjs, io) } } @@ -531,12 +532,12 @@ func testImage(imageConfig *internalGCEImage, junitFilePrefix string) *TestResul // TODO(random-liu): Extract out and unify log collection logic with cluste e2e. serialPortOutput, err := computeService.Instances.GetSerialPortOutput(*project, *zone, host).Port(1).Do() if err != nil { - glog.Errorf("Failed to collect serial output from node %q: %v", host, err) + klog.Errorf("Failed to collect serial output from node %q: %v", host, err) } else { logFilename := "serial-1.log" err := remote.WriteLog(host, logFilename, serialPortOutput.Contents) if err != nil { - glog.Errorf("Failed to write serial output from node %q to %q: %v", host, logFilename, err) + klog.Errorf("Failed to write serial output from node %q to %q: %v", host, logFilename, err) } } return result @@ -544,7 +545,7 @@ func testImage(imageConfig *internalGCEImage, junitFilePrefix string) *TestResul // Provision a gce instance using image func createInstance(imageConfig *internalGCEImage) (string, error) { - glog.V(1).Infof("Creating instance %+v", *imageConfig) + klog.V(1).Infof("Creating instance %+v", *imageConfig) name := imageToInstanceName(imageConfig) i := &compute.Instance{ Name: name, @@ -712,10 +713,10 @@ func getComputeClient() (*compute.Service, error) { } func deleteInstance(host string) { - glog.Infof("Deleting instance %q", host) + klog.Infof("Deleting instance %q", host) _, err := computeService.Instances.Delete(*project, *zone, host).Do() if err != nil { - glog.Errorf("Error deleting instance %q: %v", host, err) + klog.Errorf("Error deleting instance %q: %v", host, err) } } @@ -730,7 +731,7 @@ func parseInstanceMetadata(str string) map[string]string { } kp := strings.Split(s, "<") if len(kp) != 2 { - glog.Fatalf("Invalid instance metadata: %q", s) + klog.Fatalf("Invalid instance metadata: %q", s) continue } metaPath := kp[1] @@ -739,7 +740,7 @@ func parseInstanceMetadata(str string) map[string]string { } v, err := ioutil.ReadFile(metaPath) if err != nil { - glog.Fatalf("Failed to read metadata file %q: %v", metaPath, err) + klog.Fatalf("Failed to read metadata file %q: %v", metaPath, err) continue } metadata[kp[0]] = string(v) diff --git a/test/e2e_node/services/BUILD b/test/e2e_node/services/BUILD index 60d41f37d6bf6..5eb7899fde2c4 100644 --- a/test/e2e_node/services/BUILD +++ b/test/e2e_node/services/BUILD @@ -40,9 +40,9 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e_node/builder:go_default_library", "//test/e2e_node/remote:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/kardianos/osext:go_default_library", "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/e2e_node/services/internal_services.go b/test/e2e_node/services/internal_services.go index 356c00ef14670..3890ffd287512 100644 --- a/test/e2e_node/services/internal_services.go +++ b/test/e2e_node/services/internal_services.go @@ -24,7 +24,7 @@ import ( "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/kubernetes/test/e2e/framework" - "github.com/golang/glog" + "k8s.io/klog" ) // e2eService manages e2e services in current process. @@ -55,7 +55,7 @@ func (es *e2eServices) run(t *testing.T) error { // start starts the tests embedded services or returns an error. 
func (es *e2eServices) start(t *testing.T) error { - glog.Info("Starting e2e services...") + klog.Info("Starting e2e services...") err := es.startEtcd(t) if err != nil { return err @@ -68,48 +68,48 @@ func (es *e2eServices) start(t *testing.T) error { if err != nil { return nil } - glog.Info("E2E services started.") + klog.Info("E2E services started.") return nil } // stop stops the embedded e2e services. func (es *e2eServices) stop(t *testing.T) { - glog.Info("Stopping e2e services...") + klog.Info("Stopping e2e services...") // TODO(random-liu): Use a loop to stop all services after introducing // service interface. - glog.Info("Stopping namespace controller") + klog.Info("Stopping namespace controller") if es.nsController != nil { if err := es.nsController.Stop(); err != nil { - glog.Errorf("Failed to stop %q: %v", es.nsController.Name(), err) + klog.Errorf("Failed to stop %q: %v", es.nsController.Name(), err) } } - glog.Info("Stopping API server") + klog.Info("Stopping API server") if es.apiServer != nil { if err := es.apiServer.Stop(); err != nil { - glog.Errorf("Failed to stop %q: %v", es.apiServer.Name(), err) + klog.Errorf("Failed to stop %q: %v", es.apiServer.Name(), err) } } - glog.Info("Stopping etcd") + klog.Info("Stopping etcd") if es.etcdServer != nil { es.etcdServer.Terminate(t) } for _, d := range es.rmDirs { - glog.Infof("Deleting directory %v", d) + klog.Infof("Deleting directory %v", d) err := os.RemoveAll(d) if err != nil { - glog.Errorf("Failed to delete directory %s.\n%v", d, err) + klog.Errorf("Failed to delete directory %s.\n%v", d, err) } } - glog.Info("E2E services stopped.") + klog.Info("E2E services stopped.") } // startEtcd starts the embedded etcd instance or returns an error. func (es *e2eServices) startEtcd(t *testing.T) error { - glog.Info("Starting etcd") + klog.Info("Starting etcd") server, etcdStorage := etcdtesting.NewUnsecuredEtcd3TestClientServer(t) es.etcdServer = server es.etcdStorage = etcdStorage @@ -118,14 +118,14 @@ func (es *e2eServices) startEtcd(t *testing.T) error { // startApiServer starts the embedded API server or returns an error. func (es *e2eServices) startApiServer(etcdStorage *storagebackend.Config) error { - glog.Info("Starting API server") + klog.Info("Starting API server") es.apiServer = NewAPIServer(*etcdStorage) return es.apiServer.Start() } // startNamespaceController starts the embedded namespace controller or returns an error. func (es *e2eServices) startNamespaceController() error { - glog.Info("Starting namespace controller") + klog.Info("Starting namespace controller") es.nsController = NewNamespaceController(framework.TestContext.Host) return es.nsController.Start() } diff --git a/test/e2e_node/services/kubelet.go b/test/e2e_node/services/kubelet.go index 21ef291c141ba..276dfbe36fa9c 100644 --- a/test/e2e_node/services/kubelet.go +++ b/test/e2e_node/services/kubelet.go @@ -26,8 +26,8 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/spf13/pflag" + "k8s.io/klog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -83,7 +83,7 @@ func RunKubelet() { defer e.Stop() e.kubelet, err = e.startKubelet() if err != nil { - glog.Fatalf("Failed to start kubelet: %v", err) + klog.Fatalf("Failed to start kubelet: %v", err) } // Wait until receiving a termination signal. 
waitForTerminationSignal() @@ -105,7 +105,7 @@ func (e *E2EServices) startKubelet() (*server, error) { return nil, fmt.Errorf("the --hyperkube-image option must be set") } - glog.Info("Starting kubelet") + klog.Info("Starting kubelet") // set feature gates so we can check which features are enabled and pass the appropriate flags utilfeature.DefaultFeatureGate.SetFromMap(framework.TestContext.FeatureGates) diff --git a/test/e2e_node/services/server.go b/test/e2e_node/services/server.go index 999c11da654be..22a162f7a2591 100644 --- a/test/e2e_node/services/server.go +++ b/test/e2e_node/services/server.go @@ -29,7 +29,7 @@ import ( "syscall" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/test/e2e/framework" ) @@ -99,7 +99,7 @@ func (s *server) String() string { // // Note: restartOnExit == true requires len(s.healthCheckUrls) > 0 to work properly. func (s *server) start() error { - glog.Infof("Starting server %q with command %q", s.name, commandToString(s.startCommand)) + klog.Infof("Starting server %q with command %q", s.name, commandToString(s.startCommand)) errCh := make(chan error) // Set up restart channels if the server is configured for restart on exit. @@ -127,7 +127,7 @@ func (s *server) start() error { errCh <- fmt.Errorf("failed to create file %q for `%s` %v.", outPath, s, err) return } else { - glog.Infof("Output file for server %q: %v", s.name, outfile.Name()) + klog.Infof("Output file for server %q: %v", s.name, outfile.Name()) } defer outfile.Close() defer outfile.Sync() @@ -158,7 +158,7 @@ func (s *server) start() error { return } if !s.restartOnExit { - glog.Infof("Waiting for server %q start command to complete", s.name) + klog.Infof("Waiting for server %q start command to complete", s.name) // If we aren't planning on restarting, ok to Wait() here to release resources. // Otherwise, we Wait() in the restart loop. err = s.startCommand.Wait() @@ -169,18 +169,18 @@ func (s *server) start() error { } else { usedStartCmd := true for { - glog.Infof("Running health check for service %q", s.name) + klog.Infof("Running health check for service %q", s.name) // Wait for an initial health check to pass, so that we are sure the server started. err := readinessCheck(s.name, s.healthCheckUrls, nil) if err != nil { if usedStartCmd { - glog.Infof("Waiting for server %q start command to complete after initial health check failed", s.name) + klog.Infof("Waiting for server %q start command to complete after initial health check failed", s.name) s.startCommand.Wait() // Release resources if necessary. } // This should not happen, immediately stop the e2eService process. - glog.Fatalf("Restart loop readinessCheck failed for %s", s) + klog.Fatalf("Restart loop readinessCheck failed for %s", s) } else { - glog.Infof("Initial health check passed for service %q", s.name) + klog.Infof("Initial health check passed for service %q", s.name) } // Initial health check passed, wait until a health check fails again. @@ -220,11 +220,11 @@ func (s *server) start() error { } // Run and wait for exit. This command is assumed to have // short duration, e.g. systemctl restart - glog.Infof("Restarting server %q with restart command", s.name) + klog.Infof("Restarting server %q with restart command", s.name) err = s.restartCommand.Run() if err != nil { // This should not happen, immediately stop the e2eService process. - glog.Fatalf("Restarting server %s with restartCommand failed. Error: %v.", s, err) + klog.Fatalf("Restarting server %s with restartCommand failed. 
Error: %v.", s, err) } } else { s.startCommand = &exec.Cmd{ @@ -238,12 +238,12 @@ func (s *server) start() error { ExtraFiles: s.startCommand.ExtraFiles, SysProcAttr: s.startCommand.SysProcAttr, } - glog.Infof("Restarting server %q with start command", s.name) + klog.Infof("Restarting server %q with start command", s.name) err = s.startCommand.Start() usedStartCmd = true if err != nil { // This should not happen, immediately stop the e2eService process. - glog.Fatalf("Restarting server %s with startCommand failed. Error: %v.", s, err) + klog.Fatalf("Restarting server %s with startCommand failed. Error: %v.", s, err) } } } @@ -255,7 +255,7 @@ func (s *server) start() error { // kill runs the server's kill command. func (s *server) kill() error { - glog.Infof("Kill server %q", s.name) + klog.Infof("Kill server %q", s.name) name := s.name cmd := s.startCommand @@ -274,7 +274,7 @@ func (s *server) kill() error { } if cmd.Process == nil { - glog.V(2).Infof("%q not running", name) + klog.V(2).Infof("%q not running", name) return nil } pid := cmd.Process.Pid @@ -292,11 +292,11 @@ func (s *server) kill() error { const timeout = 10 * time.Second for _, signal := range []string{"-TERM", "-KILL"} { - glog.V(2).Infof("Killing process %d (%s) with %s", pid, name, signal) + klog.V(2).Infof("Killing process %d (%s) with %s", pid, name, signal) cmd := exec.Command("kill", signal, strconv.Itoa(pid)) _, err := cmd.Output() if err != nil { - glog.Errorf("Error signaling process %d (%s) with %s: %v", pid, name, signal, err) + klog.Errorf("Error signaling process %d (%s) with %s: %v", pid, name, signal, err) continue } diff --git a/test/e2e_node/services/services.go b/test/e2e_node/services/services.go index b73658ab306a6..58ac3534ada40 100644 --- a/test/e2e_node/services/services.go +++ b/test/e2e_node/services/services.go @@ -24,8 +24,8 @@ import ( "path" "testing" - "github.com/golang/glog" "github.com/kardianos/osext" + "k8s.io/klog" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/test/e2e/framework" @@ -86,19 +86,19 @@ func (e *E2EServices) Stop() { }() if e.services != nil { if err := e.services.kill(); err != nil { - glog.Errorf("Failed to stop services: %v", err) + klog.Errorf("Failed to stop services: %v", err) } } if e.kubelet != nil { if err := e.kubelet.kill(); err != nil { - glog.Errorf("Failed to stop kubelet: %v", err) + klog.Errorf("Failed to stop kubelet: %v", err) } } if e.rmDirs != nil { for _, d := range e.rmDirs { err := os.RemoveAll(d) if err != nil { - glog.Errorf("Failed to delete directory %s: %v", d, err) + klog.Errorf("Failed to delete directory %s: %v", d, err) } } } @@ -112,7 +112,7 @@ func RunE2EServices(t *testing.T) { utilfeature.DefaultFeatureGate.SetFromMap(framework.TestContext.FeatureGates) e := newE2EServices() if err := e.run(t); err != nil { - glog.Fatalf("Failed to run e2e services: %v", err) + klog.Fatalf("Failed to run e2e services: %v", err) } } @@ -143,7 +143,7 @@ func (e *E2EServices) collectLogFiles() { if framework.TestContext.ReportDir == "" { return } - glog.Info("Fetching log files...") + klog.Info("Fetching log files...") journaldFound := isJournaldAvailable() for targetFileName, log := range e.logs { targetLink := path.Join(framework.TestContext.ReportDir, targetFileName) @@ -152,13 +152,13 @@ func (e *E2EServices) collectLogFiles() { if len(log.JournalctlCommand) == 0 { continue } - glog.Infof("Get log file %q with journalctl command %v.", targetFileName, log.JournalctlCommand) + klog.Infof("Get log file %q with journalctl command %v.", 
targetFileName, log.JournalctlCommand) out, err := exec.Command("journalctl", log.JournalctlCommand...).CombinedOutput() if err != nil { - glog.Errorf("failed to get %q from journald: %v, %v", targetFileName, string(out), err) + klog.Errorf("failed to get %q from journald: %v, %v", targetFileName, string(out), err) } else { if err = ioutil.WriteFile(targetLink, out, 0644); err != nil { - glog.Errorf("failed to write logs to %q: %v", targetLink, err) + klog.Errorf("failed to write logs to %q: %v", targetLink, err) } } continue @@ -169,7 +169,7 @@ func (e *E2EServices) collectLogFiles() { continue } if err := copyLogFile(file, targetLink); err != nil { - glog.Error(err) + klog.Error(err) } else { break } diff --git a/test/e2e_node/services/util.go b/test/e2e_node/services/util.go index a21a483d5f886..0e1f47bb33d4c 100644 --- a/test/e2e_node/services/util.go +++ b/test/e2e_node/services/util.go @@ -18,7 +18,7 @@ package services import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" "net/http" "os" "os/signal" @@ -41,7 +41,7 @@ func waitForTerminationSignal() { // check URLs. Once there is an error in errCh, the function will stop waiting // and return the error. func readinessCheck(name string, urls []string, errCh <-chan error) error { - glog.Infof("Running readiness check for service %q", name) + klog.Infof("Running readiness check for service %q", name) endTime := time.Now().Add(*serverStartTimeout) blockCh := make(chan error) defer close(blockCh) diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go index 14770308663ef..eae0e6a7e6e14 100644 --- a/test/e2e_node/util.go +++ b/test/e2e_node/util.go @@ -27,7 +27,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" apiv1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" @@ -195,7 +195,7 @@ func setKubeletConfiguration(f *framework.Framework, kubeCfg *kubeletconfig.Kube if !apiequality.Semantic.DeepEqual(*kubeCfg, *newKubeCfg) { return fmt.Errorf("still waiting for new configuration to take effect, will continue to watch /configz") } - glog.Infof("new configuration has taken effect") + klog.Infof("new configuration has taken effect") return nil }, restartGap, pollInterval).Should(BeNil()) @@ -238,11 +238,11 @@ func pollConfigz(timeout time.Duration, pollInterval time.Duration) *http.Respon Eventually(func() bool { resp, err = client.Do(req) if err != nil { - glog.Errorf("Failed to get /configz, retrying. Error: %v", err) + klog.Errorf("Failed to get /configz, retrying. Error: %v", err) return false } if resp.StatusCode != 200 { - glog.Errorf("/configz response status not 200, retrying. Response was: %+v", resp) + klog.Errorf("/configz response status not 200, retrying. 
Response was: %+v", resp) return false } return true diff --git a/test/images/apparmor-loader/BUILD b/test/images/apparmor-loader/BUILD index f033cc4ad8892..8347d5292a199 100644 --- a/test/images/apparmor-loader/BUILD +++ b/test/images/apparmor-loader/BUILD @@ -5,7 +5,7 @@ go_library( srcs = ["loader.go"], importpath = "k8s.io/kubernetes/test/images/apparmor-loader", visibility = ["//visibility:private"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) go_binary( diff --git a/test/images/apparmor-loader/loader.go b/test/images/apparmor-loader/loader.go index 5f1e2095e6d5e..a716bd986d242 100644 --- a/test/images/apparmor-loader/loader.go +++ b/test/images/apparmor-loader/loader.go @@ -29,7 +29,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -53,19 +53,19 @@ func main() { dirs = flag.Args() if len(dirs) == 0 { - glog.Errorf("Must specify at least one directory.") + klog.Errorf("Must specify at least one directory.") flag.Usage() os.Exit(1) } // Check that the required parser binary is found. if _, err := exec.LookPath(parser); err != nil { - glog.Exitf("Required binary %s not found in PATH", parser) + klog.Exitf("Required binary %s not found in PATH", parser) } // Check that loaded profiles can be read. if _, err := getLoadedProfiles(); err != nil { - glog.Exitf("Unable to access apparmor profiles: %v", err) + klog.Exitf("Unable to access apparmor profiles: %v", err) } if *poll < 0 { @@ -79,26 +79,26 @@ func main() { func runOnce() { if success, newProfiles := loadNewProfiles(); !success { if len(newProfiles) > 0 { - glog.Exitf("Not all profiles were successfully loaded. Loaded: %v", newProfiles) + klog.Exitf("Not all profiles were successfully loaded. Loaded: %v", newProfiles) } else { - glog.Exit("Error loading profiles.") + klog.Exit("Error loading profiles.") } } else { if len(newProfiles) > 0 { - glog.Infof("Successfully loaded profiles: %v", newProfiles) + klog.Infof("Successfully loaded profiles: %v", newProfiles) } else { - glog.Warning("No new profiles found.") + klog.Warning("No new profiles found.") } } } // Poll the directories indefinitely. func pollForever() { - glog.V(2).Infof("Polling %s every %s", strings.Join(dirs, ", "), poll.String()) + klog.V(2).Infof("Polling %s every %s", strings.Join(dirs, ", "), poll.String()) pollFn := func() { _, newProfiles := loadNewProfiles() if len(newProfiles) > 0 { - glog.V(2).Infof("Successfully loaded profiles: %v", newProfiles) + klog.V(2).Infof("Successfully loaded profiles: %v", newProfiles) } } pollFn() // Run immediately. @@ -111,7 +111,7 @@ func pollForever() { func loadNewProfiles() (success bool, newProfiles []string) { loadedProfiles, err := getLoadedProfiles() if err != nil { - glog.Errorf("Error reading loaded profiles: %v", err) + klog.Errorf("Error reading loaded profiles: %v", err) return false, nil } @@ -119,7 +119,7 @@ func loadNewProfiles() (success bool, newProfiles []string) { for _, dir := range dirs { infos, err := ioutil.ReadDir(dir) if err != nil { - glog.Warningf("Error reading %s: %v", dir, err) + klog.Warningf("Error reading %s: %v", dir, err) success = false continue } @@ -129,26 +129,26 @@ func loadNewProfiles() (success bool, newProfiles []string) { // If directory, or symlink to a directory, skip it. 
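
// Editor's note on the apparmor-loader hunks above: klog keeps glog's two
// terminating call families, and the loader uses both deliberately. The
// semantics below are carried over from glog and stated as an assumption:
func exitCleanly(err error) {
	klog.Exitf("configuration error: %v", err) // message only, then os.Exit(1)
}

func exitLoudly(err error) {
	klog.Fatalf("invariant violated: %v", err) // stack traces of all goroutines, then os.Exit(255)
}
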
resolvedInfo, err := resolveSymlink(dir, info) if err != nil { - glog.Warningf("Error resolving symlink: %v", err) + klog.Warningf("Error resolving symlink: %v", err) continue } if resolvedInfo.IsDir() { // Directory listing is shallow. - glog.V(4).Infof("Skipping directory %s", path) + klog.V(4).Infof("Skipping directory %s", path) continue } - glog.V(4).Infof("Scanning %s for new profiles", path) + klog.V(4).Infof("Scanning %s for new profiles", path) profiles, err := getProfileNames(path) if err != nil { - glog.Warningf("Error reading %s: %v", path, err) + klog.Warningf("Error reading %s: %v", path, err) success = false continue } if unloadedProfiles(loadedProfiles, profiles) { if err := loadProfiles(path); err != nil { - glog.Errorf("Could not load profiles: %v", err) + klog.Errorf("Could not load profiles: %v", err) success = false continue } @@ -171,7 +171,7 @@ func getProfileNames(path string) ([]string, error) { out, err := cmd.Output() if err != nil { if stderr.Len() > 0 { - glog.Warning(stderr.String()) + klog.Warning(stderr.String()) } return nil, fmt.Errorf("error reading profiles from %s: %v", path, err) } @@ -194,10 +194,10 @@ func loadProfiles(path string) error { stderr := &bytes.Buffer{} cmd.Stderr = stderr out, err := cmd.Output() - glog.V(2).Infof("Loading profiles from %s:\n%s", path, out) + klog.V(2).Infof("Loading profiles from %s:\n%s", path, out) if err != nil { if stderr.Len() > 0 { - glog.Warning(stderr.String()) + klog.Warning(stderr.String()) } return fmt.Errorf("error loading profiles from %s: %v", path, err) } diff --git a/test/images/logs-generator/BUILD b/test/images/logs-generator/BUILD index ce9086efeeea8..c632d7d15331f 100644 --- a/test/images/logs-generator/BUILD +++ b/test/images/logs-generator/BUILD @@ -17,7 +17,7 @@ go_library( importpath = "k8s.io/kubernetes/test/images/logs-generator", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/images/logs-generator/logs_generator.go b/test/images/logs-generator/logs_generator.go index b5ab2bc953338..27a65c3b798fb 100644 --- a/test/images/logs-generator/logs_generator.go +++ b/test/images/logs-generator/logs_generator.go @@ -21,8 +21,8 @@ import ( "fmt" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/klog" ) var ( @@ -47,11 +47,11 @@ func main() { flag.Parse() if *linesTotal <= 0 { - glog.Fatalf("Invalid total number of lines: %d", *linesTotal) + klog.Fatalf("Invalid total number of lines: %d", *linesTotal) } if *duration <= 0 { - glog.Fatalf("Invalid duration: %v", *duration) + klog.Fatalf("Invalid duration: %v", *duration) } generateLogs(*linesTotal, *duration) @@ -64,7 +64,7 @@ func generateLogs(linesTotal int, duration time.Duration) { ticker := time.NewTicker(delay) defer ticker.Stop() for id := 0; id < linesTotal; id++ { - glog.Info(generateLogLine(id)) + klog.Info(generateLogLine(id)) <-ticker.C } } diff --git a/test/images/webhook/BUILD b/test/images/webhook/BUILD index 5d75f0e94dc74..17f9f04925684 100644 --- a/test/images/webhook/BUILD +++ b/test/images/webhook/BUILD @@ -24,7 +24,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) 
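
Editor's note: every webhook hunk below funnels decode failures through a shared toAdmissionResponse helper that this patch never shows. A plausible reconstruction from the visible call sites (treat the exact shape as an assumption):

    // toAdmissionResponse wraps an error in an AdmissionResponse carrying the
    // error message, so an admitFunc can return it directly.
    func toAdmissionResponse(err error) *v1beta1.AdmissionResponse {
    	return &v1beta1.AdmissionResponse{
    		Result: &metav1.Status{Message: err.Error()},
    	}
    }
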
diff --git a/test/images/webhook/addlabel.go b/test/images/webhook/addlabel.go index 48ff86351cbf0..b6b12fe7f6561 100644 --- a/test/images/webhook/addlabel.go +++ b/test/images/webhook/addlabel.go @@ -19,9 +19,9 @@ package main import ( "encoding/json" - "github.com/golang/glog" "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" ) const ( @@ -35,7 +35,7 @@ const ( // Add a label {"added-label": "yes"} to the object func addLabel(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { - glog.V(2).Info("calling add-label") + klog.V(2).Info("calling add-label") obj := struct { metav1.ObjectMeta Data map[string]string @@ -43,7 +43,7 @@ func addLabel(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { raw := ar.Request.Object.Raw err := json.Unmarshal(raw, &obj) if err != nil { - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } diff --git a/test/images/webhook/alwaysdeny.go b/test/images/webhook/alwaysdeny.go index 8c8796e18e150..8e417ac020eed 100644 --- a/test/images/webhook/alwaysdeny.go +++ b/test/images/webhook/alwaysdeny.go @@ -17,14 +17,14 @@ limitations under the License. package main import ( - "github.com/golang/glog" "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" ) // alwaysDeny all requests made to this function. func alwaysDeny(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { - glog.V(2).Info("calling always-deny") + klog.V(2).Info("calling always-deny") reviewResponse := v1beta1.AdmissionResponse{} reviewResponse.Allowed = false reviewResponse.Result = &metav1.Status{Message: "this webhook denies all requests"} diff --git a/test/images/webhook/config.go b/test/images/webhook/config.go index 5c84a2b8ec72c..2aadace809def 100644 --- a/test/images/webhook/config.go +++ b/test/images/webhook/config.go @@ -20,7 +20,7 @@ import ( "crypto/tls" "flag" - "github.com/golang/glog" + "k8s.io/klog" ) // Config contains the server (the webhook) cert and key. @@ -40,7 +40,7 @@ func (c *Config) addFlags() { func configTLS(config Config) *tls.Config { sCert, err := tls.LoadX509KeyPair(config.CertFile, config.KeyFile) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } return &tls.Config{ Certificates: []tls.Certificate{sCert}, diff --git a/test/images/webhook/configmap.go b/test/images/webhook/configmap.go index 147c6e748544c..25ba7f4efa321 100644 --- a/test/images/webhook/configmap.go +++ b/test/images/webhook/configmap.go @@ -17,10 +17,10 @@ limitations under the License. package main import ( - "github.com/golang/glog" "k8s.io/api/admission/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" ) const ( @@ -34,10 +34,10 @@ const ( // deny configmaps with specific key-value pair. 
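
// Editor's note: configTLS above produces the webhook server's *tls.Config;
// the wiring into an HTTPS listener is outside this diff. A sketch under
// stated assumptions (the port, the path, and the servePods adapter -- which
// is itself sketched after the pods.go hunks below -- are hypothetical):
func runWebhookServer(config Config) {
	http.HandleFunc("/pods", servePods)
	server := &http.Server{
		Addr:      ":443",
		TLSConfig: configTLS(config),
	}
	klog.Fatal(server.ListenAndServeTLS("", "")) // cert and key already live in TLSConfig
}
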
func admitConfigMaps(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { - glog.V(2).Info("admitting configmaps") + klog.V(2).Info("admitting configmaps") configMapResource := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} if ar.Request.Resource != configMapResource { - glog.Errorf("expect resource to be %s", configMapResource) + klog.Errorf("expect resource to be %s", configMapResource) return nil } @@ -45,7 +45,7 @@ func admitConfigMaps(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { configmap := corev1.ConfigMap{} deserializer := codecs.UniversalDeserializer() if _, _, err := deserializer.Decode(raw, nil, &configmap); err != nil { - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } reviewResponse := v1beta1.AdmissionResponse{} @@ -62,10 +62,10 @@ func admitConfigMaps(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { } func mutateConfigmaps(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { - glog.V(2).Info("mutating configmaps") + klog.V(2).Info("mutating configmaps") configMapResource := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} if ar.Request.Resource != configMapResource { - glog.Errorf("expect resource to be %s", configMapResource) + klog.Errorf("expect resource to be %s", configMapResource) return nil } @@ -73,7 +73,7 @@ func mutateConfigmaps(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { configmap := corev1.ConfigMap{} deserializer := codecs.UniversalDeserializer() if _, _, err := deserializer.Decode(raw, nil, &configmap); err != nil { - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } reviewResponse := v1beta1.AdmissionResponse{} diff --git a/test/images/webhook/crd.go b/test/images/webhook/crd.go index 1114058c5e237..977a828002d18 100644 --- a/test/images/webhook/crd.go +++ b/test/images/webhook/crd.go @@ -22,18 +22,18 @@ import ( apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/golang/glog" "k8s.io/api/admission/v1beta1" + "k8s.io/klog" ) // This function expects all CRDs submitted to it to be apiextensions.k8s.io/v1beta1 // TODO: When apiextensions.k8s.io/v1 is added we will need to update this function. 
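
// Editor's note: the admit and mutate functions in these webhook files all
// open with the same decode-or-reject preamble. Factored out here purely for
// illustration (the patch keeps it inline; codecs comes from the package's
// serializer setup):
func decodeInto(ar v1beta1.AdmissionReview, obj runtime.Object) *v1beta1.AdmissionResponse {
	deserializer := codecs.UniversalDeserializer()
	if _, _, err := deserializer.Decode(ar.Request.Object.Raw, nil, obj); err != nil {
		klog.Error(err)
		return toAdmissionResponse(err) // non-nil means "reject with this response"
	}
	return nil // nil means decoding succeeded
}
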
func admitCRD(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { - glog.V(2).Info("admitting crd") + klog.V(2).Info("admitting crd") crdResource := metav1.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1beta1", Resource: "customresourcedefinitions"} if ar.Request.Resource != crdResource { err := fmt.Errorf("expect resource to be %s", crdResource) - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } @@ -41,7 +41,7 @@ func admitCRD(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { crd := apiextensionsv1beta1.CustomResourceDefinition{} deserializer := codecs.UniversalDeserializer() if _, _, err := deserializer.Decode(raw, nil, &crd); err != nil { - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } reviewResponse := v1beta1.AdmissionResponse{} diff --git a/test/images/webhook/customresource.go b/test/images/webhook/customresource.go index 273ae4f08e6ca..53e235f3fa155 100644 --- a/test/images/webhook/customresource.go +++ b/test/images/webhook/customresource.go @@ -21,8 +21,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/golang/glog" "k8s.io/api/admission/v1beta1" + "k8s.io/klog" ) const ( @@ -35,7 +35,7 @@ const ( ) func mutateCustomResource(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { - glog.V(2).Info("mutating custom resource") + klog.V(2).Info("mutating custom resource") cr := struct { metav1.ObjectMeta Data map[string]string @@ -44,7 +44,7 @@ func mutateCustomResource(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse raw := ar.Request.Object.Raw err := json.Unmarshal(raw, &cr) if err != nil { - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } @@ -63,7 +63,7 @@ func mutateCustomResource(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse } func admitCustomResource(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { - glog.V(2).Info("admitting custom resource") + klog.V(2).Info("admitting custom resource") cr := struct { metav1.ObjectMeta Data map[string]string @@ -72,7 +72,7 @@ func admitCustomResource(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse raw := ar.Request.Object.Raw err := json.Unmarshal(raw, &cr) if err != nil { - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } diff --git a/test/images/webhook/main.go b/test/images/webhook/main.go index 37c0a1de8a95b..0d9460ca8bab0 100644 --- a/test/images/webhook/main.go +++ b/test/images/webhook/main.go @@ -23,9 +23,9 @@ import ( "io/ioutil" "net/http" - "github.com/golang/glog" "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" // TODO: try this library to see if it generates correct json patch // https://github.com/mattbaird/jsonpatch ) @@ -56,11 +56,11 @@ func serve(w http.ResponseWriter, r *http.Request, admit admitFunc) { // verify the content type is accurate contentType := r.Header.Get("Content-Type") if contentType != "application/json" { - glog.Errorf("contentType=%s, expect application/json", contentType) + klog.Errorf("contentType=%s, expect application/json", contentType) return } - glog.V(2).Info(fmt.Sprintf("handling request: %s", body)) + klog.V(2).Info(fmt.Sprintf("handling request: %s", body)) // The AdmissionReview that was sent to the webhook requestedAdmissionReview := v1beta1.AdmissionReview{} @@ -70,7 +70,7 @@ func serve(w http.ResponseWriter, r *http.Request, admit admitFunc) { deserializer := codecs.UniversalDeserializer() if _, _, err := deserializer.Decode(body, nil, &requestedAdmissionReview); err != nil { - 
glog.Error(err) + klog.Error(err) responseAdmissionReview.Response = toAdmissionResponse(err) } else { // pass to admitFunc @@ -80,14 +80,14 @@ func serve(w http.ResponseWriter, r *http.Request, admit admitFunc) { // Return the same UID responseAdmissionReview.Response.UID = requestedAdmissionReview.Request.UID - glog.V(2).Info(fmt.Sprintf("sending response: %v", responseAdmissionReview.Response)) + klog.V(2).Info(fmt.Sprintf("sending response: %v", responseAdmissionReview.Response)) respBytes, err := json.Marshal(responseAdmissionReview) if err != nil { - glog.Error(err) + klog.Error(err) } if _, err := w.Write(respBytes); err != nil { - glog.Error(err) + klog.Error(err) } } diff --git a/test/images/webhook/pods.go b/test/images/webhook/pods.go index ee0bbb2e1ee62..8338551db92ef 100644 --- a/test/images/webhook/pods.go +++ b/test/images/webhook/pods.go @@ -23,8 +23,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/golang/glog" "k8s.io/api/admission/v1beta1" + "k8s.io/klog" ) const ( @@ -35,11 +35,11 @@ const ( // only allow pods to pull images from specific registry. func admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { - glog.V(2).Info("admitting pods") + klog.V(2).Info("admitting pods") podResource := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} if ar.Request.Resource != podResource { err := fmt.Errorf("expect resource to be %s", podResource) - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } @@ -47,7 +47,7 @@ func admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { pod := corev1.Pod{} deserializer := codecs.UniversalDeserializer() if _, _, err := deserializer.Decode(raw, nil, &pod); err != nil { - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } reviewResponse := v1beta1.AdmissionResponse{} @@ -78,10 +78,10 @@ func admitPods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { } func mutatePods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { - glog.V(2).Info("mutating pods") + klog.V(2).Info("mutating pods") podResource := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} if ar.Request.Resource != podResource { - glog.Errorf("expect resource to be %s", podResource) + klog.Errorf("expect resource to be %s", podResource) return nil } @@ -89,7 +89,7 @@ func mutatePods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { pod := corev1.Pod{} deserializer := codecs.UniversalDeserializer() if _, _, err := deserializer.Decode(raw, nil, &pod); err != nil { - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } reviewResponse := v1beta1.AdmissionResponse{} @@ -105,19 +105,19 @@ func mutatePods(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { // denySpecificAttachment denies `kubectl attach to-be-attached-pod -i -c=container1" // or equivalent client requests. 
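
// Editor's note: serve in main.go above adapts an admitFunc to an
// http.HandlerFunc; the per-path adapters are not part of this diff. A
// hypothetical sketch matching the admit functions shown in these files:
func servePods(w http.ResponseWriter, r *http.Request)       { serve(w, r, admitPods) }
func serveAddLabel(w http.ResponseWriter, r *http.Request)   { serve(w, r, addLabel) }
func serveConfigmaps(w http.ResponseWriter, r *http.Request) { serve(w, r, admitConfigMaps) }
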
func denySpecificAttachment(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { - glog.V(2).Info("handling attaching pods") + klog.V(2).Info("handling attaching pods") if ar.Request.Name != "to-be-attached-pod" { return &v1beta1.AdmissionResponse{Allowed: true} } podResource := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} if e, a := podResource, ar.Request.Resource; e != a { err := fmt.Errorf("expect resource to be %s, got %s", e, a) - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } if e, a := "attach", ar.Request.SubResource; e != a { err := fmt.Errorf("expect subresource to be %s, got %s", e, a) - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } @@ -125,10 +125,10 @@ func denySpecificAttachment(ar v1beta1.AdmissionReview) *v1beta1.AdmissionRespon podAttachOptions := corev1.PodAttachOptions{} deserializer := codecs.UniversalDeserializer() if _, _, err := deserializer.Decode(raw, nil, &podAttachOptions); err != nil { - glog.Error(err) + klog.Error(err) return toAdmissionResponse(err) } - glog.V(2).Info(fmt.Sprintf("podAttachOptions=%#v\n", podAttachOptions)) + klog.V(2).Info(fmt.Sprintf("podAttachOptions=%#v\n", podAttachOptions)) if !podAttachOptions.Stdin || podAttachOptions.Container != "container1" { return &v1beta1.AdmissionResponse{Allowed: true} } diff --git a/test/integration/apiserver/BUILD b/test/integration/apiserver/BUILD index 8261c4056985c..ce40df99c5e64 100644 --- a/test/integration/apiserver/BUILD +++ b/test/integration/apiserver/BUILD @@ -51,9 +51,9 @@ go_test( "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library", "//staging/src/k8s.io/client-go/tools/pager:go_default_library", "//test/integration/framework:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/k8s.io/gengo/examples/set-gen/sets:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/integration/apiserver/apiserver_test.go b/test/integration/apiserver/apiserver_test.go index 38e604faf8438..37606dcd13f72 100644 --- a/test/integration/apiserver/apiserver_test.go +++ b/test/integration/apiserver/apiserver_test.go @@ -26,7 +26,6 @@ import ( "reflect" "testing" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/meta" @@ -38,6 +37,7 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/pager" + "k8s.io/klog" "k8s.io/kubernetes/pkg/api/testapi" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/master" @@ -68,7 +68,7 @@ func verifyStatusCode(t *testing.T, verb, URL, body string, expectedStatusCode i t.Fatalf("unexpected error: %v in sending req with verb: %s, URL: %s and body: %s", err, verb, URL, body) } transport := http.DefaultTransport - glog.Infof("Sending request: %v", req) + klog.Infof("Sending request: %v", req) resp, err := transport.RoundTrip(req) if err != nil { t.Fatalf("unexpected error: %v in req: %v", err, req) diff --git a/test/integration/auth/BUILD b/test/integration/auth/BUILD index bd7254767aef2..af4345306b9d0 100644 --- a/test/integration/auth/BUILD +++ b/test/integration/auth/BUILD @@ -90,8 +90,8 @@ go_test( "//test/integration:go_default_library", "//test/integration/etcd:go_default_library", "//test/integration/framework:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/gopkg.in/square/go-jose.v2/jwt:go_default_library", + 
"//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ], ) diff --git a/test/integration/auth/rbac_test.go b/test/integration/auth/rbac_test.go index 9c6865c9e9bf8..effffb12228de 100644 --- a/test/integration/auth/rbac_test.go +++ b/test/integration/auth/rbac_test.go @@ -27,7 +27,7 @@ import ( "testing" "time" - "github.com/golang/glog" + "k8s.io/klog" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -562,7 +562,7 @@ func TestRBAC(t *testing.T) { // // go test -v -tags integration -run RBAC -args -v 10 // - glog.V(8).Infof("case %d, req %d: %s\n%s\n", i, j, reqDump, respDump) + klog.V(8).Infof("case %d, req %d: %s\n%s\n", i, j, reqDump, respDump) t.Errorf("case %d, req %d: %s expected %q got %q", i, j, r, statusCode(r.expectedStatus), statusCode(resp.StatusCode)) } diff --git a/test/integration/framework/BUILD b/test/integration/framework/BUILD index 9fb021ed33aee..dbf6cb4cbb147 100644 --- a/test/integration/framework/BUILD +++ b/test/integration/framework/BUILD @@ -60,8 +60,8 @@ go_library( "//test/e2e/framework:go_default_library", "//test/utils:go_default_library", "//vendor/github.com/go-openapi/spec:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/integration/framework/etcd.go b/test/integration/framework/etcd.go index d7f43ec2a388c..18ae79904d573 100644 --- a/test/integration/framework/etcd.go +++ b/test/integration/framework/etcd.go @@ -26,7 +26,7 @@ import ( "path/filepath" "strings" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/util/env" ) @@ -69,11 +69,11 @@ func startEtcd() (func(), error) { etcdURL = env.GetEnvAsStringOrFallback("KUBE_INTEGRATION_ETCD_URL", "http://127.0.0.1:2379") conn, err := net.Dial("tcp", strings.TrimPrefix(etcdURL, "http://")) if err == nil { - glog.Infof("etcd already running at %s", etcdURL) + klog.Infof("etcd already running at %s", etcdURL) conn.Close() return func() {}, nil } - glog.V(1).Infof("could not connect to etcd: %v", err) + klog.V(1).Infof("could not connect to etcd: %v", err) // TODO: Check for valid etcd version. 
etcdPath, err := getEtcdPath() @@ -86,13 +86,13 @@ func startEtcd() (func(), error) { return nil, fmt.Errorf("could not get a port: %v", err) } etcdURL = fmt.Sprintf("http://127.0.0.1:%d", etcdPort) - glog.Infof("starting etcd on %s", etcdURL) + klog.Infof("starting etcd on %s", etcdURL) etcdDataDir, err := ioutil.TempDir(os.TempDir(), "integration_test_etcd_data") if err != nil { return nil, fmt.Errorf("unable to make temp etcd data dir: %v", err) } - glog.Infof("storing etcd data in: %v", etcdDataDir) + klog.Infof("storing etcd data in: %v", etcdDataDir) ctx, cancel := context.WithCancel(context.Background()) cmd := exec.CommandContext( @@ -112,10 +112,10 @@ func startEtcd() (func(), error) { stop := func() { cancel() err := cmd.Wait() - glog.Infof("etcd exit status: %v", err) + klog.Infof("etcd exit status: %v", err) err = os.RemoveAll(etcdDataDir) if err != nil { - glog.Warningf("error during etcd cleanup: %v", err) + klog.Warningf("error during etcd cleanup: %v", err) } } @@ -129,7 +129,7 @@ func startEtcd() (func(), error) { func EtcdMain(tests func() int) { stop, err := startEtcd() if err != nil { - glog.Fatalf("cannot run integration tests: unable to start etcd: %v", err) + klog.Fatalf("cannot run integration tests: unable to start etcd: %v", err) } result := tests() stop() // Don't defer this. See os.Exit documentation. diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index 52df5c97b445c..d5512120b5935 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -24,8 +24,8 @@ import ( "time" "github.com/go-openapi/spec" - "github.com/golang/glog" "github.com/pborman/uuid" + "k8s.io/klog" apps "k8s.io/api/apps/v1beta1" auditreg "k8s.io/api/auditregistration/v1alpha1" @@ -178,14 +178,14 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv clientset, err := clientset.NewForConfig(masterConfig.GenericConfig.LoopbackClientConfig) if err != nil { - glog.Fatal(err) + klog.Fatal(err) } masterConfig.ExtraConfig.VersionedInformers = informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout) m, err = masterConfig.Complete().New(genericapiserver.NewEmptyDelegate()) if err != nil { closeFn() - glog.Fatalf("error in bringing up the master: %v", err) + klog.Fatalf("error in bringing up the master: %v", err) } if masterReceiver != nil { masterReceiver.SetMaster(m) @@ -202,7 +202,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv privilegedClient, err := restclient.RESTClientFor(&cfg) if err != nil { closeFn() - glog.Fatal(err) + klog.Fatal(err) } var lastHealthContent []byte err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { @@ -217,8 +217,8 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv }) if err != nil { closeFn() - glog.Errorf("last health content: %q", string(lastHealthContent)) - glog.Fatal(err) + klog.Errorf("last health content: %q", string(lastHealthContent)) + klog.Fatal(err) } return m, s, closeFn diff --git a/test/integration/framework/perf_utils.go b/test/integration/framework/perf_utils.go index 8897e93f6021d..77bee4232013a 100644 --- a/test/integration/framework/perf_utils.go +++ b/test/integration/framework/perf_utils.go @@ -24,7 +24,7 @@ import ( e2eframework "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -51,7 +51,7 
@@ func (p *IntegrationTestNodePreparer) PrepareNodes() error { numNodes += v.Count } - glog.Infof("Making %d nodes", numNodes) + klog.Infof("Making %d nodes", numNodes) baseNode := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ GenerateName: p.nodeNamePrefix, @@ -77,7 +77,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error { } } if err != nil { - glog.Fatalf("Error creating node: %v", err) + klog.Fatalf("Error creating node: %v", err) } } @@ -88,7 +88,7 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error { sum += v.Count for ; index < sum; index++ { if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil { - glog.Errorf("Aborting node preparation: %v", err) + klog.Errorf("Aborting node preparation: %v", err) return err } } @@ -100,7 +100,7 @@ func (p *IntegrationTestNodePreparer) CleanupNodes() error { nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client) for i := range nodes.Items { if err := p.client.CoreV1().Nodes().Delete(nodes.Items[i].Name, &metav1.DeleteOptions{}); err != nil { - glog.Errorf("Error while deleting Node: %v", err) + klog.Errorf("Error while deleting Node: %v", err) } } return nil diff --git a/test/integration/ipamperf/BUILD b/test/integration/ipamperf/BUILD index f1113b2310b79..52b0889b6a70c 100644 --- a/test/integration/ipamperf/BUILD +++ b/test/integration/ipamperf/BUILD @@ -20,7 +20,7 @@ go_test( "//staging/src/k8s.io/client-go/rest:go_default_library", "//test/integration/framework:go_default_library", "//test/integration/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -63,8 +63,8 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", "//test/integration/util:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/api/compute/v0.beta:go_default_library", "//vendor/google.golang.org/api/compute/v1:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/integration/ipamperf/ipam_test.go b/test/integration/ipamperf/ipam_test.go index 8c0369bbe0c81..afaaa75f81261 100644 --- a/test/integration/ipamperf/ipam_test.go +++ b/test/integration/ipamperf/ipam_test.go @@ -25,7 +25,7 @@ import ( "testing" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/informers" @@ -65,7 +65,7 @@ func setupAllocator(apiURL string, config *Config, clusterCIDR, serviceCIDR *net func runTest(t *testing.T, apiURL string, config *Config, clusterCIDR, serviceCIDR *net.IPNet, subnetMaskSize int) (*Results, error) { t.Helper() - glog.Infof("Running test %s", t.Name()) + klog.Infof("Running test %s", t.Name()) defer deleteNodes(apiURL, config) // cleanup nodes after controller shutdown @@ -85,7 +85,7 @@ func runTest(t *testing.T, apiURL string, config *Config, clusterCIDR, serviceCI } results := o.Results(t.Name(), config) - glog.Infof("Results: %s", results) + klog.Infof("Results: %s", results) if !results.Succeeded { t.Errorf("%s: Not all allocations succeeded", t.Name()) } @@ -95,16 +95,16 @@ func runTest(t *testing.T, apiURL string, config *Config, clusterCIDR, serviceCI func logResults(allResults []*Results) { jStr, err := json.MarshalIndent(allResults, "", " ") if err != nil { - glog.Errorf("Error formatting results: %v", err) + klog.Errorf("Error formatting results: %v", err) return } if resultsLogFile != "" { - glog.Infof("Logging 
results to %s", resultsLogFile) + klog.Infof("Logging results to %s", resultsLogFile) if err := ioutil.WriteFile(resultsLogFile, jStr, os.FileMode(0644)); err != nil { - glog.Errorf("Error logging results to %s: %v", resultsLogFile, err) + klog.Errorf("Error logging results to %s: %v", resultsLogFile, err) } } - glog.Infof("AllResults:\n%s", string(jStr)) + klog.Infof("AllResults:\n%s", string(jStr)) } func TestPerformance(t *testing.T) { diff --git a/test/integration/ipamperf/main_test.go b/test/integration/ipamperf/main_test.go index 401ad4528363f..0ade6c875757a 100644 --- a/test/integration/ipamperf/main_test.go +++ b/test/integration/ipamperf/main_test.go @@ -20,7 +20,7 @@ import ( "flag" "testing" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" "k8s.io/kubernetes/test/integration/framework" ) @@ -59,7 +59,7 @@ func TestMain(m *testing.M) { case string(ipam.IPAMFromClusterAllocatorType): customConfig.AllocatorType = ipam.IPAMFromClusterAllocatorType default: - glog.Fatalf("Unknown allocator type: %s", allocator) + klog.Fatalf("Unknown allocator type: %s", allocator) } framework.EtcdMain(m.Run) diff --git a/test/integration/ipamperf/results.go b/test/integration/ipamperf/results.go index 8880477b9b001..cef7942b100a3 100644 --- a/test/integration/ipamperf/results.go +++ b/test/integration/ipamperf/results.go @@ -23,12 +23,12 @@ import ( "sync" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" nodeutil "k8s.io/kubernetes/pkg/controller/util/node" ) @@ -96,7 +96,7 @@ func NewObserver(clientSet *clientset.Clientset, numNodes int) *Observer { // Call Results() to get the test results after starting observer. 
func (o *Observer) StartObserving() error { o.monitor() - glog.Infof("Test observer started") + klog.Infof("Test observer started") return nil } @@ -174,12 +174,12 @@ func (o *Observer) monitor() { nTime.podCIDR = newNode.Spec.PodCIDR o.numAllocated++ if o.numAllocated%10 == 0 { - glog.Infof("progress: %d/%d - %.2d%%", o.numAllocated, o.numNodes, (o.numAllocated * 100.0 / o.numNodes)) + klog.Infof("progress: %d/%d - %.2d%%", o.numAllocated, o.numNodes, (o.numAllocated * 100.0 / o.numNodes)) } // do following check only if numAllocated is modified, as otherwise, redundant updates // can cause wg.Done() to be called multiple times, causing a panic if o.numAdded == o.numNodes && o.numAllocated == o.numNodes { - glog.Info("All nodes assigned podCIDR") + klog.Info("All nodes assigned podCIDR") o.wg.Done() } } diff --git a/test/integration/ipamperf/util.go b/test/integration/ipamperf/util.go index 34c1c175ccefe..1b6e25b48749d 100644 --- a/test/integration/ipamperf/util.go +++ b/test/integration/ipamperf/util.go @@ -19,7 +19,6 @@ package ipamperf import ( "time" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -27,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" + "k8s.io/klog" ) const ( @@ -54,7 +54,7 @@ var ( ) func deleteNodes(apiURL string, config *Config) { - glog.Info("Deleting nodes") + klog.Info("Deleting nodes") clientSet := clientset.NewForConfigOrDie(&restclient.Config{ Host: apiURL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}, @@ -63,7 +63,7 @@ func deleteNodes(apiURL string, config *Config) { }) noGrace := int64(0) if err := clientSet.CoreV1().Nodes().DeleteCollection(&metav1.DeleteOptions{GracePeriodSeconds: &noGrace}, metav1.ListOptions{}); err != nil { - glog.Errorf("Error deleting node: %v", err) + klog.Errorf("Error deleting node: %v", err) } } @@ -74,22 +74,22 @@ func createNodes(apiURL string, config *Config) error { QPS: float32(config.CreateQPS), Burst: config.CreateQPS, }) - glog.Infof("Creating %d nodes", config.NumNodes) + klog.Infof("Creating %d nodes", config.NumNodes) for i := 0; i < config.NumNodes; i++ { var err error for j := 0; j < maxCreateRetries; j++ { if _, err = clientSet.CoreV1().Nodes().Create(baseNodeTemplate); err != nil && errors.IsServerTimeout(err) { - glog.Infof("Server timeout creating nodes, retrying after %v", retryDelay) + klog.Infof("Server timeout creating nodes, retrying after %v", retryDelay) time.Sleep(retryDelay) continue } break } if err != nil { - glog.Errorf("Error creating nodes: %v", err) + klog.Errorf("Error creating nodes: %v", err) return err } } - glog.Infof("%d nodes created", config.NumNodes) + klog.Infof("%d nodes created", config.NumNodes) return nil } diff --git a/test/integration/master/BUILD b/test/integration/master/BUILD index 0bb76a3fd4c11..87c9afda8fe2f 100644 --- a/test/integration/master/BUILD +++ b/test/integration/master/BUILD @@ -138,53 +138,53 @@ go_library( ] + select({ "@io_bazel_rules_go//go/platform:android": [ "//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:darwin": [ 
"//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:dragonfly": [ "//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:freebsd": [ "//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ "//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ "//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:netbsd": [ "//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:openbsd": [ "//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:plan9": [ "//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "@io_bazel_rules_go//go/platform:solaris": [ "//staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/google.golang.org/grpc:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], "//conditions:default": [], }), diff --git a/test/integration/master/kms_plugin_mock.go b/test/integration/master/kms_plugin_mock.go index ad7c73f049c49..a731e1ddcb17d 100644 --- a/test/integration/master/kms_plugin_mock.go +++ b/test/integration/master/kms_plugin_mock.go @@ -26,8 +26,8 @@ import ( "google.golang.org/grpc" - "github.com/golang/glog" kmsapi "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1" + "k8s.io/klog" ) const ( @@ -51,7 +51,7 @@ func NewBase64Plugin() (*base64Plugin, error) { if err != nil { return nil, fmt.Errorf("failed to listen on the unix socket, error: %v", err) } - glog.Infof("Listening on %s", sockFile) + klog.Infof("Listening on %s", sockFile) server := grpc.NewServer() @@ -78,7 +78,7 @@ func (s *base64Plugin) Version(ctx context.Context, request *kmsapi.VersionReque } func (s *base64Plugin) Decrypt(ctx context.Context, request *kmsapi.DecryptRequest) 
(*kmsapi.DecryptResponse, error) { - glog.Infof("Received Decrypt Request for DEK: %s", string(request.Cipher)) + klog.Infof("Received Decrypt Request for DEK: %s", string(request.Cipher)) buf := make([]byte, base64.StdEncoding.DecodedLen(len(request.Cipher))) n, err := base64.StdEncoding.Decode(buf, request.Cipher) @@ -90,7 +90,7 @@ func (s *base64Plugin) Decrypt(ctx context.Context, request *kmsapi.DecryptReque } func (s *base64Plugin) Encrypt(ctx context.Context, request *kmsapi.EncryptRequest) (*kmsapi.EncryptResponse, error) { - glog.Infof("Received Encrypt Request for DEK: %x", request.Plain) + klog.Infof("Received Encrypt Request for DEK: %x", request.Plain) s.encryptRequest <- request buf := make([]byte, base64.StdEncoding.EncodedLen(len(request.Plain))) diff --git a/test/integration/metrics/BUILD b/test/integration/metrics/BUILD index 55718ed466ec7..05bd17a9ca2f7 100644 --- a/test/integration/metrics/BUILD +++ b/test/integration/metrics/BUILD @@ -40,8 +40,8 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//test/integration/framework:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/golang/protobuf/proto:go_default_library", "//vendor/github.com/prometheus/client_model/go:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/integration/metrics/metrics_test.go b/test/integration/metrics/metrics_test.go index 9269f7ba4e162..6eff0c1908e21 100644 --- a/test/integration/metrics/metrics_test.go +++ b/test/integration/metrics/metrics_test.go @@ -30,9 +30,9 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/integration/framework" - "github.com/golang/glog" "github.com/golang/protobuf/proto" prometheuspb "github.com/prometheus/client_model/go" + "k8s.io/klog" ) const scrapeRequestHeader = "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text" @@ -66,7 +66,7 @@ func scrapeMetrics(s *httptest.Server) ([]*prometheuspb.MetricFamily, error) { if err := proto.UnmarshalText(scanner.Text(), &metric); err != nil { return nil, fmt.Errorf("Failed to unmarshal line of metrics response: %v", err) } - glog.V(4).Infof("Got metric %q", metric.GetName()) + klog.V(4).Infof("Got metric %q", metric.GetName()) metrics = append(metrics, &metric) } return metrics, nil diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD index 0956657c99e69..4ef611c99678b 100644 --- a/test/integration/scheduler/BUILD +++ b/test/integration/scheduler/BUILD @@ -64,7 +64,7 @@ go_test( "//test/integration/framework:go_default_library", "//test/utils:go_default_library", "//test/utils/image:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/integration/scheduler/preemption_test.go b/test/integration/scheduler/preemption_test.go index 27dd50cdf42dc..a1fc044a78c7d 100644 --- a/test/integration/scheduler/preemption_test.go +++ b/test/integration/scheduler/preemption_test.go @@ -36,7 +36,7 @@ import ( _ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" testutils "k8s.io/kubernetes/test/utils" - "github.com/golang/glog" + "k8s.io/klog" ) var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300) @@ -481,7 +481,7 @@ func TestPreemptionStarvation(t *testing.T) { t.Errorf("Preemptor pod %v didn't get scheduled: %v", preemptor.Name, err) } // Cleanup - glog.Info("Cleaning 
up all pods...") + klog.Info("Cleaning up all pods...") allPods := pendingPods allPods = append(allPods, runningPods...) allPods = append(allPods, preemptor) diff --git a/test/integration/scheduler/volume_binding_test.go b/test/integration/scheduler/volume_binding_test.go index 9e694f8785a4c..94092320f6285 100644 --- a/test/integration/scheduler/volume_binding_test.go +++ b/test/integration/scheduler/volume_binding_test.go @@ -26,7 +26,7 @@ import ( "testing" "time" - "github.com/golang/glog" + "k8s.io/klog" "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -191,7 +191,7 @@ func TestVolumeBinding(t *testing.T) { } for name, test := range cases { - glog.Infof("Running test %v", name) + klog.Infof("Running test %v", name) // Create two StorageClasses suffix := rand.String(4) @@ -335,7 +335,7 @@ func TestVolumeBindingRescheduling(t *testing.T) { } for name, test := range cases { - glog.Infof("Running test %v", name) + klog.Infof("Running test %v", name) if test.pod == nil { t.Fatal("pod is required for this test") @@ -363,7 +363,7 @@ func TestVolumeBindingRescheduling(t *testing.T) { } // Wait for pod is unschedulable. - glog.Infof("Waiting for pod is unschedulable") + klog.Infof("Waiting for pod is unschedulable") if err := waitForPodUnschedulable(config.client, test.pod); err != nil { t.Errorf("Failed as Pod %s was not unschedulable: %v", test.pod.Name, err) } @@ -373,12 +373,12 @@ func TestVolumeBindingRescheduling(t *testing.T) { // Wait for pod is scheduled or unscheduable. if !test.shouldFail { - glog.Infof("Waiting for pod is scheduled") + klog.Infof("Waiting for pod is scheduled") if err := waitForPodToSchedule(config.client, test.pod); err != nil { t.Errorf("Failed to schedule Pod %q: %v", test.pod.Name, err) } } else { - glog.Infof("Waiting for pod is unschedulable") + klog.Infof("Waiting for pod is unschedulable") if err := waitForPodUnschedulable(config.client, test.pod); err != nil { t.Errorf("Failed as Pod %s was not unschedulable: %v", test.pod.Name, err) } @@ -737,7 +737,7 @@ func TestVolumeProvision(t *testing.T) { } for name, test := range cases { - glog.Infof("Running test %v", name) + klog.Infof("Running test %v", name) // Create StorageClasses suffix := rand.String(4) diff --git a/test/integration/scheduler_perf/BUILD b/test/integration/scheduler_perf/BUILD index ab2aeeb0eaa5e..abaadbe35b726 100644 --- a/test/integration/scheduler_perf/BUILD +++ b/test/integration/scheduler_perf/BUILD @@ -42,7 +42,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//test/integration/framework:go_default_library", "//test/utils:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/integration/scheduler_perf/scheduler_bench_test.go b/test/integration/scheduler_perf/scheduler_bench_test.go index 30403d14080c2..85af95672340a 100644 --- a/test/integration/scheduler_perf/scheduler_bench_test.go +++ b/test/integration/scheduler_perf/scheduler_bench_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/kubernetes/test/integration/framework" testutils "k8s.io/kubernetes/test/utils" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -227,7 +227,7 @@ func benchmarkScheduling(numNodes, numExistingPods, minPods int, "scheduler-perf-", ) if err := nodePreparer.PrepareNodes(); err != nil { - glog.Fatalf("%v", err) + klog.Fatalf("%v", err) } defer nodePreparer.CleanupNodes() @@ -239,7 +239,7 @@ func benchmarkScheduling(numNodes, numExistingPods, minPods int, for { scheduled, err := 
schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything()) if err != nil { - glog.Fatalf("%v", err) + klog.Fatalf("%v", err) } if len(scheduled) >= numExistingPods { break @@ -257,7 +257,7 @@ func benchmarkScheduling(numNodes, numExistingPods, minPods int, // TODO: Setup watch on apiserver and wait until all pods scheduled. scheduled, err := schedulerConfigFactory.GetScheduledPodLister().List(labels.Everything()) if err != nil { - glog.Fatalf("%v", err) + klog.Fatalf("%v", err) } if len(scheduled) >= numExistingPods+b.N { break diff --git a/test/integration/scheduler_perf/scheduler_test.go b/test/integration/scheduler_perf/scheduler_test.go index e8b232b8e2d60..80e97d6f63b12 100644 --- a/test/integration/scheduler_perf/scheduler_test.go +++ b/test/integration/scheduler_perf/scheduler_test.go @@ -18,11 +18,11 @@ package benchmark import ( "fmt" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/klog" "k8s.io/kubernetes/pkg/scheduler/factory" testutils "k8s.io/kubernetes/test/utils" "math" @@ -137,7 +137,7 @@ func schedulePods(config *testConfig) int32 { time.Sleep(50 * time.Millisecond) scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything()) if err != nil { - glog.Fatalf("%v", err) + klog.Fatalf("%v", err) } // 30,000 pods -> wait till at least 300 are scheduled to start measuring. // TODO Find out why sometimes there may be scheduling blips in the beginning. @@ -155,7 +155,7 @@ func schedulePods(config *testConfig) int32 { // TODO: Setup watch on apiserver and wait until all pods scheduled. scheduled, err := config.schedulerSupportFunctions.GetScheduledPodLister().List(labels.Everything()) if err != nil { - glog.Fatalf("%v", err) + klog.Fatalf("%v", err) } // We will be complete when all pods are done being scheduled.
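
Aside (not part of the patch): the scheduler and scheduler_perf tests above log through klog.V(...).Infof and klog.Fatalf, and klog, unlike glog, does not register its command-line flags as a side effect of being imported. Below is a minimal sketch of how a test package might wire this up; the package and test names are invented for illustration, not taken from this tree.

package benchmark_example

import (
	"testing"

	"k8s.io/klog"
)

func init() {
	// klog.InitFlags(nil) registers -v, -vmodule, -logtostderr and friends
	// (nil selects flag.CommandLine); glog performed this registration
	// automatically in its own init(). The testing package parses
	// flag.CommandLine before any test runs, so registering here is enough.
	klog.InitFlags(nil)
}

func TestVerboseLogging(t *testing.T) {
	// Emitted only when the test binary runs at verbosity 4 or higher,
	// e.g. go test -args -v=4.
	klog.V(4).Infof("saw %d scheduled pods", 42)
}

The upside of the explicit call is that the embedding binary decides which flag.FlagSet receives the logging flags, instead of having them registered unconditionally at import time.
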
diff --git a/test/integration/util/BUILD b/test/integration/util/BUILD index 7063c2162edd8..5bd8b0bb80473 100644 --- a/test/integration/util/BUILD +++ b/test/integration/util/BUILD @@ -27,8 +27,8 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//test/integration/framework:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", "//vendor/golang.org/x/oauth2:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/integration/util/util.go b/test/integration/util/util.go index d79c17641cd27..2443b97153743 100644 --- a/test/integration/util/util.go +++ b/test/integration/util/util.go @@ -20,13 +20,13 @@ import ( "net/http" "net/http/httptest" - "github.com/golang/glog" "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" clientv1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/record" + "k8s.io/klog" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler" @@ -49,9 +49,9 @@ func StartApiserver() (string, ShutdownFunc) { framework.RunAMasterUsingServer(framework.NewIntegrationTestMasterConfig(), s, h) shutdownFunc := func() { - glog.Infof("destroying API server") + klog.Infof("destroying API server") s.Close() - glog.Infof("destroyed API server") + klog.Infof("destroyed API server") } return s.URL, shutdownFunc } @@ -73,17 +73,17 @@ func StartScheduler(clientSet clientset.Interface) (factory.Configurator, Shutdo conf.Recorder = evtBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "scheduler"}) }) if err != nil { - glog.Fatalf("Error creating scheduler: %v", err) + klog.Fatalf("Error creating scheduler: %v", err) } informerFactory.Start(stopCh) sched.Run() shutdownFunc := func() { - glog.Infof("destroying scheduler") + klog.Infof("destroying scheduler") evtWatch.Stop() close(stopCh) - glog.Infof("destroyed scheduler") + klog.Infof("destroyed scheduler") } return schedulerConfigurator, shutdownFunc } diff --git a/test/integration/volume/BUILD b/test/integration/volume/BUILD index 0e6231fde6c7e..27785393155c6 100644 --- a/test/integration/volume/BUILD +++ b/test/integration/volume/BUILD @@ -37,7 +37,7 @@ go_test( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/reference:go_default_library", "//test/integration/framework:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go index 57a49c75fa27c..7fdba82ec3143 100644 --- a/test/integration/volume/persistent_volumes_test.go +++ b/test/integration/volume/persistent_volumes_test.go @@ -41,8 +41,8 @@ import ( volumetest "k8s.io/kubernetes/pkg/volume/testing" "k8s.io/kubernetes/test/integration/framework" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog" ) // Several tests in this file are configurable by environment variables: @@ -66,10 +66,10 @@ func getObjectCount() int { var err error objectCount, err = strconv.Atoi(s) if err != nil { - glog.Fatalf("cannot parse value of KUBE_INTEGRATION_PV_OBJECTS: %v", err) + klog.Fatalf("cannot parse value of KUBE_INTEGRATION_PV_OBJECTS: %v", err) } } - glog.V(2).Infof("using 
KUBE_INTEGRATION_PV_OBJECTS=%d", objectCount) + klog.V(2).Infof("using KUBE_INTEGRATION_PV_OBJECTS=%d", objectCount) return objectCount } @@ -79,10 +79,10 @@ func getSyncPeriod(syncPeriod time.Duration) time.Duration { var err error period, err = time.ParseDuration(s) if err != nil { - glog.Fatalf("cannot parse value of KUBE_INTEGRATION_PV_SYNC_PERIOD: %v", err) + klog.Fatalf("cannot parse value of KUBE_INTEGRATION_PV_SYNC_PERIOD: %v", err) } } - glog.V(2).Infof("using KUBE_INTEGRATION_PV_SYNC_PERIOD=%v", period) + klog.V(2).Infof("using KUBE_INTEGRATION_PV_SYNC_PERIOD=%v", period) return period } @@ -92,18 +92,18 @@ func testSleep() { var err error period, err = time.ParseDuration(s) if err != nil { - glog.Fatalf("cannot parse value of KUBE_INTEGRATION_PV_END_SLEEP: %v", err) + klog.Fatalf("cannot parse value of KUBE_INTEGRATION_PV_END_SLEEP: %v", err) } } - glog.V(2).Infof("using KUBE_INTEGRATION_PV_END_SLEEP=%v", period) + klog.V(2).Infof("using KUBE_INTEGRATION_PV_END_SLEEP=%v", period) if period != 0 { time.Sleep(period) - glog.V(2).Infof("sleep finished") + klog.V(2).Infof("sleep finished") } } func TestPersistentVolumeRecycler(t *testing.T) { - glog.V(2).Infof("TestPersistentVolumeRecycler started") + klog.V(2).Infof("TestPersistentVolumeRecycler started") _, s, closeFn := framework.RunAMaster(nil) defer closeFn() @@ -131,34 +131,34 @@ func TestPersistentVolumeRecycler(t *testing.T) { if err != nil { t.Errorf("Failed to create PersistentVolume: %v", err) } - glog.V(2).Infof("TestPersistentVolumeRecycler pvc created") + klog.V(2).Infof("TestPersistentVolumeRecycler pvc created") _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc) if err != nil { t.Errorf("Failed to create PersistentVolumeClaim: %v", err) } - glog.V(2).Infof("TestPersistentVolumeRecycler pvc created") + klog.V(2).Infof("TestPersistentVolumeRecycler pvc created") // wait until the controller pairs the volume and claim waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound) - glog.V(2).Infof("TestPersistentVolumeRecycler pv bound") + klog.V(2).Infof("TestPersistentVolumeRecycler pv bound") waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound) - glog.V(2).Infof("TestPersistentVolumeRecycler pvc bound") + klog.V(2).Infof("TestPersistentVolumeRecycler pvc bound") // deleting a claim releases the volume, after which it can be recycled if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil { t.Errorf("error deleting claim %s", pvc.Name) } - glog.V(2).Infof("TestPersistentVolumeRecycler pvc deleted") + klog.V(2).Infof("TestPersistentVolumeRecycler pvc deleted") waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeReleased) - glog.V(2).Infof("TestPersistentVolumeRecycler pv released") + klog.V(2).Infof("TestPersistentVolumeRecycler pv released") waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeAvailable) - glog.V(2).Infof("TestPersistentVolumeRecycler pv available") + klog.V(2).Infof("TestPersistentVolumeRecycler pv available") } func TestPersistentVolumeDeleter(t *testing.T) { - glog.V(2).Infof("TestPersistentVolumeDeleter started") + klog.V(2).Infof("TestPersistentVolumeDeleter started") _, s, closeFn := framework.RunAMaster(nil) defer closeFn() @@ -186,25 +186,25 @@ func TestPersistentVolumeDeleter(t *testing.T) { if err != nil { t.Errorf("Failed to create PersistentVolume: %v", err) } - glog.V(2).Infof("TestPersistentVolumeDeleter pv created") + 
klog.V(2).Infof("TestPersistentVolumeDeleter pv created") _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc) if err != nil { t.Errorf("Failed to create PersistentVolumeClaim: %v", err) } - glog.V(2).Infof("TestPersistentVolumeDeleter pvc created") + klog.V(2).Infof("TestPersistentVolumeDeleter pvc created") waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound) - glog.V(2).Infof("TestPersistentVolumeDeleter pv bound") + klog.V(2).Infof("TestPersistentVolumeDeleter pv bound") waitForPersistentVolumeClaimPhase(testClient, pvc.Name, ns.Name, watchPVC, v1.ClaimBound) - glog.V(2).Infof("TestPersistentVolumeDeleter pvc bound") + klog.V(2).Infof("TestPersistentVolumeDeleter pvc bound") // deleting a claim releases the volume, after which it can be recycled if err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Delete(pvc.Name, nil); err != nil { t.Errorf("error deleting claim %s", pvc.Name) } - glog.V(2).Infof("TestPersistentVolumeDeleter pvc deleted") + klog.V(2).Infof("TestPersistentVolumeDeleter pvc deleted") waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeReleased) - glog.V(2).Infof("TestPersistentVolumeDeleter pv released") + klog.V(2).Infof("TestPersistentVolumeDeleter pv released") for { event := <-watchPV.ResultChan() @@ -212,13 +212,13 @@ func TestPersistentVolumeDeleter(t *testing.T) { break } } - glog.V(2).Infof("TestPersistentVolumeDeleter pv deleted") + klog.V(2).Infof("TestPersistentVolumeDeleter pv deleted") } func TestPersistentVolumeBindRace(t *testing.T) { // Test a race binding many claims to a PV that is pre-bound to a specific // PVC. Only this specific PVC should get bound. - glog.V(2).Infof("TestPersistentVolumeBindRace started") + klog.V(2).Infof("TestPersistentVolumeBindRace started") _, s, closeFn := framework.RunAMaster(nil) defer closeFn() @@ -253,7 +253,7 @@ func TestPersistentVolumeBindRace(t *testing.T) { } claims = append(claims, claim) } - glog.V(2).Infof("TestPersistentVolumeBindRace claims created") + klog.V(2).Infof("TestPersistentVolumeBindRace claims created") // putting a bind manually on a pv should only match the claim it is bound to claim := claims[rand.Intn(maxClaims-1)] @@ -268,12 +268,12 @@ func TestPersistentVolumeBindRace(t *testing.T) { if err != nil { t.Fatalf("Unexpected error creating pv: %v", err) } - glog.V(2).Infof("TestPersistentVolumeBindRace pv created, pre-bound to %s", claim.Name) + klog.V(2).Infof("TestPersistentVolumeBindRace pv created, pre-bound to %s", claim.Name) waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound) - glog.V(2).Infof("TestPersistentVolumeBindRace pv bound") + klog.V(2).Infof("TestPersistentVolumeBindRace pv bound") waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound) - glog.V(2).Infof("TestPersistentVolumeBindRace pvc bound") + klog.V(2).Infof("TestPersistentVolumeBindRace pvc bound") pv, err = testClient.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) if err != nil { @@ -590,7 +590,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { } // Create PVs first - glog.V(2).Infof("TestPersistentVolumeMultiPVsPVCs: start") + klog.V(2).Infof("TestPersistentVolumeMultiPVsPVCs: start") // Create the volumes in a separate goroutine to pop events from // watchPV early - it seems it has limited capacity and it gets stuck @@ -603,9 +603,9 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // Wait for them to get Available for i := 0; i < objCount; i++ { waitForAnyPersistentVolumePhase(watchPV, 
v1.VolumeAvailable) - glog.V(1).Infof("%d volumes available", i+1) + klog.V(1).Infof("%d volumes available", i+1) } - glog.V(2).Infof("TestPersistentVolumeMultiPVsPVCs: volumes are Available") + klog.V(2).Infof("TestPersistentVolumeMultiPVsPVCs: volumes are Available") // Start a separate goroutine that randomly modifies PVs and PVCs while the // binder is working. We test that the binder can bind volumes despite @@ -622,7 +622,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { if err != nil { // Silently ignore error, the PV may have been already deleted // or may not exist yet. - glog.V(4).Infof("Failed to read PV %s: %v", name, err) + klog.V(4).Infof("Failed to read PV %s: %v", name, err) continue } if pv.Annotations == nil { @@ -634,10 +634,10 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { if err != nil { // Silently ignore error, the PV may have been updated by // the controller. - glog.V(4).Infof("Failed to update PV %s: %v", pv.Name, err) + klog.V(4).Infof("Failed to update PV %s: %v", pv.Name, err) continue } - glog.V(4).Infof("Updated PV %s", pv.Name) + klog.V(4).Infof("Updated PV %s", pv.Name) } else { // Modify PVC i := rand.Intn(objCount) @@ -646,7 +646,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { if err != nil { // Silently ignore error, the PVC may have been already // deleted or may not exist yet. - glog.V(4).Infof("Failed to read PVC %s: %v", name, err) + klog.V(4).Infof("Failed to read PVC %s: %v", name, err) continue } if pvc.Annotations == nil { @@ -658,10 +658,10 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { if err != nil { // Silently ignore error, the PVC may have been updated by // the controller. - glog.V(4).Infof("Failed to update PVC %s: %v", pvc.Name, err) + klog.V(4).Infof("Failed to update PVC %s: %v", pvc.Name, err) continue } - glog.V(4).Infof("Updated PVC %s", pvc.Name) + klog.V(4).Infof("Updated PVC %s", pvc.Name) } select { @@ -684,15 +684,15 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { // wait until the binder pairs all claims for i := 0; i < objCount; i++ { waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound) - glog.V(1).Infof("%d claims bound", i+1) + klog.V(1).Infof("%d claims bound", i+1) } // wait until the binder pairs all volumes for i := 0; i < objCount; i++ { waitForPersistentVolumePhase(testClient, pvs[i].Name, watchPV, v1.VolumeBound) - glog.V(1).Infof("%d claims bound", i+1) + klog.V(1).Infof("%d claims bound", i+1) } - glog.V(2).Infof("TestPersistentVolumeMultiPVsPVCs: claims are bound") + klog.V(2).Infof("TestPersistentVolumeMultiPVsPVCs: claims are bound") stopCh <- struct{}{} // check that everything is bound to something @@ -704,7 +704,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { if pv.Spec.ClaimRef == nil { t.Fatalf("PV %q is not bound", pv.Name) } - glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name) + klog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name) pvc, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name, metav1.GetOptions{}) if err != nil { @@ -713,7 +713,7 @@ func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { if pvc.Spec.VolumeName == "" { t.Fatalf("PVC %q is not bound", pvc.Name) } - glog.V(2).Infof("PVC %q is bound to PV %q", pvc.Name, pvc.Spec.VolumeName) + klog.V(2).Infof("PVC %q is bound to PV %q", pvc.Name, pvc.Spec.VolumeName) } testSleep() } @@ -766,7 +766,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) { []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
v1.PersistentVolumeReclaimRetain) claimRef, err := ref.GetReference(legacyscheme.Scheme, newPVC) if err != nil { - glog.V(3).Infof("unexpected error getting claim reference: %v", err) + klog.V(3).Infof("unexpected error getting claim reference: %v", err) return } pv.Spec.ClaimRef = claimRef @@ -820,7 +820,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) { case <-timer.C: // Wait finished - glog.V(2).Infof("Wait finished") + klog.V(2).Infof("Wait finished") finished = true } } @@ -834,7 +834,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) { if pv.Spec.ClaimRef == nil { t.Fatalf("PV %q is not bound", pv.Name) } - glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name) + klog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name) pvc, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Get(pvcs[i].Name, metav1.GetOptions{}) if err != nil { @@ -843,7 +843,7 @@ func TestPersistentVolumeControllerStartup(t *testing.T) { if pvc.Spec.VolumeName == "" { t.Fatalf("PVC %q is not bound", pvc.Name) } - glog.V(2).Infof("PVC %q is bound to PV %q", pvc.Name, pvc.Spec.VolumeName) + klog.V(2).Infof("PVC %q is bound to PV %q", pvc.Name, pvc.Spec.VolumeName) } } @@ -888,7 +888,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { pvcs[i] = pvc } - glog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: start") + klog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: start") // Create the claims in a separate goroutine to pop events from watchPVC // early. It gets stuck with >3000 claims. go func() { @@ -900,9 +900,9 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { // Wait until the controller provisions and binds all of them for i := 0; i < objCount; i++ { waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound) - glog.V(1).Infof("%d claims bound", i+1) + klog.V(1).Infof("%d claims bound", i+1) } - glog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: claims are bound") + klog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: claims are bound") // check that we have enough bound PVs pvList, err := testClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{}) @@ -917,7 +917,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { if pv.Status.Phase != v1.VolumeBound { t.Fatalf("Expected volume %s to be bound, is %s instead", pv.Name, pv.Status.Phase) } - glog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name) + klog.V(2).Infof("PV %q is bound to PVC %q", pv.Name, pv.Spec.ClaimRef.Name) } // Delete the claims @@ -933,13 +933,13 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) { t.Fatalf("Failed to list volumes: %v", err) } - glog.V(1).Infof("%d volumes remaining", len(volumes.Items)) + klog.V(1).Infof("%d volumes remaining", len(volumes.Items)) if len(volumes.Items) == 0 { break } time.Sleep(time.Second) } - glog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: volumes are deleted") + klog.V(2).Infof("TestPersistentVolumeProvisionMultiPVCs: volumes are deleted") } // TestPersistentVolumeMultiPVsDiffAccessModes tests binding of one PVC to two @@ -1038,7 +1038,7 @@ func waitForPersistentVolumePhase(client *clientset.Clientset, pvName string, w continue } if volume.Status.Phase == phase && volume.Name == pvName { - glog.V(2).Infof("volume %q is %s", volume.Name, phase) + klog.V(2).Infof("volume %q is %s", volume.Name, phase) break } } @@ -1059,7 +1059,7 @@ func waitForPersistentVolumeClaimPhase(client *clientset.Clientset, claimName, n continue } if 
claim.Status.Phase == phase && claim.Name == claimName { - glog.V(2).Infof("claim %q is %s", claim.Name, phase) + klog.V(2).Infof("claim %q is %s", claim.Name, phase) break } } @@ -1073,7 +1073,7 @@ func waitForAnyPersistentVolumePhase(w watch.Interface, phase v1.PersistentVolum continue } if volume.Status.Phase == phase { - glog.V(2).Infof("volume %q is %s", volume.Name, phase) + klog.V(2).Infof("volume %q is %s", volume.Name, phase) break } } @@ -1087,7 +1087,7 @@ func waitForAnyPersistentVolumeClaimPhase(w watch.Interface, phase v1.Persistent continue } if claim.Status.Phase == phase { - glog.V(2).Infof("claim %q is %s", claim.Name, phase) + klog.V(2).Infof("claim %q is %s", claim.Name, phase) break } } diff --git a/test/soak/cauldron/BUILD b/test/soak/cauldron/BUILD index 84a81a97acacb..dbf02e64b422a 100644 --- a/test/soak/cauldron/BUILD +++ b/test/soak/cauldron/BUILD @@ -23,7 +23,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/client-go/rest:go_default_library", "//test/e2e/framework:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/soak/cauldron/cauldron.go b/test/soak/cauldron/cauldron.go index 867e88c5554eb..63173667841f9 100644 --- a/test/soak/cauldron/cauldron.go +++ b/test/soak/cauldron/cauldron.go @@ -29,11 +29,11 @@ import ( "net/http" "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" restclient "k8s.io/client-go/rest" + "k8s.io/klog" api "k8s.io/kubernetes/pkg/apis/core" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/test/e2e/framework" @@ -59,17 +59,17 @@ const ( func main() { flag.Parse() - glog.Infof("Starting cauldron soak test with queries=%d podsPerNode=%d upTo=%d maxPar=%d", + klog.Infof("Starting cauldron soak test with queries=%d podsPerNode=%d upTo=%d maxPar=%d", *queriesAverage, *podsPerNode, *upTo, *maxPar) cc, err := restclient.InClusterConfig() if err != nil { - glog.Fatalf("Failed to make client: %v", err) + klog.Fatalf("Failed to make client: %v", err) } client, err := clientset.NewForConfig(cc) if err != nil { - glog.Fatalf("Failed to make client: %v", err) + klog.Fatalf("Failed to make client: %v", err) } var nodes *api.NodeList @@ -78,19 +78,19 @@ func main() { if err == nil { break } - glog.Warningf("Failed to list nodes: %v", err) + klog.Warningf("Failed to list nodes: %v", err) } if err != nil { - glog.Fatalf("Giving up trying to list nodes: %v", err) + klog.Fatalf("Giving up trying to list nodes: %v", err) } if len(nodes.Items) == 0 { - glog.Fatalf("Failed to find any nodes.") + klog.Fatalf("Failed to find any nodes.") } - glog.Infof("Found %d nodes on this cluster:", len(nodes.Items)) + klog.Infof("Found %d nodes on this cluster:", len(nodes.Items)) for i, node := range nodes.Items { - glog.Infof("%d: %s", i, node.Name) + klog.Infof("%d: %s", i, node.Name) } queries := *queriesAverage * len(nodes.Items) * *podsPerNode @@ -98,12 +98,12 @@ func main() { // Create a uniquely named namespace. 
got, err := client.Core().Namespaces().Create(&api.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "serve-hostnames-"}}) if err != nil { - glog.Fatalf("Failed to create namespace: %v", err) + klog.Fatalf("Failed to create namespace: %v", err) } ns := got.Name defer func(ns string) { if err := client.Core().Namespaces().Delete(ns, nil); err != nil { - glog.Warningf("Failed to delete namespace %s: %v", ns, err) + klog.Warningf("Failed to delete namespace %s: %v", ns, err) } else { // wait until the namespace disappears for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ { @@ -116,10 +116,10 @@ func main() { } } }(ns) - glog.Infof("Created namespace %s", ns) + klog.Infof("Created namespace %s", ns) // Create a service for these pods. - glog.Infof("Creating service %s/serve-hostnames", ns) + klog.Infof("Creating service %s/serve-hostnames", ns) // Make several attempts to create a service. var svc *api.Service for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) { @@ -142,25 +142,25 @@ func main() { }, }, }) - glog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t)) + klog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t)) if err == nil { break } - glog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err) + klog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err) } if err != nil { - glog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err) + klog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err) return } // Clean up service defer func() { - glog.Infof("Cleaning up service %s/serve-hostnames", ns) + klog.Infof("Cleaning up service %s/serve-hostnames", ns) // Make several attempts to delete the service. for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { if err := client.Core().Services(ns).Delete(svc.Name, nil); err == nil { return } - glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err) + klog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err) } }() @@ -172,7 +172,7 @@ func main() { podNames = append(podNames, podName) // Make several attempts for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) { - glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name) + klog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name) t := time.Now() _, err = client.Core().Pods(ns).Create(&api.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -192,39 +192,39 @@ func main() { NodeName: node.Name, }, }) - glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t)) + klog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t)) if err == nil { break } - glog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err) + klog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err) } if err != nil { - glog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err) + klog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err) return } } } // Clean up the pods defer func() { - glog.Info("Cleaning up pods") + klog.Info("Cleaning up pods") // Make several attempts to delete the pods. 
for _, podName := range podNames { for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { if err = client.Core().Pods(ns).Delete(podName, nil); err == nil { break } - glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err) + klog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err) } } }() - glog.Info("Waiting for the serve-hostname pods to be ready") + klog.Info("Waiting for the serve-hostname pods to be ready") for _, podName := range podNames { var pod *api.Pod for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) { pod, err = client.Core().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { - glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout) + klog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout) continue } if pod.Status.Phase == api.PodRunning { @@ -232,9 +232,9 @@ func main() { } } if pod.Status.Phase != api.PodRunning { - glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase) + klog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase) } else { - glog.Infof("%s/%s is running", ns, podName) + klog.Infof("%s/%s is running", ns, podName) } } @@ -244,10 +244,10 @@ func main() { if err == nil { break } - glog.Infof("After %v while making a request got error %v", time.Since(start), err) + klog.Infof("After %v while making a request got error %v", time.Since(start), err) } if err != nil { - glog.Errorf("Failed to get a response from service: %v", err) + klog.Errorf("Failed to get a response from service: %v", err) } // Repeatedly make requests. @@ -262,9 +262,9 @@ func main() { inFlight <- struct{}{} t := time.Now() resp, err := http.Get(fmt.Sprintf("http://serve-hostnames.%s:9376", ns)) - glog.V(4).Infof("Call to serve-hostnames in namespace %s took %v", ns, time.Since(t)) + klog.V(4).Infof("Call to serve-hostnames in namespace %s took %v", ns, time.Since(t)) if err != nil { - glog.Warningf("Call failed during iteration %d query %d : %v", i, query, err) + klog.Warningf("Call failed during iteration %d query %d : %v", i, query, err) // If the query failed return a string which starts with a character // that can't be part of a hostname. responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err) @@ -284,28 +284,28 @@ func main() { missing := 0 for q := 0; q < queries; q++ { r := <-responseChan - glog.V(4).Infof("Got response from %s", r) + klog.V(4).Infof("Got response from %s", r) responses[r]++ // If the returned hostname starts with '!' then it indicates // an error response. if len(r) > 0 && r[0] == '!' { - glog.V(3).Infof("Got response %s", r) + klog.V(3).Infof("Got response %s", r) missing++ } } if missing > 0 { - glog.Warningf("Missing %d responses out of %d", missing, queries) + klog.Warningf("Missing %d responses out of %d", missing, queries) } // Report any nodes that did not respond. 
for n, node := range nodes.Items { for i := 0; i < *podsPerNode; i++ { name := fmt.Sprintf("serve-hostname-%d-%d", n, i) if _, ok := responses[name]; !ok { - glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration) + klog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration) } } } - glog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing", + klog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing", iteration, time.Since(start), queries-missing, float64(queries-missing)/time.Since(start).Seconds(), missing) } } diff --git a/test/soak/serve_hostnames/BUILD b/test/soak/serve_hostnames/BUILD index e5bbf5764f92d..8fb14520d900d 100644 --- a/test/soak/serve_hostnames/BUILD +++ b/test/soak/serve_hostnames/BUILD @@ -26,7 +26,7 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", "//test/e2e/framework:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/soak/serve_hostnames/serve_hostnames.go b/test/soak/serve_hostnames/serve_hostnames.go index 92b48bf551288..c3ee9dbc36004 100644 --- a/test/soak/serve_hostnames/serve_hostnames.go +++ b/test/soak/serve_hostnames/serve_hostnames.go @@ -40,7 +40,7 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" e2e "k8s.io/kubernetes/test/e2e/framework" - "github.com/golang/glog" + "k8s.io/klog" ) var ( @@ -64,7 +64,7 @@ const ( func main() { flag.Parse() - glog.Infof("Starting serve_hostnames soak test with queries=%d and podsPerNode=%d upTo=%d", + klog.Infof("Starting serve_hostnames soak test with queries=%d and podsPerNode=%d upTo=%d", *queriesAverage, *podsPerNode, *upTo) var spec string @@ -75,19 +75,19 @@ func main() { } settings, err := clientcmd.LoadFromFile(spec) if err != nil { - glog.Fatalf("Error loading configuration: %v", err.Error()) + klog.Fatalf("Error loading configuration: %v", err.Error()) } if *gke != "" { settings.CurrentContext = *gke } config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { - glog.Fatalf("Failed to construct config: %v", err) + klog.Fatalf("Failed to construct config: %v", err) } client, err := clientset.NewForConfig(config) if err != nil { - glog.Fatalf("Failed to make client: %v", err) + klog.Fatalf("Failed to make client: %v", err) } var nodes *v1.NodeList @@ -96,19 +96,19 @@ func main() { if err == nil { break } - glog.Warningf("Failed to list nodes: %v", err) + klog.Warningf("Failed to list nodes: %v", err) } if err != nil { - glog.Fatalf("Giving up trying to list nodes: %v", err) + klog.Fatalf("Giving up trying to list nodes: %v", err) } if len(nodes.Items) == 0 { - glog.Fatalf("Failed to find any nodes.") + klog.Fatalf("Failed to find any nodes.") } - glog.Infof("Found %d nodes on this cluster:", len(nodes.Items)) + klog.Infof("Found %d nodes on this cluster:", len(nodes.Items)) for i, node := range nodes.Items { - glog.Infof("%d: %s", i, node.Name) + klog.Infof("%d: %s", i, node.Name) } queries := *queriesAverage * len(nodes.Items) * *podsPerNode @@ -116,12 +116,12 @@ func main() { // Create the namespace got, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "serve-hostnames-"}}) if err != nil { - glog.Fatalf("Failed to create namespace: %v", err) + klog.Fatalf("Failed to create namespace: %v", err) } ns := 
got.Name defer func(ns string) { if err := client.CoreV1().Namespaces().Delete(ns, nil); err != nil { - glog.Warningf("Failed to delete namespace %s: %v", ns, err) + klog.Warningf("Failed to delete namespace %s: %v", ns, err) } else { // wait until the namespace disappears for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ { @@ -134,10 +134,10 @@ func main() { } } }(ns) - glog.Infof("Created namespace %s", ns) + klog.Infof("Created namespace %s", ns) // Create a service for these pods. - glog.Infof("Creating service %s/serve-hostnames", ns) + klog.Infof("Creating service %s/serve-hostnames", ns) // Make several attempts to create a service. var svc *v1.Service for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) { @@ -160,25 +160,25 @@ func main() { }, }, }) - glog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t)) + klog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t)) if err == nil { break } - glog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err) + klog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err) } if err != nil { - glog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err) + klog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err) return } // Clean up service defer func() { - glog.Infof("Cleaning up service %s/serve-hostnames", ns) + klog.Infof("Cleaning up service %s/serve-hostnames", ns) // Make several attempts to delete the service. for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { if err := client.CoreV1().Services(ns).Delete(svc.Name, nil); err == nil { return } - glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err) + klog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err) } }() @@ -190,7 +190,7 @@ func main() { podNames = append(podNames, podName) // Make several attempts for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) { - glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name) + klog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name) t := time.Now() _, err = client.CoreV1().Pods(ns).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -210,39 +210,39 @@ func main() { NodeName: node.Name, }, }) - glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t)) + klog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t)) if err == nil { break } - glog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err) + klog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err) } if err != nil { - glog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err) + klog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err) return } } } // Clean up the pods defer func() { - glog.Info("Cleaning up pods") + klog.Info("Cleaning up pods") // Make several attempts to delete the pods. 
for _, podName := range podNames { for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) { if err = client.CoreV1().Pods(ns).Delete(podName, nil); err == nil { break } - glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err) + klog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err) } } }() - glog.Info("Waiting for the serve-hostname pods to be ready") + klog.Info("Waiting for the serve-hostname pods to be ready") for _, podName := range podNames { var pod *v1.Pod for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) { pod, err = client.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { - glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout) + klog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, err, podStartTimeout) continue } if pod.Status.Phase == v1.PodRunning { @@ -250,20 +250,20 @@ func main() { } } if pod.Status.Phase != v1.PodRunning { - glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase) + klog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase) } else { - glog.Infof("%s/%s is running", ns, podName) + klog.Infof("%s/%s is running", ns, podName) } } rclient, err := restclient.RESTClientFor(config) if err != nil { - glog.Warningf("Failed to build restclient: %v", err) + klog.Warningf("Failed to build restclient: %v", err) return } proxyRequest, errProxy := e2e.GetServicesProxyRequest(client, rclient.Get()) if errProxy != nil { - glog.Warningf("Get services proxy request failed: %v", errProxy) + klog.Warningf("Get services proxy request failed: %v", errProxy) return } @@ -274,7 +274,7 @@ func main() { Name("serve-hostnames"). DoRaw() if err != nil { - glog.Infof("After %v while making a proxy call got error %v", time.Since(start), err) + klog.Infof("After %v while making a proxy call got error %v", time.Since(start), err) continue } var r metav1.Status @@ -282,7 +282,7 @@ func main() { break } if r.Status == metav1.StatusFailure { - glog.Infof("After %v got status %v", time.Since(start), string(hostname)) + klog.Infof("After %v got status %v", time.Since(start), string(hostname)) continue } break @@ -303,9 +303,9 @@ func main() { Namespace(ns). Name("serve-hostnames"). DoRaw() - glog.V(4).Infof("Proxy call in namespace %s took %v", ns, time.Since(t)) + klog.V(4).Infof("Proxy call in namespace %s took %v", ns, time.Since(t)) if err != nil { - glog.Warningf("Call failed during iteration %d query %d : %v", i, query, err) + klog.Warningf("Call failed during iteration %d query %d : %v", i, query, err) // If the query failed return a string which starts with a character // that can't be part of a hostname. responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err) @@ -319,28 +319,28 @@ func main() { missing := 0 for q := 0; q < queries; q++ { r := <-responseChan - glog.V(4).Infof("Got response from %s", r) + klog.V(4).Infof("Got response from %s", r) responses[r]++ // If the returned hostname starts with '!' then it indicates // an error response. if len(r) > 0 && r[0] == '!' 
{ - glog.V(3).Infof("Got response %s", r) + klog.V(3).Infof("Got response %s", r) missing++ } } if missing > 0 { - glog.Warningf("Missing %d responses out of %d", missing, queries) + klog.Warningf("Missing %d responses out of %d", missing, queries) } // Report any nodes that did not respond. for n, node := range nodes.Items { for i := 0; i < *podsPerNode; i++ { name := fmt.Sprintf("serve-hostname-%d-%d", n, i) if _, ok := responses[name]; !ok { - glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration) + klog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration) } } } - glog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing", + klog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing", iteration, time.Since(start), queries-missing, float64(queries-missing)/time.Since(start).Seconds(), missing) } } diff --git a/test/utils/BUILD b/test/utils/BUILD index f594b7ba1448c..82a0722eebae9 100644 --- a/test/utils/BUILD +++ b/test/utils/BUILD @@ -57,7 +57,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//vendor/github.com/davecgh/go-spew/spew:go_default_library", - "//vendor/github.com/golang/glog:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/test/utils/density_utils.go b/test/utils/density_utils.go index c607e5c940e6a..60a32d50c19d7 100644 --- a/test/utils/density_utils.go +++ b/test/utils/density_utils.go @@ -21,12 +21,12 @@ import ( "strings" "time" - "github.com/golang/glog" "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog" ) const ( @@ -79,7 +79,7 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKeys []stri if !apierrs.IsConflict(err) { return err } else { - glog.V(2).Infof("Conflict when trying to remove a labels %v from %v", labelKeys, nodeName) + klog.V(2).Infof("Conflict when trying to remove a labels %v from %v", labelKeys, nodeName) } } else { break diff --git a/test/utils/harness/BUILD b/test/utils/harness/BUILD index 510a812713246..853e519b0bdd9 100644 --- a/test/utils/harness/BUILD +++ b/test/utils/harness/BUILD @@ -5,7 +5,7 @@ go_library( srcs = ["harness.go"], importpath = "k8s.io/kubernetes/test/utils/harness", visibility = ["//visibility:public"], - deps = ["//vendor/github.com/golang/glog:go_default_library"], + deps = ["//vendor/k8s.io/klog:go_default_library"], ) filegroup( diff --git a/test/utils/harness/harness.go b/test/utils/harness/harness.go index a3c827318f327..46e65da006a0d 100644 --- a/test/utils/harness/harness.go +++ b/test/utils/harness/harness.go @@ -21,7 +21,7 @@ import ( "os" "testing" - "github.com/golang/glog" + "k8s.io/klog" ) // Harness adds some functionality to testing.T, in particular resource cleanup. 
@@ -51,7 +51,7 @@ func For(t *testing.T) *Harness { func (h *Harness) Close() { for _, d := range h.defers { if err := d(); err != nil { - glog.Warningf("error closing harness: %v", err) + klog.Warningf("error closing harness: %v", err) } } } diff --git a/test/utils/runners.go b/test/utils/runners.go index 2cc6456b293f0..4ebfde80e1305 100644 --- a/test/utils/runners.go +++ b/test/utils/runners.go @@ -47,7 +47,7 @@ import ( extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -163,7 +163,7 @@ type RCConfig struct { // If set to false starting RC will print progress, otherwise only errors will be printed. Silent bool - // If set this function will be used to print log lines instead of glog. + // If set this function will be used to print log lines instead of klog. LogFunc func(fmt string, args ...interface{}) // If set those functions will be used to gather data from Nodes - in integration tests where no // kubelets are running those variables should be nil. @@ -181,7 +181,7 @@ func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) { if rc.LogFunc != nil { rc.LogFunc(fmt, args...) } - glog.Infof(fmt, args...) + klog.Infof(fmt, args...) } type DeploymentConfig struct { @@ -1145,7 +1145,7 @@ type SecretConfig struct { Client clientset.Interface Name string Namespace string - // If set this function will be used to print log lines instead of glog. + // If set this function will be used to print log lines instead of klog. LogFunc func(fmt string, args ...interface{}) } @@ -1203,7 +1203,7 @@ type ConfigMapConfig struct { Client clientset.Interface Name string Namespace string - // If set this function will be used to print log lines instead of glog. + // If set this function will be used to print log lines instead of klog. LogFunc func(fmt string, args ...interface{}) } @@ -1314,7 +1314,7 @@ type DaemonConfig struct { Name string Namespace string Image string - // If set this function will be used to print log lines instead of glog. + // If set this function will be used to print log lines instead of klog. LogFunc func(fmt string, args ...interface{}) // How long we wait for DaemonSet to become running. Timeout time.Duration diff --git a/test/utils/tmpdir.go b/test/utils/tmpdir.go index 51cf19f1c1e2a..0c84c74d7ad45 100644 --- a/test/utils/tmpdir.go +++ b/test/utils/tmpdir.go @@ -19,7 +19,7 @@ package utils import ( "io/ioutil" - "github.com/golang/glog" + "k8s.io/klog" ) func MakeTempDirOrDie(prefix string, baseDir string) string { @@ -28,7 +28,7 @@ func MakeTempDirOrDie(prefix string, baseDir string) string { } tempDir, err := ioutil.TempDir(baseDir, prefix) if err != nil { - glog.Fatalf("Can't make a temp rootdir: %v", err) + klog.Fatalf("Can't make a temp rootdir: %v", err) } return tempDir }
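
Aside (not part of the patch): several of the config structs in test/utils/runners.go above carry a LogFunc field documented as printing log lines in place of klog. Below is a minimal sketch of that shape, following what the RCConfigLog hunk shows; Config and logf are invented names, not the real identifiers.

package utils_example

import "k8s.io/klog"

// Config mirrors the LogFunc pattern above: a caller may inject its own
// logger, for example t.Logf in an integration test, so that output lands
// in the test's log.
type Config struct {
	// If set, this function is used to print log lines.
	LogFunc func(format string, args ...interface{})
}

// logf follows the RCConfigLog hunk above: the injected LogFunc is invoked
// when set, and the line is then emitted through klog as well.
func (c *Config) logf(format string, args ...interface{}) {
	if c.LogFunc != nil {
		c.LogFunc(format, args...)
	}
	klog.Infof(format, args...)
}

Because the fallback path is plain klog.Infof, a backend swap like the one in this patch is invisible to callers that set LogFunc.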