Skip to content

Commit

Permalink
Add Reason(err error) to logger
Browse files Browse the repository at this point in the history
Very often the only reason string formatting is needed is to embed
an error. Instead of

  logging.DefaultLogger().Error().Msgf("Something failed with %s.", err)

do

  logging.DefaultLogger().Error().Reason(err).Msgf("Something failed.")
  • Loading branch information
rmohr committed Jan 13, 2017
1 parent fd81bc8 commit 01f728c
Show file tree
Hide file tree
Showing 5 changed files with 44 additions and 21 deletions.
9 changes: 9 additions & 0 deletions pkg/logging/logging.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ type FilteredLogger struct {
currentLogLevel logLevel
verbosityLevel int
currentVerbosityLevel int
err error
}

func InitializeLogging(comp string) {
Expand Down Expand Up @@ -136,6 +137,9 @@ func (l FilteredLogger) log(skipFrames int, params ...interface{}) error {
"pos", fmt.Sprintf("%s:%d", filepath.Base(fileName), lineNumber),
"component", l.component,
)
if l.err != nil {
l.logContext = l.logContext.With("reason", l.err)
}
return l.logContext.WithPrefix(logParams...).Log(params...)
}
return nil
Expand Down Expand Up @@ -216,3 +220,8 @@ func (l FilteredLogger) Critical() *FilteredLogger {
l.currentLogLevel = CRITICAL
return &l
}

// Reason returns a copy of the logger with the given error attached.
// When a message is eventually emitted, the error is logged as a
// "reason" key-value pair ahead of the message (see log()).
func (l FilteredLogger) Reason(err error) *FilteredLogger {
	logger := l
	logger.err = err
	return &logger
}
14 changes: 14 additions & 0 deletions pkg/logging/logging_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package logging

import (
"errors"
"fmt"
"kubevirt.io/kubevirt/pkg/api"
"path/filepath"
"runtime"
Expand Down Expand Up @@ -371,3 +372,16 @@ func TestMsgfVerbosity(t *testing.T) {
assert(t, strings.HasPrefix(logEntry[5].(string), "logging_test.go"), "Logged line referenced wrong module")
tearDown()
}

// TestErrWithMsgf verifies that an error attached via Reason(err) is
// emitted as a "reason"/error key-value pair immediately before the
// "msg" pair produced by Msgf.
func TestErrWithMsgf(t *testing.T) {
	setUp()
	log := MakeLogger(MockLogger{})
	log.Reason(fmt.Errorf("testerror")).Msgf("%s", "test")

	logEntry := logParams[0].([]interface{})
	// Indices 0-7 presumably hold the standard prefix pairs added by
	// log() (e.g. "pos" and "component") — confirm against log().
	assert(t, logEntry[8].(string) == "reason", "Logged line did not contain reason header")
	assert(t, logEntry[9].(error).Error() == "testerror", "Logged line did not contain the expected error")
	assert(t, logEntry[10].(string) == "msg", "Logged line did not contain message header")
	assert(t, logEntry[11].(string) == "test", "Logged line did not contain message")
	tearDown()
}
2 changes: 1 addition & 1 deletion pkg/virt-controller/watch/pod.go
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ func processPod(p *podResourceEventHandler, pod *v1.Pod) error {
vm.ObjectMeta.Labels[corev1.NodeNameLabel] = pod.Spec.NodeName
// Update the VM
if err := p.restCli.Put().Resource("vms").Body(&vm).Name(vm.ObjectMeta.Name).Namespace(kubeapi.NamespaceDefault).Do().Error(); err != nil {
logger.Error().Msgf("Setting the VM to pending failed with: %s", err)
logger.Error().Reason(err).Msg("Setting the VM to pending failed.")
if e, ok := err.(*errors.StatusError); ok {
if e.Status().Reason == metav1.StatusReasonNotFound {
// VM does not exist anymore, we don't have to retry
Expand Down
16 changes: 8 additions & 8 deletions pkg/virt-controller/watch/vm.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,11 +75,11 @@ func processVM(v *vmResourceEventHandler, obj *v1.VM) error {

// TODO get rid of these service calls
if err := v.VMService.StartVM(&vm); err != nil {
logger.Error().Msgf("Defining a target pod for the VM failed with: %s", err)
logger.Error().Reason(err).Msg("Defining a target pod for the VM failed.")
pl, err := v.VMService.GetRunningPods(&vm)
if err != nil {
// TODO detect if communication error and backoff
logger.Error().Msgf("Getting all running Pods for the VM failed with: %s", err)
logger.Error().Reason(err).Msg("Getting all running Pods for the VM failed.")
return cache.ErrRequeue{Err: err}
}
for _, p := range pl.Items {
Expand All @@ -89,17 +89,17 @@ func processVM(v *vmResourceEventHandler, obj *v1.VM) error {
err = v.VMService.DeleteVM(&vm)
if err != nil {
// TODO detect if communication error and do backoff
logger.Critical().Msgf("Deleting orphaned pod with name '%s' for VM failed with: %s", p.GetName(), err)
logger.Critical().Reason(err).Msgf("Deleting orphaned pod with name '%s' for VM failed.", p.GetName())
return cache.ErrRequeue{Err: err}
}
} else {
// TODO virt-api should make sure this does not happen. For now don't ask and clean up.
// Pod from old VM object detected,
logger.Error().Msgf("Found orphan pod with name '%s' for deleted VM", p.GetName())
logger.Error().Msgf("Found orphan pod with name '%s' for deleted VM.", p.GetName())
err = v.VMService.DeleteVM(&vm)
if err != nil {
// TODO detect if communication error and backoff
logger.Critical().Msgf("Deleting orphaned pod with name '%s' for VM failed with: %s", p.GetName(), err)
logger.Critical().Reason(err).Msgf("Deleting orphaned pod with name '%s' for VM failed.", p.GetName())
return cache.ErrRequeue{Err: err}
}
}
Expand All @@ -118,7 +118,7 @@ func processVM(v *vmResourceEventHandler, obj *v1.VM) error {
// not get any updates
vm.Status.Phase = v1.Scheduling
if err := v.restCli.Put().Resource("vms").Body(&vm).Name(vm.ObjectMeta.Name).Namespace(kubeapi.NamespaceDefault).Do().Error(); err != nil {
logger.Error().Msgf("Updating the VM state to 'Scheduling' failed with: %s", err)
logger.Error().Reason(err).Msg("Updating the VM state to 'Scheduling' failed.")
if e, ok := err.(*errors.StatusError); ok {
if e.Status().Reason == metav1.StatusReasonNotFound ||
e.Status().Reason == metav1.StatusReasonConflict {
Expand All @@ -129,7 +129,7 @@ func processVM(v *vmResourceEventHandler, obj *v1.VM) error {
// TODO backoff policy here
return cache.ErrRequeue{Err: err}
}
logger.Info().Msg("Handing over the VM to the scheduler succeeded")
logger.Info().Msg("Handing over the VM to the scheduler succeeded.")
}
return nil
}
Expand All @@ -149,7 +149,7 @@ func (v *vmResourceEventHandler) OnDelete(obj interface{}) error {
// TODO maybe add a SIGTERM delay to virt-launcher in combination with a grace periode on the delete?
err := v.VMService.DeleteVM(obj.(*v1.VM))
if err != nil {
logger.Error().Msgf("Deleting VM target Pod failed with: %s", err)
logger.Error().Reason(err).Msg("Deleting VM target Pod failed.")
return cache.ErrRequeue{Err: err}
}
logger.Info().Msg("Deleting VM target Pod succeeded.")
Expand Down
24 changes: 12 additions & 12 deletions pkg/virt-handler/libvirt/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ func (l *LibvirtConnection) LookupDomainByName(name string) (VirDomain, error) {
if !l.alive {
conn, err := newConnection(l.uri, l.user, l.pass)
if err != nil {
logging.DefaultLogger().Error().Msgf("Connection to libvirt lost because of %s", err)
logging.DefaultLogger().Error().Reason(err).Msg("Connection to libvirt lost.")
return nil, err
}
l.alive = true
Expand All @@ -55,7 +55,7 @@ func (l *LibvirtConnection) LookupDomainByName(name string) (VirDomain, error) {
dom, err := l.VirConnection.LookupDomainByName(name)
if err != nil && err.(libvirt.VirError).Code != libvirt.VIR_ERR_NO_DOMAIN {
l.alive = false
logging.DefaultLogger().Error().Msgf("Connection to libvirt lost because of %s", err)
logging.DefaultLogger().Error().Reason(err).Msg("Connection to libvirt lost.")
}
return &dom, err
}
Expand Down Expand Up @@ -133,23 +133,23 @@ func (l *LibvirtDomainManager) SyncVM(vm *v1.VM) error {
if err.(libvirt.VirError).Code == libvirt.VIR_ERR_NO_DOMAIN {
xml, err := xml.Marshal(&wantedSpec)
if err != nil {
logging.DefaultLogger().Object(vm).Error().Msgf("Generating the domain xml failed: %v", err)
logging.DefaultLogger().Object(vm).Error().Reason(err).Msg("Generating the domain xml failed.")
return err
}
dom, err = l.virConn.DomainDefineXML(string(xml))
if err != nil {
logging.DefaultLogger().Object(vm).Error().Msgf("Defining the VM failed with: %v", err)
logging.DefaultLogger().Object(vm).Error().Reason(err).Msg("Defining the VM failed.")
return err
}
l.recorder.Event(vm, kubev1.EventTypeNormal, v1.Created.String(), "VM defined")
} else {
logging.DefaultLogger().Object(vm).Error().Msgf("Getting the domain failed with: %v", err)
logging.DefaultLogger().Object(vm).Error().Reason(err).Msg("Getting the domain failed.")
return err
}
}
domState, err := dom.GetState()
if err != nil {
logging.DefaultLogger().Object(vm).Error().Msgf("Getting the domain state failed with: %v", err)
logging.DefaultLogger().Object(vm).Error().Reason(err).Msg("Getting the domain state failed.")
return err
}
// TODO Suspend, Pause, ..., for now we only support reaching the running state
Expand All @@ -159,15 +159,15 @@ func (l *LibvirtDomainManager) SyncVM(vm *v1.VM) error {
case NoState, Shutdown, Shutoff, Crashed:
err := dom.Create()
if err != nil {
logging.DefaultLogger().Object(vm).Error().Msgf("Starting the VM failed with: %v", err)
logging.DefaultLogger().Object(vm).Error().Reason(err).Msg("Starting the VM failed.")
return err
}
l.recorder.Event(vm, kubev1.EventTypeNormal, v1.Started.String(), "VM started")
case Paused:
// TODO: if state change reason indicates a system error, we could try something smarter
err := dom.Resume()
if err != nil {
logging.DefaultLogger().Object(vm).Error().Msgf("Resuming the VM failed with: %v", err)
logging.DefaultLogger().Object(vm).Error().Reason(err).Msg("Resuming the VM failed.")
return err
}
l.recorder.Event(vm, kubev1.EventTypeNormal, v1.Resumed.String(), "VM resumed")
Expand All @@ -187,30 +187,30 @@ func (l *LibvirtDomainManager) KillVM(vm *v1.VM) error {
if err.(libvirt.VirError).Code == libvirt.VIR_ERR_NO_DOMAIN {
return nil
} else {
logging.DefaultLogger().Object(vm).Error().Msgf("Getting the domain failed with: %v", err)
logging.DefaultLogger().Object(vm).Error().Reason(err).Msg("Getting the domain failed.")
return err
}
}
// TODO: Graceful shutdown
domState, err := dom.GetState()
if err != nil {
logging.DefaultLogger().Object(vm).Error().Msgf("Getting the domain state failed with: %v", err)
logging.DefaultLogger().Object(vm).Error().Reason(err).Msg("Getting the domain state failed.")
return err
}

state := LifeCycleTranslationMap[domState[0]]
if state == Running || state == Paused {
err = dom.Destroy()
if err != nil {
logging.DefaultLogger().Object(vm).Error().Msgf("Destroying the domain state failed with: %v", err)
logging.DefaultLogger().Object(vm).Error().Reason(err).Msg("Destroying the domain state failed.")
return err
}
l.recorder.Event(vm, kubev1.EventTypeNormal, v1.Stopped.String(), "VM stopped")
}

err = dom.Undefine()
if err != nil {
logging.DefaultLogger().Object(vm).Error().Msgf("Undefining the domain state failed with: %v", err)
logging.DefaultLogger().Object(vm).Error().Reason(err).Msg("Undefining the domain state failed.")
return err
}
l.recorder.Event(vm, kubev1.EventTypeNormal, v1.Deleted.String(), "VM undefined")
Expand Down

0 comments on commit 01f728c

Please sign in to comment.