Browse Source

Modified glog/klog to support color

Erdi Chen 3 years ago
commit
f77e3ac4bc
6 changed files with 3980 additions and 0 deletions
  1. 191 0
      LICENSE
  2. 121 0
      color/color.go
  3. 1643 0
      klog.go
  4. 164 0
      klog_file.go
  5. 1834 0
      klog_test.go
  6. 27 0
      klog_wrappers_test.go

+ 191 - 0
LICENSE

@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 121 - 0
color/color.go

@@ -0,0 +1,121 @@
+package color
+
+import (
+	"strconv"
+	"strings"
+)
+
const (
	// csi is the ANSI Control Sequence Introducer that prefixes every
	// escape sequence emitted by this package.
	csi   = "\x1b["
	// Clear is the full SGR reset sequence; write it after a colored span
	// to restore the terminal's default attributes.
	Clear = "\x1b[0m"
)
+
// Attribute is a single SGR (Select Graphic Rendition) parameter value.
type Attribute int

// Text style attributes in SGR numeric order (Reset == 0 ... CrossedOut == 9).
// https://en.wikipedia.org/wiki/ANSI_escape_code
const (
	Reset Attribute = iota
	Bold
	Faint
	Italic
	Underline
	SlowBlink
	RapidBlink
	ReverseVideo
	Conceal
	CrossedOut
)
+
// Chinese-named aliases for the attribute constants above; each has the
// same value as its English counterpart (重置 == Reset, 加粗 == Bold, ...).
const (
	重置 Attribute = iota
	加粗
	弱化
	斜体
	下划线
	缓慢闪烁
	快速闪烁
	反显
	消隐
	划除
)
+
// Color is a base ANSI foreground color code (30-37). Background and
// high-intensity variants are derived by fixed offsets (see Bg, Hi, HiBg).
type Color int

const (
	Black Color = iota + 30
	Red
	Green
	Yellow
	Blue
	Magenta
	Cyan
	White
)
+
+type Sequence []Attribute
+
+func (seq Sequence) String() string {
+	var b strings.Builder
+	b.WriteString(csi)
+	if len(seq) > 0 {
+		b.WriteString(strconv.Itoa(int(seq[0])))
+	}
+	for _, v := range seq[1:] {
+		b.WriteRune(';')
+		b.WriteString(strconv.Itoa(int(v)))
+	}
+	b.WriteRune('m')
+	return b.String()
+}
+
// Base appends attrs to the sequence, returning the extended sequence.
// NOTE(review): like append, the result may share seq's backing array, so
// two chains branching from the same prefix can clobber each other.
func (seq Sequence) Base(attrs ...Attribute) Sequence {
	return append(seq, attrs...)
}
+
+func (seq Sequence) Bg(c Color) Sequence {
+	return append(seq, add(c, 10))
+}
+
+func (seq Sequence) Fg(c Color) Sequence {
+	return append(seq, Attribute(c))
+}
+
+func (seq Sequence) Hi(c Color) Sequence {
+	return append(seq, add(c, 60))
+}
+
+func (seq Sequence) HiBg(c Color) Sequence {
+	return append(seq, add(c, 70))
+}
+
+func (seq Sequence) BgRGB(r, g, b int) Sequence {
+	return append(seq, Sequence{48, 2, Attribute(r), Attribute(g), Attribute(b)}...)
+}
+
+func (seq Sequence) FgRGB(r, g, b int) Sequence {
+	return append(seq, Sequence{38, 2, Attribute(r), Attribute(g), Attribute(b)}...)
+}
+
// Base builds a Sequence from the given attributes. The result aliases
// the attrs slice (this is a type conversion, not a copy).
func Base(attrs ...Attribute) Sequence {
	return Sequence(attrs)
}
+
+func Bg(c Color) Sequence {
+	return []Attribute{add(c, 10)}
+}
+
+func Fg(c Color) Sequence {
+	return []Attribute{Attribute(c)}
+}
+
+func Hi(c Color) Sequence {
+	return []Attribute{add(c, 60)}
+}
+
+func HiBg(c Color) Sequence {
+	return []Attribute{add(c, 70)}
+}
+
+func add(c Color, offset int) Attribute {
+	return Attribute(c) + Attribute(offset)
+}

+ 1643 - 0
klog.go

@@ -0,0 +1,1643 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package log (a klog/glog fork) implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+//	klog.Info("Prepare to repel boarders")
+//
+//	klog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+//	if klog.V(2) {
+//		klog.Info("Starting transaction...")
+//	}
+//
+//	klog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to standard error.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+//	-logtostderr=true
+//		Logs are written to standard error instead of to files.
+//	-alsologtostderr=false
+//		Logs are written to standard error as well as to files.
+//	-stderrthreshold=ERROR
+//		Log events at or above this severity are logged to standard
+//		error as well as to files.
+//	-log_dir=""
+//		Log files will be written to this directory instead of the
+//		default temporary directory.
+//
+//	Other flags provide aids to debugging.
+//
+//	-log_backtrace_at=""
+//		When set to a file and line number holding a logging statement,
+//		such as
+//			-log_backtrace_at=gopherflakes.go:234
+//		a stack trace will be written to the Info log whenever execution
+//		hits that statement. (Unlike with -vmodule, the ".go" must be
+//		present.)
+//	-v=0
+//		Enable V-leveled logging at the specified level.
+//	-vmodule=""
+//		The syntax of the argument is a comma-separated list of pattern=N,
+//		where pattern is a literal file name (minus the ".go" suffix) or
+//		"glob" pattern and N is a V level. For instance,
+//			-vmodule=gopher*=3
+//		sets the V level to 3 in all Go files whose names begin "gopher".
+//
+package log
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	stdLog "log"
+	"math"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-logr/logr"
+
+	"erdi.us/aylang/log/color"
+)
+
// severity identifies the sort of log: info, warning etc. It also implements
// the flag.Value interface. The -stderrthreshold flag is of type severity and
// should be modified only through the flag.Value interface. The values match
// the corresponding constants in C++.
type severity int32 // sync/atomic int32

// These constants identify the log levels in order of increasing severity.
// A message written to a high-severity log file is also written to each
// lower-severity log file.
const (
	infoLog severity = iota
	warningLog
	errorLog
	fatalLog
	numSeverity = 4
)

// severityChar holds the single-letter tag for each severity, indexed by
// the constants above (I=info, W=warning, E=error, F=fatal).
const severityChar = "IWEF"

// severityName maps each severity to its upper-case name, used by
// severityByName when parsing flag values.
var severityName = []string{
	infoLog:    "INFO",
	warningLog: "WARNING",
	errorLog:   "ERROR",
	fatalLog:   "FATAL",
}

// severityColors holds the ANSI color prefix written before each log
// header: green for INFO, yellow for WARNING, red for ERROR, and red on
// black for FATAL.
var severityColors = [numSeverity]string{
	color.Fg(color.Green).String(),
	color.Fg(color.Yellow).String(),
	color.Fg(color.Red).String(),
	color.Fg(color.Red).Bg(color.Black).String(),
}
+
+// get returns the value of the severity.
+func (s *severity) get() severity {
+	return severity(atomic.LoadInt32((*int32)(s)))
+}
+
+// set sets the value of the severity.
+func (s *severity) set(val severity) {
+	atomic.StoreInt32((*int32)(s), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (s *severity) String() string {
+	return strconv.FormatInt(int64(*s), 10)
+}
+
// Get is part of the flag.Getter interface. It returns the severity value
// itself (not a pointer), boxed in an interface.
func (s *severity) Get() interface{} {
	return *s
}
+
+// Set is part of the flag.Value interface.
+func (s *severity) Set(value string) error {
+	var threshold severity
+	// Is it a known name?
+	if v, ok := severityByName(value); ok {
+		threshold = v
+	} else {
+		v, err := strconv.ParseInt(value, 10, 32)
+		if err != nil {
+			return err
+		}
+		threshold = severity(v)
+	}
+	logging.stderrThreshold.set(threshold)
+	return nil
+}
+
+func severityByName(s string) (severity, bool) {
+	s = strings.ToUpper(s)
+	for i, name := range severityName {
+		if name == s {
+			return severity(i), true
+		}
+	}
+	return 0, false
+}
+
// OutputStats tracks the number of output lines and bytes written.
type OutputStats struct {
	lines int64
	bytes int64
}

// Lines returns the number of lines written.
func (s *OutputStats) Lines() int64 {
	return atomic.LoadInt64(&s.lines)
}

// Bytes returns the number of bytes written.
func (s *OutputStats) Bytes() int64 {
	return atomic.LoadInt64(&s.bytes)
}

// Stats tracks the number of lines of output and number of bytes
// per severity level. Values must be read with atomic.LoadInt64.
var Stats struct {
	Info, Warning, Error OutputStats
}

// severityStats maps each severity to its Stats counter, indexed by the
// severity constants. fatalLog has no entry (nil), so fatal output is
// not counted here.
var severityStats = [numSeverity]*OutputStats{
	infoLog:    &Stats.Info,
	warningLog: &Stats.Warning,
	errorLog:   &Stats.Error,
}
+
// Level specifies a level of verbosity for V logs. *Level implements
// flag.Value; the -v flag is of type Level and should be modified only
// through the flag.Value interface.
//
// Level is exported because it appears in the arguments to V and is the
// type of the v flag, which can be set programmatically. It's a distinct
// type because we want to discriminate it from logType. Variables of type
// Level are only changed under logging.mu, but are read with atomic ops
// (Level is treated as a sync/atomic int32), so the state of the logging
// module stays consistent.
type Level int32
+
+// get returns the value of the Level.
+func (l *Level) get() Level {
+	return Level(atomic.LoadInt32((*int32)(l)))
+}
+
+// set sets the value of the Level.
+func (l *Level) set(val Level) {
+	atomic.StoreInt32((*int32)(l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *Level) String() string {
+	return strconv.FormatInt(int64(*l), 10)
+}
+
+// Get is part of the flag.Getter interface.
+func (l *Level) Get() interface{} {
+	return *l
+}
+
+// Set is part of the flag.Value interface.
+func (l *Level) Set(value string) error {
+	v, err := strconv.ParseInt(value, 10, 32)
+	if err != nil {
+		return err
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	logging.setVState(Level(v), logging.vmodule.filter, false)
+	return nil
+}
+
// moduleSpec represents the setting of the -vmodule flag: an ordered list
// of per-file verbosity overrides.
type moduleSpec struct {
	filter []modulePat
}

// modulePat contains a filter for the -vmodule flag.
// It holds a verbosity level and a file pattern to match.
type modulePat struct {
	pattern string
	literal bool // The pattern is a literal string (no glob metacharacters).
	level   Level
}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+	if m.literal {
+		return file == m.pattern
+	}
+	match, _ := filepath.Match(m.pattern, file)
+	return match
+}
+
+func (m *moduleSpec) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	var b bytes.Buffer
+	for i, f := range m.filter {
+		if i > 0 {
+			b.WriteRune(',')
+		}
+		fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+	}
+	return b.String()
+}
+
// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil
// for this flag type since the struct is not exported.
func (m *moduleSpec) Get() interface{} {
	return nil
}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Set will sets module value
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+	var filter []modulePat
+	for _, pat := range strings.Split(value, ",") {
+		if len(pat) == 0 {
+			// Empty strings such as from a trailing comma can be ignored.
+			continue
+		}
+		patLev := strings.Split(pat, "=")
+		if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+			return errVmoduleSyntax
+		}
+		pattern := patLev[0]
+		v, err := strconv.ParseInt(patLev[1], 10, 32)
+		if err != nil {
+			return errors.New("syntax error: expect comma-separated list of filename=N")
+		}
+		if v < 0 {
+			return errors.New("negative value for vmodule level")
+		}
+		if v == 0 {
+			continue // Ignore. It's harmless but no point in paying the overhead.
+		}
+		// TODO: check syntax of filter?
+		filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	logging.setVState(logging.verbosity, filter, true)
+	return nil
+}
+
// isLiteral reports whether the pattern is a literal string, that is, has
// none of the metacharacters that would require filepath.Match to be
// called to match the pattern.
func isLiteral(pattern string) bool {
	for _, meta := range `\*?[]` {
		if strings.ContainsRune(pattern, meta) {
			return false
		}
	}
	return true
}
+
// traceLocation represents the setting of the -log_backtrace_at flag:
// a file basename plus a line number at which to emit a stack trace.
type traceLocation struct {
	file string
	line int
}

// isSet reports whether the trace location has been specified.
// (Set enforces line > 0, so a zero line means "unset".)
// logging.mu is held.
func (t *traceLocation) isSet() bool {
	return t.line > 0
}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+	if t.line != line {
+		return false
+	}
+	if i := strings.LastIndex(file, "/"); i >= 0 {
+		file = file[i+1:]
+	}
+	return t.file == file
+}
+
+func (t *traceLocation) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil
// for this flag type since the struct is not exported.
func (t *traceLocation) Get() interface{} {
	return nil
}

// errTraceSyntax is returned by Set for malformed -log_backtrace_at values.
var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Set will sets backtrace value
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+	if value == "" {
+		// Unset.
+		logging.mu.Lock()
+		defer logging.mu.Unlock()
+		t.line = 0
+		t.file = ""
+		return nil
+	}
+	fields := strings.Split(value, ":")
+	if len(fields) != 2 {
+		return errTraceSyntax
+	}
+	file, line := fields[0], fields[1]
+	if !strings.Contains(file, ".") {
+		return errTraceSyntax
+	}
+	v, err := strconv.Atoi(line)
+	if err != nil {
+		return errTraceSyntax
+	}
+	if v <= 0 {
+		return errors.New("negative or zero value for level")
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	t.line = v
+	t.file = file
+	return nil
+}
+
// flushSyncWriter is the interface satisfied by logging destinations:
// an io.Writer that can also be flushed and synced.
type flushSyncWriter interface {
	Flush() error
	Sync() error
	io.Writer
}
+
// init sets up the defaults and runs flushDaemon.
// Note that logging defaults to stderr (toStderr = true), matching the
// -logtostderr=true default described in the package documentation.
func init() {
	logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR.
	logging.setVState(0, nil, false)
	logging.logDir = ""
	logging.logFile = ""
	logging.logFileMaxSizeMB = 1800
	logging.toStderr = true
	logging.alsoToStderr = false
	logging.skipHeaders = false
	logging.addDirHeader = false
	logging.skipLogHeaders = false
	logging.oneOutput = false
	// Flush buffered output periodically for the life of the process.
	go logging.flushDaemon()
}
+
// InitFlags is for explicitly initializing the flags.
// A nil flagset registers the flags on flag.CommandLine. The defaults
// shown in each flag come from the current logging state set in init.
func InitFlags(flagset *flag.FlagSet) {
	if flagset == nil {
		flagset = flag.CommandLine
	}

	flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory")
	flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file")
	flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB,
		"Defines the maximum size a log file can grow to. Unit is megabytes. "+
			"If the value is 0, the maximum file size is unlimited.")
	flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files")
	flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files")
	flagset.Var(&logging.verbosity, "v", "number for the log level verbosity")
	flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages")
	flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages")
	flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level)")
	flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files")
	flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
	flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
	flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
}
+
// Flush flushes all pending log I/O. Programs should call Flush before
// exiting to guarantee all log output is written.
func Flush() {
	logging.lockAndFlushAll()
}
+
// loggingT collects all the global state of the logging setup.
type loggingT struct {
	// Boolean flags. Not handled atomically because the flag.Value interface
	// does not let us avoid the =true, and that shorthand is necessary for
	// compatibility. TODO: does this matter enough to fix? Seems unlikely.
	toStderr     bool // The -logtostderr flag.
	alsoToStderr bool // The -alsologtostderr flag.

	// Level flag. Handled atomically.
	stderrThreshold severity // The -stderrthreshold flag.

	// freeList is a list of byte buffers, maintained under freeListMu.
	freeList *buffer
	// freeListMu maintains the free list. It is separate from the main mutex
	// so buffers can be grabbed and printed to without holding the main lock,
	// for better parallelization.
	freeListMu sync.Mutex

	// mu protects the remaining elements of this structure and is
	// used to synchronize logging.
	mu sync.Mutex
	// file holds writer for each of the log types.
	file [numSeverity]flushSyncWriter
	// pcs is used in V to avoid an allocation when computing the caller's PC.
	pcs [1]uintptr
	// vmap is a cache of the V Level for each V() call site, identified by PC.
	// It is wiped whenever the vmodule flag changes state.
	vmap map[uintptr]Level
	// filterLength stores the length of the vmodule filter chain. If greater
	// than zero, it means vmodule is enabled. It may be read safely
	// using sync.LoadInt32, but is only modified under mu.
	filterLength int32
	// traceLocation is the state of the -log_backtrace_at flag.
	traceLocation traceLocation
	// These flags are modified only under lock, although verbosity may be fetched
	// safely using atomic.LoadInt32.
	vmodule   moduleSpec // The state of the -vmodule flag.
	verbosity Level      // V logging level, the value of the -v flag.

	// If non-empty, overrides the choice of directory in which to write logs.
	// See createLogDirs for the full list of possible destinations.
	logDir string

	// If non-empty, specifies the path of the file to write logs. Mutually
	// exclusive with the log_dir option.
	logFile string

	// When logFile is specified, this limiter makes sure the logFile won't
	// exceed a certain size. When it does, the logFile will be cleaned up.
	// If this value is 0, no size limitation will be applied to logFile.
	logFileMaxSizeMB uint64

	// If true, do not add the prefix headers, useful when used with SetOutput
	skipHeaders bool

	// If true, do not add the headers to log files
	skipLogHeaders bool

	// If true, add the file directory to the header
	addDirHeader bool

	// If set, all output will be redirected unconditionally to the provided logr.Logger
	logr logr.Logger

	// If true, messages will not be propagated to lower severity log levels
	oneOutput bool

	// If set, all output will be filtered through the filter.
	filter LogFilter
}
+
// buffer holds a byte Buffer for reuse. The zero value is ready for use.
type buffer struct {
	bytes.Buffer
	tmp  [64]byte // temporary byte array for creating headers.
	next *buffer  // next buffer in logging.freeList.
}

// logging is the single global logging state used by the package-level API.
var logging loggingT
+
// setVState sets a consistent state for V logging.
// l.mu is held.
//
// Ordering matters here: verbosity and filterLength are read atomically
// without l.mu (see the loggingT field comments), so both are zeroed
// first to disable fast-path checks while the filter and vmap are
// swapped, then re-enabled in the opposite order once the new state is
// in place.
func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
	// Turn verbosity off so V will not fire while we are in transition.
	l.verbosity.set(0)
	// Ditto for filter length.
	atomic.StoreInt32(&l.filterLength, 0)

	// Set the new filters and wipe the pc->Level map if the filter has changed.
	if setFilter {
		l.vmodule.filter = filter
		l.vmap = make(map[uintptr]Level)
	}

	// Things are consistent now, so enable filtering and verbosity.
	// They are enabled in order opposite to that in V.
	atomic.StoreInt32(&l.filterLength, int32(len(filter)))
	l.verbosity.set(verbosity)
}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer {
+	l.freeListMu.Lock()
+	b := l.freeList
+	if b != nil {
+		l.freeList = b.next
+	}
+	l.freeListMu.Unlock()
+	if b == nil {
+		b = new(buffer)
+	} else {
+		b.next = nil
+		b.Reset()
+	}
+	return b
+}
+
+// putBuffer returns a buffer to the free list.
+func (l *loggingT) putBuffer(b *buffer) {
+	if b.Len() >= 256 {
+		// Let big buffers die a natural death.
+		return
+	}
+	l.freeListMu.Lock()
+	b.next = l.freeList
+	l.freeList = b
+	l.freeListMu.Unlock()
+}
+
var timeNow = time.Now // Stubbed out for testing.

/*
header formats a log header as defined by the C++ implementation.
It returns a buffer containing the formatted header and the user's file and line number.
The depth specifies how many stack frames above lives the source line to be identified in the log message.

Log lines have this form:
	Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
where the fields are defined as follows:
	L                A single character, representing the log level (eg 'I' for INFO)
	mm               The month (zero padded; ie May is '05')
	dd               The day (zero padded)
	hh:mm:ss.uuuuuu  Time in hours, minutes and fractional seconds
	threadid         The space-padded thread ID as returned by GetTID()
	file             The file name
	line             The line number
	msg              The user-supplied message
*/
func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
	// 3 fixed frames skip this function and its internal callers; depth adds
	// the extra frames requested by the caller.
	_, file, line, ok := runtime.Caller(3 + depth)
	if !ok {
		// Unknown call site: use placeholders rather than failing.
		file = "???"
		line = 1
	} else {
		if slash := strings.LastIndex(file, "/"); slash >= 0 {
			path := file
			file = path[slash+1:]
			// With addDirHeader, keep the immediate parent directory too.
			if l.addDirHeader {
				if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 {
					file = path[dirsep+1:]
				}
			}
		}
	}
	return l.formatHeader(s, file, line), file, line
}
+
// formatHeader formats a log header using the provided file name and line number.
// The returned buffer comes from the free list. Unless skipHeaders is set, the
// header is wrapped in a per-severity color escape (from the color package),
// terminated with color.Clear after the closing "]".
func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
	now := timeNow()
	if line < 0 {
		line = 0 // not a real line number, but acceptable to someDigits
	}
	if s > fatalLog {
		s = infoLog // for safety.
	}
	buf := l.getBuffer()
	if l.skipHeaders {
		return buf
	}

	// Set header color.
	buf.WriteString(severityColors[s])

	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
	// It's worth about 3X. Fprintf is hard.
	_, month, day := now.Date()
	hour, minute, second := now.Clock()
	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
	buf.tmp[0] = severityChar[s]
	buf.twoDigits(1, int(month))
	buf.twoDigits(3, day)
	buf.tmp[5] = ' '
	buf.twoDigits(6, hour)
	buf.tmp[8] = ':'
	buf.twoDigits(9, minute)
	buf.tmp[11] = ':'
	buf.twoDigits(12, second)
	buf.tmp[14] = '.'
	buf.nDigits(6, 15, now.Nanosecond()/1000, '0') // microseconds, zero padded
	buf.tmp[21] = ' '
	buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
	buf.tmp[29] = ' '
	buf.Write(buf.tmp[:30])
	buf.WriteString(file)
	// tmp is reused for the variable-width ":line]" suffix.
	buf.tmp[0] = ':'
	n := buf.someDigits(1, line)
	buf.tmp[n+1] = ']'
	buf.Write(buf.tmp[:n+2])

	// Reset color to normal.
	buf.WriteString(color.Clear)
	buf.WriteRune(' ')
	return buf
}
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
+func (buf *buffer) twoDigits(i, d int) {
+	buf.tmp[i+1] = digits[d%10]
+	d /= 10
+	buf.tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *buffer) nDigits(n, i, d int, pad byte) {
+	j := n - 1
+	for ; j >= 0 && d > 0; j-- {
+		buf.tmp[i+j] = digits[d%10]
+		d /= 10
+	}
+	for ; j >= 0; j-- {
+		buf.tmp[i+j] = pad
+	}
+}
+
+// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
+func (buf *buffer) someDigits(i, d int) int {
+	// Print into the top, then copy down. We know there's space for at least
+	// a 10-digit number.
+	j := len(buf.tmp)
+	for {
+		j--
+		buf.tmp[j] = digits[d%10]
+		d /= 10
+		if d == 0 {
+			break
+		}
+	}
+	return copy(buf.tmp[i:], buf.tmp[j:])
+}
+
+func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) {
+	buf, file, line := l.header(s, 0)
+	// if logr is set, we clear the generated header as we rely on the backing
+	// logr implementation to print headers
+	if logr != nil {
+		l.putBuffer(buf)
+		buf = l.getBuffer()
+	}
+	if filter != nil {
+		args = filter.Filter(args)
+	}
+	fmt.Fprintln(buf, args...)
+	l.output(s, logr, buf, 0 /* depth */, file, line, false)
+}
+
// print logs at severity s via printDepth with depth 1, skipping this
// wrapper's frame so the caller of print is reported as the log site.
func (l *loggingT) print(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) {
	l.printDepth(s, logr, filter, 1, args...)
}
+
+func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, depth int, args ...interface{}) {
+	buf, file, line := l.header(s, depth)
+	// if logr is set, we clear the generated header as we rely on the backing
+	// logr implementation to print headers
+	if logr != nil {
+		l.putBuffer(buf)
+		buf = l.getBuffer()
+	}
+	if filter != nil {
+		args = filter.Filter(args)
+	}
+	fmt.Fprint(buf, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, logr, buf, depth, file, line, false)
+}
+
+func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format string, args ...interface{}) {
+	buf, file, line := l.header(s, 0)
+	// if logr is set, we clear the generated header as we rely on the backing
+	// logr implementation to print headers
+	if logr != nil {
+		l.putBuffer(buf)
+		buf = l.getBuffer()
+	}
+	if filter != nil {
+		format, args = filter.FilterF(format, args)
+	}
+	fmt.Fprintf(buf, format, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, logr, buf, 0 /* depth */, file, line, false)
+}
+
+// printWithFileLine behaves like print but uses the provided file and line number.  If
+// alsoLogToStderr is true, the log message always appears on standard error; it
+// will also appear in the log file unless --logtostderr is set.
+func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) {
+	buf := l.formatHeader(s, file, line)
+	// if logr is set, we clear the generated header as we rely on the backing
+	// logr implementation to print headers
+	if logr != nil {
+		l.putBuffer(buf)
+		buf = l.getBuffer()
+	}
+	if filter != nil {
+		args = filter.Filter(args)
+	}
+	fmt.Fprint(buf, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, logr, buf, 2 /* depth */, file, line, alsoToStderr)
+}
+
+// if loggr is specified, will call loggr.Error, otherwise output with logging module.
+func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
+	if filter != nil {
+		msg, keysAndValues = filter.FilterS(msg, keysAndValues)
+	}
+	if loggr != nil {
+		logr.WithCallDepth(loggr, depth+2).Error(err, msg, keysAndValues...)
+		return
+	}
+	l.printS(err, errorLog, depth+1, msg, keysAndValues...)
+}
+
+// if loggr is specified, will call loggr.Info, otherwise output with logging module.
+func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
+	if filter != nil {
+		msg, keysAndValues = filter.FilterS(msg, keysAndValues)
+	}
+	if loggr != nil {
+		logr.WithCallDepth(loggr, depth+2).Info(msg, keysAndValues...)
+		return
+	}
+	l.printS(nil, infoLog, depth+1, msg, keysAndValues...)
+}
+
+// printS is called from infoS and errorS if loggr is not specified.
+// set log severity by s
+func (l *loggingT) printS(err error, s severity, depth int, msg string, keysAndValues ...interface{}) {
+	b := &bytes.Buffer{}
+	b.WriteString(fmt.Sprintf("%q", msg))
+	if err != nil {
+		b.WriteByte(' ')
+		b.WriteString(fmt.Sprintf("err=%q", err.Error()))
+	}
+	kvListFormat(b, keysAndValues...)
+	l.printDepth(s, logging.logr, nil, depth+1, b)
+}
+
// missingValue is substituted for the value of a key that arrives without one.
const missingValue = "(MISSING)"

// kvListFormat appends alternating key/value pairs to b as ` key=value`.
// Strings, errors and fmt.Stringers are rendered quoted (%q), byte slices
// with %+q, and everything else with %+v. A trailing key with no value is
// paired with missingValue.
func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
	for i := 0; i < len(keysAndValues); i += 2 {
		k := keysAndValues[i]
		var v interface{} = missingValue
		if i+1 < len(keysAndValues) {
			v = keysAndValues[i+1]
		}
		b.WriteByte(' ')

		switch v.(type) {
		case string, error:
			fmt.Fprintf(b, "%s=%q", k, v)
		case []byte:
			fmt.Fprintf(b, "%s=%+q", k, v)
		default:
			if _, ok := v.(fmt.Stringer); ok {
				fmt.Fprintf(b, "%s=%q", k, v)
			} else {
				fmt.Fprintf(b, "%s=%+v", k, v)
			}
		}
	}
}
+
// redirectBuffer is used to set an alternate destination for the logs
type redirectBuffer struct {
	w io.Writer
}

// Sync is a no-op: a redirect target has no backing file to sync.
func (rb *redirectBuffer) Sync() error {
	return nil
}

// Flush is a no-op: writes go straight to the underlying writer.
func (rb *redirectBuffer) Flush() error {
	return nil
}

// Write forwards the bytes unmodified to the redirect target.
func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
	return rb.w.Write(bytes)
}

// SetLogger will set the backing logr implementation for klog.
// If set, all log lines will be suppressed from the regular Output, and
// redirected to the logr implementation.
// Use as:
//   ...
//   klog.SetLogger(zapr.NewLogger(zapLog))
func SetLogger(logr logr.Logger) {
	logging.mu.Lock()
	defer logging.mu.Unlock()

	logging.logr = logr
}
+
+// SetOutput sets the output destination for all severities
+func SetOutput(w io.Writer) {
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	for s := fatalLog; s >= infoLog; s-- {
+		rb := &redirectBuffer{
+			w: w,
+		}
+		logging.file[s] = rb
+	}
+}
+
+// SetOutputBySeverity sets the output destination for specific severity
+func SetOutputBySeverity(name string, w io.Writer) {
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	sev, ok := severityByName(name)
+	if !ok {
+		panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
+	}
+	rb := &redirectBuffer{
+		w: w,
+	}
+	logging.file[sev] = rb
+}
+
+// LogToStderr sets whether to log exclusively to stderr, bypassing outputs
+func LogToStderr(stderr bool) {
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+
+	logging.toStderr = stderr
+}
+
+// output writes the data to the log files and releases the buffer.
+func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, depth int, file string, line int, alsoToStderr bool) {
+	l.mu.Lock()
+	if l.traceLocation.isSet() {
+		if l.traceLocation.match(file, line) {
+			buf.Write(stacks(false))
+		}
+	}
+	data := buf.Bytes()
+	if log != nil {
+		// TODO: set 'severity' and caller information as structured log info
+		// keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line}
+		if s == errorLog {
+			logr.WithCallDepth(l.logr, depth+3).Error(nil, string(data))
+		} else {
+			logr.WithCallDepth(log, depth+3).Info(string(data))
+		}
+	} else if l.toStderr {
+		os.Stderr.Write(data)
+	} else {
+		if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+			os.Stderr.Write(data)
+		}
+
+		if logging.logFile != "" {
+			// Since we are using a single log file, all of the items in l.file array
+			// will point to the same file, so just use one of them to write data.
+			if l.file[infoLog] == nil {
+				if err := l.createFiles(infoLog); err != nil {
+					os.Stderr.Write(data) // Make sure the message appears somewhere.
+					l.exit(err)
+				}
+			}
+			l.file[infoLog].Write(data)
+		} else {
+			if l.file[s] == nil {
+				if err := l.createFiles(s); err != nil {
+					os.Stderr.Write(data) // Make sure the message appears somewhere.
+					l.exit(err)
+				}
+			}
+
+			if l.oneOutput {
+				l.file[s].Write(data)
+			} else {
+				switch s {
+				case fatalLog:
+					l.file[fatalLog].Write(data)
+					fallthrough
+				case errorLog:
+					l.file[errorLog].Write(data)
+					fallthrough
+				case warningLog:
+					l.file[warningLog].Write(data)
+					fallthrough
+				case infoLog:
+					l.file[infoLog].Write(data)
+				}
+			}
+		}
+	}
+	if s == fatalLog {
+		// If we got here via Exit rather than Fatal, print no stacks.
+		if atomic.LoadUint32(&fatalNoStacks) > 0 {
+			l.mu.Unlock()
+			timeoutFlush(10 * time.Second)
+			os.Exit(1)
+		}
+		// Dump all goroutine stacks before exiting.
+		trace := stacks(true)
+		// Write the stack trace for all goroutines to the stderr.
+		if l.toStderr || l.alsoToStderr || s >= l.stderrThreshold.get() || alsoToStderr {
+			os.Stderr.Write(trace)
+		}
+		// Write the stack trace for all goroutines to the files.
+		logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
+		for log := fatalLog; log >= infoLog; log-- {
+			if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
+				f.Write(trace)
+			}
+		}
+		l.mu.Unlock()
+		timeoutFlush(10 * time.Second)
+		os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
+	}
+	l.putBuffer(buf)
+	l.mu.Unlock()
+	if stats := severityStats[s]; stats != nil {
+		atomic.AddInt64(&stats.lines, 1)
+		atomic.AddInt64(&stats.bytes, int64(len(data)))
+	}
+}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first.  This is needed because the hooks invoked
+// by Flush may deadlock when klog.Fatal is called from a hook that holds
+// a lock.
+func timeoutFlush(timeout time.Duration) {
+	done := make(chan bool, 1)
+	go func() {
+		Flush() // calls logging.lockAndFlushAll()
+		done <- true
+	}()
+	select {
+	case <-done:
+	case <-time.After(timeout):
+		fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout)
+	}
+}
+
// stacks is a wrapper for runtime.Stack that attempts to recover the data
// for all goroutines. The trace size is unknown up front, so the buffer is
// grown (doubled, up to five attempts) until the dump fits.
func stacks(all bool) []byte {
	size := 10000
	if all {
		size = 100000
	}
	var buf []byte
	for attempt := 0; attempt < 5; attempt++ {
		buf = make([]byte, size)
		written := runtime.Stack(buf, all)
		if written < len(buf) {
			return buf[:written]
		}
		size *= 2
	}
	return buf
}
+
+// logExitFunc provides a simple mechanism to override the default behavior
+// of exiting on error. Used in testing and to guarantee we reach a required exit
+// for fatal logs. Instead, exit could be a function rather than a method but that
+// would make its use clumsier.
+var logExitFunc func(error)
+
+// exit is called if there is trouble creating or writing log files.
+// It flushes the logs and exits the program; there's no point in hanging around.
+// l.mu is held.
+func (l *loggingT) exit(err error) {
+	fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
+	// If logExitFunc is set, we do that instead of exiting.
+	if logExitFunc != nil {
+		logExitFunc(err)
+		return
+	}
+	l.flushAll()
+	os.Exit(2)
+}
+
// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
// file's Sync method and providing a wrapper for the Write method that provides log
// file rotation. There are conflicting methods, so the file cannot be embedded.
// l.mu is held for all its methods.
type syncBuffer struct {
	logger *loggingT
	*bufio.Writer
	file     *os.File
	sev      severity // severity this buffer serves; one file per severity.
	nbytes   uint64 // The number of bytes written to this file
	maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up.
}

// Sync flushes the underlying file's contents to stable storage.
func (sb *syncBuffer) Sync() error {
	return sb.file.Sync()
}
+
+// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options.
+func CalculateMaxSize() uint64 {
+	if logging.logFile != "" {
+		if logging.logFileMaxSizeMB == 0 {
+			// If logFileMaxSizeMB is zero, we don't have limitations on the log size.
+			return math.MaxUint64
+		}
+		// Flag logFileMaxSizeMB is in MB for user convenience.
+		return logging.logFileMaxSizeMB * 1024 * 1024
+	}
+	// If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size.
+	return MaxSize
+}
+
+func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+	if sb.nbytes+uint64(len(p)) >= sb.maxbytes {
+		if err := sb.rotateFile(time.Now(), false); err != nil {
+			sb.logger.exit(err)
+		}
+	}
+	n, err = sb.Writer.Write(p)
+	sb.nbytes += uint64(n)
+	if err != nil {
+		sb.logger.exit(err)
+	}
+	return
+}
+
// rotateFile closes the syncBuffer's file and starts a new one.
// The startup argument indicates whether this is the initial startup of klog.
// If startup is true, existing files are opened for appending instead of truncated.
func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error {
	if sb.file != nil {
		sb.Flush()
		sb.file.Close()
	}
	var err error
	sb.file, _, err = create(severityName[sb.sev], now, startup)
	if err != nil {
		return err
	}
	if startup {
		// Appending to an existing file: seed the byte count with its
		// current size so rotation triggers at the right point.
		fileInfo, err := sb.file.Stat()
		if err != nil {
			return fmt.Errorf("file stat could not get fileinfo: %v", err)
		}
		// init file size
		sb.nbytes = uint64(fileInfo.Size())
	} else {
		sb.nbytes = 0
	}
	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)

	if sb.logger.skipLogHeaders {
		return nil
	}

	// Write header. It is written directly to the file (not the bufio
	// writer) and counted towards nbytes.
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
	fmt.Fprintf(&buf, "Running on machine: %s\n", host)
	fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
	fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
	n, err := sb.file.Write(buf.Bytes())
	sb.nbytes += uint64(n)
	return err
}
+
+// bufferSize sizes the buffer associated with each log file. It's large
+// so that log records can accumulate without the logging thread blocking
+// on disk I/O. The flushDaemon will block instead.
+const bufferSize = 256 * 1024
+
+// createFiles creates all the log files for severity from sev down to infoLog.
+// l.mu is held.
+func (l *loggingT) createFiles(sev severity) error {
+	now := time.Now()
+	// Files are created in decreasing severity order, so as soon as we find one
+	// has already been created, we can stop.
+	for s := sev; s >= infoLog && l.file[s] == nil; s-- {
+		sb := &syncBuffer{
+			logger:   l,
+			sev:      s,
+			maxbytes: CalculateMaxSize(),
+		}
+		if err := sb.rotateFile(now, true); err != nil {
+			return err
+		}
+		l.file[s] = sb
+	}
+	return nil
+}
+
+const flushInterval = 5 * time.Second
+
+// flushDaemon periodically flushes the log file buffers.
+func (l *loggingT) flushDaemon() {
+	for range time.NewTicker(flushInterval).C {
+		l.lockAndFlushAll()
+	}
+}
+
+// lockAndFlushAll is like flushAll but locks l.mu first.
+func (l *loggingT) lockAndFlushAll() {
+	l.mu.Lock()
+	l.flushAll()
+	l.mu.Unlock()
+}
+
+// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// l.mu is held.
+func (l *loggingT) flushAll() {
+	// Flush from fatal down, in case there's trouble flushing.
+	for s := fatalLog; s >= infoLog; s-- {
+		file := l.file[s]
+		if file != nil {
+			file.Flush() // ignore error
+			file.Sync()  // ignore error
+		}
+	}
+}
+
+// CopyStandardLogTo arranges for messages written to the Go "log" package's
+// default logs to also appear in the Google logs for the named and lower
+// severities.  Subsequent changes to the standard log's default output location
+// or format may break this behavior.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL".  If the name is not
+// recognized, CopyStandardLogTo panics.
+func CopyStandardLogTo(name string) {
+	sev, ok := severityByName(name)
+	if !ok {
+		panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+	}
+	// Set a log format that captures the user's file and line:
+	//   d.go:23: message
+	stdLog.SetFlags(stdLog.Lshortfile)
+	stdLog.SetOutput(logBridge(sev))
+}
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+	var (
+		file = "???"
+		line = 1
+		text string
+	)
+	// Split "d.go:23: message" into "d.go", "23", and "message".
+	if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+		text = fmt.Sprintf("bad log format: %s", b)
+	} else {
+		file = string(parts[0])
+		text = string(parts[2][1:]) // skip leading space
+		line, err = strconv.Atoi(string(parts[1]))
+		if err != nil {
+			text = fmt.Sprintf("bad line number: %s", b)
+			line = 1
+		}
+	}
+	// printWithFileLine with alsoToStderr=true, so standard log messages
+	// always appear on standard error.
+	logging.printWithFileLine(severity(lb), logging.logr, logging.filter, file, line, true, text)
+	return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+	fn := runtime.FuncForPC(pc)
+	file, _ := fn.FileLine(pc)
+	// The file is something like /a/b/c/d.go. We want just the d.
+	if strings.HasSuffix(file, ".go") {
+		file = file[:len(file)-3]
+	}
+	if slash := strings.LastIndex(file, "/"); slash >= 0 {
+		file = file[slash+1:]
+	}
+	for _, filter := range l.vmodule.filter {
+		if filter.match(file) {
+			l.vmap[pc] = filter.level
+			return filter.level
+		}
+	}
+	l.vmap[pc] = 0
+	return 0
+}
+
// Verbose is a boolean type that implements Infof (like Printf) etc.
// See the documentation of V for more information.
type Verbose struct {
	enabled bool // whether this verbosity level fires at the call site.
	logr    logr.Logger // backing logr (already V-adjusted), or nil.
	filter  LogFilter // filter captured at construction time.
}

// newVerbose builds a Verbose for the given level, capturing the current
// global logr (adjusted by level) and filter.
func newVerbose(level Level, b bool) Verbose {
	if logging.logr == nil {
		return Verbose{b, nil, logging.filter}
	}
	return Verbose{b, logging.logr.V(int(level)), logging.filter}
}

// V reports whether verbosity at the call site is at least the requested level.
// The returned value is a struct of type Verbose, which implements Info, Infoln
// and Infof. These methods will write to the Info log if called.
// Thus, one may write either
//	if glog.V(2).Enabled() { klog.Info("log this") }
// or
//	klog.V(2).Info("log this")
// The second form is shorter but the first is cheaper if logging is off because it does
// not evaluate its arguments.
//
// Whether an individual call to V generates a log record depends on the setting of
// the -v and -vmodule flags; both are off by default. The V call will log if its level
// is less than or equal to the value of the -v flag, or alternatively if its level is
// less than or equal to the value of the -vmodule pattern matching the source file
// containing the call.
func V(level Level) Verbose {
	// This function tries hard to be cheap unless there's work to do.
	// The fast path is two atomic loads and compares.

	// Here is a cheap but safe test to see if V logging is enabled globally.
	if logging.verbosity.get() >= level {
		return newVerbose(level, true)
	}

	// It's off globally but vmodule may still be set.
	// Here is another cheap but safe test to see if vmodule is enabled.
	if atomic.LoadInt32(&logging.filterLength) > 0 {
		// Now we need a proper lock to use the logging structure. The pcs field
		// is shared so we must lock before accessing it. This is fairly expensive,
		// but if V logging is enabled we're slow anyway.
		logging.mu.Lock()
		defer logging.mu.Unlock()
		// Callers(2, ...) captures the PC of whoever called V.
		if runtime.Callers(2, logging.pcs[:]) == 0 {
			return newVerbose(level, false)
		}
		v, ok := logging.vmap[logging.pcs[0]]
		if !ok {
			// First time seeing this call site: compute and cache its level.
			v = logging.setV(logging.pcs[0])
		}
		return newVerbose(level, v >= level)
	}
	return newVerbose(level, false)
}
+
// Enabled will return true if this log level is enabled, guarded by the value
// of v.
// See the documentation of V for usage.
func (v Verbose) Enabled() bool {
	return v.enabled
}

// Info is equivalent to the global Info function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) Info(args ...interface{}) {
	if v.enabled {
		logging.print(infoLog, v.logr, v.filter, args...)
	}
}

// InfoDepth is equivalent to the global InfoDepth function, guarded by the
// value of v. See the documentation of V for usage.
func (v Verbose) InfoDepth(depth int, args ...interface{}) {
	if v.enabled {
		logging.printDepth(infoLog, v.logr, v.filter, depth, args...)
	}
}

// Infoln is equivalent to the global Infoln function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) Infoln(args ...interface{}) {
	if v.enabled {
		logging.println(infoLog, v.logr, v.filter, args...)
	}
}

// Infof is equivalent to the global Infof function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) Infof(format string, args ...interface{}) {
	if v.enabled {
		logging.printf(infoLog, v.logr, v.filter, format, args...)
	}
}

// InfoS is equivalent to the global InfoS function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) {
	if v.enabled {
		logging.infoS(v.logr, v.filter, 0, msg, keysAndValues...)
	}
}

// InfoSDepth acts as InfoS but uses depth to determine which call frame to log.
// InfoSDepth(0, "msg") is the same as InfoS("msg").
func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) {
	logging.infoS(logging.logr, logging.filter, depth, msg, keysAndValues...)
}

// Error is equivalent to ErrorS.
//
// Deprecated: Use ErrorS instead.
func (v Verbose) Error(err error, msg string, args ...interface{}) {
	if v.enabled {
		logging.errorS(err, v.logr, v.filter, 0, msg, args...)
	}
}

// ErrorS is equivalent to the global Error function, guarded by the value of v.
// See the documentation of V for usage.
func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) {
	if v.enabled {
		logging.errorS(err, v.logr, v.filter, 0, msg, keysAndValues...)
	}
}

// Info logs to the INFO log.
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
func Info(args ...interface{}) {
	logging.print(infoLog, logging.logr, logging.filter, args...)
}

// InfoDepth acts as Info but uses depth to determine which call frame to log.
// InfoDepth(0, "msg") is the same as Info("msg").
func InfoDepth(depth int, args ...interface{}) {
	logging.printDepth(infoLog, logging.logr, logging.filter, depth, args...)
}

// Infoln logs to the INFO log.
// Arguments are handled in the manner of fmt.Println; a newline is always appended.
func Infoln(args ...interface{}) {
	logging.println(infoLog, logging.logr, logging.filter, args...)
}

// Infof logs to the INFO log.
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
func Infof(format string, args ...interface{}) {
	logging.printf(infoLog, logging.logr, logging.filter, format, args...)
}

// InfoS structured logs to the INFO log.
// The msg argument is used to add a constant description to the log line.
// The key/value pairs are joined by "="; a newline is always appended.
//
// Basic examples:
// >> klog.InfoS("Pod status updated", "pod", "kubedns", "status", "ready")
// output:
// >> I1025 00:15:15.525108       1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready"
func InfoS(msg string, keysAndValues ...interface{}) {
	logging.infoS(logging.logr, logging.filter, 0, msg, keysAndValues...)
}

// Warning logs to the WARNING and INFO logs.
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
func Warning(args ...interface{}) {
	logging.print(warningLog, logging.logr, logging.filter, args...)
}

// WarningDepth acts as Warning but uses depth to determine which call frame to log.
// WarningDepth(0, "msg") is the same as Warning("msg").
func WarningDepth(depth int, args ...interface{}) {
	logging.printDepth(warningLog, logging.logr, logging.filter, depth, args...)
}

// Warningln logs to the WARNING and INFO logs.
// Arguments are handled in the manner of fmt.Println; a newline is always appended.
func Warningln(args ...interface{}) {
	logging.println(warningLog, logging.logr, logging.filter, args...)
}

// Warningf logs to the WARNING and INFO logs.
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
func Warningf(format string, args ...interface{}) {
	logging.printf(warningLog, logging.logr, logging.filter, format, args...)
}

// Error logs to the ERROR, WARNING, and INFO logs.
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
func Error(args ...interface{}) {
	logging.print(errorLog, logging.logr, logging.filter, args...)
}

// ErrorDepth acts as Error but uses depth to determine which call frame to log.
// ErrorDepth(0, "msg") is the same as Error("msg").
func ErrorDepth(depth int, args ...interface{}) {
	logging.printDepth(errorLog, logging.logr, logging.filter, depth, args...)
}

// Errorln logs to the ERROR, WARNING, and INFO logs.
// Arguments are handled in the manner of fmt.Println; a newline is always appended.
func Errorln(args ...interface{}) {
	logging.println(errorLog, logging.logr, logging.filter, args...)
}

// Errorf logs to the ERROR, WARNING, and INFO logs.
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
func Errorf(format string, args ...interface{}) {
	logging.printf(errorLog, logging.logr, logging.filter, format, args...)
}

// ErrorS structured logs to the ERROR, WARNING, and INFO logs.
// The err argument is used as the "err" field of the log line.
// The msg argument is used to add a constant description to the log line.
// The key/value pairs are joined by "="; a newline is always appended.
//
// Basic examples:
// >> klog.ErrorS(err, "Failed to update pod status")
// output:
// >> E1025 00:15:15.525108       1 controller_utils.go:114] "Failed to update pod status" err="timeout"
func ErrorS(err error, msg string, keysAndValues ...interface{}) {
	logging.errorS(err, logging.logr, logging.filter, 0, msg, keysAndValues...)
}

// ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log.
// ErrorSDepth(0, "msg") is the same as ErrorS("msg").
func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) {
	logging.errorS(err, logging.logr, logging.filter, depth, msg, keysAndValues...)
}

// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
// including a stack trace of all running goroutines, then calls os.Exit(255).
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
func Fatal(args ...interface{}) {
	logging.print(fatalLog, logging.logr, logging.filter, args...)
}

// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
// FatalDepth(0, "msg") is the same as Fatal("msg").
func FatalDepth(depth int, args ...interface{}) {
	logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...)
}

// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
// including a stack trace of all running goroutines, then calls os.Exit(255).
// Arguments are handled in the manner of fmt.Println; a newline is always appended.
func Fatalln(args ...interface{}) {
	logging.println(fatalLog, logging.logr, logging.filter, args...)
}

// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
// including a stack trace of all running goroutines, then calls os.Exit(255).
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
func Fatalf(format string, args ...interface{}) {
	logging.printf(fatalLog, logging.logr, logging.filter, format, args...)
}

// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
// It allows Exit and relatives to use the Fatal logs.
var fatalNoStacks uint32

// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
func Exit(args ...interface{}) {
	atomic.StoreUint32(&fatalNoStacks, 1)
	logging.print(fatalLog, logging.logr, logging.filter, args...)
}

// ExitDepth acts as Exit but uses depth to determine which call frame to log.
// ExitDepth(0, "msg") is the same as Exit("msg").
func ExitDepth(depth int, args ...interface{}) {
	atomic.StoreUint32(&fatalNoStacks, 1)
	logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...)
}

// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
// Arguments are handled in the manner of fmt.Println; a newline is always appended.
func Exitln(args ...interface{}) {
	atomic.StoreUint32(&fatalNoStacks, 1)
	logging.println(fatalLog, logging.logr, logging.filter, args...)
}

// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
func Exitf(format string, args ...interface{}) {
	atomic.StoreUint32(&fatalNoStacks, 1)
	logging.printf(fatalLog, logging.logr, logging.filter, format, args...)
}
+
// LogFilter is a collection of functions that can filter all logging calls,
// e.g. for sanitization of arguments and prevent accidental leaking of secrets.
type LogFilter interface {
	Filter(args []interface{}) []interface{}
	FilterF(format string, args []interface{}) (string, []interface{})
	FilterS(msg string, keysAndValues []interface{}) (string, []interface{})
}

// SetLogFilter installs a filter that is applied to the arguments of all
// subsequent logging calls. Pass nil to remove the current filter.
func SetLogFilter(filter LogFilter) {
	logging.mu.Lock()
	defer logging.mu.Unlock()

	logging.filter = filter
}
+
// ObjectRef references a kubernetes object
type ObjectRef struct {
	Name      string `json:"name"`
	Namespace string `json:"namespace,omitempty"`
}

// String renders the reference as "namespace/name", or just "name" when the
// namespace is empty.
func (ref ObjectRef) String() string {
	if ref.Namespace == "" {
		return ref.Name
	}
	return ref.Namespace + "/" + ref.Name
}

// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface
// this interface may expand in the future, but will always be a subset of the
// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface
type KMetadata interface {
	GetName() string
	GetNamespace() string
}

// KObj returns an ObjectRef for the given object, or a zero ObjectRef when
// obj is nil (including a non-nil interface wrapping a nil pointer).
func KObj(obj KMetadata) ObjectRef {
	if obj == nil {
		return ObjectRef{}
	}
	// A typed nil pointer would make GetName/GetNamespace panic.
	if v := reflect.ValueOf(obj); v.Kind() == reflect.Ptr && v.IsNil() {
		return ObjectRef{}
	}

	return ObjectRef{
		Name:      obj.GetName(),
		Namespace: obj.GetNamespace(),
	}
}

// KRef builds an ObjectRef directly from a namespace and name.
func KRef(namespace, name string) ObjectRef {
	return ObjectRef{
		Name:      name,
		Namespace: namespace,
	}
}

// KObjs converts a slice whose elements implement KMetadata into a slice of
// ObjectRef. It returns nil when arg is not a slice or when any element does
// not implement KMetadata.
func KObjs(arg interface{}) []ObjectRef {
	v := reflect.ValueOf(arg)
	if v.Kind() != reflect.Slice {
		return nil
	}
	refs := make([]ObjectRef, 0, v.Len())
	for i := 0; i < v.Len(); i++ {
		m, ok := v.Index(i).Interface().(KMetadata)
		if !ok {
			return nil
		}
		refs = append(refs, KObj(m))
	}
	return refs
}

+ 164 - 0
klog_file.go

@@ -0,0 +1,164 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package log
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"os/user"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+)
+
// MaxSize is the maximum size of a log file in bytes; once a file reaches
// this size a new one is started.
var MaxSize uint64 = 1024 * 1024 * 1800

// logDirs lists the candidate directories for new log files.
var logDirs []string
+
// createLogDirs populates logDirs: the user-configured -log_dir (if any)
// first, then the system temporary directory as a fallback.
func createLogDirs() {
	if logging.logDir != "" {
		logDirs = append(logDirs, logging.logDir)
	}
	logDirs = append(logDirs, os.TempDir())
}
+
// Process identity cached once at startup; used when composing log file
// names and header lines.
var (
	pid          = os.Getpid()
	program      = filepath.Base(os.Args[0])
	host         = "unknownhost" // replaced in init when os.Hostname succeeds
	userName     = "unknownuser" // resolved lazily by getUserName
	userNameOnce sync.Once       // guards the one-time userName lookup
)
+
// init records the short form of the local hostname; on failure the
// "unknownhost" default is kept.
func init() {
	if h, err := os.Hostname(); err == nil {
		host = shortHostname(h)
	}
}
+
+func getUserName() string {
+	userNameOnce.Do(func() {
+		// On Windows, the Go 'user' package requires netapi32.dll.
+		// This affects Windows Nano Server:
+		//   https://github.com/golang/go/issues/21867
+		// Fallback to using environment variables.
+		if runtime.GOOS == "windows" {
+			u := os.Getenv("USERNAME")
+			if len(u) == 0 {
+				return
+			}
+			// Sanitize the USERNAME since it may contain filepath separators.
+			u = strings.Replace(u, `\`, "_", -1)
+
+			// user.Current().Username normally produces something like 'USERDOMAIN\USERNAME'
+			d := os.Getenv("USERDOMAIN")
+			if len(d) != 0 {
+				userName = d + "_" + u
+			} else {
+				userName = u
+			}
+		} else {
+			current, err := user.Current()
+			if err == nil {
+				userName = current.Username
+			}
+		}
+	})
+
+	return userName
+}
+
// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
	// SplitN always returns at least one element, even for "".
	return strings.SplitN(hostname, ".", 2)[0]
}
+
// logName returns a new log file name containing tag, with start time t, and
// the name for the symlink for tag. The file name has the form
//
//	program.host.user.log.tag.YYYYMMDD-hhmmss.pid
//
// and the symlink name is simply "program.tag".
func logName(tag string, t time.Time) (name, link string) {
	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
		program,
		host,
		getUserName(),
		tag,
		t.Year(),
		t.Month(),
		t.Day(),
		t.Hour(),
		t.Minute(),
		t.Second(),
		pid)
	return name, program + "." + tag
}
+
// onceLogDirs ensures the candidate log directories are computed only once.
var onceLogDirs sync.Once

// create creates a new log file and returns the file and its filename, which
// contains tag ("INFO", "FATAL", etc.) and t.  If the file is created
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
// The startup argument indicates whether this is the initial startup of klog.
// If startup is true, existing files are opened for appending instead of truncated.
func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) {
	// An explicit -log_file overrides the per-tag directory scheme.
	if logging.logFile != "" {
		f, err := openOrCreate(logging.logFile, startup)
		if err == nil {
			return f, logging.logFile, nil
		}
		return nil, "", fmt.Errorf("log: unable to create log: %v", err)
	}
	onceLogDirs.Do(createLogDirs)
	if len(logDirs) == 0 {
		return nil, "", errors.New("log: no log dirs")
	}
	name, link := logName(tag, t)
	var lastErr error
	// Try each candidate directory in order; the first one that accepts
	// the file wins, and only the final failure is reported.
	for _, dir := range logDirs {
		fname := filepath.Join(dir, name)
		f, err := openOrCreate(fname, startup)
		if err == nil {
			// Best-effort "program.tag" symlink to the newest file.
			symlink := filepath.Join(dir, link)
			os.Remove(symlink)        // ignore err
			os.Symlink(name, symlink) // ignore err
			return f, fname, nil
		}
		lastErr = err
	}
	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
}
+
+// The startup argument indicates whether this is the initial startup of klog.
+// If startup is true, existing files are opened for appending instead of truncated.
+func openOrCreate(name string, startup bool) (*os.File, error) {
+	if startup {
+		f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+		return f, err
+	}
+	f, err := os.Create(name)
+	return f, err
+}

+ 1834 - 0
klog_test.go

@@ -0,0 +1,1834 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	stdLog "log"
+	"os"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/go-logr/logr"
+)
+
+// TODO: This test package should be refactored so that tests cannot
+// interfere with each-other.
+
+// Test that shortHostname works as advertised.
+func TestShortHostname(t *testing.T) {
+	for hostname, expect := range map[string]string{
+		"":                "",
+		"host":            "host",
+		"host.google.com": "host",
+	} {
+		if got := shortHostname(hostname); expect != got {
+			t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got)
+		}
+	}
+}
+
// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter.
type flushBuffer struct {
	bytes.Buffer
}

// Flush is a no-op: the buffer is purely in-memory.
func (f *flushBuffer) Flush() error { return nil }

// Sync is a no-op: the buffer is purely in-memory.
func (f *flushBuffer) Sync() error { return nil }
+
+// swap sets the log writers and returns the old array.
+func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	old = l.file
+	for i, w := range writers {
+		logging.file[i] = w
+	}
+	return
+}
+
+// newBuffers sets the log writers to all new byte buffers and returns the old array.
+func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter {
+	return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)})
+}
+
+// contents returns the specified log value as a string.
+func contents(s severity) string {
+	return logging.file[s].(*flushBuffer).String()
+}
+
+// contains reports whether the string is contained in the log.
+func contains(s severity, str string, t *testing.T) bool {
+	return strings.Contains(contents(s), str)
+}
+
// setFlags configures the logging flags how the test expects them:
// capture to buffers instead of stderr, and no directory in headers.
func setFlags() {
	logging.toStderr = false
	logging.addDirHeader = false
}
+
// Test that Info works as advertised: the line starts with the severity
// character 'I' and carries the message.
func TestInfo(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	Info("test")
	if !contains(infoLog, "I", t) {
		t.Errorf("Info has wrong character: %q", contents(infoLog))
	}
	if !contains(infoLog, "test", t) {
		t.Error("Info failed")
	}
}
+
// TestInfoDepth verifies that InfoDepth attributes the log line to the
// caller the requested number of frames up the stack. The test checks the
// actual line numbers emitted in the header, so the relative position of
// the runtime.Caller / InfoDepth / f() lines below is load-bearing.
func TestInfoDepth(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())

	f := func() { InfoDepth(1, "depth-test1") }

	// The next three lines must stay together
	_, _, wantLine, _ := runtime.Caller(0)
	InfoDepth(0, "depth-test0")
	f()

	msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n")
	if len(msgs) != 2 {
		t.Fatalf("Got %d lines, expected 2", len(msgs))
	}

	for i, m := range msgs {
		if !strings.HasPrefix(m, "I") {
			t.Errorf("InfoDepth[%d] has wrong character: %q", i, m)
		}
		w := fmt.Sprintf("depth-test%d", i)
		if !strings.Contains(m, w) {
			t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m)
		}

		// pull out the line number (between : and ])
		msg := m[strings.LastIndex(m, ":")+1:]
		x := strings.Index(msg, "]")
		if x < 0 {
			t.Errorf("InfoDepth[%d]: missing ']': %q", i, m)
			continue
		}
		line, err := strconv.Atoi(msg[:x])
		if err != nil {
			t.Errorf("InfoDepth[%d]: bad line number: %q", i, m)
			continue
		}
		// Each call site is one line after the previous one.
		wantLine++
		if wantLine != line {
			t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine)
		}
	}
}
+
// Route the standard library's log output to the INFO log for the duration
// of the test binary (exercised by TestStandardLog).
func init() {
	CopyStandardLogTo("INFO")
}
+
// Test that CopyStandardLogTo panics on bad input: "LOG" is not a valid
// severity name, and the panic message should mention it.
func TestCopyStandardLogToPanic(t *testing.T) {
	defer func() {
		if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") {
			t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s)
		}
	}()
	CopyStandardLogTo("LOG")
}
+
// Test that using the standard log package logs to INFO, relying on the
// CopyStandardLogTo("INFO") performed in init above.
func TestStandardLog(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	stdLog.Print("test")
	if !contains(infoLog, "I", t) {
		t.Errorf("Info has wrong character: %q", contents(infoLog))
	}
	if !contains(infoLog, "test", t) {
		t.Error("Info failed")
	}
}
+
+// Test that the header has the correct format.
+func TestHeader(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	defer func(previous func() time.Time) { timeNow = previous }(timeNow)
+	timeNow = func() time.Time {
+		return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
+	}
+	pid = 1234
+	Info("test")
+	var line int
+	format := "I0102 15:04:05.067890    1234 klog_test.go:%d] test\n"
+	n, err := fmt.Sscanf(contents(infoLog), format, &line)
+	if n != 1 || err != nil {
+		t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog))
+	}
+	// Scanf treats multiple spaces as equivalent to a single space,
+	// so check for correct space-padding also.
+	want := fmt.Sprintf(format, line)
+	if contents(infoLog) != want {
+		t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want)
+	}
+}
+
+func TestHeaderWithDir(t *testing.T) {
+	setFlags()
+	logging.addDirHeader = true
+	defer logging.swap(logging.newBuffers())
+	defer func(previous func() time.Time) { timeNow = previous }(timeNow)
+	timeNow = func() time.Time {
+		return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
+	}
+	pid = 1234
+	Info("test")
+	re := regexp.MustCompile(`I0102 15:04:05.067890    1234 (klog|v2)/klog_test.go:(\d+)] test\n`)
+	if !re.MatchString(contents(infoLog)) {
+		t.Errorf("log format error: line does not match regex:\n\t%q\n", contents(infoLog))
+	}
+}
+
// Test that an Error log goes to Warning and Info.
// Even in the Info log, the source character will be E, so the data should
// all be identical across the three severities.
func TestError(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	Error("test")
	if !contains(errorLog, "E", t) {
		t.Errorf("Error has wrong character: %q", contents(errorLog))
	}
	if !contains(errorLog, "test", t) {
		t.Error("Error failed")
	}
	// The lower-severity logs must contain the exact same line.
	str := contents(errorLog)
	if !contains(warningLog, str, t) {
		t.Error("Warning failed")
	}
	if !contains(infoLog, str, t) {
		t.Error("Info failed")
	}
}
+
+// Test that an Error log does not goes to Warning and Info.
+// Even in the Info log, the source character will be E, so the data should
+// all be identical.
+func TestErrorWithOneOutput(t *testing.T) {
+	setFlags()
+	logging.oneOutput = true
+	buf := logging.newBuffers()
+	defer func() {
+		logging.swap(buf)
+		logging.oneOutput = false
+	}()
+	Error("test")
+	if !contains(errorLog, "E", t) {
+		t.Errorf("Error has wrong character: %q", contents(errorLog))
+	}
+	if !contains(errorLog, "test", t) {
+		t.Error("Error failed")
+	}
+	str := contents(errorLog)
+	if contains(warningLog, str, t) {
+		t.Error("Warning failed")
+	}
+	if contains(infoLog, str, t) {
+		t.Error("Info failed")
+	}
+}
+
// Test that a Warning log goes to Info.
// Even in the Info log, the source character will be W, so the data should
// all be identical across both severities.
func TestWarning(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	Warning("test")
	if !contains(warningLog, "W", t) {
		t.Errorf("Warning has wrong character: %q", contents(warningLog))
	}
	if !contains(warningLog, "test", t) {
		t.Error("Warning failed")
	}
	// The Info log must contain the exact same line.
	str := contents(warningLog)
	if !contains(infoLog, str, t) {
		t.Error("Info failed")
	}
}
+
+// Test that a Warning log does not goes to Info.
+// Even in the Info log, the source character will be W, so the data should
+// all be identical.
+func TestWarningWithOneOutput(t *testing.T) {
+	setFlags()
+	logging.oneOutput = true
+	buf := logging.newBuffers()
+	defer func() {
+		logging.swap(buf)
+		logging.oneOutput = false
+	}()
+	Warning("test")
+	if !contains(warningLog, "W", t) {
+		t.Errorf("Warning has wrong character: %q", contents(warningLog))
+	}
+	if !contains(warningLog, "test", t) {
+		t.Error("Warning failed")
+	}
+	str := contents(warningLog)
+	if contains(infoLog, str, t) {
+		t.Error("Info failed")
+	}
+}
+
// Test that a V log goes to Info when the verbosity threshold admits it.
func TestV(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	logging.verbosity.Set("2")
	defer logging.verbosity.Set("0")
	V(2).Info("test")
	if !contains(infoLog, "I", t) {
		t.Errorf("Info has wrong character: %q", contents(infoLog))
	}
	if !contains(infoLog, "test", t) {
		t.Error("Info failed")
	}
}
+
+// Test that a vmodule enables a log in this file.
+func TestVmoduleOn(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	logging.vmodule.Set("klog_test=2")
+	defer logging.vmodule.Set("")
+	if !V(1).Enabled() {
+		t.Error("V not enabled for 1")
+	}
+	if !V(2).Enabled() {
+		t.Error("V not enabled for 2")
+	}
+	if V(3).Enabled() {
+		t.Error("V enabled for 3")
+	}
+	V(2).Info("test")
+	if !contains(infoLog, "I", t) {
+		t.Errorf("Info has wrong character: %q", contents(infoLog))
+	}
+	if !contains(infoLog, "test", t) {
+		t.Error("Info failed")
+	}
+}
+
+// Test that a vmodule of another file does not enable a log in this file.
+func TestVmoduleOff(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	logging.vmodule.Set("notthisfile=2")
+	defer logging.vmodule.Set("")
+	for i := 1; i <= 3; i++ {
+		if V(Level(i)).Enabled() {
+			t.Errorf("V enabled for %d", i)
+		}
+	}
+	V(2).Info("test")
+	if contents(infoLog) != "" {
+		t.Error("V logged incorrectly")
+	}
+}
+
+func TestSetOutputDataRace(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	var wg sync.WaitGroup
+	for i := 1; i <= 50; i++ {
+		go func() {
+			logging.flushDaemon()
+		}()
+	}
+	for i := 1; i <= 50; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			SetOutput(ioutil.Discard)
+		}()
+	}
+	for i := 1; i <= 50; i++ {
+		go func() {
+			logging.flushDaemon()
+		}()
+	}
+	for i := 1; i <= 50; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			SetOutputBySeverity("INFO", ioutil.Discard)
+		}()
+	}
+	for i := 1; i <= 50; i++ {
+		go func() {
+			logging.flushDaemon()
+		}()
+	}
+	wg.Wait()
+}
+
// TestLogToOutput verifies that SetOutput redirects log lines to the given
// writer once stderr logging is turned off.
func TestLogToOutput(t *testing.T) {
	logging.toStderr = true
	defer logging.swap(logging.newBuffers())
	buf := new(bytes.Buffer)
	SetOutput(buf)
	LogToStderr(false)

	Info("Does logging to an output work?")

	str := buf.String()
	if !strings.Contains(str, "Does logging to an output work?") {
		t.Fatalf("Expected %q to contain \"Does logging to an output work?\"", str)
	}
}
+
// vGlobs are -vmodule patterns that match/don't match this file at V=2.
// The map value is whether V(2) should be enabled under that pattern.
var vGlobs = map[string]bool{
	// Easy to test the numeric match here.
	"klog_test=1": false, // If -vmodule sets V to 1, V(2) will fail.
	"klog_test=2": true,
	"klog_test=3": true, // If -vmodule sets V to 1, V(3) will succeed.
	// These all use 2 and check the patterns. All are true.
	"*=2":           true,
	"?l*=2":         true,
	"????_*=2":      true,
	"??[mno]?_*t=2": true,
	// These all use 2 and check the patterns. All are false.
	"*x=2":         false,
	"m*=2":         false,
	"??_*=2":       false,
	"?[abc]?_*t=2": false,
}
+
+// Test that vmodule globbing works as advertised.
+func testVmoduleGlob(pat string, match bool, t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	defer logging.vmodule.Set("")
+	logging.vmodule.Set(pat)
+	if V(2).Enabled() != match {
+		t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match)
+	}
+}
+
+// Test that a vmodule globbing works as advertised.
+func TestVmoduleGlob(t *testing.T) {
+	for glob, match := range vGlobs {
+		testVmoduleGlob(glob, match, t)
+	}
+}
+
+func TestRollover(t *testing.T) {
+	setFlags()
+	var err error
+	defer func(previous func(error)) { logExitFunc = previous }(logExitFunc)
+	logExitFunc = func(e error) {
+		err = e
+	}
+	defer func(previous uint64) { MaxSize = previous }(MaxSize)
+	MaxSize = 512
+	Info("x") // Be sure we have a file.
+	info, ok := logging.file[infoLog].(*syncBuffer)
+	if !ok {
+		t.Fatal("info wasn't created")
+	}
+	if err != nil {
+		t.Fatalf("info has initial error: %v", err)
+	}
+	fname0 := info.file.Name()
+	Info(strings.Repeat("x", int(MaxSize))) // force a rollover
+	if err != nil {
+		t.Fatalf("info has error after big write: %v", err)
+	}
+
+	// Make sure the next log file gets a file name with a different
+	// time stamp.
+	//
+	// TODO: determine whether we need to support subsecond log
+	// rotation.  C++ does not appear to handle this case (nor does it
+	// handle Daylight Savings Time properly).
+	time.Sleep(1 * time.Second)
+
+	Info("x") // create a new file
+	if err != nil {
+		t.Fatalf("error after rotation: %v", err)
+	}
+	fname1 := info.file.Name()
+	if fname0 == fname1 {
+		t.Errorf("info.f.Name did not change: %v", fname0)
+	}
+	if info.nbytes >= info.maxbytes {
+		t.Errorf("file size was not reset: %d", info.nbytes)
+	}
+}
+
+func TestOpenAppendOnStart(t *testing.T) {
+	const (
+		x string = "xxxxxxxxxx"
+		y string = "yyyyyyyyyy"
+	)
+
+	setFlags()
+	var err error
+	defer func(previous func(error)) { logExitFunc = previous }(logExitFunc)
+	logExitFunc = func(e error) {
+		err = e
+	}
+
+	f, err := ioutil.TempFile("", "test_klog_OpenAppendOnStart")
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+	defer os.Remove(f.Name())
+	logging.logFile = f.Name()
+
+	// Erase files created by prior tests,
+	for i := range logging.file {
+		logging.file[i] = nil
+	}
+
+	// Logging creates the file
+	Info(x)
+	_, ok := logging.file[infoLog].(*syncBuffer)
+	if !ok {
+		t.Fatal("info wasn't created")
+	}
+
+	// ensure we wrote what we expected
+	logging.flushAll()
+	b, err := ioutil.ReadFile(logging.logFile)
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+	if !strings.Contains(string(b), x) {
+		t.Fatalf("got %s, missing expected Info log: %s", string(b), x)
+	}
+
+	// Set the file to nil so it gets "created" (opened) again on the next write.
+	for i := range logging.file {
+		logging.file[i] = nil
+	}
+
+	// Logging again should open the file again with O_APPEND instead of O_TRUNC
+	Info(y)
+	// ensure we wrote what we expected
+	logging.lockAndFlushAll()
+	b, err = ioutil.ReadFile(logging.logFile)
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+	if !strings.Contains(string(b), y) {
+		t.Fatalf("got %s, missing expected Info log: %s", string(b), y)
+	}
+	// The initial log message should be preserved across create calls.
+	logging.lockAndFlushAll()
+	b, err = ioutil.ReadFile(logging.logFile)
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+	if !strings.Contains(string(b), x) {
+		t.Fatalf("got %s, missing expected Info log: %s", string(b), x)
+	}
+}
+
+func TestLogBacktraceAt(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	// The peculiar style of this code simplifies line counting and maintenance of the
+	// tracing block below.
+	var infoLine string
+	setTraceLocation := func(file string, line int, ok bool, delta int) {
+		if !ok {
+			t.Fatal("could not get file:line")
+		}
+		_, file = filepath.Split(file)
+		infoLine = fmt.Sprintf("%s:%d", file, line+delta)
+		err := logging.traceLocation.Set(infoLine)
+		if err != nil {
+			t.Fatal("error setting log_backtrace_at: ", err)
+		}
+	}
+	{
+		// Start of tracing block. These lines know about each other's relative position.
+		_, file, line, ok := runtime.Caller(0)
+		setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls.
+		Info("we want a stack trace here")
+	}
+	numAppearances := strings.Count(contents(infoLog), infoLine)
+	if numAppearances < 2 {
+		// Need 2 appearances, one in the log header and one in the trace:
+		//   log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here
+		//   ...
+		//   k8s.io/klog/klog_test.go:280 (0x41ba91)
+		//   ...
+		// We could be more precise but that would require knowing the details
+		// of the traceback format, which may not be dependable.
+		t.Fatal("got no trace back; log is ", contents(infoLog))
+	}
+}
+
// BenchmarkHeader measures formatting of a single log header line,
// returning the buffer to the pool each iteration.
func BenchmarkHeader(b *testing.B) {
	for i := 0; i < b.N; i++ {
		buf, _, _ := logging.header(infoLog, 0)
		logging.putBuffer(buf)
	}
}
+
// BenchmarkHeaderWithDir measures header formatting with the directory
// included in the file name.
// NOTE(review): addDirHeader is set but never restored, so it leaks into
// whatever runs after this benchmark — confirm whether that is intended.
func BenchmarkHeaderWithDir(b *testing.B) {
	logging.addDirHeader = true
	for i := 0; i < b.N; i++ {
		buf, _, _ := logging.header(infoLog, 0)
		logging.putBuffer(buf)
	}
}
+
// Ensure that benchmarks have side effects to avoid compiler optimization
var result ObjectRef

// BenchmarkKRef measures building an ObjectRef from namespace and name.
func BenchmarkKRef(b *testing.B) {
	var r ObjectRef
	for i := 0; i < b.N; i++ {
		r = KRef("namespace", "name")
	}
	result = r
}
+
// BenchmarkKObj measures extracting an ObjectRef from a KMetadata value,
// including the reflection-based nil-pointer check.
func BenchmarkKObj(b *testing.B) {
	a := kMetadataMock{name: "a", ns: "a"}
	var r ObjectRef
	for i := 0; i < b.N; i++ {
		r = KObj(&a)
	}
	result = r
}
+
+func BenchmarkLogs(b *testing.B) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+
+	testFile, err := ioutil.TempFile("", "test.log")
+	if err != nil {
+		b.Fatal("unable to create temporary file")
+	}
+	defer os.Remove(testFile.Name())
+
+	logging.verbosity.Set("0")
+	logging.toStderr = false
+	logging.alsoToStderr = false
+	logging.stderrThreshold = fatalLog
+	logging.logFile = testFile.Name()
+	logging.swap([numSeverity]flushSyncWriter{nil, nil, nil, nil})
+
+	for i := 0; i < b.N; i++ {
+		Error("error")
+		Warning("warning")
+		Info("info")
+	}
+	logging.flushAll()
+}
+
+// Test the logic on checking log size limitation.
+func TestFileSizeCheck(t *testing.T) {
+	setFlags()
+	testData := map[string]struct {
+		testLogFile          string
+		testLogFileMaxSizeMB uint64
+		testCurrentSize      uint64
+		expectedResult       bool
+	}{
+		"logFile not specified, exceeds max size": {
+			testLogFile:          "",
+			testLogFileMaxSizeMB: 1,
+			testCurrentSize:      1024 * 1024 * 2000, //exceeds the maxSize
+			expectedResult:       true,
+		},
+
+		"logFile not specified, not exceeds max size": {
+			testLogFile:          "",
+			testLogFileMaxSizeMB: 1,
+			testCurrentSize:      1024 * 1024 * 1000, //smaller than the maxSize
+			expectedResult:       false,
+		},
+		"logFile specified, exceeds max size": {
+			testLogFile:          "/tmp/test.log",
+			testLogFileMaxSizeMB: 500,                // 500MB
+			testCurrentSize:      1024 * 1024 * 1000, //exceeds the logFileMaxSizeMB
+			expectedResult:       true,
+		},
+		"logFile specified, not exceeds max size": {
+			testLogFile:          "/tmp/test.log",
+			testLogFileMaxSizeMB: 500,               // 500MB
+			testCurrentSize:      1024 * 1024 * 300, //smaller than the logFileMaxSizeMB
+			expectedResult:       false,
+		},
+	}
+
+	for name, test := range testData {
+		logging.logFile = test.testLogFile
+		logging.logFileMaxSizeMB = test.testLogFileMaxSizeMB
+		actualResult := test.testCurrentSize >= CalculateMaxSize()
+		if test.expectedResult != actualResult {
+			t.Fatalf("Error on test case '%v': Was expecting result equals %v, got %v",
+				name, test.expectedResult, actualResult)
+		}
+	}
+}
+
// TestInitFlags verifies that multiple flag sets registered via InitFlags
// all write through to the same shared logging configuration.
func TestInitFlags(t *testing.T) {
	fs1 := flag.NewFlagSet("test1", flag.PanicOnError)
	InitFlags(fs1)
	fs1.Set("log_dir", "/test1")
	fs1.Set("log_file_max_size", "1")
	fs2 := flag.NewFlagSet("test2", flag.PanicOnError)
	InitFlags(fs2)
	// A value set through fs1 must be visible globally...
	if logging.logDir != "/test1" {
		t.Fatalf("Expected log_dir to be %q, got %q", "/test1", logging.logDir)
	}
	// ...and a later set through fs2 must override it.
	fs2.Set("log_file_max_size", "2048")
	if logging.logFileMaxSizeMB != 2048 {
		t.Fatal("Expected log_file_max_size to be 2048")
	}
}
+
+func TestInfoObjectRef(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+
+	tests := []struct {
+		name string
+		ref  ObjectRef
+		want string
+	}{
+		{
+			name: "with ns",
+			ref: ObjectRef{
+				Name:      "test-name",
+				Namespace: "test-ns",
+			},
+			want: "test-ns/test-name",
+		},
+		{
+			name: "without ns",
+			ref: ObjectRef{
+				Name:      "test-name",
+				Namespace: "",
+			},
+			want: "test-name",
+		},
+		{
+			name: "empty",
+			ref:  ObjectRef{},
+			want: "",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			Info(tt.ref)
+			if !contains(infoLog, tt.want, t) {
+				t.Errorf("expected %v, got %v", tt.want, contents(infoLog))
+			}
+		})
+	}
+}
+
// kMetadataMock is a value-receiver test double exposing a name and
// namespace through getter methods.
type kMetadataMock struct {
	name, ns string
}

// GetName returns the mock's name.
func (m kMetadataMock) GetName() string { return m.name }

// GetNamespace returns the mock's namespace.
func (m kMetadataMock) GetNamespace() string { return m.ns }
+
// ptrKMetadataMock is the pointer-receiver counterpart of kMetadataMock,
// used to exercise typed-nil handling in KObj.
type ptrKMetadataMock struct {
	name, ns string
}

// GetName returns the mock's name.
func (m *ptrKMetadataMock) GetName() string { return m.name }

// GetNamespace returns the mock's namespace.
func (m *ptrKMetadataMock) GetNamespace() string { return m.ns }
+
+func TestKObj(t *testing.T) {
+	tests := []struct {
+		name string
+		obj  KMetadata
+		want ObjectRef
+	}{
+		{
+			name: "nil passed as pointer KMetadata implementation",
+			obj:  (*ptrKMetadataMock)(nil),
+			want: ObjectRef{},
+		},
+		{
+			name: "empty struct passed as non-pointer KMetadata implementation",
+			obj:  kMetadataMock{},
+			want: ObjectRef{},
+		},
+		{
+			name: "nil pointer passed to non-pointer KMetadata implementation",
+			obj:  (*kMetadataMock)(nil),
+			want: ObjectRef{},
+		},
+		{
+			name: "nil",
+			obj:  nil,
+			want: ObjectRef{},
+		},
+		{
+			name: "with ns",
+			obj:  &kMetadataMock{"test-name", "test-ns"},
+			want: ObjectRef{
+				Name:      "test-name",
+				Namespace: "test-ns",
+			},
+		},
+		{
+			name: "without ns",
+			obj:  &kMetadataMock{"test-name", ""},
+			want: ObjectRef{
+				Name: "test-name",
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if KObj(tt.obj) != tt.want {
+				t.Errorf("expected %v, got %v", tt.want, KObj(tt.obj))
+			}
+		})
+	}
+}
+
+func TestKRef(t *testing.T) {
+	tests := []struct {
+		testname  string
+		name      string
+		namespace string
+		want      ObjectRef
+	}{
+		{
+			testname:  "with ns",
+			name:      "test-name",
+			namespace: "test-ns",
+			want: ObjectRef{
+				Name:      "test-name",
+				Namespace: "test-ns",
+			},
+		},
+		{
+			testname: "without ns",
+			name:     "test-name",
+			want: ObjectRef{
+				Name: "test-name",
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.testname, func(t *testing.T) {
+			if KRef(tt.namespace, tt.name) != tt.want {
+				t.Errorf("expected %v, got %v", tt.want, KRef(tt.namespace, tt.name))
+			}
+		})
+	}
+}
+
+// Test that InfoS and InfoSDepth work as advertised.
+func TestInfoS(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	timeNow = func() time.Time {
+		return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
+	}
+	pid = 1234
+	var testDataInfo = []struct {
+		msg        string
+		format     string
+		keysValues []interface{}
+	}{
+		{
+			msg:        "test",
+			format:     "I0102 15:04:05.067890    1234 klog_test.go:%d] \"test\" pod=\"kubedns\"\n",
+			keysValues: []interface{}{"pod", "kubedns"},
+		},
+		{
+			msg:        "test",
+			format:     "I0102 15:04:05.067890    1234 klog_test.go:%d] \"test\" replicaNum=20\n",
+			keysValues: []interface{}{"replicaNum", 20},
+		},
+		{
+			msg:        "test",
+			format:     "I0102 15:04:05.067890    1234 klog_test.go:%d] \"test\" err=\"test error\"\n",
+			keysValues: []interface{}{"err", errors.New("test error")},
+		},
+		{
+			msg:        "test",
+			format:     "I0102 15:04:05.067890    1234 klog_test.go:%d] \"test\" err=\"test error\"\n",
+			keysValues: []interface{}{"err", errors.New("test error")},
+		},
+	}
+
+	functions := []func(msg string, keyAndValues ...interface{}){
+		InfoS,
+		myInfoS,
+	}
+	for _, f := range functions {
+		for _, data := range testDataInfo {
+			logging.file[infoLog] = &flushBuffer{}
+			f(data.msg, data.keysValues...)
+			var line int
+			n, err := fmt.Sscanf(contents(infoLog), data.format, &line)
+			if n != 1 || err != nil {
+				t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog))
+			}
+			want := fmt.Sprintf(data.format, line)
+			if contents(infoLog) != want {
+				t.Errorf("InfoS has wrong format: \n got:\t%s\nwant:\t%s", contents(infoLog), want)
+			}
+		}
+	}
+}
+
+// Test that Verbose.InfoS works as advertised.
+func TestVInfoS(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	timeNow = func() time.Time {
+		return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
+	}
+	pid = 1234
+	var testDataInfo = []struct {
+		msg        string
+		format     string
+		keysValues []interface{}
+	}{
+		{
+			msg:        "test",
+			format:     "I0102 15:04:05.067890    1234 klog_test.go:%d] \"test\" pod=\"kubedns\"\n",
+			keysValues: []interface{}{"pod", "kubedns"},
+		},
+		{
+			msg:        "test",
+			format:     "I0102 15:04:05.067890    1234 klog_test.go:%d] \"test\" replicaNum=20\n",
+			keysValues: []interface{}{"replicaNum", 20},
+		},
+		{
+			msg:        "test",
+			format:     "I0102 15:04:05.067890    1234 klog_test.go:%d] \"test\" err=\"test error\"\n",
+			keysValues: []interface{}{"err", errors.New("test error")},
+		},
+	}
+
+	logging.verbosity.Set("2")
+	defer logging.verbosity.Set("0")
+
+	for l := Level(0); l < Level(4); l++ {
+		for _, data := range testDataInfo {
+			logging.file[infoLog] = &flushBuffer{}
+
+			V(l).InfoS(data.msg, data.keysValues...)
+
+			var want string
+			var line int
+			if l <= 2 {
+				n, err := fmt.Sscanf(contents(infoLog), data.format, &line)
+				if n != 1 || err != nil {
+					t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog))
+				}
+
+				want = fmt.Sprintf(data.format, line)
+			} else {
+				want = ""
+			}
+			if contents(infoLog) != want {
+				t.Errorf("V(%d).InfoS has unexpected output: \n got:\t%s\nwant:\t%s", l, contents(infoLog), want)
+			}
+		}
+	}
+}
+
+// Test that ErrorS and ErrorSDepth work as advertised.
+func TestErrorS(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	// Pin the clock and pid so the expected header string is deterministic.
+	// Restore timeNow afterwards so later tests see the real clock again.
+	savedTimeNow := timeNow
+	defer func() { timeNow = savedTimeNow }()
+	timeNow = func() time.Time {
+		return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
+	}
+	logging.logFile = ""
+	pid = 1234
+
+	// Exercise both the package-level ErrorS and a depth-adjusted wrapper;
+	// both must attribute the call site to this file.
+	functions := []func(err error, msg string, keyAndValues ...interface{}){
+		ErrorS,
+		myErrorS,
+	}
+	for _, f := range functions {
+		var errorList = []struct {
+			err    error
+			format string
+		}{
+			{
+				err:    fmt.Errorf("update status failed"),
+				format: "E0102 15:04:05.067890    1234 klog_test.go:%d] \"Failed to update pod status\" err=\"update status failed\" pod=\"kubedns\"\n",
+			},
+			{
+				// A nil error must be omitted from the key/value output.
+				err:    nil,
+				format: "E0102 15:04:05.067890    1234 klog_test.go:%d] \"Failed to update pod status\" pod=\"kubedns\"\n",
+			},
+		}
+		for _, e := range errorList {
+			logging.file[errorLog] = &flushBuffer{}
+			f(e.err, "Failed to update pod status", "pod", "kubedns")
+			// Scan the line number out of the actual output, then check the
+			// whole line against the expected format with that number.
+			var line int
+			n, err := fmt.Sscanf(contents(errorLog), e.format, &line)
+			if n != 1 || err != nil {
+				t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(errorLog))
+			}
+			want := fmt.Sprintf(e.format, line)
+			if contents(errorLog) != want {
+				t.Errorf("ErrorS has wrong format: \n got:\t%s\nwant:\t%s", contents(errorLog), want)
+			}
+		}
+	}
+}
+
+// Test that kvListFormat works as advertised: each key/value pair is
+// rendered as " key=value", with strings quoted, structs/slices/maps in
+// their %v form, and KRef/KObj references as "namespace/name".
+func TestKvListFormat(t *testing.T) {
+	var testKVList = []struct {
+		keysValues []interface{}
+		want       string
+	}{
+		{
+			keysValues: []interface{}{"pod", "kubedns"},
+			want:       " pod=\"kubedns\"",
+		},
+		{
+			keysValues: []interface{}{"pod", "kubedns", "update", true},
+			want:       " pod=\"kubedns\" update=true",
+		},
+		{
+			keysValues: []interface{}{"pod", "kubedns", "spec", struct {
+				X int
+				Y string
+				N time.Time
+			}{X: 76, Y: "strval", N: time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.UTC)}},
+			want: " pod=\"kubedns\" spec={X:76 Y:strval N:2006-01-02 15:04:05.06789 +0000 UTC}",
+		},
+		{
+			keysValues: []interface{}{"pod", "kubedns", "values", []int{8, 6, 7, 5, 3, 0, 9}},
+			want:       " pod=\"kubedns\" values=[8 6 7 5 3 0 9]",
+		},
+		{
+			keysValues: []interface{}{"pod", "kubedns", "values", []string{"deployment", "svc", "configmap"}},
+			want:       " pod=\"kubedns\" values=[deployment svc configmap]",
+		},
+		{
+			keysValues: []interface{}{"pod", "kubedns", "bytes", []byte("test case for byte array")},
+			want:       " pod=\"kubedns\" bytes=\"test case for byte array\"",
+		},
+		{
+			// Non-ASCII bytes must come out \u-escaped (NOTE(review): the
+			// input below contains U+FFFD replacement runes — verify these
+			// were not mangled from raw invalid bytes in the original).
+			keysValues: []interface{}{"pod", "kubedns", "bytes", []byte("��=� ⌘")},
+			want:       " pod=\"kubedns\" bytes=\"\\ufffd\\ufffd=\\ufffd \\u2318\"",
+		},
+		{
+			keysValues: []interface{}{"pod", "kubedns", "maps", map[string]int{"three": 4}},
+			want:       " pod=\"kubedns\" maps=map[three:4]",
+		},
+		{
+			// KRef renders "namespace/name", or just "name" for empty namespace.
+			keysValues: []interface{}{"pod", KRef("kube-system", "kubedns"), "status", "ready"},
+			want:       " pod=\"kube-system/kubedns\" status=\"ready\"",
+		},
+		{
+			keysValues: []interface{}{"pod", KRef("", "kubedns"), "status", "ready"},
+			want:       " pod=\"kubedns\" status=\"ready\"",
+		},
+		{
+			keysValues: []interface{}{"pod", KObj(kMetadataMock{"test-name", "test-ns"}), "status", "ready"},
+			want:       " pod=\"test-ns/test-name\" status=\"ready\"",
+		},
+		{
+			keysValues: []interface{}{"pod", KObj(kMetadataMock{"test-name", ""}), "status", "ready"},
+			want:       " pod=\"test-name\" status=\"ready\"",
+		},
+		{
+			// nil and typed-nil KMetadata values must render as empty refs.
+			keysValues: []interface{}{"pod", KObj(nil), "status", "ready"},
+			want:       " pod=\"\" status=\"ready\"",
+		},
+		{
+			keysValues: []interface{}{"pod", KObj((*ptrKMetadataMock)(nil)), "status", "ready"},
+			want:       " pod=\"\" status=\"ready\"",
+		},
+		{
+			keysValues: []interface{}{"pod", KObj((*kMetadataMock)(nil)), "status", "ready"},
+			want:       " pod=\"\" status=\"ready\"",
+		},
+	}
+
+	for _, d := range testKVList {
+		b := &bytes.Buffer{}
+		kvListFormat(b, d.keysValues...)
+		if b.String() != d.want {
+			t.Errorf("kvlist format error:\n got:\n\t%s\nwant:\t%s", b.String(), d.want)
+		}
+	}
+}
+
+// createTestValueOfLoggingT returns a loggingT populated with the baseline
+// flag values that the setVState tests start from.
+func createTestValueOfLoggingT() *loggingT {
+	return &loggingT{
+		toStderr:        true,
+		alsoToStderr:    false,
+		stderrThreshold: errorLog,
+		verbosity:       Level(0),
+		skipHeaders:     false,
+		skipLogHeaders:  false,
+		addDirHeader:    false,
+	}
+}
+
+// createTestValueOfModulePat builds a modulePat from its pattern, literal
+// flag, and verbosity level.
+func createTestValueOfModulePat(p string, li bool, le Level) modulePat {
+	return modulePat{
+		pattern: p,
+		literal: li,
+		level:   le,
+	}
+}
+
+// compareModuleSpec reports whether two moduleSpecs hold identical filter
+// lists (same length, equal modulePat elements in the same order).
+func compareModuleSpec(a, b moduleSpec) bool {
+	if len(a.filter) != len(b.filter) {
+		return false
+	}
+	for i, pat := range a.filter {
+		if pat != b.filter[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// TestSetVState verifies that setVState copies the verbosity level, vmodule
+// filter list, and cached filter length into the loggingT receiver.
+func TestSetVState(t *testing.T) {
+	// Target loggingT value
+	want := createTestValueOfLoggingT()
+	want.verbosity = Level(3)
+	want.vmodule.filter = []modulePat{
+		createTestValueOfModulePat("recordio", true, Level(2)),
+		createTestValueOfModulePat("file", true, Level(1)),
+		createTestValueOfModulePat("gfs*", false, Level(3)),
+		createTestValueOfModulePat("gopher*", false, Level(3)),
+	}
+	want.filterLength = 4
+
+	// loggingT value to which test is run
+	target := createTestValueOfLoggingT()
+
+	tf := []modulePat{
+		createTestValueOfModulePat("recordio", true, Level(2)),
+		createTestValueOfModulePat("file", true, Level(1)),
+		createTestValueOfModulePat("gfs*", false, Level(3)),
+		createTestValueOfModulePat("gopher*", false, Level(3)),
+	}
+
+	// setFilter=true so filterLength is recomputed from tf.
+	target.setVState(Level(3), tf, true)
+
+	if want.verbosity != target.verbosity || !compareModuleSpec(want.vmodule, target.vmodule) || want.filterLength != target.filterLength {
+		t.Errorf("setVState method doesn't configure loggingT values' verbosity, vmodule or filterLength:\nwant:\n\tverbosity:\t%v\n\tvmodule:\t%v\n\tfilterLength:\t%v\ngot:\n\tverbosity:\t%v\n\tvmodule:\t%v\n\tfilterLength:\t%v", want.verbosity, want.vmodule, want.filterLength, target.verbosity, target.vmodule, target.filterLength)
+	}
+}
+
+// sampleLogFilter is a LogFilter test double that redacts the phrase
+// "filter me" from messages and arguments.
+type sampleLogFilter struct{}
+
+// Filter replaces every string argument containing "filter me" with the
+// "[FILTERED]" placeholder, mutating args in place and returning it.
+func (f *sampleLogFilter) Filter(args []interface{}) []interface{} {
+	for i := range args {
+		if s, ok := args[i].(string); ok && strings.Contains(s, "filter me") {
+			args[i] = "[FILTERED]"
+		}
+	}
+	return args
+}
+
+// FilterF redacts the first occurrence of "filter me" in a printf-style
+// format string and filters the argument list.
+func (f *sampleLogFilter) FilterF(format string, args []interface{}) (string, []interface{}) {
+	return strings.Replace(format, "filter me", "[FILTERED]", 1), f.Filter(args)
+}
+
+// FilterS redacts the first occurrence of "filter me" in a structured log
+// message and filters its key/value list.
+func (f *sampleLogFilter) FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) {
+	return strings.Replace(msg, "filter me", "[FILTERED]", 1), f.Filter(keysAndValues)
+}
+
+// TestLogFilter installs sampleLogFilter and verifies that every public
+// logging entry point (Info/Warning/Error families, the S/f/ln/Depth
+// variants, and the V(...) methods) routes its arguments through the filter.
+func TestLogFilter(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	SetLogFilter(&sampleLogFilter{})
+	defer SetLogFilter(nil)
+	// One entry per public logging function, tagged with the severity
+	// buffer its output should land in.
+	funcs := []struct {
+		name     string
+		logFunc  func(args ...interface{})
+		severity severity
+	}{{
+		name:     "Info",
+		logFunc:  Info,
+		severity: infoLog,
+	}, {
+		name: "InfoDepth",
+		logFunc: func(args ...interface{}) {
+			InfoDepth(1, args...)
+		},
+		severity: infoLog,
+	}, {
+		name:     "Infoln",
+		logFunc:  Infoln,
+		severity: infoLog,
+	}, {
+		name: "Infof",
+		logFunc: func(args ...interface{}) {
+
+			Infof(args[0].(string), args[1:]...)
+		},
+		severity: infoLog,
+	}, {
+		name: "InfoS",
+		logFunc: func(args ...interface{}) {
+			InfoS(args[0].(string), args[1:]...)
+		},
+		severity: infoLog,
+	}, {
+		name:     "Warning",
+		logFunc:  Warning,
+		severity: warningLog,
+	}, {
+		name: "WarningDepth",
+		logFunc: func(args ...interface{}) {
+			WarningDepth(1, args...)
+		},
+		severity: warningLog,
+	}, {
+		name:     "Warningln",
+		logFunc:  Warningln,
+		severity: warningLog,
+	}, {
+		name: "Warningf",
+		logFunc: func(args ...interface{}) {
+			Warningf(args[0].(string), args[1:]...)
+		},
+		severity: warningLog,
+	}, {
+		name:     "Error",
+		logFunc:  Error,
+		severity: errorLog,
+	}, {
+		name: "ErrorDepth",
+		logFunc: func(args ...interface{}) {
+			ErrorDepth(1, args...)
+		},
+		severity: errorLog,
+	}, {
+		name:     "Errorln",
+		logFunc:  Errorln,
+		severity: errorLog,
+	}, {
+		name: "Errorf",
+		logFunc: func(args ...interface{}) {
+			Errorf(args[0].(string), args[1:]...)
+		},
+		severity: errorLog,
+	}, {
+		name: "ErrorS",
+		logFunc: func(args ...interface{}) {
+			ErrorS(errors.New("testerror"), args[0].(string), args[1:]...)
+		},
+		severity: errorLog,
+	}, {
+		name: "V().Info",
+		logFunc: func(args ...interface{}) {
+			V(0).Info(args...)
+		},
+		severity: infoLog,
+	}, {
+		name: "V().Infoln",
+		logFunc: func(args ...interface{}) {
+			V(0).Infoln(args...)
+		},
+		severity: infoLog,
+	}, {
+		name: "V().Infof",
+		logFunc: func(args ...interface{}) {
+			V(0).Infof(args[0].(string), args[1:]...)
+		},
+		severity: infoLog,
+	}, {
+		name: "V().InfoS",
+		logFunc: func(args ...interface{}) {
+			V(0).InfoS(args[0].(string), args[1:]...)
+		},
+		severity: infoLog,
+	}, {
+		name: "V().Error",
+		logFunc: func(args ...interface{}) {
+			V(0).Error(errors.New("test error"), args[0].(string), args[1:]...)
+		},
+		severity: errorLog,
+	}, {
+		name: "V().ErrorS",
+		logFunc: func(args ...interface{}) {
+			V(0).ErrorS(errors.New("test error"), args[0].(string), args[1:]...)
+		},
+		severity: errorLog,
+	}}
+
+	// Inputs that should and should not trigger redaction; "filter me" may
+	// appear either in the format string or in an argument.
+	testcases := []struct {
+		name           string
+		args           []interface{}
+		expectFiltered bool
+	}{{
+		args:           []interface{}{"%s:%s", "foo", "bar"},
+		expectFiltered: false,
+	}, {
+		args:           []interface{}{"%s:%s", "foo", "filter me"},
+		expectFiltered: true,
+	}, {
+		args:           []interface{}{"filter me %s:%s", "foo", "bar"},
+		expectFiltered: true,
+	}}
+
+	for _, f := range funcs {
+		for _, tc := range testcases {
+			// Fresh buffers per case so contains() only sees this call's output.
+			logging.newBuffers()
+			f.logFunc(tc.args...)
+			got := contains(f.severity, "[FILTERED]", t)
+			if got != tc.expectFiltered {
+				t.Errorf("%s filter application failed, got %v, want %v", f.name, got, tc.expectFiltered)
+			}
+		}
+	}
+}
+
+// TestInfoSWithLogr verifies that InfoS forwards msg and key/value pairs
+// unchanged to an installed logr-style logger.
+func TestInfoSWithLogr(t *testing.T) {
+	logger := new(testLogr)
+
+	testDataInfo := []struct {
+		msg        string
+		keysValues []interface{}
+		expected   testLogrEntry
+	}{{
+		msg:        "foo",
+		keysValues: []interface{}{},
+		expected: testLogrEntry{
+			severity:      infoLog,
+			msg:           "foo",
+			keysAndValues: []interface{}{},
+		},
+	}, {
+		msg:        "bar",
+		keysValues: []interface{}{"a", 1},
+		expected: testLogrEntry{
+			severity:      infoLog,
+			msg:           "bar",
+			keysAndValues: []interface{}{"a", 1},
+		},
+	}}
+
+	for _, data := range testDataInfo {
+		t.Run(data.msg, func(t *testing.T) {
+			// Install the recording logger for just this subtest.
+			SetLogger(logger)
+			defer SetLogger(nil)
+			defer logger.reset()
+
+			InfoS(data.msg, data.keysValues...)
+
+			if !reflect.DeepEqual(logger.entries, []testLogrEntry{data.expected}) {
+				t.Errorf("expected: %+v; but got: %+v", []testLogrEntry{data.expected}, logger.entries)
+			}
+		})
+	}
+}
+
+// TestErrorSWithLogr verifies that ErrorS forwards the error (including a
+// nil error), msg, and key/value pairs unchanged to an installed logger.
+func TestErrorSWithLogr(t *testing.T) {
+	logger := new(testLogr)
+
+	testError := errors.New("testError")
+
+	testDataInfo := []struct {
+		err        error
+		msg        string
+		keysValues []interface{}
+		expected   testLogrEntry
+	}{{
+		err:        testError,
+		msg:        "foo1",
+		keysValues: []interface{}{},
+		expected: testLogrEntry{
+			severity:      errorLog,
+			msg:           "foo1",
+			keysAndValues: []interface{}{},
+			err:           testError,
+		},
+	}, {
+		err:        testError,
+		msg:        "bar1",
+		keysValues: []interface{}{"a", 1},
+		expected: testLogrEntry{
+			severity:      errorLog,
+			msg:           "bar1",
+			keysAndValues: []interface{}{"a", 1},
+			err:           testError,
+		},
+	}, {
+		// nil errors must still be delivered (as nil), not dropped.
+		err:        nil,
+		msg:        "foo2",
+		keysValues: []interface{}{},
+		expected: testLogrEntry{
+			severity:      errorLog,
+			msg:           "foo2",
+			keysAndValues: []interface{}{},
+			err:           nil,
+		},
+	}, {
+		err:        nil,
+		msg:        "bar2",
+		keysValues: []interface{}{"a", 1},
+		expected: testLogrEntry{
+			severity:      errorLog,
+			msg:           "bar2",
+			keysAndValues: []interface{}{"a", 1},
+			err:           nil,
+		},
+	}}
+
+	for _, data := range testDataInfo {
+		t.Run(data.msg, func(t *testing.T) {
+			// Install the recording logger for just this subtest.
+			SetLogger(logger)
+			defer SetLogger(nil)
+			defer logger.reset()
+
+			ErrorS(data.err, data.msg, data.keysValues...)
+
+			if !reflect.DeepEqual(logger.entries, []testLogrEntry{data.expected}) {
+				t.Errorf("expected: %+v; but got: %+v", []testLogrEntry{data.expected}, logger.entries)
+			}
+		})
+	}
+}
+
+// TestCallDepthLogr verifies that every depth-aware entry point attributes
+// the log call to its direct caller when a CallDepth-capable logger is set.
+func TestCallDepthLogr(t *testing.T) {
+	logger := &callDepthTestLogr{}
+	logger.resetCallDepth()
+
+	testCases := []struct {
+		name  string
+		logFn func()
+	}{
+		{
+			name:  "Info log",
+			logFn: func() { Info("info log") },
+		},
+		{
+			name:  "InfoDepth log",
+			logFn: func() { InfoDepth(0, "infodepth log") },
+		},
+		{
+			name:  "InfoSDepth log",
+			logFn: func() { InfoSDepth(0, "infoSDepth log") },
+		},
+		{
+			name:  "Warning log",
+			logFn: func() { Warning("warning log") },
+		},
+		{
+			name:  "WarningDepth log",
+			logFn: func() { WarningDepth(0, "warningdepth log") },
+		},
+		{
+			name:  "Error log",
+			logFn: func() { Error("error log") },
+		},
+		{
+			name:  "ErrorDepth log",
+			logFn: func() { ErrorDepth(0, "errordepth log") },
+		},
+		{
+			name:  "ErrorSDepth log",
+			logFn: func() { ErrorSDepth(0, errors.New("some error"), "errorSDepth log") },
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			SetLogger(logger)
+			defer SetLogger(nil)
+			defer logger.reset()
+			defer logger.resetCallDepth()
+
+			// Keep these lines together.
+			_, wantFile, wantLine, _ := runtime.Caller(0)
+			tc.logFn()
+			wantLine++
+
+			// Fatalf, not Errorf: entries[0] below would panic with an
+			// index-out-of-range if no entry was recorded.
+			if len(logger.entries) != 1 {
+				t.Fatalf("expected a single log entry to be generated, got %d", len(logger.entries))
+			}
+			checkLogrEntryCorrectCaller(t, wantFile, wantLine, logger.entries[0])
+		})
+	}
+}
+
+// TestCallDepthLogrInfoS verifies InfoS attributes the call site correctly
+// when invoked through one intermediate wrapper function.
+func TestCallDepthLogrInfoS(t *testing.T) {
+	logger := &callDepthTestLogr{}
+	logger.resetCallDepth()
+	SetLogger(logger)
+	// Uninstall the logger on exit so other tests are not affected
+	// (matches TestCallDepthLogr's cleanup).
+	defer SetLogger(nil)
+
+	// Add wrapper to ensure callDepthTestLogr +2 offset is correct.
+	logFunc := func() {
+		InfoS("infoS log")
+	}
+
+	// Keep these lines together.
+	_, wantFile, wantLine, _ := runtime.Caller(0)
+	logFunc()
+	wantLine++
+
+	// Fatalf, not Errorf: entries[0] would panic if nothing was logged.
+	if len(logger.entries) != 1 {
+		t.Fatalf("expected a single log entry to be generated, got %d", len(logger.entries))
+	}
+	checkLogrEntryCorrectCaller(t, wantFile, wantLine, logger.entries[0])
+}
+
+// TestCallDepthLogrErrorS verifies ErrorS attributes the call site correctly
+// when invoked through one intermediate wrapper function.
+func TestCallDepthLogrErrorS(t *testing.T) {
+	logger := &callDepthTestLogr{}
+	logger.resetCallDepth()
+	SetLogger(logger)
+	// Uninstall the logger on exit so other tests are not affected
+	// (matches TestCallDepthLogr's cleanup).
+	defer SetLogger(nil)
+
+	// Add wrapper to ensure callDepthTestLogr +2 offset is correct.
+	logFunc := func() {
+		ErrorS(errors.New("some error"), "errorS log")
+	}
+
+	// Keep these lines together.
+	_, wantFile, wantLine, _ := runtime.Caller(0)
+	logFunc()
+	wantLine++
+
+	// Fatalf, not Errorf: entries[0] would panic if nothing was logged.
+	if len(logger.entries) != 1 {
+		t.Fatalf("expected a single log entry to be generated, got %d", len(logger.entries))
+	}
+	checkLogrEntryCorrectCaller(t, wantFile, wantLine, logger.entries[0])
+}
+
+// TestCallDepthLogrGoLog verifies that output redirected from the standard
+// library's log package (via CopyStandardLogTo) is attributed to the caller
+// of stdLog.Print, not to the redirection plumbing.
+// NOTE(review): CopyStandardLogTo cannot be undone, so the std logger stays
+// redirected for the rest of the test binary.
+func TestCallDepthLogrGoLog(t *testing.T) {
+	logger := &callDepthTestLogr{}
+	logger.resetCallDepth()
+	SetLogger(logger)
+	defer SetLogger(nil)
+	CopyStandardLogTo("INFO")
+
+	// Add wrapper to ensure callDepthTestLogr +2 offset is correct.
+	logFunc := func() {
+		stdLog.Print("some log")
+	}
+
+	// Keep these lines together.
+	_, wantFile, wantLine, _ := runtime.Caller(0)
+	logFunc()
+	wantLine++
+
+	// Fatalf, not Errorf: entries[0] would panic if nothing was logged.
+	// (A leftover debug fmt.Println of the entry was removed here.)
+	if len(logger.entries) != 1 {
+		t.Fatalf("expected a single log entry to be generated, got %d", len(logger.entries))
+	}
+	checkLogrEntryCorrectCaller(t, wantFile, wantLine, logger.entries[0])
+}
+
+// Test callDepthTestLogr logs the expected offsets.
+func TestCallDepthTestLogr(t *testing.T) {
+	logger := &callDepthTestLogr{}
+	logger.resetCallDepth()
+
+	logFunc := func() {
+		logger.Info("some info log")
+	}
+	// Keep these lines together.
+	_, wantFile, wantLine, _ := runtime.Caller(0)
+	logFunc()
+	wantLine++
+
+	if len(logger.entries) != 1 {
+		t.Errorf("expected a single log entry to be generated, got %d", len(logger.entries))
+	}
+	checkLogrEntryCorrectCaller(t, wantFile, wantLine, logger.entries[0])
+
+	logger.reset()
+
+	logFunc = func() {
+		logger.Error(errors.New("error"), "some error log")
+	}
+	// Keep these lines together.
+	_, wantFile, wantLine, _ = runtime.Caller(0)
+	logFunc()
+	wantLine++
+
+	if len(logger.entries) != 1 {
+		t.Errorf("expected a single log entry to be generated, got %d", len(logger.entries))
+	}
+	checkLogrEntryCorrectCaller(t, wantFile, wantLine, logger.entries[0])
+}
+
+// testLogr is a recording logr-style test double: every Info/Error call is
+// appended to entries. The mutex guards entries for concurrent use.
+type testLogr struct {
+	entries []testLogrEntry
+	mutex   sync.Mutex
+}
+
+// testLogrEntry captures one logged call for later assertions.
+type testLogrEntry struct {
+	severity      severity      // infoLog for Info calls, errorLog for Error calls
+	msg           string        // message as passed by the caller
+	keysAndValues []interface{} // key/value pairs as passed by the caller
+	err           error         // non-nil only for Error calls (may still be nil there)
+}
+
+// reset discards all recorded entries (empty, non-nil slice so DeepEqual
+// comparisons behave consistently).
+func (l *testLogr) reset() {
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+	l.entries = []testLogrEntry{}
+}
+
+// Info records an info-severity entry with the given message and fields.
+func (l *testLogr) Info(msg string, keysAndValues ...interface{}) {
+	entry := testLogrEntry{
+		severity:      infoLog,
+		msg:           msg,
+		keysAndValues: keysAndValues,
+	}
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+	l.entries = append(l.entries, entry)
+}
+
+// Error records an error-severity entry with the given error, message, and
+// fields.
+func (l *testLogr) Error(err error, msg string, keysAndValues ...interface{}) {
+	entry := testLogrEntry{
+		severity:      errorLog,
+		msg:           msg,
+		keysAndValues: keysAndValues,
+		err:           err,
+	}
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+	l.entries = append(l.entries, entry)
+}
+
+// The remaining logr.Logger methods are not exercised by these tests and
+// panic if called, so an accidental use fails loudly.
+func (l *testLogr) Enabled() bool               { panic("not implemented") }
+func (l *testLogr) V(int) logr.Logger           { panic("not implemented") }
+func (l *testLogr) WithName(string) logr.Logger { panic("not implemented") }
+func (l *testLogr) WithValues(...interface{}) logr.Logger {
+	panic("not implemented")
+}
+
+// callDepthTestLogr extends testLogr with call-depth support so tests can
+// check which source location klog attributes to each log call.
+type callDepthTestLogr struct {
+	testLogr
+	callDepth int // extra stack frames to skip, set via WithCallDepth
+}
+
+// resetCallDepth clears any depth set by a previous WithCallDepth call.
+func (l *callDepthTestLogr) resetCallDepth() {
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+	l.callDepth = 0
+}
+
+// WithCallDepth implements the logr call-depth extension.
+func (l *callDepthTestLogr) WithCallDepth(depth int) logr.Logger {
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+	// Note: Usually WithCallDepth would be implemented by cloning l
+	// and setting the call depth on the clone. We modify l instead in
+	// this test helper for simplicity.
+	l.callDepth = depth
+	return l
+}
+
+// Info records an info entry whose first two key/value elements are the
+// file and line of the effective caller, resolved via runtime.Caller.
+func (l *callDepthTestLogr) Info(msg string, keysAndValues ...interface{}) {
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+	// Add 2 to depth for the wrapper function caller and for invocation in
+	// test case.
+	_, file, line, _ := runtime.Caller(l.callDepth + 2)
+	l.entries = append(l.entries, testLogrEntry{
+		severity:      infoLog,
+		msg:           msg,
+		keysAndValues: append([]interface{}{file, line}, keysAndValues...),
+	})
+}
+
+// Error records an error entry whose first two key/value elements are the
+// file and line of the effective caller, resolved via runtime.Caller.
+func (l *callDepthTestLogr) Error(err error, msg string, keysAndValues ...interface{}) {
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+	// Add 2 to depth for the wrapper function caller and for invocation in
+	// test case.
+	_, file, line, _ := runtime.Caller(l.callDepth + 2)
+	l.entries = append(l.entries, testLogrEntry{
+		severity:      errorLog,
+		msg:           msg,
+		keysAndValues: append([]interface{}{file, line}, keysAndValues...),
+		err:           err,
+	})
+}
+
+// checkLogrEntryCorrectCaller asserts that entry records wantFile/wantLine
+// as its caller; callDepthTestLogr stores them as the first two fields.
+func checkLogrEntryCorrectCaller(t *testing.T, wantFile string, wantLine int, entry testLogrEntry) {
+	t.Helper()
+
+	// Compare "file:line" renderings of the expected and recorded caller.
+	got := fmt.Sprintf("%s:%d", entry.keysAndValues[0], entry.keysAndValues[1])
+	want := fmt.Sprintf("%s:%d", wantFile, wantLine)
+	if got != want {
+		t.Errorf("expected file and line %q but got %q", want, got)
+	}
+}
+
+// existedFlag lists the pre-existing flag names that are exempt from the
+// KlogPrefix naming requirement checked by TestKlogFlagPrefix.
+var existedFlag = map[string]struct{}{
+	"log_dir":           {},
+	"add_dir_header":    {},
+	"alsologtostderr":   {},
+	"log_backtrace_at":  {},
+	"log_file":          {},
+	"log_file_max_size": {},
+	"logtostderr":       {},
+	"one_output":        {},
+	"skip_headers":      {},
+	"skip_log_headers":  {},
+	"stderrthreshold":   {},
+	"v":                 {},
+	"vmodule":           {},
+}
+
+// KlogPrefix is the prefix that every newly added klog flag name must carry.
+const KlogPrefix string = "klog"
+
+// TestKlogFlagPrefix checks that every flag registered by InitFlags either
+// appears in existedFlag (grandfathered names) or carries the KlogPrefix.
+func TestKlogFlagPrefix(t *testing.T) {
+	fs := &flag.FlagSet{}
+	InitFlags(fs)
+	fs.VisitAll(func(f *flag.Flag) {
+		if _, found := existedFlag[f.Name]; !found {
+			if !strings.HasPrefix(f.Name, KlogPrefix) {
+				// Grammar fix: was "flag %s not have klog prefix".
+				t.Errorf("flag %s does not have klog prefix: %s", f.Name, KlogPrefix)
+			}
+		}
+	})
+}
+
+// TestKObjs verifies that KObjs converts a slice of KMetadata values (or
+// pointers, including nil elements) into ObjectRefs, and returns nil for
+// anything that is not a slice of KMetadata.
+func TestKObjs(t *testing.T) {
+	tests := []struct {
+		name string
+		obj  interface{}
+		want []ObjectRef
+	}{
+		{
+			name: "test for KObjs function with KMetadata slice",
+			obj: []kMetadataMock{
+				{
+					name: "kube-dns",
+					ns:   "kube-system",
+				},
+				{
+					name: "mi-conf",
+				},
+				{},
+			},
+			want: []ObjectRef{
+				{
+					Name:      "kube-dns",
+					Namespace: "kube-system",
+				},
+				{
+					Name: "mi-conf",
+				},
+				{},
+			},
+		},
+		{
+			// A nil pointer element must map to a zero ObjectRef, not panic.
+			name: "test for KObjs function with KMetadata pointer slice",
+			obj: []*kMetadataMock{
+				{
+					name: "kube-dns",
+					ns:   "kube-system",
+				},
+				{
+					name: "mi-conf",
+				},
+				nil,
+			},
+			want: []ObjectRef{
+				{
+					Name:      "kube-dns",
+					Namespace: "kube-system",
+				},
+				{
+					Name: "mi-conf",
+				},
+				{},
+			},
+		},
+		{
+			name: "test for KObjs function with slice does not implement KMetadata",
+			obj:  []int{1, 2, 3, 4, 6},
+			want: nil,
+		},
+		{
+			name: "test for KObjs function with interface",
+			obj:  "test case",
+			want: nil,
+		},
+		{
+			name: "test for KObjs function with nil",
+			obj:  nil,
+			want: nil,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if !reflect.DeepEqual(KObjs(tt.obj), tt.want) {
+				t.Errorf("\nwant:\t %v\n got:\t %v", tt.want, KObjs(tt.obj))
+			}
+		})
+	}
+}

+ 27 - 0
klog_wrappers_test.go

@@ -0,0 +1,27 @@
+// Copyright 2020 The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+// These helper functions must be in a separate source file because the
+// tests in klog_test.go compare the logged source code file name against
+// "klog_test.go". "klog_wrappers_test.go" must *not* be logged.
+
+// myInfoS wraps InfoSDepth with depth 1 so the logged call site is
+// attributed to myInfoS's caller in klog_test.go.
+func myInfoS(msg string, keyAndValues ...interface{}) {
+	InfoSDepth(1, msg, keyAndValues...)
+}
+
+// myErrorS wraps ErrorSDepth with depth 1 so the logged call site is
+// attributed to myErrorS's caller in klog_test.go.
+func myErrorS(err error, msg string, keyAndValues ...interface{}) {
+	ErrorSDepth(1, err, msg, keyAndValues...)
+}