Merge pull request #11788 from reikani/pchoi

Changed snake case naming to camelCase.
Brian Goff 2015-03-26 23:55:50 -07:00
commit da5c863d20
19 changed files with 101 additions and 101 deletions
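The rename follows standard Go style: identifiers use mixedCaps (camelCase) rather than snake_case, with initialisms kept upper-case inside a name (stdout_json becomes stdoutJSON, expected_ip becomes expectedIP). A minimal illustrative sketch of the pattern applied throughout this PR, using names borrowed from the diff below rather than actual lines from it:

package main

import "fmt"

func main() {
	// Illustrative example of the snake_case -> camelCase rename; not code from this PR.
	// Before the rename: a snake_case local such as layers_downloaded.
	// After the rename: camelCase, e.g. layersDownloaded.
	layersDownloaded := true
	fmt.Println("layers downloaded:", layersDownloaded)
}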


@@ -276,18 +276,18 @@ func TestGetEvents(t *testing.T) {
t.Fatal("handler was not called")
}
assertContentType(r, "application/json", t)
var stdout_json struct {
var stdoutJSON struct {
Since int
Until int
}
if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil {
if err := json.Unmarshal(r.Body.Bytes(), &stdoutJSON); err != nil {
t.Fatal(err)
}
if stdout_json.Since != 1 {
t.Errorf("since != 1: %#v", stdout_json.Since)
if stdoutJSON.Since != 1 {
t.Errorf("since != 1: %#v", stdoutJSON.Since)
}
if stdout_json.Until != 0 {
t.Errorf("until != 0: %#v", stdout_json.Until)
if stdoutJSON.Until != 0 {
t.Errorf("until != 0: %#v", stdoutJSON.Until)
}
}
@@ -509,8 +509,8 @@ func toJson(data interface{}, t *testing.T) io.Reader {
return &buf
}
func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) {
if recorder.HeaderMap.Get("Content-Type") != content_type {
func assertContentType(recorder *httptest.ResponseRecorder, contentType string, t *testing.T) {
if recorder.HeaderMap.Get("Content-Type") != contentType {
t.Fatalf("%#v\n", recorder)
}
}


@@ -154,7 +154,7 @@ func (daemon *Daemon) Install(eng *engine.Engine) error {
}
// FIXME: this hack is necessary for legacy integration tests to access
// the daemon object.
eng.Hack_SetGlobalVar("httpapi.daemon", daemon)
eng.HackSetGlobalVar("httpapi.daemon", daemon)
return nil
}


@@ -24,7 +24,7 @@ type Driver struct {
// InitLoopbacks ensures that the loopback devices are properly created within
// the system running the device mapper tests.
func InitLoopbacks() error {
stat_t, err := getBaseLoopStats()
statT, err := getBaseLoopStats()
if err != nil {
return err
}
@@ -34,10 +34,10 @@ func InitLoopbacks() error {
// only create new loopback files if they don't exist
if _, err := os.Stat(loopPath); err != nil {
if mkerr := syscall.Mknod(loopPath,
uint32(stat_t.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
return mkerr
}
os.Chown(loopPath, int(stat_t.Uid), int(stat_t.Gid))
os.Chown(loopPath, int(statT.Uid), int(statT.Gid))
}
}
return nil


@@ -89,14 +89,14 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) error {
v.SetInt("NCPU", runtime.NumCPU())
v.SetInt64("MemTotal", meminfo.MemTotal)
v.Set("DockerRootDir", daemon.Config().Root)
if http_proxy := os.Getenv("http_proxy"); http_proxy != "" {
v.Set("HttpProxy", http_proxy)
if httpProxy := os.Getenv("http_proxy"); httpProxy != "" {
v.Set("HttpProxy", httpProxy)
}
if https_proxy := os.Getenv("https_proxy"); https_proxy != "" {
v.Set("HttpsProxy", https_proxy)
if httpsProxy := os.Getenv("https_proxy"); httpsProxy != "" {
v.Set("HttpsProxy", httpsProxy)
}
if no_proxy := os.Getenv("no_proxy"); no_proxy != "" {
v.Set("NoProxy", no_proxy)
if noProxy := os.Getenv("no_proxy"); noProxy != "" {
v.Set("NoProxy", noProxy)
}
if hostname, err := os.Hostname(); err == nil {


@@ -30,7 +30,7 @@ func (daemon *Daemon) Containers(job *engine.Job) error {
n = job.GetenvInt("limit")
size = job.GetenvBool("size")
psFilters filters.Args
filt_exited []int
filtExited []int
)
outs := engine.NewTable("Created", 0)
@@ -44,7 +44,7 @@ func (daemon *Daemon) Containers(job *engine.Job) error {
if err != nil {
return err
}
filt_exited = append(filt_exited, code)
filtExited = append(filtExited, code)
}
}
@@ -109,15 +109,15 @@ func (daemon *Daemon) Containers(job *engine.Job) error {
return errLast
}
}
if len(filt_exited) > 0 {
should_skip := true
for _, code := range filt_exited {
if len(filtExited) > 0 {
shouldSkip := true
for _, code := range filtExited {
if code == container.ExitCode && !container.Running {
should_skip = false
shouldSkip = false
break
}
}
if should_skip {
if shouldSkip {
return nil
}
}


@@ -266,7 +266,7 @@ func InitDriver(job *engine.Job) error {
ipAllocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP)
// https://github.com/docker/docker/issues/2768
job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeIPv4Network.IP)
job.Eng.HackSetGlobalVar("httpapi.bridgeIP", bridgeIPv4Network.IP)
for name, f := range map[string]engine.Handler{
"allocate_interface": Allocate,
@@ -522,8 +522,8 @@ func Allocate(job *engine.Job) error {
if globalIPv6Network != nil {
// If globalIPv6Network Size is at least a /80 subnet generate IPv6 address from MAC address
netmask_ones, _ := globalIPv6Network.Mask.Size()
if requestedIPv6 == nil && netmask_ones <= 80 {
netmaskOnes, _ := globalIPv6Network.Mask.Size()
if requestedIPv6 == nil && netmaskOnes <= 80 {
requestedIPv6 = make(net.IP, len(globalIPv6Network.IP))
copy(requestedIPv6, globalIPv6Network.IP)
for i, h := range mac {


@@ -184,16 +184,16 @@ func TestIPv6InterfaceAllocationAutoNetmaskLe80(t *testing.T) {
// ensure global ip with mac
ip := net.ParseIP(output.Get("GlobalIPv6"))
expected_ip := net.ParseIP("2001:db8:1234:1234:1234:abcd:abcd:abcd")
if ip.String() != expected_ip.String() {
t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String())
expectedIP := net.ParseIP("2001:db8:1234:1234:1234:abcd:abcd:abcd")
if ip.String() != expectedIP.String() {
t.Fatalf("Error ip %s should be %s", ip.String(), expectedIP.String())
}
// ensure link local format
ip = net.ParseIP(output.Get("LinkLocalIPv6"))
expected_ip = net.ParseIP("fe80::a9cd:abff:fecd:abcd")
if ip.String() != expected_ip.String() {
t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String())
expectedIP = net.ParseIP("fe80::a9cd:abff:fecd:abcd")
if ip.String() != expectedIP.String() {
t.Fatalf("Error ip %s should be %s", ip.String(), expectedIP.String())
}
}
@@ -203,18 +203,18 @@ func TestIPv6InterfaceAllocationRequest(t *testing.T) {
input := engine.Env{}
_, subnet, _ := net.ParseCIDR("2001:db8:1234:1234:1234::/80")
expected_ip := net.ParseIP("2001:db8:1234:1234:1234::1328")
expectedIP := net.ParseIP("2001:db8:1234:1234:1234::1328")
// set global ipv6
input.Set("globalIPv6Network", subnet.String())
input.Set("RequestedIPv6", expected_ip.String())
input.Set("RequestedIPv6", expectedIP.String())
output := newInterfaceAllocation(t, input)
// ensure global ip with mac
ip := net.ParseIP(output.Get("GlobalIPv6"))
if ip.String() != expected_ip.String() {
t.Fatalf("Error ip %s should be %s", ip.String(), expected_ip.String())
if ip.String() != expectedIP.String() {
t.Fatalf("Error ip %s should be %s", ip.String(), expectedIP.String())
}
// retry -> fails for duplicated address


@@ -2,7 +2,7 @@ package engine
type Hack map[string]interface{}
func (eng *Engine) Hack_GetGlobalVar(key string) interface{} {
func (eng *Engine) HackGetGlobalVar(key string) interface{} {
if eng.hack == nil {
return nil
}
@@ -13,7 +13,7 @@ func (eng *Engine) Hack_GetGlobalVar(key string) interface{} {
return val
}
func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) {
func (eng *Engine) HackSetGlobalVar(key string, val interface{}) {
if eng.hack == nil {
eng.hack = make(Hack)
}


@@ -21,8 +21,8 @@ func (s *TagStore) CmdImages(job *engine.Job) error {
var (
allImages map[string]*image.Image
err error
filt_tagged = true
filt_label = false
filtTagged = true
filtLabel = false
)
imageFilters, err := filters.FromParam(job.Getenv("filters"))
@@ -38,14 +38,14 @@ func (s *TagStore) CmdImages(job *engine.Job) error {
if i, ok := imageFilters["dangling"]; ok {
for _, value := range i {
if strings.ToLower(value) == "true" {
filt_tagged = false
filtTagged = false
}
}
}
_, filt_label = imageFilters["label"]
_, filtLabel = imageFilters["label"]
if job.GetenvBool("all") && filt_tagged {
if job.GetenvBool("all") && filtTagged {
allImages, err = s.graph.Map()
} else {
allImages, err = s.graph.Heads()
@@ -70,7 +70,7 @@ func (s *TagStore) CmdImages(job *engine.Job) error {
}
if out, exists := lookup[id]; exists {
if filt_tagged {
if filtTagged {
if utils.DigestReference(ref) {
out.SetList("RepoDigests", append(out.GetList("RepoDigests"), imgRef))
} else { // Tag Ref.
@@ -83,7 +83,7 @@ func (s *TagStore) CmdImages(job *engine.Job) error {
if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) {
continue
}
if filt_tagged {
if filtTagged {
out := &engine.Env{}
out.SetJson("ParentId", image.Parent)
out.SetJson("Id", image.ID)
@@ -114,7 +114,7 @@ func (s *TagStore) CmdImages(job *engine.Job) error {
}
// Display images which aren't part of a repository/tag
if job.Getenv("filter") == "" || filt_label {
if job.Getenv("filter") == "" || filtLabel {
for _, image := range allImages {
if !imageFilters.MatchKVList("label", image.ContainerConfig.Labels) {
continue


@@ -152,7 +152,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
errors := make(chan error)
layers_downloaded := false
layersDownloaded := false
for _, image := range repoData.ImgList {
downloadImage := func(img *registry.ImgData) {
if askedTag != "" && img.Tag != askedTag {
@@ -189,29 +189,29 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, repoInfo.CanonicalName), nil))
success := false
var lastErr, err error
var is_downloaded bool
var isDownloaded bool
for _, ep := range repoInfo.Index.Mirrors {
out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
if isDownloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
// Don't report errors when pulling from mirrors.
logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err)
continue
}
layers_downloaded = layers_downloaded || is_downloaded
layersDownloaded = layersDownloaded || isDownloaded
success = true
break
}
if !success {
for _, ep := range repoData.Endpoints {
out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
if is_downloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
if isDownloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
// It's not ideal that only the last error is returned, it would be better to concatenate the errors.
// As the error is also given to the output stream the user will see the error.
lastErr = err
out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err), nil))
continue
}
layers_downloaded = layers_downloaded || is_downloaded
layersDownloaded = layersDownloaded || isDownloaded
success = true
break
}
@@ -262,7 +262,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *
if len(askedTag) > 0 {
requestedTag = utils.ImageReference(repoInfo.CanonicalName, askedTag)
}
WriteStatus(requestedTag, out, sf, layers_downloaded)
WriteStatus(requestedTag, out, sf, layersDownloaded)
return nil
}
@@ -275,7 +275,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
// FIXME: Try to stream the images?
// FIXME: Launch the getRemoteImage() in goroutines
layers_downloaded := false
layersDownloaded := false
for i := len(history) - 1; i >= 0; i-- {
id := history[i]
@@ -299,16 +299,16 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
if err != nil && j == retries {
out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
return layers_downloaded, err
return layersDownloaded, err
} else if err != nil {
time.Sleep(time.Duration(j) * 500 * time.Millisecond)
continue
}
img, err = image.NewImgJSON(imgJSON)
layers_downloaded = true
layersDownloaded = true
if err != nil && j == retries {
out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err)
return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
} else if err != nil {
time.Sleep(time.Duration(j) * 500 * time.Millisecond)
continue
@@ -333,9 +333,9 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
continue
} else if err != nil {
out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
return layers_downloaded, err
return layersDownloaded, err
}
layers_downloaded = true
layersDownloaded = true
defer layer.Close()
err = s.graph.Register(img,
@@ -353,7 +353,7 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
continue
} else if err != nil {
out.Write(sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
return layers_downloaded, err
return layersDownloaded, err
} else {
break
}
@@ -361,11 +361,11 @@ func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint
}
out.Write(sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
}
return layers_downloaded, nil
return layersDownloaded, nil
}
func WriteStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamFormatter, layers_downloaded bool) {
if layers_downloaded {
func WriteStatus(requestedTag string, out io.Writer, sf *streamformatter.StreamFormatter, layersDownloaded bool) {
if layersDownloaded {
out.Write(sf.FormatStatus("", "Status: Downloaded newer image for %s", requestedTag))
} else {
out.Write(sf.FormatStatus("", "Status: Image is up to date for %s", requestedTag))


@@ -251,11 +251,11 @@ func TestPsListContainersSize(t *testing.T) {
cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello")
runCommandWithOutput(cmd)
cmd = exec.Command(dockerBinary, "ps", "-s", "-n=1")
base_out, _, err := runCommandWithOutput(cmd)
base_lines := strings.Split(strings.Trim(base_out, "\n "), "\n")
base_sizeIndex := strings.Index(base_lines[0], "SIZE")
base_foundSize := base_lines[1][base_sizeIndex:]
base_bytes, err := strconv.Atoi(strings.Split(base_foundSize, " ")[0])
baseOut, _, err := runCommandWithOutput(cmd)
baseLines := strings.Split(strings.Trim(baseOut, "\n "), "\n")
baseSizeIndex := strings.Index(baseLines[0], "SIZE")
baseFoundsize := baseLines[1][baseSizeIndex:]
baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, " ")[0])
if err != nil {
t.Fatal(err)
}
@@ -292,7 +292,7 @@ func TestPsListContainersSize(t *testing.T) {
if foundID != id[:12] {
t.Fatalf("Expected id %s, got %s", id[:12], foundID)
}
expectedSize := fmt.Sprintf("%d B", (2 + base_bytes))
expectedSize := fmt.Sprintf("%d B", (2 + baseBytes))
foundSize := lines[1][sizeIndex:]
if foundSize != expectedSize {
t.Fatalf("Expected size %q, got %q", expectedSize, foundSize)


@@ -2186,15 +2186,15 @@ func eqToBaseDiff(out string, t *testing.T) bool {
out1, _, err := runCommandWithOutput(cmd)
cID := stripTrailingCharacters(out1)
cmd = exec.Command(dockerBinary, "diff", cID)
base_diff, _, err := runCommandWithOutput(cmd)
baseDiff, _, err := runCommandWithOutput(cmd)
if err != nil {
t.Fatal(err, base_diff)
t.Fatal(err, baseDiff)
}
base_arr := strings.Split(base_diff, "\n")
sort.Strings(base_arr)
out_arr := strings.Split(out, "\n")
sort.Strings(out_arr)
return sliceEq(base_arr, out_arr)
baseArr := strings.Split(baseDiff, "\n")
sort.Strings(baseArr)
outArr := strings.Split(out, "\n")
sort.Strings(outArr)
return sliceEq(baseArr, outArr)
}
func sliceEq(a, b []string) bool {


@@ -158,9 +158,9 @@ func TestStartVolumesFromFailsCleanly(t *testing.T) {
// Check that we have the volumes we want
out, _, _ := dockerCmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer")
n_volumes := strings.Trim(out, " \r\n'")
if n_volumes != "2" {
t.Fatalf("Missing volumes: expected 2, got %s", n_volumes)
nVolumes := strings.Trim(out, " \r\n'")
if nVolumes != "2" {
t.Fatalf("Missing volumes: expected 2, got %s", nVolumes)
}
logDone("start - missing containers in --volumes-from did not affect subsequent runs")


@@ -61,9 +61,9 @@ func TestTagInvalidUnprefixedRepo(t *testing.T) {
// ensure we don't allow the use of invalid tags; these tag operations should fail
func TestTagInvalidPrefixedRepo(t *testing.T) {
long_tag := stringutils.GenerateRandomAlphaOnlyString(121)
longTag := stringutils.GenerateRandomAlphaOnlyString(121)
invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", long_tag}
invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag}
for _, repotag := range invalidTags {
tagCmd := exec.Command(dockerBinary, "tag", "busybox", repotag)


@@ -150,7 +150,7 @@ func getContainer(eng *engine.Engine, id string, t Fataler) *daemon.Container {
}
func mkDaemonFromEngine(eng *engine.Engine, t Fataler) *daemon.Daemon {
iDaemon := eng.Hack_GetGlobalVar("httpapi.daemon")
iDaemon := eng.HackGetGlobalVar("httpapi.daemon")
if iDaemon == nil {
panic("Legacy daemon field not set in engine")
}


@@ -260,7 +260,7 @@ func Exists(table Table, chain string, rule ...string) bool {
// parse "iptables -S" for the rule (this checks rules in a specific chain
// in a specific table)
rule_string := strings.Join(rule, " ")
ruleString := strings.Join(rule, " ")
existingRules, _ := exec.Command("iptables", "-t", string(table), "-S", chain).Output()
// regex to replace ips in rule
@@ -269,7 +269,7 @@ func Exists(table Table, chain string, rule ...string) bool {
return strings.Contains(
re.ReplaceAllString(string(existingRules), "?"),
re.ReplaceAllString(rule_string, "?"),
re.ReplaceAllString(ruleString, "?"),
)
}


@@ -941,11 +941,11 @@ func (f *FlagSet) parseOne() (bool, string, error) {
// it's a flag. does it have an argument?
f.args = f.args[1:]
has_value := false
hasValue := false
value := ""
if i := strings.Index(name, "="); i != -1 {
value = trimQuotes(name[i+1:])
has_value = true
hasValue = true
name = name[:i]
}
@@ -962,7 +962,7 @@ func (f *FlagSet) parseOne() (bool, string, error) {
return false, name, ErrRetry
}
if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
if has_value {
if hasValue {
if err := fv.Set(value); err != nil {
return false, "", f.failf("invalid boolean value %q for -%s: %v", value, name, err)
}
@@ -971,12 +971,12 @@ func (f *FlagSet) parseOne() (bool, string, error) {
}
} else {
// It must have a value, which might be the next argument.
if !has_value && len(f.args) > 0 {
if !hasValue && len(f.args) > 0 {
// value is the next arg
has_value = true
hasValue = true
value, f.args = f.args[0], f.args[1:]
}
if !has_value {
if !hasValue {
return false, "", f.failf("flag needs an argument: -%s", name)
}
if err := flag.Value.Set(value); err != nil {


@@ -60,10 +60,10 @@ func (ipnet *netIPNet) MarshalJSON() ([]byte, error) {
}
func (ipnet *netIPNet) UnmarshalJSON(b []byte) (err error) {
var ipnet_str string
if err = json.Unmarshal(b, &ipnet_str); err == nil {
var ipnetStr string
if err = json.Unmarshal(b, &ipnetStr); err == nil {
var cidr *net.IPNet
if _, cidr, err = net.ParseCIDR(ipnet_str); err == nil {
if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
*ipnet = netIPNet(*cidr)
}
}


@@ -171,7 +171,7 @@ func makePublicIndex() *IndexInfo {
return index
}
func makeServiceConfig(mirrors []string, insecure_registries []string) *ServiceConfig {
func makeServiceConfig(mirrors []string, insecureRegistries []string) *ServiceConfig {
options := &Options{
Mirrors: opts.NewListOpts(nil),
InsecureRegistries: opts.NewListOpts(nil),
@@ -181,9 +181,9 @@ func makeServiceConfig(mirrors []string, insecure_registries []string) *ServiceC
options.Mirrors.Set(mirror)
}
}
if insecure_registries != nil {
for _, insecure_registries := range insecure_registries {
options.InsecureRegistries.Set(insecure_registries)
if insecureRegistries != nil {
for _, insecureRegistries := range insecureRegistries {
options.InsecureRegistries.Set(insecureRegistries)
}
}