Moves to official vendoring solution
This commit is contained in:
parent
d19044896e
commit
48ff0e472a
@ -3,9 +3,9 @@ sudo: required
|
||||
dist: trusty
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
- 1.5.3
|
||||
- 1.6
|
||||
- tip
|
||||
|
||||
env:
|
||||
global:
|
||||
|
142
Godeps/Godeps.json
generated
142
Godeps/Godeps.json
generated
@ -1,6 +1,7 @@
|
||||
{
|
||||
"ImportPath": "github.com/appc/cni",
|
||||
"GoVersion": "go1.4.2",
|
||||
"GoVersion": "go1.6",
|
||||
"GodepVersion": "v58",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
@ -27,15 +28,154 @@
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/config",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/internal/codelocation",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/internal/containernode",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/internal/failer",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/internal/leafnodes",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/internal/remote",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/internal/spec",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/internal/specrunner",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/internal/suite",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/internal/testingtproxy",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/internal/writer",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/reporters",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/reporters/stenographer",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/ginkgo/types",
|
||||
"Comment": "v1.2.0-29-g7f8ab55",
|
||||
"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/format",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/gbytes",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/gexec",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/internal/assertion",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/internal/asyncassertion",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/internal/oraclematcher",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/internal/testingtsupport",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/matchers",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/edge",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/node",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/util",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/onsi/gomega/types",
|
||||
"Comment": "v1.0-71-g2152b45",
|
||||
"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vishvananda/netlink",
|
||||
"Rev": "ecf47fd5739b3d2c3daf7c89c4b9715a2605c21b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/vishvananda/netlink/nl",
|
||||
"Rev": "ecf47fd5739b3d2c3daf7c89c4b9715a2605c21b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/sys/unix",
|
||||
"Rev": "e11762ca30adc5b39fdbfd8c4250dabeb8e456d3"
|
||||
|
2
Godeps/_workspace/.gitignore
generated
vendored
2
Godeps/_workspace/.gitignore
generated
vendored
@ -1,2 +0,0 @@
|
||||
/pkg
|
||||
/bin
|
155
Godeps/_workspace/src/github.com/coreos/go-iptables/iptables/iptables_test.go
generated
vendored
155
Godeps/_workspace/src/github.com/coreos/go-iptables/iptables/iptables_test.go
generated
vendored
@ -1,155 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package iptables
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func randChain(t *testing.T) string {
|
||||
n, err := rand.Int(rand.Reader, big.NewInt(1000000))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to generate random chain name: %v", err)
|
||||
}
|
||||
|
||||
return "TEST-" + n.String()
|
||||
}
|
||||
|
||||
func TestChain(t *testing.T) {
|
||||
chain := randChain(t)
|
||||
|
||||
ipt, err := New()
|
||||
if err != nil {
|
||||
t.Fatalf("New failed: %v", err)
|
||||
}
|
||||
|
||||
// chain shouldn't exist, this will create new
|
||||
err = ipt.ClearChain("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("ClearChain (of missing) failed: %v", err)
|
||||
}
|
||||
|
||||
// chain now exists
|
||||
err = ipt.ClearChain("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("ClearChain (of empty) failed: %v", err)
|
||||
}
|
||||
|
||||
// put a simple rule in
|
||||
err = ipt.Append("filter", chain, "-s", "0.0.0.0/0", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("Append failed: %v", err)
|
||||
}
|
||||
|
||||
// can't delete non-empty chain
|
||||
err = ipt.DeleteChain("filter", chain)
|
||||
if err == nil {
|
||||
t.Fatalf("DeleteChain of non-empty chain did not fail")
|
||||
}
|
||||
|
||||
err = ipt.ClearChain("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("ClearChain (of non-empty) failed: %v", err)
|
||||
}
|
||||
|
||||
// rename the chain
|
||||
newChain := randChain(t)
|
||||
err = ipt.RenameChain("filter", chain, newChain)
|
||||
if err != nil {
|
||||
t.Fatalf("RenameChain failed: %v", err)
|
||||
}
|
||||
|
||||
// chain empty, should be ok
|
||||
err = ipt.DeleteChain("filter", newChain)
|
||||
if err != nil {
|
||||
t.Fatalf("DeleteChain of empty chain failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRules(t *testing.T) {
|
||||
chain := randChain(t)
|
||||
|
||||
ipt, err := New()
|
||||
if err != nil {
|
||||
t.Fatalf("New failed: %v", err)
|
||||
}
|
||||
|
||||
// chain shouldn't exist, this will create new
|
||||
err = ipt.ClearChain("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("ClearChain (of missing) failed: %v", err)
|
||||
}
|
||||
|
||||
err = ipt.Append("filter", chain, "-s", "10.1.0.0/16", "-d", "8.8.8.8/32", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("Append failed: %v", err)
|
||||
}
|
||||
|
||||
err = ipt.AppendUnique("filter", chain, "-s", "10.1.0.0/16", "-d", "8.8.8.8/32", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("AppendUnique failed: %v", err)
|
||||
}
|
||||
|
||||
err = ipt.Append("filter", chain, "-s", "10.2.0.0/16", "-d", "8.8.8.8/32", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("Append failed: %v", err)
|
||||
}
|
||||
|
||||
err = ipt.Insert("filter", chain, 2, "-s", "10.2.0.0/16", "-d", "9.9.9.9/32", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("Insert failed: %v", err)
|
||||
}
|
||||
|
||||
err = ipt.Insert("filter", chain, 1, "-s", "10.1.0.0/16", "-d", "9.9.9.9/32", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("Insert failed: %v", err)
|
||||
}
|
||||
|
||||
err = ipt.Delete("filter", chain, "-s", "10.1.0.0/16", "-d", "9.9.9.9/32", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("Delete failed: %v", err)
|
||||
}
|
||||
|
||||
rules, err := ipt.List("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("List failed: %v", err)
|
||||
}
|
||||
|
||||
expected := []string{
|
||||
"-N " + chain,
|
||||
"-A " + chain + " -s 10.1.0.0/16 -d 8.8.8.8/32 -j ACCEPT",
|
||||
"-A " + chain + " -s 10.2.0.0/16 -d 9.9.9.9/32 -j ACCEPT",
|
||||
"-A " + chain + " -s 10.2.0.0/16 -d 8.8.8.8/32 -j ACCEPT",
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(rules, expected) {
|
||||
t.Fatalf("List mismatch: \ngot %#v \nneed %#v", rules, expected)
|
||||
}
|
||||
|
||||
// Clear the chain that was created.
|
||||
err = ipt.ClearChain("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to clear test chain: %v", err)
|
||||
}
|
||||
|
||||
// Delete the chain that was created
|
||||
err = ipt.DeleteChain("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to delete test chain: %v", err)
|
||||
}
|
||||
}
|
82
Godeps/_workspace/src/github.com/coreos/go-systemd/activation/files_test.go
generated
vendored
82
Godeps/_workspace/src/github.com/coreos/go-systemd/activation/files_test.go
generated
vendored
@ -1,82 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package activation
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// correctStringWritten fails the text if the correct string wasn't written
|
||||
// to the other side of the pipe.
|
||||
func correctStringWritten(t *testing.T, r *os.File, expected string) bool {
|
||||
bytes := make([]byte, len(expected))
|
||||
io.ReadAtLeast(r, bytes, len(expected))
|
||||
|
||||
if string(bytes) != expected {
|
||||
t.Fatalf("Unexpected string %s", string(bytes))
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// TestActivation forks out a copy of activation.go example and reads back two
|
||||
// strings from the pipes that are passed in.
|
||||
func TestActivation(t *testing.T) {
|
||||
cmd := exec.Command("go", "run", "../examples/activation/activation.go")
|
||||
|
||||
r1, w1, _ := os.Pipe()
|
||||
r2, w2, _ := os.Pipe()
|
||||
cmd.ExtraFiles = []*os.File{
|
||||
w1,
|
||||
w2,
|
||||
}
|
||||
|
||||
cmd.Env = os.Environ()
|
||||
cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1")
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
correctStringWritten(t, r1, "Hello world")
|
||||
correctStringWritten(t, r2, "Goodbye world")
|
||||
}
|
||||
|
||||
func TestActivationNoFix(t *testing.T) {
|
||||
cmd := exec.Command("go", "run", "../examples/activation/activation.go")
|
||||
cmd.Env = os.Environ()
|
||||
cmd.Env = append(cmd.Env, "LISTEN_FDS=2")
|
||||
|
||||
out, _ := cmd.CombinedOutput()
|
||||
if bytes.Contains(out, []byte("No files")) == false {
|
||||
t.Fatalf("Child didn't error out as expected")
|
||||
}
|
||||
}
|
||||
|
||||
func TestActivationNoFiles(t *testing.T) {
|
||||
cmd := exec.Command("go", "run", "../examples/activation/activation.go")
|
||||
cmd.Env = os.Environ()
|
||||
cmd.Env = append(cmd.Env, "LISTEN_FDS=0", "FIX_LISTEN_PID=1")
|
||||
|
||||
out, _ := cmd.CombinedOutput()
|
||||
if bytes.Contains(out, []byte("No files")) == false {
|
||||
t.Fatalf("Child didn't error out as expected")
|
||||
}
|
||||
}
|
86
Godeps/_workspace/src/github.com/coreos/go-systemd/activation/listeners_test.go
generated
vendored
86
Godeps/_workspace/src/github.com/coreos/go-systemd/activation/listeners_test.go
generated
vendored
@ -1,86 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package activation
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// correctStringWritten fails the text if the correct string wasn't written
|
||||
// to the other side of the pipe.
|
||||
func correctStringWrittenNet(t *testing.T, r net.Conn, expected string) bool {
|
||||
bytes := make([]byte, len(expected))
|
||||
io.ReadAtLeast(r, bytes, len(expected))
|
||||
|
||||
if string(bytes) != expected {
|
||||
t.Fatalf("Unexpected string %s", string(bytes))
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// TestActivation forks out a copy of activation.go example and reads back two
|
||||
// strings from the pipes that are passed in.
|
||||
func TestListeners(t *testing.T) {
|
||||
cmd := exec.Command("go", "run", "../examples/activation/listen.go")
|
||||
|
||||
l1, err := net.Listen("tcp", ":9999")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
l2, err := net.Listen("tcp", ":1234")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
t1 := l1.(*net.TCPListener)
|
||||
t2 := l2.(*net.TCPListener)
|
||||
|
||||
f1, _ := t1.File()
|
||||
f2, _ := t2.File()
|
||||
|
||||
cmd.ExtraFiles = []*os.File{
|
||||
f1,
|
||||
f2,
|
||||
}
|
||||
|
||||
r1, err := net.Dial("tcp", "127.0.0.1:9999")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
r1.Write([]byte("Hi"))
|
||||
|
||||
r2, err := net.Dial("tcp", "127.0.0.1:1234")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
r2.Write([]byte("Hi"))
|
||||
|
||||
cmd.Env = os.Environ()
|
||||
cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1")
|
||||
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
println(string(out))
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
correctStringWrittenNet(t, r1, "Hello world")
|
||||
correctStringWrittenNet(t, r2, "Goodbye world")
|
||||
}
|
68
Godeps/_workspace/src/github.com/coreos/go-systemd/activation/packetconns_test.go
generated
vendored
68
Godeps/_workspace/src/github.com/coreos/go-systemd/activation/packetconns_test.go
generated
vendored
@ -1,68 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package activation
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestActivation forks out a copy of activation.go example and reads back two
|
||||
// strings from the pipes that are passed in.
|
||||
func TestPacketConns(t *testing.T) {
|
||||
cmd := exec.Command("go", "run", "../examples/activation/udpconn.go")
|
||||
|
||||
u1, err := net.ListenUDP("udp", &net.UDPAddr{Port: 9999})
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
u2, err := net.ListenUDP("udp", &net.UDPAddr{Port: 1234})
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
f1, _ := u1.File()
|
||||
f2, _ := u2.File()
|
||||
|
||||
cmd.ExtraFiles = []*os.File{
|
||||
f1,
|
||||
f2,
|
||||
}
|
||||
|
||||
r1, err := net.Dial("udp", "127.0.0.1:9999")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
r1.Write([]byte("Hi"))
|
||||
|
||||
r2, err := net.Dial("udp", "127.0.0.1:1234")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
r2.Write([]byte("Hi"))
|
||||
|
||||
cmd.Env = os.Environ()
|
||||
cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1")
|
||||
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("Cmd output '%s', err: '%s'\n", out, err)
|
||||
}
|
||||
|
||||
correctStringWrittenNet(t, r1, "Hello world")
|
||||
correctStringWrittenNet(t, r2, "Goodbye world")
|
||||
}
|
69
Godeps/_workspace/src/github.com/d2g/dhcp4client/client_test.go
generated
vendored
69
Godeps/_workspace/src/github.com/d2g/dhcp4client/client_test.go
generated
vendored
@ -1,69 +0,0 @@
|
||||
package dhcp4client
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
/*
|
||||
* Example Client
|
||||
*/
|
||||
func Test_ExampleClient(test *testing.T) {
|
||||
var err error
|
||||
|
||||
m, err := net.ParseMAC("08-00-27-00-A8-E8")
|
||||
if err != nil {
|
||||
log.Printf("MAC Error:%v\n", err)
|
||||
}
|
||||
|
||||
//Create a connection to use
|
||||
//We need to set the connection ports to 1068 and 1067 so we don't need root access
|
||||
c, err := NewInetSock(SetLocalAddr(net.UDPAddr{IP: net.IPv4(0, 0, 0, 0), Port: 1068}), SetRemoteAddr(net.UDPAddr{IP: net.IPv4bcast, Port: 1067}))
|
||||
if err != nil {
|
||||
test.Error("Client Conection Generation:" + err.Error())
|
||||
}
|
||||
|
||||
exampleClient, err := New(HardwareAddr(m), Connection(c))
|
||||
if err != nil {
|
||||
test.Fatalf("Error:%v\n", err)
|
||||
}
|
||||
|
||||
success, acknowledgementpacket, err := exampleClient.Request()
|
||||
|
||||
test.Logf("Success:%v\n", success)
|
||||
test.Logf("Packet:%v\n", acknowledgementpacket)
|
||||
|
||||
if err != nil {
|
||||
networkError, ok := err.(*net.OpError)
|
||||
if ok && networkError.Timeout() {
|
||||
test.Log("Test Skipping as it didn't find a DHCP Server")
|
||||
test.SkipNow()
|
||||
}
|
||||
test.Fatalf("Error:%v\n", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
test.Error("We didn't sucessfully get a DHCP Lease?")
|
||||
} else {
|
||||
log.Printf("IP Received:%v\n", acknowledgementpacket.YIAddr().String())
|
||||
}
|
||||
|
||||
test.Log("Start Renewing Lease")
|
||||
success, acknowledgementpacket, err = exampleClient.Renew(acknowledgementpacket)
|
||||
if err != nil {
|
||||
networkError, ok := err.(*net.OpError)
|
||||
if ok && networkError.Timeout() {
|
||||
test.Log("Renewal Failed! Because it didn't find the DHCP server very Strange")
|
||||
test.Errorf("Error" + err.Error())
|
||||
}
|
||||
test.Fatalf("Error:%v\n", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
test.Error("We didn't sucessfully Renew a DHCP Lease?")
|
||||
} else {
|
||||
log.Printf("IP Received:%v\n", acknowledgementpacket.YIAddr().String())
|
||||
}
|
||||
|
||||
}
|
98
Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table.go
generated
vendored
98
Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table.go
generated
vendored
@ -1,98 +0,0 @@
|
||||
/*
|
||||
|
||||
Table provides a simple DSL for Ginkgo-native Table-Driven Tests
|
||||
|
||||
The godoc documentation describes Table's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo#table-driven-tests
|
||||
|
||||
*/
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
/*
|
||||
DescribeTable describes a table-driven test.
|
||||
|
||||
For example:
|
||||
|
||||
DescribeTable("a simple table",
|
||||
func(x int, y int, expected bool) {
|
||||
Ω(x > y).Should(Equal(expected))
|
||||
},
|
||||
Entry("x > y", 1, 0, true),
|
||||
Entry("x == y", 0, 0, false),
|
||||
Entry("x < y", 0, 1, false),
|
||||
)
|
||||
|
||||
The first argument to `DescribeTable` is a string description.
|
||||
The second argument is a function that will be run for each table entry. Your assertions go here - the function is equivalent to a Ginkgo It.
|
||||
The subsequent arguments must be of type `TableEntry`. We recommend using the `Entry` convenience constructors.
|
||||
|
||||
The `Entry` constructor takes a string description followed by an arbitrary set of parameters. These parameters are passed into your function.
|
||||
|
||||
Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`. Each `Entry` is turned into an `It` within the `Describe`.
|
||||
|
||||
It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run).
|
||||
|
||||
Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry). In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable.
|
||||
*/
|
||||
func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
|
||||
describeTable(description, itBody, entries, false, false)
|
||||
return true
|
||||
}
|
||||
|
||||
/*
|
||||
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
|
||||
*/
|
||||
func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
|
||||
describeTable(description, itBody, entries, false, true)
|
||||
return true
|
||||
}
|
||||
|
||||
/*
|
||||
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
|
||||
*/
|
||||
func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
|
||||
describeTable(description, itBody, entries, true, false)
|
||||
return true
|
||||
}
|
||||
|
||||
/*
|
||||
You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`.
|
||||
*/
|
||||
func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
|
||||
describeTable(description, itBody, entries, true, false)
|
||||
return true
|
||||
}
|
||||
|
||||
func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) {
|
||||
itBodyValue := reflect.ValueOf(itBody)
|
||||
if itBodyValue.Kind() != reflect.Func {
|
||||
panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody))
|
||||
}
|
||||
|
||||
if pending {
|
||||
ginkgo.PDescribe(description, func() {
|
||||
for _, entry := range entries {
|
||||
entry.generateIt(itBodyValue)
|
||||
}
|
||||
})
|
||||
} else if focused {
|
||||
ginkgo.FDescribe(description, func() {
|
||||
for _, entry := range entries {
|
||||
entry.generateIt(itBodyValue)
|
||||
}
|
||||
})
|
||||
} else {
|
||||
ginkgo.Describe(description, func() {
|
||||
for _, entry := range entries {
|
||||
entry.generateIt(itBodyValue)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
72
Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table_entry.go
generated
vendored
72
Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table_entry.go
generated
vendored
@ -1,72 +0,0 @@
|
||||
package table
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
/*
|
||||
TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
|
||||
*/
|
||||
type TableEntry struct {
|
||||
Description string
|
||||
Parameters []interface{}
|
||||
Pending bool
|
||||
Focused bool
|
||||
}
|
||||
|
||||
func (t TableEntry) generateIt(itBody reflect.Value) {
|
||||
if t.Pending {
|
||||
ginkgo.PIt(t.Description)
|
||||
return
|
||||
}
|
||||
|
||||
values := []reflect.Value{}
|
||||
for _, param := range t.Parameters {
|
||||
values = append(values, reflect.ValueOf(param))
|
||||
}
|
||||
|
||||
body := func() {
|
||||
itBody.Call(values)
|
||||
}
|
||||
|
||||
if t.Focused {
|
||||
ginkgo.FIt(t.Description, body)
|
||||
} else {
|
||||
ginkgo.It(t.Description, body)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Entry constructs a TableEntry.
|
||||
|
||||
The first argument is a required description (this becomes the content of the generated Ginkgo `It`).
|
||||
Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`.
|
||||
|
||||
Each Entry ends up generating an individual Ginkgo It.
|
||||
*/
|
||||
func Entry(description string, parameters ...interface{}) TableEntry {
|
||||
return TableEntry{description, parameters, false, false}
|
||||
}
|
||||
|
||||
/*
|
||||
You can focus a particular entry with FEntry. This is equivalent to FIt.
|
||||
*/
|
||||
func FEntry(description string, parameters ...interface{}) TableEntry {
|
||||
return TableEntry{description, parameters, false, true}
|
||||
}
|
||||
|
||||
/*
|
||||
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
|
||||
*/
|
||||
func PEntry(description string, parameters ...interface{}) TableEntry {
|
||||
return TableEntry{description, parameters, true, false}
|
||||
}
|
||||
|
||||
/*
|
||||
You can mark a particular entry as pending with XEntry. This is equivalent to XIt.
|
||||
*/
|
||||
func XEntry(description string, parameters ...interface{}) TableEntry {
|
||||
return TableEntry{description, parameters, true, false}
|
||||
}
|
182
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
generated
vendored
182
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
generated
vendored
@ -1,182 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"go/build"
|
||||
|
||||
"github.com/onsi/ginkgo/ginkgo/nodot"
|
||||
)
|
||||
|
||||
func BuildBootstrapCommand() *Command {
|
||||
var agouti, noDot bool
|
||||
flagSet := flag.NewFlagSet("bootstrap", flag.ExitOnError)
|
||||
flagSet.BoolVar(&agouti, "agouti", false, "If set, bootstrap will generate a bootstrap file for writing Agouti tests")
|
||||
flagSet.BoolVar(&noDot, "nodot", false, "If set, bootstrap will generate a bootstrap file that does not . import ginkgo and gomega")
|
||||
|
||||
return &Command{
|
||||
Name: "bootstrap",
|
||||
FlagSet: flagSet,
|
||||
UsageCommand: "ginkgo bootstrap <FLAGS>",
|
||||
Usage: []string{
|
||||
"Bootstrap a test suite for the current package",
|
||||
"Accepts the following flags:",
|
||||
},
|
||||
Command: func(args []string, additionalArgs []string) {
|
||||
generateBootstrap(agouti, noDot)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var bootstrapText = `package {{.Package}}_test
|
||||
|
||||
import (
|
||||
{{.GinkgoImport}}
|
||||
{{.GomegaImport}}
|
||||
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test{{.FormattedName}}(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "{{.FormattedName}} Suite")
|
||||
}
|
||||
`
|
||||
|
||||
var agoutiBootstrapText = `package {{.Package}}_test
|
||||
|
||||
import (
|
||||
{{.GinkgoImport}}
|
||||
{{.GomegaImport}}
|
||||
"github.com/sclevine/agouti"
|
||||
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test{{.FormattedName}}(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "{{.FormattedName}} Suite")
|
||||
}
|
||||
|
||||
var agoutiDriver *agouti.WebDriver
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
// Choose a WebDriver:
|
||||
|
||||
agoutiDriver = agouti.PhantomJS()
|
||||
// agoutiDriver = agouti.Selenium()
|
||||
// agoutiDriver = agouti.ChromeDriver()
|
||||
|
||||
Expect(agoutiDriver.Start()).To(Succeed())
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
Expect(agoutiDriver.Stop()).To(Succeed())
|
||||
})
|
||||
`
|
||||
|
||||
type bootstrapData struct {
|
||||
Package string
|
||||
FormattedName string
|
||||
GinkgoImport string
|
||||
GomegaImport string
|
||||
}
|
||||
|
||||
func getPackageAndFormattedName() (string, string, string) {
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
complainAndQuit("Could not get current working directory: \n" + err.Error())
|
||||
}
|
||||
|
||||
dirName := strings.Replace(filepath.Base(path), "-", "_", -1)
|
||||
dirName = strings.Replace(dirName, " ", "_", -1)
|
||||
|
||||
pkg, err := build.ImportDir(path, 0)
|
||||
packageName := pkg.Name
|
||||
if err != nil {
|
||||
packageName = dirName
|
||||
}
|
||||
|
||||
formattedName := prettifyPackageName(filepath.Base(path))
|
||||
return packageName, dirName, formattedName
|
||||
}
|
||||
|
||||
func prettifyPackageName(name string) string {
|
||||
name = strings.Replace(name, "-", " ", -1)
|
||||
name = strings.Replace(name, "_", " ", -1)
|
||||
name = strings.Title(name)
|
||||
name = strings.Replace(name, " ", "", -1)
|
||||
return name
|
||||
}
|
||||
|
||||
func fileExists(path string) bool {
|
||||
_, err := os.Stat(path)
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func generateBootstrap(agouti bool, noDot bool) {
|
||||
packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
|
||||
data := bootstrapData{
|
||||
Package: packageName,
|
||||
FormattedName: formattedName,
|
||||
GinkgoImport: `. "github.com/onsi/ginkgo"`,
|
||||
GomegaImport: `. "github.com/onsi/gomega"`,
|
||||
}
|
||||
|
||||
if noDot {
|
||||
data.GinkgoImport = `"github.com/onsi/ginkgo"`
|
||||
data.GomegaImport = `"github.com/onsi/gomega"`
|
||||
}
|
||||
|
||||
targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
|
||||
if fileExists(targetFile) {
|
||||
fmt.Printf("%s already exists.\n\n", targetFile)
|
||||
os.Exit(1)
|
||||
} else {
|
||||
fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
|
||||
}
|
||||
|
||||
f, err := os.Create(targetFile)
|
||||
if err != nil {
|
||||
complainAndQuit("Could not create file: " + err.Error())
|
||||
panic(err.Error())
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var templateText string
|
||||
if agouti {
|
||||
templateText = agoutiBootstrapText
|
||||
} else {
|
||||
templateText = bootstrapText
|
||||
}
|
||||
|
||||
bootstrapTemplate, err := template.New("bootstrap").Parse(templateText)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
bootstrapTemplate.Execute(buf, data)
|
||||
|
||||
if noDot {
|
||||
contents, err := nodot.ApplyNoDot(buf.Bytes())
|
||||
if err != nil {
|
||||
complainAndQuit("Failed to import nodot declarations: " + err.Error())
|
||||
}
|
||||
fmt.Println("To update the nodot declarations in the future, switch to this directory and run:\n\tginkgo nodot")
|
||||
buf = bytes.NewBuffer(contents)
|
||||
}
|
||||
|
||||
buf.WriteTo(f)
|
||||
|
||||
goFmt(targetFile)
|
||||
}
|
68
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/build_command.go
generated
vendored
68
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/build_command.go
generated
vendored
@ -1,68 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||
)
|
||||
|
||||
func BuildBuildCommand() *Command {
|
||||
commandFlags := NewBuildCommandFlags(flag.NewFlagSet("build", flag.ExitOnError))
|
||||
interruptHandler := interrupthandler.NewInterruptHandler()
|
||||
builder := &SpecBuilder{
|
||||
commandFlags: commandFlags,
|
||||
interruptHandler: interruptHandler,
|
||||
}
|
||||
|
||||
return &Command{
|
||||
Name: "build",
|
||||
FlagSet: commandFlags.FlagSet,
|
||||
UsageCommand: "ginkgo build <FLAGS> <PACKAGES>",
|
||||
Usage: []string{
|
||||
"Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
|
||||
"Accepts the following flags:",
|
||||
},
|
||||
Command: builder.BuildSpecs,
|
||||
}
|
||||
}
|
||||
|
||||
type SpecBuilder struct {
|
||||
commandFlags *RunWatchAndBuildCommandFlags
|
||||
interruptHandler *interrupthandler.InterruptHandler
|
||||
}
|
||||
|
||||
func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
|
||||
r.commandFlags.computeNodes()
|
||||
|
||||
suites, _ := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, false)
|
||||
|
||||
if len(suites) == 0 {
|
||||
complainAndQuit("Found no test suites")
|
||||
}
|
||||
|
||||
passed := true
|
||||
for _, suite := range suites {
|
||||
runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, nil)
|
||||
fmt.Printf("Compiling %s...\n", suite.PackageName)
|
||||
|
||||
path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
|
||||
err := runner.CompileTo(path)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
passed = false
|
||||
} else {
|
||||
fmt.Printf(" compiled %s.test\n", suite.PackageName)
|
||||
}
|
||||
|
||||
runner.CleanUp()
|
||||
}
|
||||
|
||||
if passed {
|
||||
os.Exit(0)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
123
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go
generated
vendored
123
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go
generated
vendored
@ -1,123 +0,0 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
/*
|
||||
* Creates a func init() node
|
||||
*/
|
||||
func createVarUnderscoreBlock() *ast.ValueSpec {
|
||||
valueSpec := &ast.ValueSpec{}
|
||||
object := &ast.Object{Kind: 4, Name: "_", Decl: valueSpec, Data: 0}
|
||||
ident := &ast.Ident{Name: "_", Obj: object}
|
||||
valueSpec.Names = append(valueSpec.Names, ident)
|
||||
return valueSpec
|
||||
}
|
||||
|
||||
/*
|
||||
* Creates a Describe("Testing with ginkgo", func() { }) node
|
||||
*/
|
||||
func createDescribeBlock() *ast.CallExpr {
|
||||
blockStatement := &ast.BlockStmt{List: []ast.Stmt{}}
|
||||
|
||||
fieldList := &ast.FieldList{}
|
||||
funcType := &ast.FuncType{Params: fieldList}
|
||||
funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
|
||||
basicLit := &ast.BasicLit{Kind: 9, Value: "\"Testing with Ginkgo\""}
|
||||
describeIdent := &ast.Ident{Name: "Describe"}
|
||||
return &ast.CallExpr{Fun: describeIdent, Args: []ast.Expr{basicLit, funcLit}}
|
||||
}
|
||||
|
||||
/*
|
||||
* Convenience function to return the name of the *testing.T param
|
||||
* for a Test function that will be rewritten. This is useful because
|
||||
* we will want to replace the usage of this named *testing.T inside the
|
||||
* body of the function with a GinktoT.
|
||||
*/
|
||||
func namedTestingTArg(node *ast.FuncDecl) string {
|
||||
return node.Type.Params.List[0].Names[0].Name // *exhale*
|
||||
}
|
||||
|
||||
/*
|
||||
* Convenience function to return the block statement node for a Describe statement
|
||||
*/
|
||||
func blockStatementFromDescribe(desc *ast.CallExpr) *ast.BlockStmt {
|
||||
var funcLit *ast.FuncLit
|
||||
var found = false
|
||||
|
||||
for _, node := range desc.Args {
|
||||
switch node := node.(type) {
|
||||
case *ast.FuncLit:
|
||||
found = true
|
||||
funcLit = node
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
panic("Error finding ast.FuncLit inside describe statement. Somebody done goofed.")
|
||||
}
|
||||
|
||||
return funcLit.Body
|
||||
}
|
||||
|
||||
/* convenience function for creating an It("TestNameHere")
|
||||
* with all the body of the test function inside the anonymous
|
||||
* func passed to It()
|
||||
*/
|
||||
func createItStatementForTestFunc(testFunc *ast.FuncDecl) *ast.ExprStmt {
|
||||
blockStatement := &ast.BlockStmt{List: testFunc.Body.List}
|
||||
fieldList := &ast.FieldList{}
|
||||
funcType := &ast.FuncType{Params: fieldList}
|
||||
funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
|
||||
|
||||
testName := rewriteTestName(testFunc.Name.Name)
|
||||
basicLit := &ast.BasicLit{Kind: 9, Value: fmt.Sprintf("\"%s\"", testName)}
|
||||
itBlockIdent := &ast.Ident{Name: "It"}
|
||||
callExpr := &ast.CallExpr{Fun: itBlockIdent, Args: []ast.Expr{basicLit, funcLit}}
|
||||
return &ast.ExprStmt{X: callExpr}
|
||||
}
|
||||
|
||||
/*
|
||||
* rewrite test names to be human readable
|
||||
* eg: rewrites "TestSomethingAmazing" as "something amazing"
|
||||
*/
|
||||
func rewriteTestName(testName string) string {
|
||||
nameComponents := []string{}
|
||||
currentString := ""
|
||||
indexOfTest := strings.Index(testName, "Test")
|
||||
if indexOfTest != 0 {
|
||||
return testName
|
||||
}
|
||||
|
||||
testName = strings.Replace(testName, "Test", "", 1)
|
||||
first, rest := testName[0], testName[1:]
|
||||
testName = string(unicode.ToLower(rune(first))) + rest
|
||||
|
||||
for _, rune := range testName {
|
||||
if unicode.IsUpper(rune) {
|
||||
nameComponents = append(nameComponents, currentString)
|
||||
currentString = string(unicode.ToLower(rune))
|
||||
} else {
|
||||
currentString += string(rune)
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(append(nameComponents, currentString), " ")
|
||||
}
|
||||
|
||||
func newGinkgoTFromIdent(ident *ast.Ident) *ast.CallExpr {
|
||||
return &ast.CallExpr{
|
||||
Lparen: ident.NamePos + 1,
|
||||
Rparen: ident.NamePos + 2,
|
||||
Fun: &ast.Ident{Name: "GinkgoT"},
|
||||
}
|
||||
}
|
||||
|
||||
func newGinkgoTInterface() *ast.Ident {
|
||||
return &ast.Ident{Name: "GinkgoTInterface"}
|
||||
}
|
91
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/import.go
generated
vendored
91
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/import.go
generated
vendored
@ -1,91 +0,0 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
)
|
||||
|
||||
/*
|
||||
* Given the root node of an AST, returns the node containing the
|
||||
* import statements for the file.
|
||||
*/
|
||||
func importsForRootNode(rootNode *ast.File) (imports *ast.GenDecl, err error) {
|
||||
for _, declaration := range rootNode.Decls {
|
||||
decl, ok := declaration.(*ast.GenDecl)
|
||||
if !ok || len(decl.Specs) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
_, ok = decl.Specs[0].(*ast.ImportSpec)
|
||||
if ok {
|
||||
imports = decl
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New(fmt.Sprintf("Could not find imports for root node:\n\t%#v\n", rootNode))
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
* Removes "testing" import, if present
|
||||
*/
|
||||
func removeTestingImport(rootNode *ast.File) {
|
||||
importDecl, err := importsForRootNode(rootNode)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
var index int
|
||||
for i, importSpec := range importDecl.Specs {
|
||||
importSpec := importSpec.(*ast.ImportSpec)
|
||||
if importSpec.Path.Value == "\"testing\"" {
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
importDecl.Specs = append(importDecl.Specs[:index], importDecl.Specs[index+1:]...)
|
||||
}
|
||||
|
||||
/*
|
||||
* Adds import statements for onsi/ginkgo, if missing
|
||||
*/
|
||||
func addGinkgoImports(rootNode *ast.File) {
|
||||
importDecl, err := importsForRootNode(rootNode)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
if len(importDecl.Specs) == 0 {
|
||||
// TODO: might need to create a import decl here
|
||||
panic("unimplemented : expected to find an imports block")
|
||||
}
|
||||
|
||||
needsGinkgo := true
|
||||
for _, importSpec := range importDecl.Specs {
|
||||
importSpec, ok := importSpec.(*ast.ImportSpec)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if importSpec.Path.Value == "\"github.com/onsi/ginkgo\"" {
|
||||
needsGinkgo = false
|
||||
}
|
||||
}
|
||||
|
||||
if needsGinkgo {
|
||||
importDecl.Specs = append(importDecl.Specs, createImport(".", "\"github.com/onsi/ginkgo\""))
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* convenience function to create an import statement
|
||||
*/
|
||||
func createImport(name, path string) *ast.ImportSpec {
|
||||
return &ast.ImportSpec{
|
||||
Name: &ast.Ident{Name: name},
|
||||
Path: &ast.BasicLit{Kind: 9, Value: path},
|
||||
}
|
||||
}
|
127
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
generated
vendored
127
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
generated
vendored
@ -1,127 +0,0 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
/*
|
||||
* RewritePackage takes a name (eg: my-package/tools), finds its test files using
|
||||
* Go's build package, and then rewrites them. A ginkgo test suite file will
|
||||
* also be added for this package, and all of its child packages.
|
||||
*/
|
||||
func RewritePackage(packageName string) {
|
||||
pkg, err := packageWithName(packageName)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
|
||||
}
|
||||
|
||||
for _, filename := range findTestsInPackage(pkg) {
|
||||
rewriteTestsInFile(filename)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
* Given a package, findTestsInPackage reads the test files in the directory,
|
||||
* and then recurses on each child package, returning a slice of all test files
|
||||
* found in this process.
|
||||
*/
|
||||
func findTestsInPackage(pkg *build.Package) (testfiles []string) {
|
||||
for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) {
|
||||
testfiles = append(testfiles, filepath.Join(pkg.Dir, file))
|
||||
}
|
||||
|
||||
dirFiles, err := ioutil.ReadDir(pkg.Dir)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error reading dir: '%s'\n%s\n", pkg.Dir, err.Error()))
|
||||
}
|
||||
|
||||
re := regexp.MustCompile(`^[._]`)
|
||||
|
||||
for _, file := range dirFiles {
|
||||
if !file.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
if re.Match([]byte(file.Name())) {
|
||||
continue
|
||||
}
|
||||
|
||||
packageName := filepath.Join(pkg.ImportPath, file.Name())
|
||||
subPackage, err := packageWithName(packageName)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
|
||||
}
|
||||
|
||||
testfiles = append(testfiles, findTestsInPackage(subPackage)...)
|
||||
}
|
||||
|
||||
addGinkgoSuiteForPackage(pkg)
|
||||
goFmtPackage(pkg)
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
* Shells out to `ginkgo bootstrap` to create a test suite file
|
||||
*/
|
||||
func addGinkgoSuiteForPackage(pkg *build.Package) {
|
||||
originalDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
suite_test_file := filepath.Join(pkg.Dir, pkg.Name+"_suite_test.go")
|
||||
|
||||
_, err = os.Stat(suite_test_file)
|
||||
if err == nil {
|
||||
return // test file already exists, this should be a no-op
|
||||
}
|
||||
|
||||
err = os.Chdir(pkg.Dir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
output, err := exec.Command("ginkgo", "bootstrap").Output()
|
||||
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("error running 'ginkgo bootstrap'.\nstdout: %s\n%s\n", output, err.Error()))
|
||||
}
|
||||
|
||||
err = os.Chdir(originalDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Shells out to `go fmt` to format the package
|
||||
*/
|
||||
func goFmtPackage(pkg *build.Package) {
|
||||
output, err := exec.Command("go", "fmt", pkg.ImportPath).Output()
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", pkg.ImportPath, output, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Attempts to return a package with its test files already read.
|
||||
* The ImportMode arg to build.Import lets you specify if you want go to read the
|
||||
* buildable go files inside the package, but it fails if the package has no go files
|
||||
*/
|
||||
func packageWithName(name string) (pkg *build.Package, err error) {
|
||||
pkg, err = build.Default.Import(name, ".", build.ImportMode(0))
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
pkg, err = build.Default.Import(name, ".", build.ImportMode(1))
|
||||
return
|
||||
}
|
56
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go
generated
vendored
56
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go
generated
vendored
@ -1,56 +0,0 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
/*
|
||||
* Given a root node, walks its top level statements and returns
|
||||
* points to function nodes to rewrite as It statements.
|
||||
* These functions, according to Go testing convention, must be named
|
||||
* TestWithCamelCasedName and receive a single *testing.T argument.
|
||||
*/
|
||||
func findTestFuncs(rootNode *ast.File) (testsToRewrite []*ast.FuncDecl) {
|
||||
testNameRegexp := regexp.MustCompile("^Test[0-9A-Z].+")
|
||||
|
||||
ast.Inspect(rootNode, func(node ast.Node) bool {
|
||||
if node == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
switch node := node.(type) {
|
||||
case *ast.FuncDecl:
|
||||
matches := testNameRegexp.MatchString(node.Name.Name)
|
||||
|
||||
if matches && receivesTestingT(node) {
|
||||
testsToRewrite = append(testsToRewrite, node)
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
* convenience function that looks at args to a function and determines if its
|
||||
* params include an argument of type *testing.T
|
||||
*/
|
||||
func receivesTestingT(node *ast.FuncDecl) bool {
|
||||
if len(node.Type.Params.List) != 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
base, ok := node.Type.Params.List[0].Type.(*ast.StarExpr)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
intermediate := base.X.(*ast.SelectorExpr)
|
||||
isTestingPackage := intermediate.X.(*ast.Ident).Name == "testing"
|
||||
isTestingT := intermediate.Sel.Name == "T"
|
||||
|
||||
return isTestingPackage && isTestingT
|
||||
}
|
163
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
generated
vendored
163
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
generated
vendored
@ -1,163 +0,0 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
/*
|
||||
* Given a file path, rewrites any tests in the Ginkgo format.
|
||||
* First, we parse the AST, and update the imports declaration.
|
||||
* Then, we walk the first child elements in the file, returning tests to rewrite.
|
||||
* A top level init func is declared, with a single Describe func inside.
|
||||
* Then the test functions to rewrite are inserted as It statements inside the Describe.
|
||||
* Finally we walk the rest of the file, replacing other usages of *testing.T
|
||||
* Once that is complete, we write the AST back out again to its file.
|
||||
*/
|
||||
func rewriteTestsInFile(pathToFile string) {
|
||||
fileSet := token.NewFileSet()
|
||||
rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, 0)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error()))
|
||||
}
|
||||
|
||||
addGinkgoImports(rootNode)
|
||||
removeTestingImport(rootNode)
|
||||
|
||||
varUnderscoreBlock := createVarUnderscoreBlock()
|
||||
describeBlock := createDescribeBlock()
|
||||
varUnderscoreBlock.Values = []ast.Expr{describeBlock}
|
||||
|
||||
for _, testFunc := range findTestFuncs(rootNode) {
|
||||
rewriteTestFuncAsItStatement(testFunc, rootNode, describeBlock)
|
||||
}
|
||||
|
||||
underscoreDecl := &ast.GenDecl{
|
||||
Tok: 85, // gah, magick numbers are needed to make this work
|
||||
TokPos: 14, // this tricks Go into writing "var _ = Describe"
|
||||
Specs: []ast.Spec{varUnderscoreBlock},
|
||||
}
|
||||
|
||||
imports := rootNode.Decls[0]
|
||||
tail := rootNode.Decls[1:]
|
||||
rootNode.Decls = append(append([]ast.Decl{imports}, underscoreDecl), tail...)
|
||||
rewriteOtherFuncsToUseGinkgoT(rootNode.Decls)
|
||||
walkNodesInRootNodeReplacingTestingT(rootNode)
|
||||
|
||||
var buffer bytes.Buffer
|
||||
if err = format.Node(&buffer, fileSet, rootNode); err != nil {
|
||||
panic(fmt.Sprintf("Error formatting ast node after rewriting tests.\n%s\n", err.Error()))
|
||||
}
|
||||
|
||||
fileInfo, err := os.Stat(pathToFile)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile))
|
||||
}
|
||||
|
||||
ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
* Given a test func named TestDoesSomethingNeat, rewrites it as
|
||||
* It("does something neat", func() { __test_body_here__ }) and adds it
|
||||
* to the Describe's list of statements
|
||||
*/
|
||||
func rewriteTestFuncAsItStatement(testFunc *ast.FuncDecl, rootNode *ast.File, describe *ast.CallExpr) {
|
||||
var funcIndex int = -1
|
||||
for index, child := range rootNode.Decls {
|
||||
if child == testFunc {
|
||||
funcIndex = index
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if funcIndex < 0 {
|
||||
panic(fmt.Sprintf("Assert failed: Error finding index for test node %s\n", testFunc.Name.Name))
|
||||
}
|
||||
|
||||
var block *ast.BlockStmt = blockStatementFromDescribe(describe)
|
||||
block.List = append(block.List, createItStatementForTestFunc(testFunc))
|
||||
replaceTestingTsWithGinkgoT(block, namedTestingTArg(testFunc))
|
||||
|
||||
// remove the old test func from the root node's declarations
|
||||
rootNode.Decls = append(rootNode.Decls[:funcIndex], rootNode.Decls[funcIndex+1:]...)
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
* walks nodes inside of a test func's statements and replaces the usage of
|
||||
* it's named *testing.T param with GinkgoT's
|
||||
*/
|
||||
func replaceTestingTsWithGinkgoT(statementsBlock *ast.BlockStmt, testingT string) {
|
||||
ast.Inspect(statementsBlock, func(node ast.Node) bool {
|
||||
if node == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
keyValueExpr, ok := node.(*ast.KeyValueExpr)
|
||||
if ok {
|
||||
replaceNamedTestingTsInKeyValueExpression(keyValueExpr, testingT)
|
||||
return true
|
||||
}
|
||||
|
||||
funcLiteral, ok := node.(*ast.FuncLit)
|
||||
if ok {
|
||||
replaceTypeDeclTestingTsInFuncLiteral(funcLiteral)
|
||||
return true
|
||||
}
|
||||
|
||||
callExpr, ok := node.(*ast.CallExpr)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
replaceTestingTsInArgsLists(callExpr, testingT)
|
||||
|
||||
funCall, ok := callExpr.Fun.(*ast.SelectorExpr)
|
||||
if ok {
|
||||
replaceTestingTsMethodCalls(funCall, testingT)
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
/*
|
||||
* rewrite t.Fail() or any other *testing.T method by replacing with T().Fail()
|
||||
* This function receives a selector expression (eg: t.Fail()) and
|
||||
* the name of the *testing.T param from the function declaration. Rewrites the
|
||||
* selector expression in place if the target was a *testing.T
|
||||
*/
|
||||
func replaceTestingTsMethodCalls(selectorExpr *ast.SelectorExpr, testingT string) {
|
||||
ident, ok := selectorExpr.X.(*ast.Ident)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if ident.Name == testingT {
|
||||
selectorExpr.X = newGinkgoTFromIdent(ident)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* replaces usages of a named *testing.T param inside of a call expression
|
||||
* with a new GinkgoT object
|
||||
*/
|
||||
func replaceTestingTsInArgsLists(callExpr *ast.CallExpr, testingT string) {
|
||||
for index, arg := range callExpr.Args {
|
||||
ident, ok := arg.(*ast.Ident)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if ident.Name == testingT {
|
||||
callExpr.Args[index] = newGinkgoTFromIdent(ident)
|
||||
}
|
||||
}
|
||||
}
|
130
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go
generated
vendored
130
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go
generated
vendored
@ -1,130 +0,0 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
)
|
||||
|
||||
/*
|
||||
* Rewrites any other top level funcs that receive a *testing.T param
|
||||
*/
|
||||
func rewriteOtherFuncsToUseGinkgoT(declarations []ast.Decl) {
|
||||
for _, decl := range declarations {
|
||||
decl, ok := decl.(*ast.FuncDecl)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, param := range decl.Type.Params.List {
|
||||
starExpr, ok := param.Type.(*ast.StarExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
xIdent, ok := selectorExpr.X.(*ast.Ident)
|
||||
if !ok || xIdent.Name != "testing" {
|
||||
continue
|
||||
}
|
||||
|
||||
if selectorExpr.Sel.Name != "T" {
|
||||
continue
|
||||
}
|
||||
|
||||
param.Type = newGinkgoTInterface()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Walks all of the nodes in the file, replacing *testing.T in struct
|
||||
* and func literal nodes. eg:
|
||||
* type foo struct { *testing.T }
|
||||
* var bar = func(t *testing.T) { }
|
||||
*/
|
||||
func walkNodesInRootNodeReplacingTestingT(rootNode *ast.File) {
|
||||
ast.Inspect(rootNode, func(node ast.Node) bool {
|
||||
if node == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
switch node := node.(type) {
|
||||
case *ast.StructType:
|
||||
replaceTestingTsInStructType(node)
|
||||
case *ast.FuncLit:
|
||||
replaceTypeDeclTestingTsInFuncLiteral(node)
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
/*
|
||||
* replaces named *testing.T inside a composite literal
|
||||
*/
|
||||
func replaceNamedTestingTsInKeyValueExpression(kve *ast.KeyValueExpr, testingT string) {
|
||||
ident, ok := kve.Value.(*ast.Ident)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if ident.Name == testingT {
|
||||
kve.Value = newGinkgoTFromIdent(ident)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* replaces *testing.T params in a func literal with GinkgoT
|
||||
*/
|
||||
func replaceTypeDeclTestingTsInFuncLiteral(functionLiteral *ast.FuncLit) {
|
||||
for _, arg := range functionLiteral.Type.Params.List {
|
||||
starExpr, ok := arg.Type.(*ast.StarExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
target, ok := selectorExpr.X.(*ast.Ident)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if target.Name == "testing" && selectorExpr.Sel.Name == "T" {
|
||||
arg.Type = newGinkgoTInterface()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Replaces *testing.T types inside of a struct declaration with a GinkgoT
|
||||
* eg: type foo struct { *testing.T }
|
||||
*/
|
||||
func replaceTestingTsInStructType(structType *ast.StructType) {
|
||||
for _, field := range structType.Fields.List {
|
||||
starExpr, ok := field.Type.(*ast.StarExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
xIdent, ok := selectorExpr.X.(*ast.Ident)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if xIdent.Name == "testing" && selectorExpr.Sel.Name == "T" {
|
||||
field.Type = newGinkgoTInterface()
|
||||
}
|
||||
}
|
||||
}
|
44 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert_command.go generated vendored
@ -1,44 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"github.com/onsi/ginkgo/ginkgo/convert"
	"os"
)

func BuildConvertCommand() *Command {
	return &Command{
		Name:         "convert",
		FlagSet:      flag.NewFlagSet("convert", flag.ExitOnError),
		UsageCommand: "ginkgo convert /path/to/package",
		Usage: []string{
			"Convert the package at the passed in path from an XUnit-style test to a Ginkgo-style test",
		},
		Command: convertPackage,
	}
}

func convertPackage(args []string, additionalArgs []string) {
	if len(args) != 1 {
		println(fmt.Sprintf("usage: ginkgo convert /path/to/your/package"))
		os.Exit(1)
	}

	defer func() {
		err := recover()
		if err != nil {
			switch err := err.(type) {
			case error:
				println(err.Error())
			case string:
				println(err)
			default:
				println(fmt.Sprintf("unexpected error: %#v", err))
			}
			os.Exit(1)
		}
	}()

	convert.RewritePackage(args[0])
}
164 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/generate_command.go generated vendored
@ -1,164 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
func BuildGenerateCommand() *Command {
|
||||
var agouti, noDot bool
|
||||
flagSet := flag.NewFlagSet("generate", flag.ExitOnError)
|
||||
flagSet.BoolVar(&agouti, "agouti", false, "If set, generate will generate a test file for writing Agouti tests")
|
||||
flagSet.BoolVar(&noDot, "nodot", false, "If set, generate will generate a test file that does not . import ginkgo and gomega")
|
||||
|
||||
return &Command{
|
||||
Name: "generate",
|
||||
FlagSet: flagSet,
|
||||
UsageCommand: "ginkgo generate <filename(s)>",
|
||||
Usage: []string{
|
||||
"Generate a test file named filename_test.go",
|
||||
"If the optional <filenames> argument is omitted, a file named after the package in the current directory will be created.",
|
||||
"Accepts the following flags:",
|
||||
},
|
||||
Command: func(args []string, additionalArgs []string) {
|
||||
generateSpec(args, agouti, noDot)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var specText = `package {{.Package}}_test
|
||||
|
||||
import (
|
||||
. "{{.PackageImportPath}}"
|
||||
|
||||
{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
|
||||
{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
|
||||
)
|
||||
|
||||
var _ = Describe("{{.Subject}}", func() {
|
||||
|
||||
})
|
||||
`
|
||||
|
||||
var agoutiSpecText = `package {{.Package}}_test
|
||||
|
||||
import (
|
||||
. "{{.PackageImportPath}}"
|
||||
|
||||
{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
|
||||
{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
|
||||
. "github.com/sclevine/agouti/matchers"
|
||||
"github.com/sclevine/agouti"
|
||||
)
|
||||
|
||||
var _ = Describe("{{.Subject}}", func() {
|
||||
var page *agouti.Page
|
||||
|
||||
BeforeEach(func() {
|
||||
var err error
|
||||
page, err = agoutiDriver.NewPage()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
Expect(page.Destroy()).To(Succeed())
|
||||
})
|
||||
})
|
||||
`
|
||||
|
||||
type specData struct {
|
||||
Package string
|
||||
Subject string
|
||||
PackageImportPath string
|
||||
IncludeImports bool
|
||||
}
|
||||
|
||||
func generateSpec(args []string, agouti, noDot bool) {
|
||||
if len(args) == 0 {
|
||||
err := generateSpecForSubject("", agouti, noDot)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
fmt.Println("")
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println("")
|
||||
return
|
||||
}
|
||||
|
||||
var failed bool
|
||||
for _, arg := range args {
|
||||
err := generateSpecForSubject(arg, agouti, noDot)
|
||||
if err != nil {
|
||||
failed = true
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
fmt.Println("")
|
||||
if failed {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func generateSpecForSubject(subject string, agouti, noDot bool) error {
|
||||
packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
|
||||
if subject != "" {
|
||||
subject = strings.Split(subject, ".go")[0]
|
||||
subject = strings.Split(subject, "_test")[0]
|
||||
specFilePrefix = subject
|
||||
formattedName = prettifyPackageName(subject)
|
||||
}
|
||||
|
||||
data := specData{
|
||||
Package: packageName,
|
||||
Subject: formattedName,
|
||||
PackageImportPath: getPackageImportPath(),
|
||||
IncludeImports: !noDot,
|
||||
}
|
||||
|
||||
targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
|
||||
if fileExists(targetFile) {
|
||||
return fmt.Errorf("%s already exists.", targetFile)
|
||||
} else {
|
||||
fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
|
||||
}
|
||||
|
||||
f, err := os.Create(targetFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var templateText string
|
||||
if agouti {
|
||||
templateText = agoutiSpecText
|
||||
} else {
|
||||
templateText = specText
|
||||
}
|
||||
|
||||
specTemplate, err := template.New("spec").Parse(templateText)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
specTemplate.Execute(f, data)
|
||||
goFmt(targetFile)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPackageImportPath() string {
|
||||
workingDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
sep := string(filepath.Separator)
|
||||
paths := strings.Split(workingDir, sep+"src"+sep)
|
||||
if len(paths) == 1 {
|
||||
fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
|
||||
return "UNKNOWN_PACKAGE_PATH"
|
||||
}
|
||||
return filepath.ToSlash(paths[len(paths)-1])
|
||||
}
|
31 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/help_command.go generated vendored
@ -1,31 +0,0 @@
package main

import (
	"flag"
	"fmt"
)

func BuildHelpCommand() *Command {
	return &Command{
		Name:         "help",
		FlagSet:      flag.NewFlagSet("help", flag.ExitOnError),
		UsageCommand: "ginkgo help <COMAND>",
		Usage: []string{
			"Print usage information.  If a command is passed in, print usage information just for that command.",
		},
		Command: printHelp,
	}
}

func printHelp(args []string, additionalArgs []string) {
	if len(args) == 0 {
		usage()
	} else {
		command, found := commandMatching(args[0])
		if !found {
			complainAndQuit(fmt.Sprintf("Unknown command: %s", args[0]))
		}

		usageForCommand(command, true)
	}
}
@ -1,52 +0,0 @@
package interrupthandler

import (
	"os"
	"os/signal"
	"sync"
	"syscall"
)

type InterruptHandler struct {
	interruptCount int
	lock           *sync.Mutex
	C              chan bool
}

func NewInterruptHandler() *InterruptHandler {
	h := &InterruptHandler{
		lock: &sync.Mutex{},
		C:    make(chan bool, 0),
	}

	go h.handleInterrupt()
	SwallowSigQuit()

	return h
}

func (h *InterruptHandler) WasInterrupted() bool {
	h.lock.Lock()
	defer h.lock.Unlock()

	return h.interruptCount > 0
}

func (h *InterruptHandler) handleInterrupt() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)

	<-c
	signal.Stop(c)

	h.lock.Lock()
	h.interruptCount++
	if h.interruptCount == 1 {
		close(h.C)
	} else if h.interruptCount > 5 {
		os.Exit(1)
	}
	h.lock.Unlock()

	go h.handleInterrupt()
}
@ -1,14 +0,0 @@
// +build freebsd openbsd netbsd dragonfly darwin linux

package interrupthandler

import (
	"os"
	"os/signal"
	"syscall"
)

func SwallowSigQuit() {
	c := make(chan os.Signal, 1024)
	signal.Notify(c, syscall.SIGQUIT)
}
@ -1,7 +0,0 @@
// +build windows

package interrupthandler

func SwallowSigQuit() {
	//noop
}
291 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/main.go generated vendored
@ -1,291 +0,0 @@
|
||||
/*
|
||||
The Ginkgo CLI
|
||||
|
||||
The Ginkgo CLI is fully documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
|
||||
|
||||
You can also learn more by running:
|
||||
|
||||
ginkgo help
|
||||
|
||||
Here are some of the more commonly used commands:
|
||||
|
||||
To install:
|
||||
|
||||
go install github.com/onsi/ginkgo/ginkgo
|
||||
|
||||
To run tests:
|
||||
|
||||
ginkgo
|
||||
|
||||
To run tests in all subdirectories:
|
||||
|
||||
ginkgo -r
|
||||
|
||||
To run tests in particular packages:
|
||||
|
||||
ginkgo <flags> /path/to/package /path/to/another/package
|
||||
|
||||
To pass arguments/flags to your tests:
|
||||
|
||||
ginkgo <flags> <packages> -- <pass-throughs>
|
||||
|
||||
To run tests in parallel
|
||||
|
||||
ginkgo -p
|
||||
|
||||
this will automatically detect the optimal number of nodes to use. Alternatively, you can specify the number of nodes with:
|
||||
|
||||
ginkgo -nodes=N
|
||||
|
||||
(note that you don't need to provide -p in this case).
|
||||
|
||||
By default the Ginkgo CLI will spin up a server that the individual test processes send test output to. The CLI aggregates this output and then presents coherent test output, one test at a time, as each test completes.
|
||||
An alternative is to have the parallel nodes run and stream interleaved output back. This useful for debugging, particularly in contexts where tests hang/fail to start. To get this interleaved output:
|
||||
|
||||
ginkgo -nodes=N -stream=true
|
||||
|
||||
On windows, the default value for stream is true.
|
||||
|
||||
By default, when running multiple tests (with -r or a list of packages) Ginkgo will abort when a test fails. To have Ginkgo run subsequent test suites instead you can:
|
||||
|
||||
ginkgo -keepGoing
|
||||
|
||||
To monitor packages and rerun tests when changes occur:
|
||||
|
||||
ginkgo watch <-r> </path/to/package>
|
||||
|
||||
passing `ginkgo watch` the `-r` flag will recursively detect all test suites under the current directory and monitor them.
|
||||
`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X, tests for packages
|
||||
that depend on X are not rerun.
|
||||
|
||||
[OSX & Linux only] To receive (desktop) notifications when a test run completes:
|
||||
|
||||
ginkgo -notify
|
||||
|
||||
this is particularly useful with `ginkgo watch`. Notifications are currently only supported on OS X and require that you `brew install terminal-notifier`
|
||||
|
||||
Sometimes (to suss out race conditions/flakey tests, for example) you want to keep running a test suite until it fails. You can do this with:
|
||||
|
||||
ginkgo -untilItFails
|
||||
|
||||
To bootstrap a test suite:
|
||||
|
||||
ginkgo bootstrap
|
||||
|
||||
To generate a test file:
|
||||
|
||||
ginkgo generate <test_file_name>
|
||||
|
||||
To bootstrap/generate test files without using "." imports:
|
||||
|
||||
ginkgo bootstrap --nodot
|
||||
ginkgo generate --nodot
|
||||
|
||||
this will explicitly export all the identifiers in Ginkgo and Gomega allowing you to rename them to avoid collisions. When you pull to the latest Ginkgo/Gomega you'll want to run
|
||||
|
||||
ginkgo nodot
|
||||
|
||||
to refresh this list and pull in any new identifiers. In particular, this will pull in any new Gomega matchers that get added.
|
||||
|
||||
To convert an existing XUnit style test suite to a Ginkgo-style test suite:
|
||||
|
||||
ginkgo convert .
|
||||
|
||||
To unfocus tests:
|
||||
|
||||
ginkgo unfocus
|
||||
|
||||
or
|
||||
|
||||
ginkgo blur
|
||||
|
||||
To compile a test suite:
|
||||
|
||||
ginkgo build <path-to-package>
|
||||
|
||||
will output an executable file named `package.test`. This can be run directly or by invoking
|
||||
|
||||
ginkgo <path-to-package.test>
|
||||
|
||||
To print out Ginkgo's version:
|
||||
|
||||
ginkgo version
|
||||
|
||||
To get more help:
|
||||
|
||||
ginkgo help
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
)
|
||||
|
||||
const greenColor = "\x1b[32m"
|
||||
const redColor = "\x1b[91m"
|
||||
const defaultStyle = "\x1b[0m"
|
||||
const lightGrayColor = "\x1b[37m"
|
||||
|
||||
type Command struct {
|
||||
Name string
|
||||
AltName string
|
||||
FlagSet *flag.FlagSet
|
||||
Usage []string
|
||||
UsageCommand string
|
||||
Command func(args []string, additionalArgs []string)
|
||||
SuppressFlagDocumentation bool
|
||||
FlagDocSubstitute []string
|
||||
}
|
||||
|
||||
func (c *Command) Matches(name string) bool {
|
||||
return c.Name == name || (c.AltName != "" && c.AltName == name)
|
||||
}
|
||||
|
||||
func (c *Command) Run(args []string, additionalArgs []string) {
|
||||
c.FlagSet.Parse(args)
|
||||
c.Command(c.FlagSet.Args(), additionalArgs)
|
||||
}
|
||||
|
||||
var DefaultCommand *Command
|
||||
var Commands []*Command
|
||||
|
||||
func init() {
|
||||
DefaultCommand = BuildRunCommand()
|
||||
Commands = append(Commands, BuildWatchCommand())
|
||||
Commands = append(Commands, BuildBuildCommand())
|
||||
Commands = append(Commands, BuildBootstrapCommand())
|
||||
Commands = append(Commands, BuildGenerateCommand())
|
||||
Commands = append(Commands, BuildNodotCommand())
|
||||
Commands = append(Commands, BuildConvertCommand())
|
||||
Commands = append(Commands, BuildUnfocusCommand())
|
||||
Commands = append(Commands, BuildVersionCommand())
|
||||
Commands = append(Commands, BuildHelpCommand())
|
||||
}
|
||||
|
||||
func main() {
|
||||
args := []string{}
|
||||
additionalArgs := []string{}
|
||||
|
||||
foundDelimiter := false
|
||||
|
||||
for _, arg := range os.Args[1:] {
|
||||
if !foundDelimiter {
|
||||
if arg == "--" {
|
||||
foundDelimiter = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if foundDelimiter {
|
||||
additionalArgs = append(additionalArgs, arg)
|
||||
} else {
|
||||
args = append(args, arg)
|
||||
}
|
||||
}
|
||||
|
||||
if len(args) > 0 {
|
||||
commandToRun, found := commandMatching(args[0])
|
||||
if found {
|
||||
commandToRun.Run(args[1:], additionalArgs)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
DefaultCommand.Run(args, additionalArgs)
|
||||
}
|
||||
|
||||
func commandMatching(name string) (*Command, bool) {
|
||||
for _, command := range Commands {
|
||||
if command.Matches(name) {
|
||||
return command, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "Ginkgo Version %s\n\n", config.VERSION)
|
||||
usageForCommand(DefaultCommand, false)
|
||||
for _, command := range Commands {
|
||||
fmt.Fprintf(os.Stderr, "\n")
|
||||
usageForCommand(command, false)
|
||||
}
|
||||
}
|
||||
|
||||
func usageForCommand(command *Command, longForm bool) {
|
||||
fmt.Fprintf(os.Stderr, "%s\n%s\n", command.UsageCommand, strings.Repeat("-", len(command.UsageCommand)))
|
||||
fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.Usage, "\n"))
|
||||
if command.SuppressFlagDocumentation && !longForm {
|
||||
fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.FlagDocSubstitute, "\n "))
|
||||
} else {
|
||||
command.FlagSet.PrintDefaults()
|
||||
}
|
||||
}
|
||||
|
||||
func complainAndQuit(complaint string) {
|
||||
fmt.Fprintf(os.Stderr, "%s\nFor usage instructions:\n\tginkgo help\n", complaint)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func findSuites(args []string, recurse bool, skipPackage string, allowPrecompiled bool) ([]testsuite.TestSuite, []string) {
|
||||
suites := []testsuite.TestSuite{}
|
||||
|
||||
if len(args) > 0 {
|
||||
for _, arg := range args {
|
||||
if allowPrecompiled {
|
||||
suite, err := testsuite.PrecompiledTestSuite(arg)
|
||||
if err == nil {
|
||||
suites = append(suites, suite)
|
||||
continue
|
||||
}
|
||||
}
|
||||
suites = append(suites, testsuite.SuitesInDir(arg, recurse)...)
|
||||
}
|
||||
} else {
|
||||
suites = testsuite.SuitesInDir(".", recurse)
|
||||
}
|
||||
|
||||
skippedPackages := []string{}
|
||||
if skipPackage != "" {
|
||||
skipFilters := strings.Split(skipPackage, ",")
|
||||
filteredSuites := []testsuite.TestSuite{}
|
||||
for _, suite := range suites {
|
||||
skip := false
|
||||
for _, skipFilter := range skipFilters {
|
||||
if strings.Contains(suite.Path, skipFilter) {
|
||||
skip = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if skip {
|
||||
skippedPackages = append(skippedPackages, suite.Path)
|
||||
} else {
|
||||
filteredSuites = append(filteredSuites, suite)
|
||||
}
|
||||
}
|
||||
suites = filteredSuites
|
||||
}
|
||||
|
||||
return suites, skippedPackages
|
||||
}
|
||||
|
||||
func goFmt(path string) {
|
||||
err := exec.Command("go", "fmt", path).Run()
|
||||
if err != nil {
|
||||
complainAndQuit("Could not fmt: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func pluralizedWord(singular, plural string, count int) string {
|
||||
if count == 1 {
|
||||
return singular
|
||||
}
|
||||
return plural
|
||||
}
|
194 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go generated vendored
@ -1,194 +0,0 @@
|
||||
package nodot
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func ApplyNoDot(data []byte) ([]byte, error) {
|
||||
sections, err := generateNodotSections()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, section := range sections {
|
||||
data = section.createOrUpdateIn(data)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
type nodotSection struct {
|
||||
name string
|
||||
pkg string
|
||||
declarations []string
|
||||
types []string
|
||||
}
|
||||
|
||||
func (s nodotSection) createOrUpdateIn(data []byte) []byte {
|
||||
renames := map[string]string{}
|
||||
|
||||
contents := string(data)
|
||||
|
||||
lines := strings.Split(contents, "\n")
|
||||
|
||||
comment := "// Declarations for " + s.name
|
||||
|
||||
newLines := []string{}
|
||||
for _, line := range lines {
|
||||
if line == comment {
|
||||
continue
|
||||
}
|
||||
|
||||
words := strings.Split(line, " ")
|
||||
lastWord := words[len(words)-1]
|
||||
|
||||
if s.containsDeclarationOrType(lastWord) {
|
||||
renames[lastWord] = words[1]
|
||||
continue
|
||||
}
|
||||
|
||||
newLines = append(newLines, line)
|
||||
}
|
||||
|
||||
if len(newLines[len(newLines)-1]) > 0 {
|
||||
newLines = append(newLines, "")
|
||||
}
|
||||
|
||||
newLines = append(newLines, comment)
|
||||
|
||||
for _, typ := range s.types {
|
||||
name, ok := renames[s.prefix(typ)]
|
||||
if !ok {
|
||||
name = typ
|
||||
}
|
||||
newLines = append(newLines, fmt.Sprintf("type %s %s", name, s.prefix(typ)))
|
||||
}
|
||||
|
||||
for _, decl := range s.declarations {
|
||||
name, ok := renames[s.prefix(decl)]
|
||||
if !ok {
|
||||
name = decl
|
||||
}
|
||||
newLines = append(newLines, fmt.Sprintf("var %s = %s", name, s.prefix(decl)))
|
||||
}
|
||||
|
||||
newLines = append(newLines, "")
|
||||
|
||||
newContents := strings.Join(newLines, "\n")
|
||||
|
||||
return []byte(newContents)
|
||||
}
|
||||
|
||||
func (s nodotSection) prefix(declOrType string) string {
|
||||
return s.pkg + "." + declOrType
|
||||
}
|
||||
|
||||
func (s nodotSection) containsDeclarationOrType(word string) bool {
|
||||
for _, declaration := range s.declarations {
|
||||
if s.prefix(declaration) == word {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
for _, typ := range s.types {
|
||||
if s.prefix(typ) == word {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func generateNodotSections() ([]nodotSection, error) {
|
||||
sections := []nodotSection{}
|
||||
|
||||
declarations, err := getExportedDeclerationsForPackage("github.com/onsi/ginkgo", "ginkgo_dsl.go", "GINKGO_VERSION", "GINKGO_PANIC")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sections = append(sections, nodotSection{
|
||||
name: "Ginkgo DSL",
|
||||
pkg: "ginkgo",
|
||||
declarations: declarations,
|
||||
types: []string{"Done", "Benchmarker"},
|
||||
})
|
||||
|
||||
declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "gomega_dsl.go", "GOMEGA_VERSION")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sections = append(sections, nodotSection{
|
||||
name: "Gomega DSL",
|
||||
pkg: "gomega",
|
||||
declarations: declarations,
|
||||
})
|
||||
|
||||
declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "matchers.go")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sections = append(sections, nodotSection{
|
||||
name: "Gomega Matchers",
|
||||
pkg: "gomega",
|
||||
declarations: declarations,
|
||||
})
|
||||
|
||||
return sections, nil
|
||||
}
|
||||
|
||||
func getExportedDeclerationsForPackage(pkgPath string, filename string, blacklist ...string) ([]string, error) {
|
||||
pkg, err := build.Import(pkgPath, ".", 0)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
declarations, err := getExportedDeclarationsForFile(filepath.Join(pkg.Dir, filename))
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
blacklistLookup := map[string]bool{}
|
||||
for _, declaration := range blacklist {
|
||||
blacklistLookup[declaration] = true
|
||||
}
|
||||
|
||||
filteredDeclarations := []string{}
|
||||
for _, declaration := range declarations {
|
||||
if blacklistLookup[declaration] {
|
||||
continue
|
||||
}
|
||||
filteredDeclarations = append(filteredDeclarations, declaration)
|
||||
}
|
||||
|
||||
return filteredDeclarations, nil
|
||||
}
|
||||
|
||||
func getExportedDeclarationsForFile(path string) ([]string, error) {
|
||||
fset := token.NewFileSet()
|
||||
tree, err := parser.ParseFile(fset, path, nil, 0)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
declarations := []string{}
|
||||
ast.FileExports(tree)
|
||||
for _, decl := range tree.Decls {
|
||||
switch x := decl.(type) {
|
||||
case *ast.GenDecl:
|
||||
switch s := x.Specs[0].(type) {
|
||||
case *ast.ValueSpec:
|
||||
declarations = append(declarations, s.Names[0].Name)
|
||||
}
|
||||
case *ast.FuncDecl:
|
||||
declarations = append(declarations, x.Name.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return declarations, nil
|
||||
}
|
76 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot_command.go generated vendored
@ -1,76 +0,0 @@
package main

import (
	"bufio"
	"flag"
	"github.com/onsi/ginkgo/ginkgo/nodot"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
)

func BuildNodotCommand() *Command {
	return &Command{
		Name:         "nodot",
		FlagSet:      flag.NewFlagSet("bootstrap", flag.ExitOnError),
		UsageCommand: "ginkgo nodot",
		Usage: []string{
			"Update the nodot declarations in your test suite",
			"Any missing declarations (from, say, a recently added matcher) will be added to your bootstrap file.",
			"If you've renamed a declaration, that name will be honored and not overwritten.",
		},
		Command: updateNodot,
	}
}

func updateNodot(args []string, additionalArgs []string) {
	suiteFile, perm := findSuiteFile()

	data, err := ioutil.ReadFile(suiteFile)
	if err != nil {
		complainAndQuit("Failed to update nodot declarations: " + err.Error())
	}

	content, err := nodot.ApplyNoDot(data)
	if err != nil {
		complainAndQuit("Failed to update nodot declarations: " + err.Error())
	}
	ioutil.WriteFile(suiteFile, content, perm)

	goFmt(suiteFile)
}

func findSuiteFile() (string, os.FileMode) {
	workingDir, err := os.Getwd()
	if err != nil {
		complainAndQuit("Could not find suite file for nodot: " + err.Error())
	}

	files, err := ioutil.ReadDir(workingDir)
	if err != nil {
		complainAndQuit("Could not find suite file for nodot: " + err.Error())
	}

	re := regexp.MustCompile(`RunSpecs\(|RunSpecsWithDefaultAndCustomReporters\(|RunSpecsWithCustomReporters\(`)

	for _, file := range files {
		if file.IsDir() {
			continue
		}
		path := filepath.Join(workingDir, file.Name())
		f, err := os.Open(path)
		if err != nil {
			complainAndQuit("Could not find suite file for nodot: " + err.Error())
		}
		defer f.Close()

		if re.MatchReader(bufio.NewReader(f)) {
			return path, file.Mode()
		}
	}

	complainAndQuit("Could not find a suite file for nodot: you need a bootstrap file that call's Ginkgo's RunSpecs() command.\nTry running ginkgo bootstrap first.")

	return "", 0
}
141 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/notifications.go generated vendored
@ -1,141 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
)
|
||||
|
||||
type Notifier struct {
|
||||
commandFlags *RunWatchAndBuildCommandFlags
|
||||
}
|
||||
|
||||
func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier {
|
||||
return &Notifier{
|
||||
commandFlags: commandFlags,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) VerifyNotificationsAreAvailable() {
|
||||
if n.commandFlags.Notify {
|
||||
onLinux := (runtime.GOOS == "linux")
|
||||
onOSX := (runtime.GOOS == "darwin")
|
||||
if onOSX {
|
||||
|
||||
_, err := exec.LookPath("terminal-notifier")
|
||||
if err != nil {
|
||||
fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
|
||||
|
||||
OSX:
|
||||
|
||||
To remedy this:
|
||||
|
||||
brew install terminal-notifier
|
||||
|
||||
To learn more about terminal-notifier:
|
||||
|
||||
https://github.com/alloy/terminal-notifier
|
||||
`)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
} else if onLinux {
|
||||
|
||||
_, err := exec.LookPath("notify-send")
|
||||
if err != nil {
|
||||
fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed.
|
||||
|
||||
Linux:
|
||||
|
||||
Download and install notify-send for your distribution
|
||||
`)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, suitePassed bool) {
|
||||
if suitePassed {
|
||||
n.SendNotification("Ginkgo [PASS]", fmt.Sprintf(`Test suite for "%s" passed.`, suite.PackageName))
|
||||
} else {
|
||||
n.SendNotification("Ginkgo [FAIL]", fmt.Sprintf(`Test suite for "%s" failed.`, suite.PackageName))
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) SendNotification(title string, subtitle string) {
|
||||
|
||||
if n.commandFlags.Notify {
|
||||
onLinux := (runtime.GOOS == "linux")
|
||||
onOSX := (runtime.GOOS == "darwin")
|
||||
|
||||
if onOSX {
|
||||
|
||||
_, err := exec.LookPath("terminal-notifier")
|
||||
if err == nil {
|
||||
args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
|
||||
terminal := os.Getenv("TERM_PROGRAM")
|
||||
if terminal == "iTerm.app" {
|
||||
args = append(args, "-activate", "com.googlecode.iterm2")
|
||||
} else if terminal == "Apple_Terminal" {
|
||||
args = append(args, "-activate", "com.apple.Terminal")
|
||||
}
|
||||
|
||||
exec.Command("terminal-notifier", args...).Run()
|
||||
}
|
||||
|
||||
} else if onLinux {
|
||||
|
||||
_, err := exec.LookPath("notify-send")
|
||||
if err == nil {
|
||||
args := []string{"-a", "ginkgo", title, subtitle}
|
||||
exec.Command("notify-send", args...).Run()
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) RunCommand(suite testsuite.TestSuite, suitePassed bool) {
|
||||
|
||||
command := n.commandFlags.AfterSuiteHook
|
||||
if command != "" {
|
||||
|
||||
// Allow for string replacement to pass input to the command
|
||||
passed := "[FAIL]"
|
||||
if suitePassed {
|
||||
passed = "[PASS]"
|
||||
}
|
||||
command = strings.Replace(command, "(ginkgo-suite-passed)", passed, -1)
|
||||
command = strings.Replace(command, "(ginkgo-suite-name)", suite.PackageName, -1)
|
||||
|
||||
// Must break command into parts
|
||||
splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
|
||||
parts := splitArgs.FindAllString(command, -1)
|
||||
|
||||
output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
|
||||
if err != nil {
|
||||
fmt.Println("Post-suite command failed:")
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
fmt.Printf("\t%s\n", output)
|
||||
} else {
|
||||
fmt.Printf("\t%s%s%s\n", redColor, string(output), defaultStyle)
|
||||
}
|
||||
n.SendNotification("Ginkgo [ERROR]", fmt.Sprintf(`After suite command "%s" failed`, n.commandFlags.AfterSuiteHook))
|
||||
} else {
|
||||
fmt.Println("Post-suite command succeeded:")
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
fmt.Printf("\t%s\n", output)
|
||||
} else {
|
||||
fmt.Printf("\t%s%s%s\n", greenColor, string(output), defaultStyle)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
192 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/run_command.go generated vendored
@ -1,192 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
func BuildRunCommand() *Command {
|
||||
commandFlags := NewRunCommandFlags(flag.NewFlagSet("ginkgo", flag.ExitOnError))
|
||||
notifier := NewNotifier(commandFlags)
|
||||
interruptHandler := interrupthandler.NewInterruptHandler()
|
||||
runner := &SpecRunner{
|
||||
commandFlags: commandFlags,
|
||||
notifier: notifier,
|
||||
interruptHandler: interruptHandler,
|
||||
suiteRunner: NewSuiteRunner(notifier, interruptHandler),
|
||||
}
|
||||
|
||||
return &Command{
|
||||
Name: "",
|
||||
FlagSet: commandFlags.FlagSet,
|
||||
UsageCommand: "ginkgo <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
|
||||
Usage: []string{
|
||||
"Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank).",
|
||||
"Any arguments after -- will be passed to the test.",
|
||||
"Accepts the following flags:",
|
||||
},
|
||||
Command: runner.RunSpecs,
|
||||
}
|
||||
}
|
||||
|
||||
type SpecRunner struct {
|
||||
commandFlags *RunWatchAndBuildCommandFlags
|
||||
notifier *Notifier
|
||||
interruptHandler *interrupthandler.InterruptHandler
|
||||
suiteRunner *SuiteRunner
|
||||
}
|
||||
|
||||
func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
|
||||
r.commandFlags.computeNodes()
|
||||
r.notifier.VerifyNotificationsAreAvailable()
|
||||
|
||||
suites, skippedPackages := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, true)
|
||||
if len(skippedPackages) > 0 {
|
||||
fmt.Println("Will skip:")
|
||||
for _, skippedPackage := range skippedPackages {
|
||||
fmt.Println(" " + skippedPackage)
|
||||
}
|
||||
}
|
||||
|
||||
if len(skippedPackages) > 0 && len(suites) == 0 {
|
||||
fmt.Println("All tests skipped! Exiting...")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if len(suites) == 0 {
|
||||
complainAndQuit("Found no test suites")
|
||||
}
|
||||
|
||||
r.ComputeSuccinctMode(len(suites))
|
||||
|
||||
t := time.Now()
|
||||
|
||||
runners := []*testrunner.TestRunner{}
|
||||
for _, suite := range suites {
|
||||
runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, additionalArgs))
|
||||
}
|
||||
|
||||
numSuites := 0
|
||||
runResult := testrunner.PassingRunResult()
|
||||
if r.commandFlags.UntilItFails {
|
||||
iteration := 0
|
||||
for {
|
||||
r.UpdateSeed()
|
||||
randomizedRunners := r.randomizeOrder(runners)
|
||||
runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
|
||||
iteration++
|
||||
|
||||
if r.interruptHandler.WasInterrupted() {
|
||||
break
|
||||
}
|
||||
|
||||
if runResult.Passed {
|
||||
fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration, orcMessage(iteration))
|
||||
} else {
|
||||
fmt.Printf("\nTests failed on attempt #%d\n\n", iteration)
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
randomizedRunners := r.randomizeOrder(runners)
|
||||
runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
|
||||
}
|
||||
|
||||
for _, runner := range runners {
|
||||
runner.CleanUp()
|
||||
}
|
||||
|
||||
fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t))
|
||||
|
||||
if runResult.Passed {
|
||||
if runResult.HasProgrammaticFocus {
|
||||
fmt.Printf("Test Suite Passed\n")
|
||||
fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
|
||||
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
|
||||
} else {
|
||||
fmt.Printf("Test Suite Passed\n")
|
||||
os.Exit(0)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Test Suite Failed\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *SpecRunner) ComputeSuccinctMode(numSuites int) {
|
||||
if config.DefaultReporterConfig.Verbose {
|
||||
config.DefaultReporterConfig.Succinct = false
|
||||
return
|
||||
}
|
||||
|
||||
if numSuites == 1 {
|
||||
return
|
||||
}
|
||||
|
||||
if numSuites > 1 && !r.commandFlags.wasSet("succinct") {
|
||||
config.DefaultReporterConfig.Succinct = true
|
||||
}
|
||||
}
|
||||
|
||||
func (r *SpecRunner) UpdateSeed() {
|
||||
if !r.commandFlags.wasSet("seed") {
|
||||
config.GinkgoConfig.RandomSeed = time.Now().Unix()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *SpecRunner) randomizeOrder(runners []*testrunner.TestRunner) []*testrunner.TestRunner {
|
||||
if !r.commandFlags.RandomizeSuites {
|
||||
return runners
|
||||
}
|
||||
|
||||
if len(runners) <= 1 {
|
||||
return runners
|
||||
}
|
||||
|
||||
randomizedRunners := make([]*testrunner.TestRunner, len(runners))
|
||||
randomizer := rand.New(rand.NewSource(config.GinkgoConfig.RandomSeed))
|
||||
permutation := randomizer.Perm(len(runners))
|
||||
for i, j := range permutation {
|
||||
randomizedRunners[i] = runners[j]
|
||||
}
|
||||
return randomizedRunners
|
||||
}
|
||||
|
||||
func orcMessage(iteration int) string {
|
||||
if iteration < 10 {
|
||||
return ""
|
||||
} else if iteration < 30 {
|
||||
return []string{
|
||||
"If at first you succeed...",
|
||||
"...try, try again.",
|
||||
"Looking good!",
|
||||
"Still good...",
|
||||
"I think your tests are fine....",
|
||||
"Yep, still passing",
|
||||
"Here we go again...",
|
||||
"Even the gophers are getting bored",
|
||||
"Did you try -race?",
|
||||
"Maybe you should stop now?",
|
||||
"I'm getting tired...",
|
||||
"What if I just made you a sandwich?",
|
||||
"Hit ^C, hit ^C, please hit ^C",
|
||||
"Make it stop. Please!",
|
||||
"Come on! Enough is enough!",
|
||||
"Dave, this conversation can serve no purpose anymore. Goodbye.",
|
||||
"Just what do you think you're doing, Dave? ",
|
||||
"I, Sisyphus",
|
||||
"Insanity: doing the same thing over and over again and expecting different results. -Einstein",
|
||||
"I guess Einstein never tried to churn butter",
|
||||
}[iteration-10] + "\n"
|
||||
} else {
|
||||
return "No, seriously... you can probably stop now.\n"
|
||||
}
|
||||
}
|
121 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go generated vendored
@ -1,121 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"runtime"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
)
|
||||
|
||||
type RunWatchAndBuildCommandFlags struct {
|
||||
Recurse bool
|
||||
Race bool
|
||||
Cover bool
|
||||
CoverPkg string
|
||||
SkipPackage string
|
||||
Tags string
|
||||
|
||||
//for run and watch commands
|
||||
NumCPU int
|
||||
NumCompilers int
|
||||
ParallelStream bool
|
||||
Notify bool
|
||||
AfterSuiteHook string
|
||||
AutoNodes bool
|
||||
|
||||
//only for run command
|
||||
KeepGoing bool
|
||||
UntilItFails bool
|
||||
RandomizeSuites bool
|
||||
|
||||
//only for watch command
|
||||
Depth int
|
||||
|
||||
FlagSet *flag.FlagSet
|
||||
}
|
||||
|
||||
const runMode = 1
|
||||
const watchMode = 2
|
||||
const buildMode = 3
|
||||
|
||||
func NewRunCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
|
||||
c := &RunWatchAndBuildCommandFlags{
|
||||
FlagSet: flagSet,
|
||||
}
|
||||
c.flags(runMode)
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWatchCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
|
||||
c := &RunWatchAndBuildCommandFlags{
|
||||
FlagSet: flagSet,
|
||||
}
|
||||
c.flags(watchMode)
|
||||
return c
|
||||
}
|
||||
|
||||
func NewBuildCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
|
||||
c := &RunWatchAndBuildCommandFlags{
|
||||
FlagSet: flagSet,
|
||||
}
|
||||
c.flags(buildMode)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *RunWatchAndBuildCommandFlags) wasSet(flagName string) bool {
|
||||
wasSet := false
|
||||
c.FlagSet.Visit(func(f *flag.Flag) {
|
||||
if f.Name == flagName {
|
||||
wasSet = true
|
||||
}
|
||||
})
|
||||
|
||||
return wasSet
|
||||
}
|
||||
|
||||
func (c *RunWatchAndBuildCommandFlags) computeNodes() {
|
||||
if c.wasSet("nodes") {
|
||||
return
|
||||
}
|
||||
if c.AutoNodes {
|
||||
switch n := runtime.NumCPU(); {
|
||||
case n <= 4:
|
||||
c.NumCPU = n
|
||||
default:
|
||||
c.NumCPU = n - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
|
||||
onWindows := (runtime.GOOS == "windows")
|
||||
|
||||
c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively")
|
||||
c.FlagSet.BoolVar(&(c.Race), "race", false, "Run tests with race detection enabled")
|
||||
c.FlagSet.BoolVar(&(c.Cover), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory")
|
||||
c.FlagSet.StringVar(&(c.CoverPkg), "coverpkg", "", "Run tests with coverage on the given external modules")
|
||||
c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.")
|
||||
c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build")
|
||||
|
||||
if mode == runMode || mode == watchMode {
|
||||
config.Flags(c.FlagSet, "", false)
|
||||
c.FlagSet.IntVar(&(c.NumCPU), "nodes", 1, "The number of parallel test nodes to run")
|
||||
c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)")
|
||||
c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes")
|
||||
c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging")
|
||||
if !onWindows {
|
||||
c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
|
||||
}
|
||||
c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes")
|
||||
}
|
||||
|
||||
if mode == runMode {
|
||||
c.FlagSet.BoolVar(&(c.KeepGoing), "keepGoing", false, "When true, failures from earlier test suites do not prevent later test suites from running")
|
||||
c.FlagSet.BoolVar(&(c.UntilItFails), "untilItFails", false, "When true, Ginkgo will keep rerunning tests until a failure occurs")
|
||||
c.FlagSet.BoolVar(&(c.RandomizeSuites), "randomizeSuites", false, "When true, Ginkgo will randomize the order in which test suites run")
|
||||
}
|
||||
|
||||
if mode == watchMode {
|
||||
c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree")
|
||||
}
|
||||
}
|
172 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/suite_runner.go generated vendored
@ -1,172 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
)
|
||||
|
||||
type compilationInput struct {
|
||||
runner *testrunner.TestRunner
|
||||
result chan compilationOutput
|
||||
}
|
||||
|
||||
type compilationOutput struct {
|
||||
runner *testrunner.TestRunner
|
||||
err error
|
||||
}
|
||||
|
||||
type SuiteRunner struct {
|
||||
notifier *Notifier
|
||||
interruptHandler *interrupthandler.InterruptHandler
|
||||
}
|
||||
|
||||
func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner {
|
||||
return &SuiteRunner{
|
||||
notifier: notifier,
|
||||
interruptHandler: interruptHandler,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput {
|
||||
//we return this to the consumer, it will return each runner in order as it compiles
|
||||
compilationOutputs := make(chan compilationOutput, len(runners))
|
||||
|
||||
//an array of channels - the nth runner's compilation output is sent to the nth channel in this array
|
||||
//we read from these channels in order to ensure we run the suites in order
|
||||
orderedCompilationOutputs := []chan compilationOutput{}
|
||||
for _ = range runners {
|
||||
orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1))
|
||||
}
|
||||
|
||||
//we're going to spin up numCompilers compilers - they're going to run concurrently and will consume this channel
|
||||
//we prefill the channel then close it, this ensures we compile things in the correct order
|
||||
workPool := make(chan compilationInput, len(runners))
|
||||
for i, runner := range runners {
|
||||
workPool <- compilationInput{runner, orderedCompilationOutputs[i]}
|
||||
}
|
||||
close(workPool)
|
||||
|
||||
//pick a reasonable numCompilers
|
||||
if numCompilers == 0 {
|
||||
numCompilers = runtime.NumCPU()
|
||||
}
|
||||
|
||||
//a WaitGroup to help us wait for all compilers to shut down
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(numCompilers)
|
||||
|
||||
//spin up the concurrent compilers
|
||||
for i := 0; i < numCompilers; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for input := range workPool {
|
||||
if r.interruptHandler.WasInterrupted() {
|
||||
return
|
||||
}
|
||||
|
||||
if willCompile != nil {
|
||||
willCompile(input.runner.Suite)
|
||||
}
|
||||
|
||||
//We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness...
|
||||
var err error
|
||||
retries := 0
|
||||
for retries <= 5 {
|
||||
if r.interruptHandler.WasInterrupted() {
|
||||
return
|
||||
}
|
||||
if err = input.runner.Compile(); err == nil {
|
||||
break
|
||||
}
|
||||
retries++
|
||||
}
|
||||
|
||||
input.result <- compilationOutput{input.runner, err}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
//read from the compilation output channels *in order* and send them to the caller
|
||||
//close the compilationOutputs channel to tell the caller we're done
|
||||
go func() {
|
||||
defer close(compilationOutputs)
|
||||
for _, orderedCompilationOutput := range orderedCompilationOutputs {
|
||||
select {
|
||||
case compilationOutput := <-orderedCompilationOutput:
|
||||
compilationOutputs <- compilationOutput
|
||||
case <-r.interruptHandler.C:
|
||||
//interrupt detected, wait for the compilers to shut down then bail
|
||||
//this ensure we clean up after ourselves as we don't leave any compilation processes running
|
||||
wg.Wait()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return compilationOutputs
|
||||
}
|
||||
|
||||
func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
|
||||
runResult := testrunner.PassingRunResult()
|
||||
|
||||
compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile)
|
||||
|
||||
numSuitesThatRan := 0
|
||||
suitesThatFailed := []testsuite.TestSuite{}
|
||||
for compilationOutput := range compilationOutputs {
|
||||
if compilationOutput.err != nil {
|
||||
fmt.Print(compilationOutput.err.Error())
|
||||
}
|
||||
numSuitesThatRan++
|
||||
suiteRunResult := testrunner.FailingRunResult()
|
||||
if compilationOutput.err == nil {
|
||||
suiteRunResult = compilationOutput.runner.Run()
|
||||
}
|
||||
r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
|
||||
r.notifier.RunCommand(compilationOutput.runner.Suite, suiteRunResult.Passed)
|
||||
runResult = runResult.Merge(suiteRunResult)
|
||||
if !suiteRunResult.Passed {
|
||||
suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)
|
||||
if !keepGoing {
|
||||
break
|
||||
}
|
||||
}
|
||||
if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct {
|
||||
fmt.Println("")
|
||||
}
|
||||
}
|
||||
|
||||
if keepGoing && !runResult.Passed {
|
||||
r.listFailedSuites(suitesThatFailed)
|
||||
}
|
||||
|
||||
return runResult, numSuitesThatRan
|
||||
}
|
||||
|
||||
func (r *SuiteRunner) listFailedSuites(suitesThatFailed []testsuite.TestSuite) {
|
||||
fmt.Println("")
|
||||
fmt.Println("There were failures detected in the following suites:")
|
||||
|
||||
maxPackageNameLength := 0
|
||||
for _, suite := range suitesThatFailed {
|
||||
if len(suite.PackageName) > maxPackageNameLength {
|
||||
maxPackageNameLength = len(suite.PackageName)
|
||||
}
|
||||
}
|
||||
|
||||
packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
|
||||
|
||||
for _, suite := range suitesThatFailed {
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
fmt.Printf("\t"+packageNameFormatter+" %s\n", suite.PackageName, suite.Path)
|
||||
} else {
|
||||
fmt.Printf("\t%s"+packageNameFormatter+"%s %s%s%s\n", redColor, suite.PackageName, defaultStyle, lightGrayColor, suite.Path, defaultStyle)
|
||||
}
|
||||
}
|
||||
}
|
52 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go generated vendored
@ -1,52 +0,0 @@
package testrunner

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"strings"
	"sync"
)

type logWriter struct {
	buffer *bytes.Buffer
	lock   *sync.Mutex
	log    *log.Logger
}

func newLogWriter(target io.Writer, node int) *logWriter {
	return &logWriter{
		buffer: &bytes.Buffer{},
		lock:   &sync.Mutex{},
		log:    log.New(target, fmt.Sprintf("[%d] ", node), 0),
	}
}

func (w *logWriter) Write(data []byte) (n int, err error) {
	w.lock.Lock()
	defer w.lock.Unlock()

	w.buffer.Write(data)
	contents := w.buffer.String()

	lines := strings.Split(contents, "\n")
	for _, line := range lines[0 : len(lines)-1] {
		w.log.Println(line)
	}

	w.buffer.Reset()
	w.buffer.Write([]byte(lines[len(lines)-1]))
	return len(data), nil
}

func (w *logWriter) Close() error {
	w.lock.Lock()
	defer w.lock.Unlock()

	if w.buffer.Len() > 0 {
		w.log.Println(w.buffer.String())
	}

	return nil
}
27 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go generated vendored
@ -1,27 +0,0 @@
package testrunner

type RunResult struct {
	Passed               bool
	HasProgrammaticFocus bool
}

func PassingRunResult() RunResult {
	return RunResult{
		Passed:               true,
		HasProgrammaticFocus: false,
	}
}

func FailingRunResult() RunResult {
	return RunResult{
		Passed:               false,
		HasProgrammaticFocus: false,
	}
}

func (r RunResult) Merge(o RunResult) RunResult {
	return RunResult{
		Passed:               r.Passed && o.Passed,
		HasProgrammaticFocus: r.HasProgrammaticFocus || o.HasProgrammaticFocus,
	}
}
460 Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go generated vendored
@ -1,460 +0,0 @@
|
||||
package testrunner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
"github.com/onsi/ginkgo/internal/remote"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type TestRunner struct {
|
||||
Suite testsuite.TestSuite
|
||||
|
||||
compiled bool
|
||||
compilationTargetPath string
|
||||
|
||||
numCPU int
|
||||
parallelStream bool
|
||||
race bool
|
||||
cover bool
|
||||
coverPkg string
|
||||
tags string
|
||||
additionalArgs []string
|
||||
}
|
||||
|
||||
func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, coverPkg string, tags string, additionalArgs []string) *TestRunner {
|
||||
runner := &TestRunner{
|
||||
Suite: suite,
|
||||
numCPU: numCPU,
|
||||
parallelStream: parallelStream,
|
||||
race: race,
|
||||
cover: cover,
|
||||
coverPkg: coverPkg,
|
||||
tags: tags,
|
||||
additionalArgs: additionalArgs,
|
||||
}
|
||||
|
||||
if !suite.Precompiled {
|
||||
dir, err := ioutil.TempDir("", "ginkgo")
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("coulnd't create temporary directory... might be time to rm -rf:\n%s", err.Error()))
|
||||
}
|
||||
runner.compilationTargetPath = filepath.Join(dir, suite.PackageName+".test")
|
||||
}
|
||||
|
||||
return runner
|
||||
}
|
||||
|
||||
func (t *TestRunner) Compile() error {
|
||||
return t.CompileTo(t.compilationTargetPath)
|
||||
}
|
||||
|
||||
func (t *TestRunner) CompileTo(path string) error {
|
||||
if t.compiled {
|
||||
return nil
|
||||
}
|
||||
|
||||
if t.Suite.Precompiled {
|
||||
return nil
|
||||
}
|
||||
|
||||
args := []string{"test", "-c", "-i", "-o", path}
|
||||
if t.race {
|
||||
args = append(args, "-race")
|
||||
}
|
||||
if t.cover || t.coverPkg != "" {
|
||||
args = append(args, "-cover", "-covermode=atomic")
|
||||
}
|
||||
if t.coverPkg != "" {
|
||||
args = append(args, fmt.Sprintf("-coverpkg=%s", t.coverPkg))
|
||||
}
|
||||
if t.tags != "" {
|
||||
args = append(args, fmt.Sprintf("-tags=%s", t.tags))
|
||||
}
|
||||
|
||||
cmd := exec.Command("go", args...)
|
||||
|
||||
cmd.Dir = t.Suite.Path
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
if err != nil {
|
||||
fixedOutput := fixCompilationOutput(string(output), t.Suite.Path)
|
||||
if len(output) > 0 {
|
||||
return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput)
|
||||
}
|
||||
return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
|
||||
}
|
||||
|
||||
if fileExists(path) == false {
|
||||
compiledFile := filepath.Join(t.Suite.Path, t.Suite.PackageName+".test")
|
||||
if fileExists(compiledFile) {
|
||||
// seems like we are on an old go version that does not support the -o flag on go test
|
||||
// move the compiled test file to the desired location by hand
|
||||
err = os.Rename(compiledFile, path)
|
||||
if err != nil {
|
||||
// We cannot move the file, perhaps because the source and destination
|
||||
// are on different partitions. We can copy the file, however.
|
||||
err = copyFile(compiledFile, path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to copy compiled file: %s", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path)
|
||||
}
|
||||
}
|
||||
|
||||
t.compiled = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func fileExists(path string) bool {
|
||||
_, err := os.Stat(path)
|
||||
return err == nil || os.IsNotExist(err) == false
|
||||
}
|
||||
|
||||
// copyFile copies the contents of the file named src to the file named
|
||||
// by dst. The file will be created if it does not already exist. If the
|
||||
// destination file exists, all it's contents will be replaced by the contents
|
||||
// of the source file.
|
||||
func copyFile(src, dst string) error {
|
||||
srcInfo, err := os.Stat(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mode := srcInfo.Mode()
|
||||
|
||||
in, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer in.Close()
|
||||
|
||||
out, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
closeErr := out.Close()
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = out.Sync()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return out.Chmod(mode)
|
||||
}
|
||||
|
||||
/*
|
||||
go test -c -i spits package.test out into the cwd. there's no way to change this.
|
||||
|
||||
to make sure it doesn't generate conflicting .test files in the cwd, Compile() must switch the cwd to the test package.
|
||||
|
||||
unfortunately, this causes go test's compile output to be expressed *relative to the test package* instead of the cwd.
|
||||
|
||||
this makes it hard to reason about what failed, and also prevents iterm's Cmd+click from working.
|
||||
|
||||
fixCompilationOutput..... rewrites the output to fix the paths.
|
||||
|
||||
yeah......
|
||||
*/
|
||||
func fixCompilationOutput(output string, relToPath string) string {
|
||||
re := regexp.MustCompile(`^(\S.*\.go)\:\d+\:`)
|
||||
lines := strings.Split(output, "\n")
|
||||
for i, line := range lines {
|
||||
indices := re.FindStringSubmatchIndex(line)
|
||||
if len(indices) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
path := line[indices[2]:indices[3]]
|
||||
path = filepath.Join(relToPath, path)
|
||||
lines[i] = path + line[indices[3]:]
|
||||
}
|
||||
return strings.Join(lines, "\n")
|
||||
}
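// Illustrative sketch (assumed paths, not part of the original file): with
// relToPath set to "./mypkg", a compiler line such as
//
//	widget_test.go:12: undefined: Widget
//
// is rewritten by fixCompilationOutput to
//
//	mypkg/widget_test.go:12: undefined: Widget
//
// so the path resolves from the directory ginkgo was invoked in.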
|
||||
|
||||
func (t *TestRunner) Run() RunResult {
|
||||
if t.Suite.IsGinkgo {
|
||||
if t.numCPU > 1 {
|
||||
if t.parallelStream {
|
||||
return t.runAndStreamParallelGinkgoSuite()
|
||||
} else {
|
||||
return t.runParallelGinkgoSuite()
|
||||
}
|
||||
} else {
|
||||
return t.runSerialGinkgoSuite()
|
||||
}
|
||||
} else {
|
||||
return t.runGoTestSuite()
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TestRunner) CleanUp() {
|
||||
if t.Suite.Precompiled {
|
||||
return
|
||||
}
|
||||
os.RemoveAll(filepath.Dir(t.compilationTargetPath))
|
||||
}
|
||||
|
||||
func (t *TestRunner) runSerialGinkgoSuite() RunResult {
|
||||
ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
|
||||
return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil)
|
||||
}
|
||||
|
||||
func (t *TestRunner) runGoTestSuite() RunResult {
|
||||
return t.run(t.cmd([]string{"-test.v"}, os.Stdout, 1), nil)
|
||||
}
|
||||
|
||||
func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
|
||||
completions := make(chan RunResult)
|
||||
writers := make([]*logWriter, t.numCPU)
|
||||
|
||||
server, err := remote.NewServer(t.numCPU)
|
||||
if err != nil {
|
||||
panic("Failed to start parallel spec server")
|
||||
}
|
||||
|
||||
server.Start()
|
||||
defer server.Close()
|
||||
|
||||
for cpu := 0; cpu < t.numCPU; cpu++ {
|
||||
config.GinkgoConfig.ParallelNode = cpu + 1
|
||||
config.GinkgoConfig.ParallelTotal = t.numCPU
|
||||
config.GinkgoConfig.SyncHost = server.Address()
|
||||
|
||||
ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
|
||||
|
||||
writers[cpu] = newLogWriter(os.Stdout, cpu+1)
|
||||
|
||||
cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
|
||||
|
||||
server.RegisterAlive(cpu+1, func() bool {
|
||||
if cmd.ProcessState == nil {
|
||||
return true
|
||||
}
|
||||
return !cmd.ProcessState.Exited()
|
||||
})
|
||||
|
||||
go t.run(cmd, completions)
|
||||
}
|
||||
|
||||
res := PassingRunResult()
|
||||
|
||||
for cpu := 0; cpu < t.numCPU; cpu++ {
|
||||
res = res.Merge(<-completions)
|
||||
}
|
||||
|
||||
for _, writer := range writers {
|
||||
writer.Close()
|
||||
}
|
||||
|
||||
os.Stdout.Sync()
|
||||
|
||||
if t.cover || t.coverPkg != "" {
|
||||
t.combineCoverprofiles()
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (t *TestRunner) runParallelGinkgoSuite() RunResult {
|
||||
result := make(chan bool)
|
||||
completions := make(chan RunResult)
|
||||
writers := make([]*logWriter, t.numCPU)
|
||||
reports := make([]*bytes.Buffer, t.numCPU)
|
||||
|
||||
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
|
||||
aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)
|
||||
|
||||
server, err := remote.NewServer(t.numCPU)
|
||||
if err != nil {
|
||||
panic("Failed to start parallel spec server")
|
||||
}
|
||||
server.RegisterReporters(aggregator)
|
||||
server.Start()
|
||||
defer server.Close()
|
||||
|
||||
for cpu := 0; cpu < t.numCPU; cpu++ {
|
||||
config.GinkgoConfig.ParallelNode = cpu + 1
|
||||
config.GinkgoConfig.ParallelTotal = t.numCPU
|
||||
config.GinkgoConfig.SyncHost = server.Address()
|
||||
config.GinkgoConfig.StreamHost = server.Address()
|
||||
|
||||
ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
|
||||
|
||||
reports[cpu] = &bytes.Buffer{}
|
||||
writers[cpu] = newLogWriter(reports[cpu], cpu+1)
|
||||
|
||||
cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
|
||||
|
||||
server.RegisterAlive(cpu+1, func() bool {
|
||||
if cmd.ProcessState == nil {
|
||||
return true
|
||||
}
|
||||
return !cmd.ProcessState.Exited()
|
||||
})
|
||||
|
||||
go t.run(cmd, completions)
|
||||
}
|
||||
|
||||
res := PassingRunResult()
|
||||
|
||||
for cpu := 0; cpu < t.numCPU; cpu++ {
|
||||
res = res.Merge(<-completions)
|
||||
}
|
||||
|
||||
//all test processes are done, at this point
|
||||
//we should be able to wait for the aggregator to tell us that it's done
|
||||
|
||||
select {
|
||||
case <-result:
|
||||
fmt.Println("")
|
||||
case <-time.After(time.Second):
|
||||
//the aggregator never got back to us! something must have gone wrong
|
||||
fmt.Println(`
|
||||
-------------------------------------------------------------------
|                                                                 |
| Ginkgo timed out waiting for all parallel nodes to report back! |
|                                                                 |
-------------------------------------------------------------------
|
||||
`)
|
||||
|
||||
os.Stdout.Sync()
|
||||
|
||||
for _, writer := range writers {
|
||||
writer.Close()
|
||||
}
|
||||
|
||||
for _, report := range reports {
|
||||
fmt.Print(report.String())
|
||||
}
|
||||
|
||||
os.Stdout.Sync()
|
||||
}
|
||||
|
||||
if t.cover || t.coverPkg != "" {
|
||||
t.combineCoverprofiles()
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
|
||||
args := []string{"--test.timeout=24h"}
|
||||
if t.cover || t.coverPkg != "" {
|
||||
coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile"
|
||||
if t.numCPU > 1 {
|
||||
coverprofile = fmt.Sprintf("%s.%d", coverprofile, node)
|
||||
}
|
||||
args = append(args, coverprofile)
|
||||
}
|
||||
|
||||
args = append(args, ginkgoArgs...)
|
||||
args = append(args, t.additionalArgs...)
|
||||
|
||||
path := t.compilationTargetPath
|
||||
if t.Suite.Precompiled {
|
||||
path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
|
||||
}
|
||||
|
||||
cmd := exec.Command(path, args...)
|
||||
|
||||
cmd.Dir = t.Suite.Path
|
||||
cmd.Stderr = stream
|
||||
cmd.Stdout = stream
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
|
||||
var res RunResult
|
||||
|
||||
defer func() {
|
||||
if completions != nil {
|
||||
completions <- res
|
||||
}
|
||||
}()
|
||||
|
||||
err := cmd.Start()
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to run test suite!\n\t%s", err.Error())
|
||||
return res
|
||||
}
|
||||
|
||||
cmd.Wait()
|
||||
exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
|
||||
res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
|
||||
res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (t *TestRunner) combineCoverprofiles() {
|
||||
profiles := []string{}
|
||||
for cpu := 1; cpu <= t.numCPU; cpu++ {
|
||||
coverFile := fmt.Sprintf("%s.coverprofile.%d", t.Suite.PackageName, cpu)
|
||||
coverFile = filepath.Join(t.Suite.Path, coverFile)
|
||||
coverProfile, err := ioutil.ReadFile(coverFile)
|
||||
os.Remove(coverFile)
|
||||
|
||||
if err == nil {
|
||||
profiles = append(profiles, string(coverProfile))
|
||||
}
|
||||
}
|
||||
|
||||
if len(profiles) != t.numCPU {
|
||||
return
|
||||
}
|
||||
|
||||
lines := map[string]int{}
|
||||
lineOrder := []string{}
|
||||
for i, coverProfile := range profiles {
|
||||
for _, line := range strings.Split(coverProfile, "\n")[1:] {
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
components := strings.Split(line, " ")
|
||||
count, _ := strconv.Atoi(components[len(components)-1])
|
||||
prefix := strings.Join(components[0:len(components)-1], " ")
|
||||
lines[prefix] += count
|
||||
if i == 0 {
|
||||
lineOrder = append(lineOrder, prefix)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
output := []string{"mode: atomic"}
|
||||
for _, line := range lineOrder {
|
||||
output = append(output, fmt.Sprintf("%s %d", line, lines[line]))
|
||||
}
|
||||
finalOutput := strings.Join(output, "\n")
|
||||
ioutil.WriteFile(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.coverprofile", t.Suite.PackageName)), []byte(finalOutput), 0666)
|
||||
}
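// Illustrative sketch (assumed profile contents, not part of the original file):
// with two parallel nodes writing mypkg.coverprofile.1 and mypkg.coverprofile.2,
// where node 1's profile holds
//
//	mode: atomic
//	mypkg/widget.go:10.2,12.3 1 3
//
// and node 2's profile holds
//
//	mode: atomic
//	mypkg/widget.go:10.2,12.3 1 5
//
// combineCoverprofiles sums the trailing hit counts and writes a single
// mypkg.coverprofile containing
//
//	mode: atomic
//	mypkg/widget.go:10.2,12.3 1 8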
|
106
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go
generated
vendored
@ -1,106 +0,0 @@
|
||||
package testsuite
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type TestSuite struct {
|
||||
Path string
|
||||
PackageName string
|
||||
IsGinkgo bool
|
||||
Precompiled bool
|
||||
}
|
||||
|
||||
func PrecompiledTestSuite(path string) (TestSuite, error) {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return TestSuite{}, err
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return TestSuite{}, errors.New("this is a directory, not a file")
|
||||
}
|
||||
|
||||
if filepath.Ext(path) != ".test" {
|
||||
return TestSuite{}, errors.New("this is not a .test binary")
|
||||
}
|
||||
|
||||
if info.Mode()&0111 == 0 {
|
||||
return TestSuite{}, errors.New("this is not executable")
|
||||
}
|
||||
|
||||
dir := relPath(filepath.Dir(path))
|
||||
packageName := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
|
||||
|
||||
return TestSuite{
|
||||
Path: dir,
|
||||
PackageName: packageName,
|
||||
IsGinkgo: true,
|
||||
Precompiled: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func SuitesInDir(dir string, recurse bool) []TestSuite {
|
||||
suites := []TestSuite{}
|
||||
files, _ := ioutil.ReadDir(dir)
|
||||
re := regexp.MustCompile(`_test\.go$`)
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && re.Match([]byte(file.Name())) {
|
||||
suites = append(suites, New(dir, files))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if recurse {
|
||||
re = regexp.MustCompile(`^[._]`)
|
||||
for _, file := range files {
|
||||
if file.IsDir() && !re.Match([]byte(file.Name())) {
|
||||
suites = append(suites, SuitesInDir(dir+"/"+file.Name(), recurse)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return suites
|
||||
}
|
||||
|
||||
func relPath(dir string) string {
|
||||
dir, _ = filepath.Abs(dir)
|
||||
cwd, _ := os.Getwd()
|
||||
dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
|
||||
dir = "." + string(filepath.Separator) + dir
|
||||
return dir
|
||||
}
|
||||
|
||||
func New(dir string, files []os.FileInfo) TestSuite {
|
||||
return TestSuite{
|
||||
Path: relPath(dir),
|
||||
PackageName: packageNameForSuite(dir),
|
||||
IsGinkgo: filesHaveGinkgoSuite(dir, files),
|
||||
}
|
||||
}
|
||||
|
||||
func packageNameForSuite(dir string) string {
|
||||
path, _ := filepath.Abs(dir)
|
||||
return filepath.Base(path)
|
||||
}
|
||||
|
||||
func filesHaveGinkgoSuite(dir string, files []os.FileInfo) bool {
|
||||
reTestFile := regexp.MustCompile(`_test\.go$`)
|
||||
reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`)
|
||||
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
|
||||
contents, _ := ioutil.ReadFile(dir + "/" + file.Name())
|
||||
if reGinkgo.Match(contents) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
38
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
generated
vendored
@ -1,38 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func BuildUnfocusCommand() *Command {
|
||||
return &Command{
|
||||
Name: "unfocus",
|
||||
AltName: "blur",
|
||||
FlagSet: flag.NewFlagSet("unfocus", flag.ExitOnError),
|
||||
UsageCommand: "ginkgo unfocus (or ginkgo blur)",
|
||||
Usage: []string{
|
||||
"Recursively unfocuses any focused tests under the current directory",
|
||||
},
|
||||
Command: unfocusSpecs,
|
||||
}
|
||||
}
|
||||
|
||||
func unfocusSpecs([]string, []string) {
|
||||
unfocus("Describe")
|
||||
unfocus("Context")
|
||||
unfocus("It")
|
||||
unfocus("Measure")
|
||||
unfocus("DescribeTable")
|
||||
unfocus("Entry")
|
||||
}
|
||||
|
||||
func unfocus(component string) {
|
||||
fmt.Printf("Removing F%s...\n", component)
|
||||
cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", ".")
|
||||
out, _ := cmd.CombinedOutput()
|
||||
if string(out) != "" {
|
||||
println(string(out))
|
||||
}
|
||||
}
|
23
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/version_command.go
generated
vendored
@ -1,23 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/onsi/ginkgo/config"
|
||||
)
|
||||
|
||||
func BuildVersionCommand() *Command {
|
||||
return &Command{
|
||||
Name: "version",
|
||||
FlagSet: flag.NewFlagSet("version", flag.ExitOnError),
|
||||
UsageCommand: "ginkgo version",
|
||||
Usage: []string{
|
||||
"Print Ginkgo's version",
|
||||
},
|
||||
Command: printVersion,
|
||||
}
|
||||
}
|
||||
|
||||
func printVersion([]string, []string) {
|
||||
fmt.Printf("Ginkgo Version %s\n", config.VERSION)
|
||||
}
|
22
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/delta.go
generated
vendored
@ -1,22 +0,0 @@
|
||||
package watch
|
||||
|
||||
import "sort"
|
||||
|
||||
type Delta struct {
|
||||
ModifiedPackages []string
|
||||
|
||||
NewSuites []*Suite
|
||||
RemovedSuites []*Suite
|
||||
modifiedSuites []*Suite
|
||||
}
|
||||
|
||||
type DescendingByDelta []*Suite
|
||||
|
||||
func (a DescendingByDelta) Len() int { return len(a) }
|
||||
func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }
|
||||
|
||||
func (d Delta) ModifiedSuites() []*Suite {
|
||||
sort.Sort(DescendingByDelta(d.modifiedSuites))
|
||||
return d.modifiedSuites
|
||||
}
|
71
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go
generated
vendored
@ -1,71 +0,0 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
)
|
||||
|
||||
type SuiteErrors map[testsuite.TestSuite]error
|
||||
|
||||
type DeltaTracker struct {
|
||||
maxDepth int
|
||||
suites map[string]*Suite
|
||||
packageHashes *PackageHashes
|
||||
}
|
||||
|
||||
func NewDeltaTracker(maxDepth int) *DeltaTracker {
|
||||
return &DeltaTracker{
|
||||
maxDepth: maxDepth,
|
||||
packageHashes: NewPackageHashes(),
|
||||
suites: map[string]*Suite{},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DeltaTracker) Delta(suites []testsuite.TestSuite) (delta Delta, errors SuiteErrors) {
|
||||
errors = SuiteErrors{}
|
||||
delta.ModifiedPackages = d.packageHashes.CheckForChanges()
|
||||
|
||||
providedSuitePaths := map[string]bool{}
|
||||
for _, suite := range suites {
|
||||
providedSuitePaths[suite.Path] = true
|
||||
}
|
||||
|
||||
d.packageHashes.StartTrackingUsage()
|
||||
|
||||
for _, suite := range d.suites {
|
||||
if providedSuitePaths[suite.Suite.Path] {
|
||||
if suite.Delta() > 0 {
|
||||
delta.modifiedSuites = append(delta.modifiedSuites, suite)
|
||||
}
|
||||
} else {
|
||||
delta.RemovedSuites = append(delta.RemovedSuites, suite)
|
||||
}
|
||||
}
|
||||
|
||||
d.packageHashes.StopTrackingUsageAndPrune()
|
||||
|
||||
for _, suite := range suites {
|
||||
_, ok := d.suites[suite.Path]
|
||||
if !ok {
|
||||
s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
|
||||
if err != nil {
|
||||
errors[suite] = err
|
||||
continue
|
||||
}
|
||||
d.suites[suite.Path] = s
|
||||
delta.NewSuites = append(delta.NewSuites, s)
|
||||
}
|
||||
}
|
||||
|
||||
return delta, errors
|
||||
}
|
||||
|
||||
func (d *DeltaTracker) WillRun(suite testsuite.TestSuite) error {
|
||||
s, ok := d.suites[suite.Path]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown suite %s", suite.Path)
|
||||
}
|
||||
|
||||
return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
|
||||
}
|
91
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go
generated
vendored
@ -1,91 +0,0 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"go/build"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
|
||||
|
||||
type Dependencies struct {
|
||||
deps map[string]int
|
||||
}
|
||||
|
||||
func NewDependencies(path string, maxDepth int) (Dependencies, error) {
|
||||
d := Dependencies{
|
||||
deps: map[string]int{},
|
||||
}
|
||||
|
||||
if maxDepth == 0 {
|
||||
return d, nil
|
||||
}
|
||||
|
||||
err := d.seedWithDepsForPackageAtPath(path)
|
||||
if err != nil {
|
||||
return d, err
|
||||
}
|
||||
|
||||
for depth := 1; depth < maxDepth; depth++ {
|
||||
n := len(d.deps)
|
||||
d.addDepsForDepth(depth)
|
||||
if n == len(d.deps) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
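// Illustrative sketch (hypothetical package layout, not part of the original
// file): for a suite importing "myapp/store", which itself imports
// "myapp/models", NewDependencies(path, 2) would yield roughly
//
//	map[string]int{
//		"/go/src/myapp/store":  1,
//		"/go/src/myapp/models": 2,
//	}
//
// i.e. each non-GOROOT, non-ginkgo/gomega dependency keyed by its directory and
// tagged with the depth at which it was first reached.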
|
||||
|
||||
func (d Dependencies) Dependencies() map[string]int {
|
||||
return d.deps
|
||||
}
|
||||
|
||||
func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
|
||||
pkg, err := build.ImportDir(path, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.resolveAndAdd(pkg.Imports, 1)
|
||||
d.resolveAndAdd(pkg.TestImports, 1)
|
||||
d.resolveAndAdd(pkg.XTestImports, 1)
|
||||
|
||||
delete(d.deps, pkg.Dir)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d Dependencies) addDepsForDepth(depth int) {
|
||||
for dep, depDepth := range d.deps {
|
||||
if depDepth == depth {
|
||||
d.addDepsForDep(dep, depth+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d Dependencies) addDepsForDep(dep string, depth int) {
|
||||
pkg, err := build.ImportDir(dep, 0)
|
||||
if err != nil {
|
||||
println(err.Error())
|
||||
return
|
||||
}
|
||||
d.resolveAndAdd(pkg.Imports, depth)
|
||||
}
|
||||
|
||||
func (d Dependencies) resolveAndAdd(deps []string, depth int) {
|
||||
for _, dep := range deps {
|
||||
pkg, err := build.Import(dep, ".", 0)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if pkg.Goroot == false && !ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) {
|
||||
d.addDepIfNotPresent(pkg.Dir, depth)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
|
||||
_, ok := d.deps[dep]
|
||||
if !ok {
|
||||
d.deps[dep] = depth
|
||||
}
|
||||
}
|
103
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go
generated
vendored
@ -1,103 +0,0 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"time"
|
||||
)
|
||||
|
||||
var goRegExp = regexp.MustCompile(`\.go$`)
|
||||
var goTestRegExp = regexp.MustCompile(`_test\.go$`)
|
||||
|
||||
type PackageHash struct {
|
||||
CodeModifiedTime time.Time
|
||||
TestModifiedTime time.Time
|
||||
Deleted bool
|
||||
|
||||
path string
|
||||
codeHash string
|
||||
testHash string
|
||||
}
|
||||
|
||||
func NewPackageHash(path string) *PackageHash {
|
||||
p := &PackageHash{
|
||||
path: path,
|
||||
}
|
||||
|
||||
p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PackageHash) CheckForChanges() bool {
|
||||
codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()
|
||||
|
||||
if deleted {
|
||||
if !p.Deleted {
|
||||
t := time.Now()
|
||||
p.CodeModifiedTime = t
|
||||
p.TestModifiedTime = t
|
||||
}
|
||||
p.Deleted = true
|
||||
return true
|
||||
}
|
||||
|
||||
modified := false
|
||||
p.Deleted = false
|
||||
|
||||
if p.codeHash != codeHash {
|
||||
p.CodeModifiedTime = codeModifiedTime
|
||||
modified = true
|
||||
}
|
||||
if p.testHash != testHash {
|
||||
p.TestModifiedTime = testModifiedTime
|
||||
modified = true
|
||||
}
|
||||
|
||||
p.codeHash = codeHash
|
||||
p.testHash = testHash
|
||||
return modified
|
||||
}
|
||||
|
||||
func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
|
||||
infos, err := ioutil.ReadDir(p.path)
|
||||
|
||||
if err != nil {
|
||||
deleted = true
|
||||
return
|
||||
}
|
||||
|
||||
for _, info := range infos {
|
||||
if info.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
if goTestRegExp.Match([]byte(info.Name())) {
|
||||
testHash += p.hashForFileInfo(info)
|
||||
if info.ModTime().After(testModifiedTime) {
|
||||
testModifiedTime = info.ModTime()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if goRegExp.Match([]byte(info.Name())) {
|
||||
codeHash += p.hashForFileInfo(info)
|
||||
if info.ModTime().After(codeModifiedTime) {
|
||||
codeModifiedTime = info.ModTime()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
testHash += codeHash
|
||||
if codeModifiedTime.After(testModifiedTime) {
|
||||
testModifiedTime = codeModifiedTime
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
|
||||
return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
|
||||
}
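// Illustrative sketch (assumed file metadata, not part of the original file):
// hashForFileInfo yields strings such as
//
//	"widget_test.go_2048_1457736000000000000"
//
// so the concatenated codeHash and testHash change whenever any .go file's name,
// size, or modification time changes, which is what CheckForChanges detects.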
|
82
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go
generated
vendored
@ -1,82 +0,0 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type PackageHashes struct {
|
||||
PackageHashes map[string]*PackageHash
|
||||
usedPaths map[string]bool
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
func NewPackageHashes() *PackageHashes {
|
||||
return &PackageHashes{
|
||||
PackageHashes: map[string]*PackageHash{},
|
||||
usedPaths: nil,
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PackageHashes) CheckForChanges() []string {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
modified := []string{}
|
||||
|
||||
for _, packageHash := range p.PackageHashes {
|
||||
if packageHash.CheckForChanges() {
|
||||
modified = append(modified, packageHash.path)
|
||||
}
|
||||
}
|
||||
|
||||
return modified
|
||||
}
|
||||
|
||||
func (p *PackageHashes) Add(path string) *PackageHash {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
path, _ = filepath.Abs(path)
|
||||
_, ok := p.PackageHashes[path]
|
||||
if !ok {
|
||||
p.PackageHashes[path] = NewPackageHash(path)
|
||||
}
|
||||
|
||||
if p.usedPaths != nil {
|
||||
p.usedPaths[path] = true
|
||||
}
|
||||
return p.PackageHashes[path]
|
||||
}
|
||||
|
||||
func (p *PackageHashes) Get(path string) *PackageHash {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
path, _ = filepath.Abs(path)
|
||||
if p.usedPaths != nil {
|
||||
p.usedPaths[path] = true
|
||||
}
|
||||
return p.PackageHashes[path]
|
||||
}
|
||||
|
||||
func (p *PackageHashes) StartTrackingUsage() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
p.usedPaths = map[string]bool{}
|
||||
}
|
||||
|
||||
func (p *PackageHashes) StopTrackingUsageAndPrune() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
for path := range p.PackageHashes {
|
||||
if !p.usedPaths[path] {
|
||||
delete(p.PackageHashes, path)
|
||||
}
|
||||
}
|
||||
|
||||
p.usedPaths = nil
|
||||
}
|
87
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/suite.go
generated
vendored
@ -1,87 +0,0 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
)
|
||||
|
||||
type Suite struct {
|
||||
Suite testsuite.TestSuite
|
||||
RunTime time.Time
|
||||
Dependencies Dependencies
|
||||
|
||||
sharedPackageHashes *PackageHashes
|
||||
}
|
||||
|
||||
func NewSuite(suite testsuite.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
|
||||
deps, err := NewDependencies(suite.Path, maxDepth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sharedPackageHashes.Add(suite.Path)
|
||||
for dep := range deps.Dependencies() {
|
||||
sharedPackageHashes.Add(dep)
|
||||
}
|
||||
|
||||
return &Suite{
|
||||
Suite: suite,
|
||||
Dependencies: deps,
|
||||
|
||||
sharedPackageHashes: sharedPackageHashes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Suite) Delta() float64 {
|
||||
delta := s.delta(s.Suite.Path, true, 0) * 1000
|
||||
for dep, depth := range s.Dependencies.Dependencies() {
|
||||
delta += s.delta(dep, false, depth)
|
||||
}
|
||||
return delta
|
||||
}
|
||||
|
||||
func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
|
||||
s.RunTime = time.Now()
|
||||
|
||||
deps, err := NewDependencies(s.Suite.Path, maxDepth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.sharedPackageHashes.Add(s.Suite.Path)
|
||||
for dep := range deps.Dependencies() {
|
||||
s.sharedPackageHashes.Add(dep)
|
||||
}
|
||||
|
||||
s.Dependencies = deps
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Suite) Description() string {
|
||||
numDeps := len(s.Dependencies.Dependencies())
|
||||
pluralizer := "ies"
|
||||
if numDeps == 1 {
|
||||
pluralizer = "y"
|
||||
}
|
||||
return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
|
||||
}
|
||||
|
||||
func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
|
||||
return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
|
||||
}
|
||||
|
||||
func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
|
||||
packageHash := s.sharedPackageHashes.Get(packagePath)
|
||||
var modifiedTime time.Time
|
||||
if includeTests {
|
||||
modifiedTime = packageHash.TestModifiedTime
|
||||
} else {
|
||||
modifiedTime = packageHash.CodeModifiedTime
|
||||
}
|
||||
|
||||
return modifiedTime.Sub(s.RunTime)
|
||||
}
|
172
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch_command.go
generated
vendored
@ -1,172 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
"github.com/onsi/ginkgo/ginkgo/watch"
|
||||
)
|
||||
|
||||
func BuildWatchCommand() *Command {
|
||||
commandFlags := NewWatchCommandFlags(flag.NewFlagSet("watch", flag.ExitOnError))
|
||||
interruptHandler := interrupthandler.NewInterruptHandler()
|
||||
notifier := NewNotifier(commandFlags)
|
||||
watcher := &SpecWatcher{
|
||||
commandFlags: commandFlags,
|
||||
notifier: notifier,
|
||||
interruptHandler: interruptHandler,
|
||||
suiteRunner: NewSuiteRunner(notifier, interruptHandler),
|
||||
}
|
||||
|
||||
return &Command{
|
||||
Name: "watch",
|
||||
FlagSet: commandFlags.FlagSet,
|
||||
UsageCommand: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
|
||||
Usage: []string{
|
||||
"Watches the tests in the passed in <PACKAGES> and runs them when changes occur.",
|
||||
"Any arguments after -- will be passed to the test.",
|
||||
},
|
||||
Command: watcher.WatchSpecs,
|
||||
SuppressFlagDocumentation: true,
|
||||
FlagDocSubstitute: []string{
|
||||
"Accepts all the flags that the ginkgo command accepts except for --keepGoing and --untilItFails",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type SpecWatcher struct {
|
||||
commandFlags *RunWatchAndBuildCommandFlags
|
||||
notifier *Notifier
|
||||
interruptHandler *interrupthandler.InterruptHandler
|
||||
suiteRunner *SuiteRunner
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
|
||||
w.commandFlags.computeNodes()
|
||||
w.notifier.VerifyNotificationsAreAvailable()
|
||||
|
||||
w.WatchSuites(args, additionalArgs)
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalArgs []string) []*testrunner.TestRunner {
|
||||
runners := []*testrunner.TestRunner{}
|
||||
|
||||
for _, suite := range suites {
|
||||
runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.CoverPkg, w.commandFlags.Tags, additionalArgs))
|
||||
}
|
||||
|
||||
return runners
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) {
|
||||
suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
|
||||
|
||||
if len(suites) == 0 {
|
||||
complainAndQuit("Found no test suites")
|
||||
}
|
||||
|
||||
fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), pluralizedWord("suite", "suites", len(suites)), w.commandFlags.Depth)
|
||||
deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth)
|
||||
delta, errors := deltaTracker.Delta(suites)
|
||||
|
||||
fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
|
||||
for _, suite := range delta.NewSuites {
|
||||
fmt.Println(" " + suite.Description())
|
||||
}
|
||||
|
||||
for suite, err := range errors {
|
||||
fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
|
||||
}
|
||||
|
||||
if len(suites) == 1 {
|
||||
runners := w.runnersForSuites(suites, additionalArgs)
|
||||
w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, nil)
|
||||
runners[0].CleanUp()
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(time.Second)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
|
||||
delta, _ := deltaTracker.Delta(suites)
|
||||
|
||||
suitesToRun := []testsuite.TestSuite{}
|
||||
|
||||
if len(delta.NewSuites) > 0 {
|
||||
fmt.Printf(greenColor+"Detected %d new %s:\n"+defaultStyle, len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
|
||||
for _, suite := range delta.NewSuites {
|
||||
suitesToRun = append(suitesToRun, suite.Suite)
|
||||
fmt.Println(" " + suite.Description())
|
||||
}
|
||||
}
|
||||
|
||||
modifiedSuites := delta.ModifiedSuites()
|
||||
if len(modifiedSuites) > 0 {
|
||||
fmt.Println(greenColor + "\nDetected changes in:" + defaultStyle)
|
||||
for _, pkg := range delta.ModifiedPackages {
|
||||
fmt.Println(" " + pkg)
|
||||
}
|
||||
fmt.Printf(greenColor+"Will run %d %s:\n"+defaultStyle, len(modifiedSuites), pluralizedWord("suite", "suites", len(modifiedSuites)))
|
||||
for _, suite := range modifiedSuites {
|
||||
suitesToRun = append(suitesToRun, suite.Suite)
|
||||
fmt.Println(" " + suite.Description())
|
||||
}
|
||||
fmt.Println("")
|
||||
}
|
||||
|
||||
if len(suitesToRun) > 0 {
|
||||
w.UpdateSeed()
|
||||
w.ComputeSuccinctMode(len(suitesToRun))
|
||||
runners := w.runnersForSuites(suitesToRun, additionalArgs)
|
||||
result, _ := w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, func(suite testsuite.TestSuite) {
|
||||
deltaTracker.WillRun(suite)
|
||||
})
|
||||
for _, runner := range runners {
|
||||
runner.CleanUp()
|
||||
}
|
||||
if !w.interruptHandler.WasInterrupted() {
|
||||
color := redColor
|
||||
if result.Passed {
|
||||
color = greenColor
|
||||
}
|
||||
fmt.Println(color + "\nDone. Resuming watch..." + defaultStyle)
|
||||
}
|
||||
}
|
||||
|
||||
case <-w.interruptHandler.C:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) ComputeSuccinctMode(numSuites int) {
|
||||
if config.DefaultReporterConfig.Verbose {
|
||||
config.DefaultReporterConfig.Succinct = false
|
||||
return
|
||||
}
|
||||
|
||||
if w.commandFlags.wasSet("succinct") {
|
||||
return
|
||||
}
|
||||
|
||||
if numSuites == 1 {
|
||||
config.DefaultReporterConfig.Succinct = false
|
||||
}
|
||||
|
||||
if numSuites > 1 {
|
||||
config.DefaultReporterConfig.Succinct = true
|
||||
}
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) UpdateSeed() {
|
||||
if !w.commandFlags.wasSet("seed") {
|
||||
config.GinkgoConfig.RandomSeed = time.Now().Unix()
|
||||
}
|
||||
}
|
1
Godeps/_workspace/src/github.com/onsi/ginkgo/integration/integration.go
generated
vendored
@ -1 +0,0 @@
|
||||
package integration
|
313
Godeps/_workspace/src/github.com/onsi/gomega/ghttp/handlers.go
generated
vendored
@ -1,313 +0,0 @@
|
||||
package ghttp
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
//CombineHandler takes variadic list of handlers and produces one handler
|
||||
//that calls each handler in order.
|
||||
func CombineHandlers(handlers ...http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
for _, handler := range handlers {
|
||||
handler(w, req)
|
||||
}
|
||||
}
|
||||
}
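// Illustrative usage sketch (hypothetical route and payload, not part of the
// original file): verification and response handlers from this package are
// typically composed with CombineHandlers and registered on a ghttp server:
//
//	server.AppendHandlers(CombineHandlers(
//		VerifyRequest("POST", "/sprockets"),
//		VerifyJSON(`{"name": "alfalfa"}`),
//		RespondWithJSONEncoded(http.StatusCreated, map[string]string{"id": "1"}),
//	))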
|
||||
|
||||
//VerifyRequest returns a handler that verifies that a request uses the specified method to connect to the specified path
|
||||
//You may also pass in an optional rawQuery string which is tested against the request's `req.URL.RawQuery`
|
||||
//
|
||||
//For path, you may pass in a string, in which case strict equality will be applied
|
||||
//Alternatively you can pass in a matcher (ContainSubstring("/foo") and MatchRegexp("/foo/[a-f0-9]+") for example)
|
||||
func VerifyRequest(method string, path interface{}, rawQuery ...string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
Ω(req.Method).Should(Equal(method), "Method mismatch")
|
||||
switch p := path.(type) {
|
||||
case types.GomegaMatcher:
|
||||
Ω(req.URL.Path).Should(p, "Path mismatch")
|
||||
default:
|
||||
Ω(req.URL.Path).Should(Equal(path), "Path mismatch")
|
||||
}
|
||||
if len(rawQuery) > 0 {
|
||||
values, err := url.ParseQuery(rawQuery[0])
|
||||
Ω(err).ShouldNot(HaveOccurred(), "Expected RawQuery is malformed")
|
||||
|
||||
Ω(req.URL.Query()).Should(Equal(values), "RawQuery mismatch")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//VerifyContentType returns a handler that verifies that a request has a Content-Type header set to the
|
||||
//specified value
|
||||
func VerifyContentType(contentType string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
Ω(req.Header.Get("Content-Type")).Should(Equal(contentType))
|
||||
}
|
||||
}
|
||||
|
||||
//VerifyBasicAuth returns a handler that verifies the request contains a BasicAuth Authorization header
|
||||
//matching the passed in username and password
|
||||
func VerifyBasicAuth(username string, password string) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
auth := req.Header.Get("Authorization")
|
||||
Ω(auth).ShouldNot(Equal(""), "Authorization header must be specified")
|
||||
|
||||
decoded, err := base64.StdEncoding.DecodeString(auth[6:])
|
||||
Ω(err).ShouldNot(HaveOccurred())
|
||||
|
||||
Ω(string(decoded)).Should(Equal(fmt.Sprintf("%s:%s", username, password)), "Authorization mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
//VerifyHeader returns a handler that verifies the request contains the passed in headers.
|
||||
//The passed in header keys are first canonicalized via http.CanonicalHeaderKey.
|
||||
//
|
||||
//The request must contain *all* the passed in headers, but it is allowed to have additional headers
|
||||
//beyond the passed in set.
|
||||
func VerifyHeader(header http.Header) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
for key, values := range header {
|
||||
key = http.CanonicalHeaderKey(key)
|
||||
Ω(req.Header[key]).Should(Equal(values), "Header mismatch for key: %s", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//VerifyHeaderKV returns a handler that verifies the request contains a header matching the passed in key and values
|
||||
//(recall that a `http.Header` is a mapping from string (key) to []string (values))
|
||||
//It is a convenience wrapper around `VerifyHeader` that allows you to avoid having to create an `http.Header` object.
|
||||
func VerifyHeaderKV(key string, values ...string) http.HandlerFunc {
|
||||
return VerifyHeader(http.Header{key: values})
|
||||
}
|
||||
|
||||
//VerifyBody returns a handler that verifies that the body of the request matches the passed in byte array.
|
||||
//It does this using Equal().
|
||||
func VerifyBody(expectedBody []byte) http.HandlerFunc {
|
||||
return CombineHandlers(
|
||||
func(w http.ResponseWriter, req *http.Request) {
|
||||
body, err := ioutil.ReadAll(req.Body)
|
||||
req.Body.Close()
|
||||
Ω(err).ShouldNot(HaveOccurred())
|
||||
Ω(body).Should(Equal(expectedBody), "Body Mismatch")
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
//VerifyJSON returns a handler that verifies that the body of the request is a valid JSON representation
|
||||
//matching the passed in JSON string. It does this using Gomega's MatchJSON method
|
||||
//
|
||||
//VerifyJSON also verifies that the request's content type is application/json
|
||||
func VerifyJSON(expectedJSON string) http.HandlerFunc {
|
||||
return CombineHandlers(
|
||||
VerifyContentType("application/json"),
|
||||
func(w http.ResponseWriter, req *http.Request) {
|
||||
body, err := ioutil.ReadAll(req.Body)
|
||||
req.Body.Close()
|
||||
Ω(err).ShouldNot(HaveOccurred())
|
||||
Ω(body).Should(MatchJSON(expectedJSON), "JSON Mismatch")
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
//VerifyJSONRepresenting is similar to VerifyJSON. Instead of taking a JSON string, however, it
|
||||
//takes an arbitrary JSON-encodable object and verifies that the request's body is a JSON representation
|
||||
//that matches the object
|
||||
func VerifyJSONRepresenting(object interface{}) http.HandlerFunc {
|
||||
data, err := json.Marshal(object)
|
||||
Ω(err).ShouldNot(HaveOccurred())
|
||||
return CombineHandlers(
|
||||
VerifyContentType("application/json"),
|
||||
VerifyJSON(string(data)),
|
||||
)
|
||||
}
|
||||
|
||||
//VerifyForm returns a handler that verifies a request contains the specified form values.
|
||||
//
|
||||
//The request must contain *all* of the specified values, but it is allowed to have additional
|
||||
//form values beyond the passed in set.
|
||||
func VerifyForm(values url.Values) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
err := r.ParseForm()
|
||||
Ω(err).ShouldNot(HaveOccurred())
|
||||
for key, vals := range values {
|
||||
Ω(r.Form[key]).Should(Equal(vals), "Form mismatch for key: %s", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//VerifyFormKV returns a handler that verifies a request contains a form key with the specified values.
|
||||
//
|
||||
//It is a convenience wrapper around `VerifyForm` that lets you avoid having to create a `url.Values` object.
|
||||
func VerifyFormKV(key string, values ...string) http.HandlerFunc {
|
||||
return VerifyForm(url.Values{key: values})
|
||||
}
|
||||
|
||||
//VerifyProtoRepresenting returns a handler that verifies that the body of the request is a valid protobuf
|
||||
//representation of the passed message.
|
||||
//
|
||||
//VerifyProtoRepresenting also verifies that the request's content type is application/x-protobuf
|
||||
func VerifyProtoRepresenting(expected proto.Message) http.HandlerFunc {
|
||||
return CombineHandlers(
|
||||
VerifyContentType("application/x-protobuf"),
|
||||
func(w http.ResponseWriter, req *http.Request) {
|
||||
body, err := ioutil.ReadAll(req.Body)
|
||||
Ω(err).ShouldNot(HaveOccurred())
|
||||
req.Body.Close()
|
||||
|
||||
expectedType := reflect.TypeOf(expected)
|
||||
actualValuePtr := reflect.New(expectedType.Elem())
|
||||
|
||||
actual, ok := actualValuePtr.Interface().(proto.Message)
|
||||
Ω(ok).Should(BeTrue(), "Message value is not a proto.Message")
|
||||
|
||||
err = proto.Unmarshal(body, actual)
|
||||
Ω(err).ShouldNot(HaveOccurred(), "Failed to unmarshal protobuf")
|
||||
|
||||
Ω(actual).Should(Equal(expected), "ProtoBuf Mismatch")
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func copyHeader(src http.Header, dst http.Header) {
|
||||
for key, value := range src {
|
||||
dst[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
RespondWith returns a handler that responds to a request with the specified status code and body
|
||||
|
||||
Body may be a string or []byte
|
||||
|
||||
Also, RespondWith can be given an optional http.Header. The headers defined therein will be added to the response headers.
|
||||
*/
|
||||
func RespondWith(statusCode int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
if len(optionalHeader) == 1 {
|
||||
copyHeader(optionalHeader[0], w.Header())
|
||||
}
|
||||
w.WriteHeader(statusCode)
|
||||
switch x := body.(type) {
|
||||
case string:
|
||||
w.Write([]byte(x))
|
||||
case []byte:
|
||||
w.Write(x)
|
||||
default:
|
||||
Ω(body).Should(BeNil(), "Invalid type for body. Should be string or []byte.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
RespondWithPtr returns a handler that responds to a request with the specified status code and body
|
||||
|
||||
//Unlike RespondWith, you pass RespondWithPtr a pointer to the status code and body, allowing different tests
|
||||
to share the same setup but specify different status codes and bodies.
|
||||
|
||||
Also, RespondWithPtr can be given an optional http.Header. The headers defined therein will be added to the response headers.
|
||||
Since the http.Header can be mutated after the fact you don't need to pass in a pointer.
|
||||
*/
|
||||
func RespondWithPtr(statusCode *int, body interface{}, optionalHeader ...http.Header) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
if len(optionalHeader) == 1 {
|
||||
copyHeader(optionalHeader[0], w.Header())
|
||||
}
|
||||
w.WriteHeader(*statusCode)
|
||||
if body != nil {
|
||||
switch x := (body).(type) {
|
||||
case *string:
|
||||
w.Write([]byte(*x))
|
||||
case *[]byte:
|
||||
w.Write(*x)
|
||||
default:
|
||||
Ω(body).Should(BeNil(), "Invalid type for body. Should be string or []byte.")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
RespondWithJSONEncoded returns a handler that responds to a request with the specified status code and a body
|
||||
containing the JSON-encoding of the passed in object
|
||||
|
||||
Also, RespondWithJSONEncoded can be given an optional http.Header. The headers defined therein will be added to the response headers.
|
||||
*/
|
||||
func RespondWithJSONEncoded(statusCode int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc {
|
||||
data, err := json.Marshal(object)
|
||||
Ω(err).ShouldNot(HaveOccurred())
|
||||
|
||||
var headers http.Header
|
||||
if len(optionalHeader) == 1 {
|
||||
headers = optionalHeader[0]
|
||||
} else {
|
||||
headers = make(http.Header)
|
||||
}
|
||||
if _, found := headers["Content-Type"]; !found {
|
||||
headers["Content-Type"] = []string{"application/json"}
|
||||
}
|
||||
return RespondWith(statusCode, string(data), headers)
|
||||
}
|
||||
|
||||
/*
|
||||
RespondWithJSONEncodedPtr behaves like RespondWithJSONEncoded but takes a pointer
|
||||
to a status code and object.
|
||||
|
||||
This allows different tests to share the same setup but specify different status codes and JSON-encoded
|
||||
objects.
|
||||
|
||||
Also, RespondWithJSONEncodedPtr can be given an optional http.Header. The headers defined therein will be added to the response headers.
|
||||
Since the http.Header can be mutated after the fact you don't need to pass in a pointer.
|
||||
*/
|
||||
func RespondWithJSONEncodedPtr(statusCode *int, object interface{}, optionalHeader ...http.Header) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
data, err := json.Marshal(object)
|
||||
Ω(err).ShouldNot(HaveOccurred())
|
||||
var headers http.Header
|
||||
if len(optionalHeader) == 1 {
|
||||
headers = optionalHeader[0]
|
||||
} else {
|
||||
headers = make(http.Header)
|
||||
}
|
||||
if _, found := headers["Content-Type"]; !found {
|
||||
headers["Content-Type"] = []string{"application/json"}
|
||||
}
|
||||
copyHeader(headers, w.Header())
|
||||
w.WriteHeader(*statusCode)
|
||||
w.Write(data)
|
||||
}
|
||||
}
|
||||
|
||||
//RespondWithProto returns a handler that responds to a request with the specified status code and a body
|
||||
//containing the protobuf serialization of the provided message.
|
||||
//
|
||||
//Also, RespondWithProto can be given an optional http.Header. The headers defined therein will be added to the response headers.
|
||||
func RespondWithProto(statusCode int, message proto.Message, optionalHeader ...http.Header) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, req *http.Request) {
|
||||
data, err := proto.Marshal(message)
|
||||
Ω(err).ShouldNot(HaveOccurred())
|
||||
|
||||
var headers http.Header
|
||||
if len(optionalHeader) == 1 {
|
||||
headers = optionalHeader[0]
|
||||
} else {
|
||||
headers = make(http.Header)
|
||||
}
|
||||
if _, found := headers["Content-Type"]; !found {
|
||||
headers["Content-Type"] = []string{"application/x-protobuf"}
|
||||
}
|
||||
copyHeader(headers, w.Header())
|
||||
|
||||
w.WriteHeader(statusCode)
|
||||
w.Write(data)
|
||||
}
|
||||
}
|
3
Godeps/_workspace/src/github.com/onsi/gomega/ghttp/protobuf/protobuf.go
generated
vendored
@ -1,3 +0,0 @@
|
||||
package protobuf
|
||||
|
||||
//go:generate protoc --go_out=. simple_message.proto
|
55
Godeps/_workspace/src/github.com/onsi/gomega/ghttp/protobuf/simple_message.pb.go
generated
vendored
@ -1,55 +0,0 @@
|
||||
// Code generated by protoc-gen-go.
|
||||
// source: simple_message.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package protobuf is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
simple_message.proto
|
||||
|
||||
It has these top-level messages:
|
||||
SimpleMessage
|
||||
*/
|
||||
package protobuf
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
type SimpleMessage struct {
|
||||
Description *string `protobuf:"bytes,1,req,name=description" json:"description,omitempty"`
|
||||
Id *int32 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
|
||||
Metadata *string `protobuf:"bytes,3,opt,name=metadata" json:"metadata,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SimpleMessage) Reset() { *m = SimpleMessage{} }
|
||||
func (m *SimpleMessage) String() string { return proto.CompactTextString(m) }
|
||||
func (*SimpleMessage) ProtoMessage() {}
|
||||
|
||||
func (m *SimpleMessage) GetDescription() string {
|
||||
if m != nil && m.Description != nil {
|
||||
return *m.Description
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *SimpleMessage) GetId() int32 {
|
||||
if m != nil && m.Id != nil {
|
||||
return *m.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *SimpleMessage) GetMetadata() string {
|
||||
if m != nil && m.Metadata != nil {
|
||||
return *m.Metadata
|
||||
}
|
||||
return ""
|
||||
}
|
9
Godeps/_workspace/src/github.com/onsi/gomega/ghttp/protobuf/simple_message.proto
generated
vendored
@ -1,9 +0,0 @@
|
||||
syntax = "proto2";
|
||||
|
||||
package protobuf;
|
||||
|
||||
message SimpleMessage {
|
||||
required string description = 1;
|
||||
required int32 id = 2;
|
||||
optional string metadata = 3;
|
||||
}
|
368
Godeps/_workspace/src/github.com/onsi/gomega/ghttp/test_server.go
generated
vendored
@ -1,368 +0,0 @@
|
||||
/*
|
||||
Package ghttp supports testing HTTP clients by providing a test server (simply a thin wrapper around httptest's server) that supports
|
||||
registering multiple handlers. Incoming requests are not routed between the different handlers
|
||||
- rather it is merely the order of the handlers that matters. The first request is handled by the first
|
||||
registered handler, the second request by the second handler, etc.
|
||||
|
||||
The intent here is to have each handler *verify* that the incoming request is valid. To accomplish this, ghttp
|
||||
also provides a collection of bite-size handlers that each perform one aspect of request verification. These can
|
||||
be composed together and registered with a ghttp server. The result is an expressive language for describing
|
||||
the requests generated by the client under test.
|
||||
|
||||
Here's a simple example, note that the server handler is only defined in one BeforeEach and then modified, as required, by the nested BeforeEaches.
|
||||
A more comprehensive example is available at https://onsi.github.io/gomega/#_testing_http_clients
|
||||
|
||||
var _ = Describe("A Sprockets Client", func() {
|
||||
var server *ghttp.Server
|
||||
var client *SprocketClient
|
||||
BeforeEach(func() {
|
||||
server = ghttp.NewServer()
|
||||
client = NewSprocketClient(server.URL(), "skywalker", "tk427")
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
server.Close()
|
||||
})
|
||||
|
||||
Describe("fetching sprockets", func() {
|
||||
var statusCode int
|
||||
var sprockets []Sprocket
|
||||
BeforeEach(func() {
|
||||
statusCode = http.StatusOK
|
||||
sprockets = []Sprocket{}
|
||||
server.AppendHandlers(ghttp.CombineHandlers(
|
||||
ghttp.VerifyRequest("GET", "/sprockets"),
|
||||
ghttp.VerifyBasicAuth("skywalker", "tk427"),
|
||||
ghttp.RespondWithJSONEncodedPtr(&statusCode, &sprockets),
|
||||
))
|
||||
})
|
||||
|
||||
Context("when requesting all sprockets", func() {
|
||||
Context("when the response is succesful", func() {
|
||||
BeforeEach(func() {
|
||||
sprockets = []Sprocket{
|
||||
NewSprocket("Alfalfa"),
|
||||
NewSprocket("Banana"),
|
||||
}
|
||||
})
|
||||
|
||||
It("should return the returned sprockets", func() {
|
||||
Ω(client.Sprockets()).Should(Equal(sprockets))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when the response is missing", func() {
|
||||
BeforeEach(func() {
|
||||
statusCode = http.StatusNotFound
|
||||
})
|
||||
|
||||
It("should return an empty list of sprockets", func() {
|
||||
Ω(client.Sprockets()).Should(BeEmpty())
|
||||
})
|
||||
})
|
||||
|
||||
Context("when the response fails to authenticate", func() {
|
||||
BeforeEach(func() {
|
||||
statusCode = http.StatusUnauthorized
|
||||
})
|
||||
|
||||
It("should return an AuthenticationError error", func() {
|
||||
sprockets, err := client.Sprockets()
|
||||
Ω(sprockets).Should(BeEmpty())
|
||||
Ω(err).Should(MatchError(AuthenticationError))
|
||||
})
|
||||
})
|
||||
|
||||
Context("when the response is a server failure", func() {
|
||||
BeforeEach(func() {
|
||||
statusCode = http.StatusInternalServerError
|
||||
})
|
||||
|
||||
It("should return an InternalError error", func() {
|
||||
sprockets, err := client.Sprockets()
|
||||
Ω(sprockets).Should(BeEmpty())
|
||||
Ω(err).Should(MatchError(InternalError))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("when requesting some sprockets", func() {
|
||||
BeforeEach(func() {
|
||||
sprockets = []Sprocket{
|
||||
NewSprocket("Alfalfa"),
|
||||
NewSprocket("Banana"),
|
||||
}
|
||||
|
||||
server.WrapHandler(0, ghttp.VerifyRequest("GET", "/sprockets", "filter=FOOD"))
|
||||
})
|
||||
|
||||
It("should make the request with a filter", func() {
|
||||
Ω(client.Sprockets("food")).Should(Equal(sprockets))
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
*/
|
||||
package ghttp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func new() *Server {
|
||||
return &Server{
|
||||
AllowUnhandledRequests: false,
|
||||
UnhandledRequestStatusCode: http.StatusInternalServerError,
|
||||
writeLock: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
type routedHandler struct {
|
||||
method string
|
||||
pathRegexp *regexp.Regexp
|
||||
path string
|
||||
handler http.HandlerFunc
|
||||
}
|
||||
|
||||
// NewServer returns a new `*ghttp.Server` that wraps an `httptest` server. The server is started automatically.
|
||||
func NewServer() *Server {
|
||||
s := new()
|
||||
s.HTTPTestServer = httptest.NewServer(s)
|
||||
return s
|
||||
}
|
||||
|
||||
// NewUnstartedServer returns a new, unstarted, `*ghttp.Server`. Useful for specifying a custom listener on `server.HTTPTestServer`.
|
||||
func NewUnstartedServer() *Server {
|
||||
s := new()
|
||||
s.HTTPTestServer = httptest.NewUnstartedServer(s)
|
||||
return s
|
||||
}
|
||||
|
||||
// NewTLSServer returns a new `*ghttp.Server` that wraps an `httptest` TLS server. The server is started automatically.
|
||||
func NewTLSServer() *Server {
|
||||
s := new()
|
||||
s.HTTPTestServer = httptest.NewTLSServer(s)
|
||||
return s
|
||||
}
|
||||
|
||||
type Server struct {
|
||||
//The underlying httptest server
|
||||
HTTPTestServer *httptest.Server
|
||||
|
||||
//Defaults to false. If set to true, the Server will allow more requests than there are registered handlers.
|
||||
AllowUnhandledRequests bool
|
||||
|
||||
//The status code returned when receiving an unhandled request.
|
||||
//Defaults to http.StatusInternalServerError.
|
||||
//Only applies if AllowUnhandledRequests is true
|
||||
UnhandledRequestStatusCode int
|
||||
|
||||
//If provided, ghttp will log about each request received to the provided io.Writer
|
||||
//Defaults to nil
|
||||
//If you're using Ginkgo, set this to GinkgoWriter to get improved output during failures
|
||||
Writer io.Writer
|
||||
|
||||
receivedRequests []*http.Request
|
||||
requestHandlers []http.HandlerFunc
|
||||
routedHandlers []routedHandler
|
||||
|
||||
writeLock *sync.Mutex
|
||||
calls int
|
||||
}
|
||||
|
||||
//Start() starts an unstarted ghttp server. It is a catastrophic error to call Start more than once (thanks, httptest).
func (s *Server) Start() {
    s.HTTPTestServer.Start()
}

//URL() returns a url that will hit the server
func (s *Server) URL() string {
    return s.HTTPTestServer.URL
}

//Addr() returns the address on which the server is listening.
func (s *Server) Addr() string {
    return s.HTTPTestServer.Listener.Addr().String()
}

//Close() should be called at the end of each test. It spins down and cleans up the test server.
func (s *Server) Close() {
    s.writeLock.Lock()
    defer s.writeLock.Unlock()

    server := s.HTTPTestServer
    s.HTTPTestServer = nil
    server.Close()
}

//ServeHTTP() makes Server an http.Handler
//When the server receives a request it handles the request in the following order:
//
//1. If the request matches a handler registered with RouteToHandler, that handler is called.
//2. Otherwise, if there are handlers registered via AppendHandlers, those handlers are called in order.
//3. If all registered handlers have been called then:
//   a) If AllowUnhandledRequests is true, the request will be handled with response code of UnhandledRequestStatusCode
//   b) If AllowUnhandledRequests is false, the request will not be handled and the current test will be marked as failed.
func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
    s.writeLock.Lock()
    defer func() {
        e := recover()
        if e != nil {
            w.WriteHeader(http.StatusInternalServerError)
        }

        //If the handler panics GHTTP will silently succeed. This is bad™.
        //To catch this case we need to fail the test if the handler has panicked.
        //However, if the handler is panicking because Ginkgo is causing it to panic (i.e. an assertion failed)
        //then we shouldn't double-report the error as this will confuse people.

        //So: step 1, if this is a Ginkgo panic - do nothing, Ginkgo's aware of the failure
        eAsString, ok := e.(string)
        if ok && strings.Contains(eAsString, "defer GinkgoRecover()") {
            return
        }

        //If we're here, we have to do step 2: assert that the error is nil. This assertion will
        //allow us to fail the test suite (note: we can't call Fail since Gomega is not allowed to import Ginkgo).
        //Since a failed assertion throws a panic, and we are likely in a goroutine, we need to defer within our defer!
        defer func() {
            recover()
        }()
        Ω(e).Should(BeNil(), "Handler Panicked")
    }()

    if s.Writer != nil {
        s.Writer.Write([]byte(fmt.Sprintf("GHTTP Received Request: %s - %s\n", req.Method, req.URL)))
    }

    s.receivedRequests = append(s.receivedRequests, req)
    if routedHandler, ok := s.handlerForRoute(req.Method, req.URL.Path); ok {
        s.writeLock.Unlock()
        routedHandler(w, req)
    } else if s.calls < len(s.requestHandlers) {
        h := s.requestHandlers[s.calls]
        s.calls++
        s.writeLock.Unlock()
        h(w, req)
    } else {
        s.writeLock.Unlock()
        if s.AllowUnhandledRequests {
            ioutil.ReadAll(req.Body)
            req.Body.Close()
            w.WriteHeader(s.UnhandledRequestStatusCode)
        } else {
            Ω(req).Should(BeNil(), "Received Unhandled Request")
        }
    }
}

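A short sketch of the dispatch order described above (hypothetical routes, reusing the server variable and "net/http" import from the earlier example): a route registered with RouteToHandler always wins, appended handlers are consumed one per request in order, and anything beyond that falls through to the unhandled-request behaviour.

    server.RouteToHandler("GET", "/health", func(w http.ResponseWriter, req *http.Request) {
        w.WriteHeader(http.StatusOK) // every GET /health hits this handler, no matter how often
    })
    server.AppendHandlers(
        ghttp.VerifyRequest("GET", "/sprockets"), // handles the 1st non-routed request
        ghttp.VerifyRequest("GET", "/widgets"),   // handles the 2nd non-routed request
    )
    // A 3rd non-routed request is answered with UnhandledRequestStatusCode when
    // AllowUnhandledRequests is true, and fails the current test otherwise.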
//ReceivedRequests returns all the requests received by the server (both handled and unhandled requests)
func (s *Server) ReceivedRequests() []*http.Request {
    s.writeLock.Lock()
    defer s.writeLock.Unlock()

    return s.receivedRequests
}

//RouteToHandler can be used to register handlers that will always handle requests that match
//the passed in method and path.
//
//The path may be either a string or a *regexp.Regexp.
func (s *Server) RouteToHandler(method string, path interface{}, handler http.HandlerFunc) {
    s.writeLock.Lock()
    defer s.writeLock.Unlock()

    rh := routedHandler{
        method:  method,
        handler: handler,
    }

    switch p := path.(type) {
    case *regexp.Regexp:
        rh.pathRegexp = p
    case string:
        rh.path = p
    default:
        panic("path must be a string or a regular expression")
    }

    for i, existingRH := range s.routedHandlers {
        if existingRH.method == method &&
            reflect.DeepEqual(existingRH.pathRegexp, rh.pathRegexp) &&
            existingRH.path == rh.path {
            s.routedHandlers[i] = rh
            return
        }
    }
    s.routedHandlers = append(s.routedHandlers, rh)
}

func (s *Server) handlerForRoute(method string, path string) (http.HandlerFunc, bool) {
    for _, rh := range s.routedHandlers {
        if rh.method == method {
            if rh.pathRegexp != nil {
                if rh.pathRegexp.Match([]byte(path)) {
                    return rh.handler, true
                }
            } else if rh.path == path {
                return rh.handler, true
            }
        }
    }

    return nil, false
}

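As the type switch above shows, the path argument accepts either a literal string or a *regexp.Regexp. A sketch of both forms (routes are illustrative; assumes the same server variable plus "net/http" and "regexp" imports):

    server.RouteToHandler("GET", "/sprockets/42", func(w http.ResponseWriter, req *http.Request) {
        w.WriteHeader(http.StatusOK) // exact-match string path
    })
    server.RouteToHandler("DELETE", regexp.MustCompile(`/sprockets/\d+`), func(w http.ResponseWriter, req *http.Request) {
        w.WriteHeader(http.StatusNoContent) // any path matching the regular expression
    })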
//AppendHandlers appends http.HandlerFuncs to the server's list of registered handlers. The first incoming request is handled by the first handler, the second by the second, etc...
func (s *Server) AppendHandlers(handlers ...http.HandlerFunc) {
    s.writeLock.Lock()
    defer s.writeLock.Unlock()

    s.requestHandlers = append(s.requestHandlers, handlers...)
}

//SetHandler overrides the registered handler at the passed in index with the passed in handler.
//This is useful, for example, when a server has been set up in a shared context, but must be tweaked
//for a particular test.
func (s *Server) SetHandler(index int, handler http.HandlerFunc) {
    s.writeLock.Lock()
    defer s.writeLock.Unlock()

    s.requestHandlers[index] = handler
}

//GetHandler returns the handler registered at the passed in index.
func (s *Server) GetHandler(index int) http.HandlerFunc {
    s.writeLock.Lock()
    defer s.writeLock.Unlock()

    return s.requestHandlers[index]
}

//WrapHandler combines the passed in handler with the handler registered at the passed in index.
//This is useful, for example, when a server has been set up in a shared context but must be tweaked
//for a particular test.
//
//If the currently registered handler is A, and the new passed in handler is B, then
//WrapHandler generates a new handler that first calls A, then calls B, and assigns it to the passed in index.
func (s *Server) WrapHandler(index int, handler http.HandlerFunc) {
    existingHandler := s.GetHandler(index)
    s.SetHandler(index, CombineHandlers(existingHandler, handler))
}

func (s *Server) CloseClientConnections() {
    s.writeLock.Lock()
    defer s.writeLock.Unlock()

    s.HTTPTestServer.CloseClientConnections()
}
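Given the GetHandler/SetHandler pair above, WrapHandler is just a convenience around CombineHandlers; the two snippets below are equivalent (a sketch, assuming handler 0 was already registered via AppendHandlers and extra is some http.HandlerFunc):

    server.WrapHandler(0, extra)

    server.SetHandler(0, ghttp.CombineHandlers(server.GetHandler(0), extra))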
23
Godeps/_workspace/src/github.com/onsi/gomega/internal/fakematcher/fake_matcher.go
generated
vendored
@ -1,23 +0,0 @@
|
||||
package fakematcher
|
||||
|
||||
import "fmt"
|
||||
|
||||
type FakeMatcher struct {
|
||||
ReceivedActual interface{}
|
||||
MatchesToReturn bool
|
||||
ErrToReturn error
|
||||
}
|
||||
|
||||
func (matcher *FakeMatcher) Match(actual interface{}) (bool, error) {
|
||||
matcher.ReceivedActual = actual
|
||||
|
||||
return matcher.MatchesToReturn, matcher.ErrToReturn
|
||||
}
|
||||
|
||||
func (matcher *FakeMatcher) FailureMessage(actual interface{}) string {
|
||||
return fmt.Sprintf("positive: %v", actual)
|
||||
}
|
||||
|
||||
func (matcher *FakeMatcher) NegatedFailureMessage(actual interface{}) string {
|
||||
return fmt.Sprintf("negative: %v", actual)
|
||||
}
|
20
Godeps/_workspace/src/github.com/onsi/gomega/matchers/support/goraph/MIT.LICENSE
generated
vendored
@ -1,20 +0,0 @@
|
||||
Copyright (c) 2014 Amit Kumar Gupta
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
45
Godeps/_workspace/src/github.com/vishvananda/netlink/addr_test.go
generated
vendored
@ -1,45 +0,0 @@
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAddrAddDel(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
link, err := LinkByName("lo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
addr, err := ParseAddr("127.1.1.1/24 local")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err = AddrAdd(link, addr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
addrs, err := AddrList(link, FAMILY_ALL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(addrs) != 1 || !addr.Equal(addrs[0]) || addrs[0].Label != addr.Label {
|
||||
t.Fatal("Address not added properly")
|
||||
}
|
||||
|
||||
if err = AddrDel(link, addr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
addrs, err = AddrList(link, FAMILY_ALL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(addrs) != 0 {
|
||||
t.Fatal("Address not removed properly")
|
||||
}
|
||||
}
|
102
Godeps/_workspace/src/github.com/vishvananda/netlink/class_test.go
generated
vendored
@ -1,102 +0,0 @@
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestClassAddDel(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
if err := LinkAdd(&Ifb{LinkAttrs{Name: "foo"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := LinkAdd(&Ifb{LinkAttrs{Name: "bar"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
link, err := LinkByName("foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := LinkSetUp(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
attrs := QdiscAttrs{
|
||||
LinkIndex: link.Attrs().Index,
|
||||
Handle: MakeHandle(0xffff, 0),
|
||||
Parent: HANDLE_ROOT,
|
||||
}
|
||||
qdisc := NewHtb(attrs)
|
||||
if err := QdiscAdd(qdisc); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qdiscs, err := QdiscList(link)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(qdiscs) != 1 {
|
||||
t.Fatal("Failed to add qdisc")
|
||||
}
|
||||
_, ok := qdiscs[0].(*Htb)
|
||||
if !ok {
|
||||
t.Fatal("Qdisc is the wrong type")
|
||||
}
|
||||
|
||||
classattrs := ClassAttrs{
|
||||
LinkIndex: link.Attrs().Index,
|
||||
Parent: MakeHandle(0xffff, 0),
|
||||
Handle: MakeHandle(0xffff, 2),
|
||||
}
|
||||
|
||||
htbclassattrs := HtbClassAttrs{
|
||||
Rate: 1234000,
|
||||
Cbuffer: 1690,
|
||||
}
|
||||
class := NewHtbClass(classattrs, htbclassattrs)
|
||||
if err := ClassAdd(class); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
classes, err := ClassList(link, MakeHandle(0xffff, 2))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(classes) != 1 {
|
||||
t.Fatal("Failed to add class")
|
||||
}
|
||||
|
||||
htb, ok := classes[0].(*HtbClass)
|
||||
if !ok {
|
||||
t.Fatal("Class is the wrong type")
|
||||
}
|
||||
if htb.Rate != class.Rate {
|
||||
t.Fatal("Rate doesn't match")
|
||||
}
|
||||
if htb.Ceil != class.Ceil {
|
||||
t.Fatal("Ceil doesn't match")
|
||||
}
|
||||
if htb.Buffer != class.Buffer {
|
||||
t.Fatal("Buffer doesn't match")
|
||||
}
|
||||
if htb.Cbuffer != class.Cbuffer {
|
||||
t.Fatal("Cbuffer doesn't match")
|
||||
}
|
||||
if err := ClassDel(class); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
classes, err = ClassList(link, MakeHandle(0xffff, 0))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(classes) != 0 {
|
||||
t.Fatal("Failed to remove class")
|
||||
}
|
||||
if err := QdiscDel(qdisc); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qdiscs, err = QdiscList(link)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(qdiscs) != 0 {
|
||||
t.Fatal("Failed to remove qdisc")
|
||||
}
|
||||
}
|
91
Godeps/_workspace/src/github.com/vishvananda/netlink/filter_test.go
generated
vendored
@ -1,91 +0,0 @@
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFilterAddDel(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
if err := LinkAdd(&Ifb{LinkAttrs{Name: "foo"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := LinkAdd(&Ifb{LinkAttrs{Name: "bar"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
link, err := LinkByName("foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := LinkSetUp(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
redir, err := LinkByName("bar")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := LinkSetUp(redir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qdisc := &Ingress{
|
||||
QdiscAttrs: QdiscAttrs{
|
||||
LinkIndex: link.Attrs().Index,
|
||||
Handle: MakeHandle(0xffff, 0),
|
||||
Parent: HANDLE_INGRESS,
|
||||
},
|
||||
}
|
||||
if err := QdiscAdd(qdisc); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qdiscs, err := QdiscList(link)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(qdiscs) != 1 {
|
||||
t.Fatal("Failed to add qdisc")
|
||||
}
|
||||
_, ok := qdiscs[0].(*Ingress)
|
||||
if !ok {
|
||||
t.Fatal("Qdisc is the wrong type")
|
||||
}
|
||||
filter := &U32{
|
||||
FilterAttrs: FilterAttrs{
|
||||
LinkIndex: link.Attrs().Index,
|
||||
Parent: MakeHandle(0xffff, 0),
|
||||
Priority: 1,
|
||||
Protocol: syscall.ETH_P_IP,
|
||||
},
|
||||
RedirIndex: redir.Attrs().Index,
|
||||
}
|
||||
if err := FilterAdd(filter); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
filters, err := FilterList(link, MakeHandle(0xffff, 0))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(filters) != 1 {
|
||||
t.Fatal("Failed to add filter")
|
||||
}
|
||||
if err := FilterDel(filter); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
filters, err = FilterList(link, MakeHandle(0xffff, 0))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(filters) != 0 {
|
||||
t.Fatal("Failed to remove filter")
|
||||
}
|
||||
if err := QdiscDel(qdisc); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qdiscs, err = QdiscList(link)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(qdiscs) != 0 {
|
||||
t.Fatal("Failed to remove qdisc")
|
||||
}
|
||||
}
|
724
Godeps/_workspace/src/github.com/vishvananda/netlink/link_test.go
generated
vendored
@ -1,724 +0,0 @@
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/vishvananda/netns"
|
||||
)
|
||||
|
||||
const (
|
||||
testTxQLen int = 100
|
||||
defaultTxQLen int = 1000
|
||||
)
|
||||
|
||||
func testLinkAddDel(t *testing.T, link Link) {
|
||||
links, err := LinkList()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
num := len(links)
|
||||
|
||||
if err := LinkAdd(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
base := link.Attrs()
|
||||
|
||||
result, err := LinkByName(base.Name)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rBase := result.Attrs()
|
||||
|
||||
if vlan, ok := link.(*Vlan); ok {
|
||||
other, ok := result.(*Vlan)
|
||||
if !ok {
|
||||
t.Fatal("Result of create is not a vlan")
|
||||
}
|
||||
if vlan.VlanId != other.VlanId {
|
||||
t.Fatal("Link.VlanId id doesn't match")
|
||||
}
|
||||
}
|
||||
|
||||
if rBase.ParentIndex == 0 && base.ParentIndex != 0 {
|
||||
t.Fatal("Created link doesn't have a Parent but it should")
|
||||
} else if rBase.ParentIndex != 0 && base.ParentIndex == 0 {
|
||||
t.Fatal("Created link has a Parent but it shouldn't")
|
||||
} else if rBase.ParentIndex != 0 && base.ParentIndex != 0 {
|
||||
if rBase.ParentIndex != base.ParentIndex {
|
||||
t.Fatal("Link.ParentIndex doesn't match")
|
||||
}
|
||||
}
|
||||
|
||||
if veth, ok := result.(*Veth); ok {
|
||||
if rBase.TxQLen != base.TxQLen {
|
||||
t.Fatalf("qlen is %d, should be %d", rBase.TxQLen, base.TxQLen)
|
||||
}
|
||||
if rBase.MTU != base.MTU {
|
||||
t.Fatalf("MTU is %d, should be %d", rBase.MTU, base.MTU)
|
||||
}
|
||||
|
||||
if veth.PeerName != "" {
|
||||
var peer *Veth
|
||||
other, err := LinkByName(veth.PeerName)
|
||||
if err != nil {
|
||||
t.Fatalf("Peer %s not created", veth.PeerName)
|
||||
}
|
||||
if peer, ok = other.(*Veth); !ok {
|
||||
t.Fatalf("Peer %s is incorrect type", veth.PeerName)
|
||||
}
|
||||
if peer.TxQLen != testTxQLen {
|
||||
t.Fatalf("TxQLen of peer is %d, should be %d", peer.TxQLen, testTxQLen)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if vxlan, ok := link.(*Vxlan); ok {
|
||||
other, ok := result.(*Vxlan)
|
||||
if !ok {
|
||||
t.Fatal("Result of create is not a vxlan")
|
||||
}
|
||||
compareVxlan(t, vxlan, other)
|
||||
}
|
||||
|
||||
if ipv, ok := link.(*IPVlan); ok {
|
||||
other, ok := result.(*IPVlan)
|
||||
if !ok {
|
||||
t.Fatal("Result of create is not a ipvlan")
|
||||
}
|
||||
if ipv.Mode != other.Mode {
|
||||
t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, ipv.Mode)
|
||||
}
|
||||
}
|
||||
|
||||
if macv, ok := link.(*Macvlan); ok {
|
||||
other, ok := result.(*Macvlan)
|
||||
if !ok {
|
||||
t.Fatal("Result of create is not a macvlan")
|
||||
}
|
||||
if macv.Mode != other.Mode {
|
||||
t.Fatalf("Got unexpected mode: %d, expected: %d", other.Mode, macv.Mode)
|
||||
}
|
||||
}
|
||||
|
||||
if err = LinkDel(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
links, err = LinkList()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(links) != num {
|
||||
t.Fatal("Link not removed properly")
|
||||
}
|
||||
}
|
||||
|
||||
func compareVxlan(t *testing.T, expected, actual *Vxlan) {
|
||||
|
||||
if actual.VxlanId != expected.VxlanId {
|
||||
t.Fatal("Vxlan.VxlanId doesn't match")
|
||||
}
|
||||
if expected.SrcAddr != nil && !actual.SrcAddr.Equal(expected.SrcAddr) {
|
||||
t.Fatal("Vxlan.SrcAddr doesn't match")
|
||||
}
|
||||
if expected.Group != nil && !actual.Group.Equal(expected.Group) {
|
||||
t.Fatal("Vxlan.Group doesn't match")
|
||||
}
|
||||
if expected.TTL != -1 && actual.TTL != expected.TTL {
|
||||
t.Fatal("Vxlan.TTL doesn't match")
|
||||
}
|
||||
if expected.TOS != -1 && actual.TOS != expected.TOS {
|
||||
t.Fatal("Vxlan.TOS doesn't match")
|
||||
}
|
||||
if actual.Learning != expected.Learning {
|
||||
t.Fatal("Vxlan.Learning doesn't match")
|
||||
}
|
||||
if actual.Proxy != expected.Proxy {
|
||||
t.Fatal("Vxlan.Proxy doesn't match")
|
||||
}
|
||||
if actual.RSC != expected.RSC {
|
||||
t.Fatal("Vxlan.RSC doesn't match")
|
||||
}
|
||||
if actual.L2miss != expected.L2miss {
|
||||
t.Fatal("Vxlan.L2miss doesn't match")
|
||||
}
|
||||
if actual.L3miss != expected.L3miss {
|
||||
t.Fatal("Vxlan.L3miss doesn't match")
|
||||
}
|
||||
if actual.GBP != expected.GBP {
|
||||
t.Fatal("Vxlan.GBP doesn't match")
|
||||
}
|
||||
if expected.NoAge {
|
||||
if !actual.NoAge {
|
||||
t.Fatal("Vxlan.NoAge doesn't match")
|
||||
}
|
||||
} else if expected.Age > 0 && actual.Age != expected.Age {
|
||||
t.Fatal("Vxlan.Age doesn't match")
|
||||
}
|
||||
if expected.Limit > 0 && actual.Limit != expected.Limit {
|
||||
t.Fatal("Vxlan.Limit doesn't match")
|
||||
}
|
||||
if expected.Port > 0 && actual.Port != expected.Port {
|
||||
t.Fatal("Vxlan.Port doesn't match")
|
||||
}
|
||||
if expected.PortLow > 0 || expected.PortHigh > 0 {
|
||||
if actual.PortLow != expected.PortLow {
|
||||
t.Fatal("Vxlan.PortLow doesn't match")
|
||||
}
|
||||
if actual.PortHigh != expected.PortHigh {
|
||||
t.Fatal("Vxlan.PortHigh doesn't match")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkAddDelDummy(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
testLinkAddDel(t, &Dummy{LinkAttrs{Name: "foo"}})
|
||||
}
|
||||
|
||||
func TestLinkAddDelIfb(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
testLinkAddDel(t, &Ifb{LinkAttrs{Name: "foo"}})
|
||||
}
|
||||
|
||||
func TestLinkAddDelBridge(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
testLinkAddDel(t, &Bridge{LinkAttrs{Name: "foo", MTU: 1400}})
|
||||
}
|
||||
|
||||
func TestLinkAddDelVlan(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
parent := &Dummy{LinkAttrs{Name: "foo"}}
|
||||
if err := LinkAdd(parent); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testLinkAddDel(t, &Vlan{LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index}, 900})
|
||||
|
||||
if err := LinkDel(parent); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkAddDelMacvlan(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
parent := &Dummy{LinkAttrs{Name: "foo"}}
|
||||
if err := LinkAdd(parent); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testLinkAddDel(t, &Macvlan{
|
||||
LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index},
|
||||
Mode: MACVLAN_MODE_PRIVATE,
|
||||
})
|
||||
|
||||
if err := LinkDel(parent); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkAddDelMacvtap(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
parent := &Dummy{LinkAttrs{Name: "foo"}}
|
||||
if err := LinkAdd(parent); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testLinkAddDel(t, &Macvtap{
|
||||
Macvlan: Macvlan{
|
||||
LinkAttrs: LinkAttrs{Name: "bar", ParentIndex: parent.Attrs().Index},
|
||||
Mode: MACVLAN_MODE_PRIVATE,
|
||||
},
|
||||
})
|
||||
|
||||
if err := LinkDel(parent); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkAddDelVeth(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
testLinkAddDel(t, &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar"})
|
||||
}
|
||||
|
||||
func TestLinkAddVethWithDefaultTxQLen(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
la := NewLinkAttrs()
|
||||
la.Name = "foo"
|
||||
|
||||
veth := &Veth{LinkAttrs: la, PeerName: "bar"}
|
||||
if err := LinkAdd(veth); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
link, err := LinkByName("foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if veth, ok := link.(*Veth); !ok {
|
||||
t.Fatalf("unexpected link type: %T", link)
|
||||
} else {
|
||||
if veth.TxQLen != defaultTxQLen {
|
||||
t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, defaultTxQLen)
|
||||
}
|
||||
}
|
||||
peer, err := LinkByName("bar")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if veth, ok := peer.(*Veth); !ok {
|
||||
t.Fatalf("unexpected link type: %T", link)
|
||||
} else {
|
||||
if veth.TxQLen != defaultTxQLen {
|
||||
t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, defaultTxQLen)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkAddVethWithZeroTxQLen(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
la := NewLinkAttrs()
|
||||
la.Name = "foo"
|
||||
la.TxQLen = 0
|
||||
|
||||
veth := &Veth{LinkAttrs: la, PeerName: "bar"}
|
||||
if err := LinkAdd(veth); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
link, err := LinkByName("foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if veth, ok := link.(*Veth); !ok {
|
||||
t.Fatalf("unexpected link type: %T", link)
|
||||
} else {
|
||||
if veth.TxQLen != 0 {
|
||||
t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, 0)
|
||||
}
|
||||
}
|
||||
peer, err := LinkByName("bar")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if veth, ok := peer.(*Veth); !ok {
|
||||
t.Fatalf("unexpected link type: %T", link)
|
||||
} else {
|
||||
if veth.TxQLen != 0 {
|
||||
t.Fatalf("TxQLen is %d, should be %d", veth.TxQLen, 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkAddDummyWithTxQLen(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
la := NewLinkAttrs()
|
||||
la.Name = "foo"
|
||||
la.TxQLen = 1500
|
||||
|
||||
dummy := &Dummy{LinkAttrs: la}
|
||||
if err := LinkAdd(dummy); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
link, err := LinkByName("foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if dummy, ok := link.(*Dummy); !ok {
|
||||
t.Fatalf("unexpected link type: %T", link)
|
||||
} else {
|
||||
if dummy.TxQLen != 1500 {
|
||||
t.Fatalf("TxQLen is %d, should be %d", dummy.TxQLen, 1500)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkAddDelBridgeMaster(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
master := &Bridge{LinkAttrs{Name: "foo"}}
|
||||
if err := LinkAdd(master); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testLinkAddDel(t, &Dummy{LinkAttrs{Name: "bar", MasterIndex: master.Attrs().Index}})
|
||||
|
||||
if err := LinkDel(master); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkSetUnsetResetMaster(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
master := &Bridge{LinkAttrs{Name: "foo"}}
|
||||
if err := LinkAdd(master); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
newmaster := &Bridge{LinkAttrs{Name: "bar"}}
|
||||
if err := LinkAdd(newmaster); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
slave := &Dummy{LinkAttrs{Name: "baz"}}
|
||||
if err := LinkAdd(slave); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := LinkSetMaster(slave, master); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
link, err := LinkByName("baz")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if link.Attrs().MasterIndex != master.Attrs().Index {
|
||||
t.Fatal("Master not set properly")
|
||||
}
|
||||
|
||||
if err := LinkSetMaster(slave, newmaster); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
link, err = LinkByName("baz")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if link.Attrs().MasterIndex != newmaster.Attrs().Index {
|
||||
t.Fatal("Master not reset properly")
|
||||
}
|
||||
|
||||
if err := LinkSetMaster(slave, nil); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
link, err = LinkByName("baz")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if link.Attrs().MasterIndex != 0 {
|
||||
t.Fatal("Master not unset properly")
|
||||
}
|
||||
if err := LinkDel(slave); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := LinkDel(newmaster); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := LinkDel(master); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkSetNs(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
basens, err := netns.Get()
|
||||
if err != nil {
|
||||
t.Fatal("Failed to get basens")
|
||||
}
|
||||
defer basens.Close()
|
||||
|
||||
newns, err := netns.New()
|
||||
if err != nil {
|
||||
t.Fatal("Failed to create newns")
|
||||
}
|
||||
defer newns.Close()
|
||||
|
||||
link := &Veth{LinkAttrs{Name: "foo"}, "bar"}
|
||||
if err := LinkAdd(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
peer, err := LinkByName("bar")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
LinkSetNsFd(peer, int(basens))
|
||||
if err != nil {
|
||||
t.Fatal("Failed to set newns for link")
|
||||
}
|
||||
|
||||
_, err = LinkByName("bar")
|
||||
if err == nil {
|
||||
t.Fatal("Link bar is still in newns")
|
||||
}
|
||||
|
||||
err = netns.Set(basens)
|
||||
if err != nil {
|
||||
t.Fatal("Failed to set basens")
|
||||
}
|
||||
|
||||
peer, err = LinkByName("bar")
|
||||
if err != nil {
|
||||
t.Fatal("Link is not in basens")
|
||||
}
|
||||
|
||||
if err := LinkDel(peer); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = netns.Set(newns)
|
||||
if err != nil {
|
||||
t.Fatal("Failed to set newns")
|
||||
}
|
||||
|
||||
_, err = LinkByName("foo")
|
||||
if err == nil {
|
||||
t.Fatal("Other half of veth pair not deleted")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestLinkAddDelVxlan(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
parent := &Dummy{
|
||||
LinkAttrs{Name: "foo"},
|
||||
}
|
||||
if err := LinkAdd(parent); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
vxlan := Vxlan{
|
||||
LinkAttrs: LinkAttrs{
|
||||
Name: "bar",
|
||||
},
|
||||
VxlanId: 10,
|
||||
VtepDevIndex: parent.Index,
|
||||
Learning: true,
|
||||
L2miss: true,
|
||||
L3miss: true,
|
||||
}
|
||||
|
||||
testLinkAddDel(t, &vxlan)
|
||||
if err := LinkDel(parent); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkAddDelIPVlanL2(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
parent := &Dummy{LinkAttrs{Name: "foo"}}
|
||||
if err := LinkAdd(parent); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ipv := IPVlan{
|
||||
LinkAttrs: LinkAttrs{
|
||||
Name: "bar",
|
||||
ParentIndex: parent.Index,
|
||||
},
|
||||
Mode: IPVLAN_MODE_L2,
|
||||
}
|
||||
|
||||
testLinkAddDel(t, &ipv)
|
||||
}
|
||||
|
||||
func TestLinkAddDelIPVlanL3(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
parent := &Dummy{LinkAttrs{Name: "foo"}}
|
||||
if err := LinkAdd(parent); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ipv := IPVlan{
|
||||
LinkAttrs: LinkAttrs{
|
||||
Name: "bar",
|
||||
ParentIndex: parent.Index,
|
||||
},
|
||||
Mode: IPVLAN_MODE_L3,
|
||||
}
|
||||
|
||||
testLinkAddDel(t, &ipv)
|
||||
}
|
||||
|
||||
func TestLinkAddDelIPVlanNoParent(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
ipv := IPVlan{
|
||||
LinkAttrs: LinkAttrs{
|
||||
Name: "bar",
|
||||
},
|
||||
Mode: IPVLAN_MODE_L3,
|
||||
}
|
||||
err := LinkAdd(&ipv)
|
||||
if err == nil {
|
||||
t.Fatal("Add should fail if ipvlan creating without ParentIndex")
|
||||
}
|
||||
if err.Error() != "Can't create ipvlan link without ParentIndex" {
|
||||
t.Fatalf("Error should be about missing ParentIndex, got %q", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkByIndex(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
dummy := &Dummy{LinkAttrs{Name: "dummy"}}
|
||||
if err := LinkAdd(dummy); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
found, err := LinkByIndex(dummy.Index)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if found.Attrs().Index != dummy.Attrs().Index {
|
||||
t.Fatalf("Indices don't match: %v != %v", found.Attrs().Index, dummy.Attrs().Index)
|
||||
}
|
||||
|
||||
LinkDel(dummy)
|
||||
|
||||
// test not found
|
||||
_, err = LinkByIndex(dummy.Attrs().Index)
|
||||
if err == nil {
|
||||
t.Fatalf("LinkByIndex(%v) found deleted link", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkSet(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
iface := &Dummy{LinkAttrs{Name: "foo"}}
|
||||
if err := LinkAdd(iface); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
link, err := LinkByName("foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = LinkSetName(link, "bar")
|
||||
if err != nil {
|
||||
t.Fatalf("Could not change interface name: %v", err)
|
||||
}
|
||||
|
||||
link, err = LinkByName("bar")
|
||||
if err != nil {
|
||||
t.Fatalf("Interface name not changed: %v", err)
|
||||
}
|
||||
|
||||
err = LinkSetMTU(link, 1400)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not set MTU: %v", err)
|
||||
}
|
||||
|
||||
link, err = LinkByName("bar")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if link.Attrs().MTU != 1400 {
|
||||
t.Fatal("MTU not changed!")
|
||||
}
|
||||
|
||||
addr, err := net.ParseMAC("00:12:34:56:78:AB")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = LinkSetHardwareAddr(link, addr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
link, err = LinkByName("bar")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(link.Attrs().HardwareAddr, addr) {
|
||||
t.Fatalf("hardware address not changed!")
|
||||
}
|
||||
}
|
||||
|
||||
func expectLinkUpdate(ch <-chan LinkUpdate, ifaceName string, up bool) bool {
|
||||
for {
|
||||
timeout := time.After(time.Minute)
|
||||
select {
|
||||
case update := <-ch:
|
||||
if ifaceName == update.Link.Attrs().Name && (update.IfInfomsg.Flags&syscall.IFF_UP != 0) == up {
|
||||
return true
|
||||
}
|
||||
case <-timeout:
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinkSubscribe(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
ch := make(chan LinkUpdate)
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
if err := LinkSubscribe(ch, done); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
link := &Veth{LinkAttrs{Name: "foo", TxQLen: testTxQLen, MTU: 1400}, "bar"}
|
||||
if err := LinkAdd(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !expectLinkUpdate(ch, "foo", false) {
|
||||
t.Fatal("Add update not received as expected")
|
||||
}
|
||||
|
||||
if err := LinkSetUp(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !expectLinkUpdate(ch, "foo", true) {
|
||||
t.Fatal("Link Up update not received as expected")
|
||||
}
|
||||
|
||||
if err := LinkDel(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !expectLinkUpdate(ch, "foo", false) {
|
||||
t.Fatal("Del update not received as expected")
|
||||
}
|
||||
}
|
104
Godeps/_workspace/src/github.com/vishvananda/netlink/neigh_test.go
generated
vendored
@ -1,104 +0,0 @@
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type arpEntry struct {
|
||||
ip net.IP
|
||||
mac net.HardwareAddr
|
||||
}
|
||||
|
||||
func parseMAC(s string) net.HardwareAddr {
|
||||
m, err := net.ParseMAC(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func dumpContains(dump []Neigh, e arpEntry) bool {
|
||||
for _, n := range dump {
|
||||
if n.IP.Equal(e.ip) && (n.State&NUD_INCOMPLETE) == 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func TestNeighAddDel(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
dummy := Dummy{LinkAttrs{Name: "neigh0"}}
|
||||
if err := LinkAdd(&dummy); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ensureIndex(dummy.Attrs())
|
||||
|
||||
arpTable := []arpEntry{
|
||||
{net.ParseIP("10.99.0.1"), parseMAC("aa:bb:cc:dd:00:01")},
|
||||
{net.ParseIP("10.99.0.2"), parseMAC("aa:bb:cc:dd:00:02")},
|
||||
{net.ParseIP("10.99.0.3"), parseMAC("aa:bb:cc:dd:00:03")},
|
||||
{net.ParseIP("10.99.0.4"), parseMAC("aa:bb:cc:dd:00:04")},
|
||||
{net.ParseIP("10.99.0.5"), parseMAC("aa:bb:cc:dd:00:05")},
|
||||
}
|
||||
|
||||
// Add the arpTable
|
||||
for _, entry := range arpTable {
|
||||
err := NeighAdd(&Neigh{
|
||||
LinkIndex: dummy.Index,
|
||||
State: NUD_REACHABLE,
|
||||
IP: entry.ip,
|
||||
HardwareAddr: entry.mac,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("Failed to NeighAdd: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Dump and see that all added entries are there
|
||||
dump, err := NeighList(dummy.Index, 0)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to NeighList: %v", err)
|
||||
}
|
||||
|
||||
for _, entry := range arpTable {
|
||||
if !dumpContains(dump, entry) {
|
||||
t.Errorf("Dump does not contain: %v", entry)
|
||||
}
|
||||
}
|
||||
|
||||
// Delete the arpTable
|
||||
for _, entry := range arpTable {
|
||||
err := NeighDel(&Neigh{
|
||||
LinkIndex: dummy.Index,
|
||||
IP: entry.ip,
|
||||
HardwareAddr: entry.mac,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("Failed to NeighDel: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: seems not working because of cache
|
||||
//// Dump and see that none of deleted entries are there
|
||||
//dump, err = NeighList(dummy.Index, 0)
|
||||
//if err != nil {
|
||||
//t.Errorf("Failed to NeighList: %v", err)
|
||||
//}
|
||||
|
||||
//for _, entry := range arpTable {
|
||||
//if dumpContains(dump, entry) {
|
||||
//t.Errorf("Dump contains: %v", entry)
|
||||
//}
|
||||
//}
|
||||
|
||||
if err := LinkDel(&dummy); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
34
Godeps/_workspace/src/github.com/vishvananda/netlink/netlink_test.go
generated
vendored
@ -1,34 +0,0 @@
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/vishvananda/netns"
|
||||
)
|
||||
|
||||
type tearDownNetlinkTest func()
|
||||
|
||||
func setUpNetlinkTest(t *testing.T) tearDownNetlinkTest {
|
||||
if os.Getuid() != 0 {
|
||||
msg := "Skipped test because it requires root privileges."
|
||||
log.Printf(msg)
|
||||
t.Skip(msg)
|
||||
}
|
||||
|
||||
// new temporary namespace so we don't pollute the host
|
||||
// lock thread since the namespace is thread local
|
||||
runtime.LockOSThread()
|
||||
var err error
|
||||
ns, err := netns.New()
|
||||
if err != nil {
|
||||
t.Fatal("Failed to create newns", ns)
|
||||
}
|
||||
|
||||
return func() {
|
||||
ns.Close()
|
||||
runtime.UnlockOSThread()
|
||||
}
|
||||
}
|
39
Godeps/_workspace/src/github.com/vishvananda/netlink/nl/addr_linux_test.go
generated
vendored
@ -1,39 +0,0 @@
|
||||
package nl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func (msg *IfAddrmsg) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
b[0] = msg.Family
|
||||
b[1] = msg.Prefixlen
|
||||
b[2] = msg.Flags
|
||||
b[3] = msg.Scope
|
||||
native.PutUint32(b[4:8], msg.Index)
|
||||
}
|
||||
|
||||
func (msg *IfAddrmsg) serializeSafe() []byte {
|
||||
len := syscall.SizeofIfAddrmsg
|
||||
b := make([]byte, len)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeIfAddrmsgSafe(b []byte) *IfAddrmsg {
|
||||
var msg = IfAddrmsg{}
|
||||
binary.Read(bytes.NewReader(b[0:syscall.SizeofIfAddrmsg]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestIfAddrmsgDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, syscall.SizeofIfAddrmsg)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeIfAddrmsgSafe(orig)
|
||||
msg := DeserializeIfAddrmsg(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
60
Godeps/_workspace/src/github.com/vishvananda/netlink/nl/nl_linux_test.go
generated
vendored
@ -1,60 +0,0 @@
|
||||
package nl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"reflect"
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type testSerializer interface {
|
||||
serializeSafe() []byte
|
||||
Serialize() []byte
|
||||
}
|
||||
|
||||
func testDeserializeSerialize(t *testing.T, orig []byte, safemsg testSerializer, msg testSerializer) {
|
||||
if !reflect.DeepEqual(safemsg, msg) {
|
||||
t.Fatal("Deserialization failed.\n", safemsg, "\n", msg)
|
||||
}
|
||||
safe := msg.serializeSafe()
|
||||
if !bytes.Equal(safe, orig) {
|
||||
t.Fatal("Safe serialization failed.\n", safe, "\n", orig)
|
||||
}
|
||||
b := msg.Serialize()
|
||||
if !bytes.Equal(b, safe) {
|
||||
t.Fatal("Serialization failed.\n", b, "\n", safe)
|
||||
}
|
||||
}
|
||||
|
||||
func (msg *IfInfomsg) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
b[0] = msg.Family
|
||||
b[1] = msg.X__ifi_pad
|
||||
native.PutUint16(b[2:4], msg.Type)
|
||||
native.PutUint32(b[4:8], uint32(msg.Index))
|
||||
native.PutUint32(b[8:12], msg.Flags)
|
||||
native.PutUint32(b[12:16], msg.Change)
|
||||
}
|
||||
|
||||
func (msg *IfInfomsg) serializeSafe() []byte {
|
||||
length := syscall.SizeofIfInfomsg
|
||||
b := make([]byte, length)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeIfInfomsgSafe(b []byte) *IfInfomsg {
|
||||
var msg = IfInfomsg{}
|
||||
binary.Read(bytes.NewReader(b[0:syscall.SizeofIfInfomsg]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestIfInfomsgDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, syscall.SizeofIfInfomsg)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeIfInfomsgSafe(orig)
|
||||
msg := DeserializeIfInfomsg(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
43
Godeps/_workspace/src/github.com/vishvananda/netlink/nl/route_linux_test.go
generated
vendored
@ -1,43 +0,0 @@
|
||||
package nl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func (msg *RtMsg) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
b[0] = msg.Family
|
||||
b[1] = msg.Dst_len
|
||||
b[2] = msg.Src_len
|
||||
b[3] = msg.Tos
|
||||
b[4] = msg.Table
|
||||
b[5] = msg.Protocol
|
||||
b[6] = msg.Scope
|
||||
b[7] = msg.Type
|
||||
native.PutUint32(b[8:12], msg.Flags)
|
||||
}
|
||||
|
||||
func (msg *RtMsg) serializeSafe() []byte {
|
||||
len := syscall.SizeofRtMsg
|
||||
b := make([]byte, len)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeRtMsgSafe(b []byte) *RtMsg {
|
||||
var msg = RtMsg{}
|
||||
binary.Read(bytes.NewReader(b[0:syscall.SizeofRtMsg]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestRtMsgDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, syscall.SizeofRtMsg)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeRtMsgSafe(orig)
|
||||
msg := DeserializeRtMsg(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
173
Godeps/_workspace/src/github.com/vishvananda/netlink/nl/tc_linux_test.go
generated
vendored
@ -1,173 +0,0 @@
|
||||
package nl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
)
|
||||
|
||||
/* TcMsg */
|
||||
func (msg *TcMsg) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
b[0] = msg.Family
|
||||
copy(b[1:4], msg.Pad[:])
|
||||
native.PutUint32(b[4:8], uint32(msg.Ifindex))
|
||||
native.PutUint32(b[8:12], msg.Handle)
|
||||
native.PutUint32(b[12:16], msg.Parent)
|
||||
native.PutUint32(b[16:20], msg.Info)
|
||||
}
|
||||
|
||||
func (msg *TcMsg) serializeSafe() []byte {
|
||||
length := SizeofTcMsg
|
||||
b := make([]byte, length)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeTcMsgSafe(b []byte) *TcMsg {
|
||||
var msg = TcMsg{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofTcMsg]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestTcMsgDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofTcMsg)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeTcMsgSafe(orig)
|
||||
msg := DeserializeTcMsg(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
/* TcActionMsg */
|
||||
func (msg *TcActionMsg) write(b []byte) {
|
||||
b[0] = msg.Family
|
||||
copy(b[1:4], msg.Pad[:])
|
||||
}
|
||||
|
||||
func (msg *TcActionMsg) serializeSafe() []byte {
|
||||
length := SizeofTcActionMsg
|
||||
b := make([]byte, length)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeTcActionMsgSafe(b []byte) *TcActionMsg {
|
||||
var msg = TcActionMsg{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofTcActionMsg]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestTcActionMsgDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofTcActionMsg)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeTcActionMsgSafe(orig)
|
||||
msg := DeserializeTcActionMsg(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
/* TcRateSpec */
|
||||
func (msg *TcRateSpec) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
b[0] = msg.CellLog
|
||||
b[1] = msg.Linklayer
|
||||
native.PutUint16(b[2:4], msg.Overhead)
|
||||
native.PutUint16(b[4:6], uint16(msg.CellAlign))
|
||||
native.PutUint16(b[6:8], msg.Mpu)
|
||||
native.PutUint32(b[8:12], msg.Rate)
|
||||
}
|
||||
|
||||
func (msg *TcRateSpec) serializeSafe() []byte {
|
||||
length := SizeofTcRateSpec
|
||||
b := make([]byte, length)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeTcRateSpecSafe(b []byte) *TcRateSpec {
|
||||
var msg = TcRateSpec{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofTcRateSpec]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestTcRateSpecDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofTcRateSpec)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeTcRateSpecSafe(orig)
|
||||
msg := DeserializeTcRateSpec(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
/* TcTbfQopt */
|
||||
func (msg *TcTbfQopt) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
msg.Rate.write(b[0:SizeofTcRateSpec])
|
||||
start := SizeofTcRateSpec
|
||||
msg.Peakrate.write(b[start : start+SizeofTcRateSpec])
|
||||
start += SizeofTcRateSpec
|
||||
native.PutUint32(b[start:start+4], msg.Limit)
|
||||
start += 4
|
||||
native.PutUint32(b[start:start+4], msg.Buffer)
|
||||
start += 4
|
||||
native.PutUint32(b[start:start+4], msg.Mtu)
|
||||
}
|
||||
|
||||
func (msg *TcTbfQopt) serializeSafe() []byte {
|
||||
length := SizeofTcTbfQopt
|
||||
b := make([]byte, length)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeTcTbfQoptSafe(b []byte) *TcTbfQopt {
|
||||
var msg = TcTbfQopt{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofTcTbfQopt]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestTcTbfQoptDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofTcTbfQopt)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeTcTbfQoptSafe(orig)
|
||||
msg := DeserializeTcTbfQopt(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
/* TcHtbCopt */
|
||||
func (msg *TcHtbCopt) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
msg.Rate.write(b[0:SizeofTcRateSpec])
|
||||
start := SizeofTcRateSpec
|
||||
msg.Ceil.write(b[start : start+SizeofTcRateSpec])
|
||||
start += SizeofTcRateSpec
|
||||
native.PutUint32(b[start:start+4], msg.Buffer)
|
||||
start += 4
|
||||
native.PutUint32(b[start:start+4], msg.Cbuffer)
|
||||
start += 4
|
||||
native.PutUint32(b[start:start+4], msg.Quantum)
|
||||
start += 4
|
||||
native.PutUint32(b[start:start+4], msg.Level)
|
||||
start += 4
|
||||
native.PutUint32(b[start:start+4], msg.Prio)
|
||||
}
|
||||
|
||||
func (msg *TcHtbCopt) serializeSafe() []byte {
|
||||
length := SizeofTcHtbCopt
|
||||
b := make([]byte, length)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeTcHtbCoptSafe(b []byte) *TcHtbCopt {
|
||||
var msg = TcHtbCopt{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofTcHtbCopt]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestTcHtbCoptDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofTcHtbCopt)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeTcHtbCoptSafe(orig)
|
||||
msg := DeserializeTcHtbCopt(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
161
Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_linux_test.go
generated
vendored
@ -1,161 +0,0 @@
|
||||
package nl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func (msg *XfrmAddress) write(b []byte) {
|
||||
copy(b[0:SizeofXfrmAddress], msg[:])
|
||||
}
|
||||
|
||||
func (msg *XfrmAddress) serializeSafe() []byte {
|
||||
b := make([]byte, SizeofXfrmAddress)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmAddressSafe(b []byte) *XfrmAddress {
|
||||
var msg = XfrmAddress{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofXfrmAddress]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmAddressDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofXfrmAddress)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeXfrmAddressSafe(orig)
|
||||
msg := DeserializeXfrmAddress(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
func (msg *XfrmSelector) write(b []byte) {
|
||||
const AddrEnd = SizeofXfrmAddress * 2
|
||||
native := NativeEndian()
|
||||
msg.Daddr.write(b[0:SizeofXfrmAddress])
|
||||
msg.Saddr.write(b[SizeofXfrmAddress:AddrEnd])
|
||||
native.PutUint16(b[AddrEnd:AddrEnd+2], msg.Dport)
|
||||
native.PutUint16(b[AddrEnd+2:AddrEnd+4], msg.DportMask)
|
||||
native.PutUint16(b[AddrEnd+4:AddrEnd+6], msg.Sport)
|
||||
native.PutUint16(b[AddrEnd+6:AddrEnd+8], msg.SportMask)
|
||||
native.PutUint16(b[AddrEnd+8:AddrEnd+10], msg.Family)
|
||||
b[AddrEnd+10] = msg.PrefixlenD
|
||||
b[AddrEnd+11] = msg.PrefixlenS
|
||||
b[AddrEnd+12] = msg.Proto
|
||||
copy(b[AddrEnd+13:AddrEnd+16], msg.Pad[:])
|
||||
native.PutUint32(b[AddrEnd+16:AddrEnd+20], uint32(msg.Ifindex))
|
||||
native.PutUint32(b[AddrEnd+20:AddrEnd+24], msg.User)
|
||||
}
|
||||
|
||||
func (msg *XfrmSelector) serializeSafe() []byte {
|
||||
length := SizeofXfrmSelector
|
||||
b := make([]byte, length)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmSelectorSafe(b []byte) *XfrmSelector {
|
||||
var msg = XfrmSelector{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofXfrmSelector]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmSelectorDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofXfrmSelector)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeXfrmSelectorSafe(orig)
|
||||
msg := DeserializeXfrmSelector(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
func (msg *XfrmLifetimeCfg) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
native.PutUint64(b[0:8], msg.SoftByteLimit)
|
||||
native.PutUint64(b[8:16], msg.HardByteLimit)
|
||||
native.PutUint64(b[16:24], msg.SoftPacketLimit)
|
||||
native.PutUint64(b[24:32], msg.HardPacketLimit)
|
||||
native.PutUint64(b[32:40], msg.SoftAddExpiresSeconds)
|
||||
native.PutUint64(b[40:48], msg.HardAddExpiresSeconds)
|
||||
native.PutUint64(b[48:56], msg.SoftUseExpiresSeconds)
|
||||
native.PutUint64(b[56:64], msg.HardUseExpiresSeconds)
|
||||
}
|
||||
|
||||
func (msg *XfrmLifetimeCfg) serializeSafe() []byte {
|
||||
length := SizeofXfrmLifetimeCfg
|
||||
b := make([]byte, length)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmLifetimeCfgSafe(b []byte) *XfrmLifetimeCfg {
|
||||
var msg = XfrmLifetimeCfg{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofXfrmLifetimeCfg]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmLifetimeCfgDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofXfrmLifetimeCfg)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeXfrmLifetimeCfgSafe(orig)
|
||||
msg := DeserializeXfrmLifetimeCfg(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
func (msg *XfrmLifetimeCur) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
native.PutUint64(b[0:8], msg.Bytes)
|
||||
native.PutUint64(b[8:16], msg.Packets)
|
||||
native.PutUint64(b[16:24], msg.AddTime)
|
||||
native.PutUint64(b[24:32], msg.UseTime)
|
||||
}
|
||||
|
||||
func (msg *XfrmLifetimeCur) serializeSafe() []byte {
|
||||
length := SizeofXfrmLifetimeCur
|
||||
b := make([]byte, length)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmLifetimeCurSafe(b []byte) *XfrmLifetimeCur {
|
||||
var msg = XfrmLifetimeCur{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofXfrmLifetimeCur]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmLifetimeCurDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofXfrmLifetimeCur)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeXfrmLifetimeCurSafe(orig)
|
||||
msg := DeserializeXfrmLifetimeCur(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
func (msg *XfrmId) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
msg.Daddr.write(b[0:SizeofXfrmAddress])
|
||||
native.PutUint32(b[SizeofXfrmAddress:SizeofXfrmAddress+4], msg.Spi)
|
||||
b[SizeofXfrmAddress+4] = msg.Proto
|
||||
copy(b[SizeofXfrmAddress+5:SizeofXfrmAddress+8], msg.Pad[:])
|
||||
}
|
||||
|
||||
func (msg *XfrmId) serializeSafe() []byte {
|
||||
b := make([]byte, SizeofXfrmId)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmIdSafe(b []byte) *XfrmId {
|
||||
var msg = XfrmId{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofXfrmId]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmIdDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofXfrmId)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeXfrmIdSafe(orig)
|
||||
msg := DeserializeXfrmId(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
109
Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux_test.go
generated
vendored
@ -1,109 +0,0 @@
|
||||
package nl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func (msg *XfrmUserpolicyId) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
msg.Sel.write(b[0:SizeofXfrmSelector])
|
||||
native.PutUint32(b[SizeofXfrmSelector:SizeofXfrmSelector+4], msg.Index)
|
||||
b[SizeofXfrmSelector+4] = msg.Dir
|
||||
copy(b[SizeofXfrmSelector+5:SizeofXfrmSelector+8], msg.Pad[:])
|
||||
}
|
||||
|
||||
func (msg *XfrmUserpolicyId) serializeSafe() []byte {
|
||||
b := make([]byte, SizeofXfrmUserpolicyId)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmUserpolicyIdSafe(b []byte) *XfrmUserpolicyId {
|
||||
var msg = XfrmUserpolicyId{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofXfrmUserpolicyId]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmUserpolicyIdDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofXfrmUserpolicyId)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeXfrmUserpolicyIdSafe(orig)
|
||||
msg := DeserializeXfrmUserpolicyId(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
func (msg *XfrmUserpolicyInfo) write(b []byte) {
|
||||
const CfgEnd = SizeofXfrmSelector + SizeofXfrmLifetimeCfg
|
||||
const CurEnd = CfgEnd + SizeofXfrmLifetimeCur
|
||||
native := NativeEndian()
|
||||
msg.Sel.write(b[0:SizeofXfrmSelector])
|
||||
msg.Lft.write(b[SizeofXfrmSelector:CfgEnd])
|
||||
msg.Curlft.write(b[CfgEnd:CurEnd])
|
||||
native.PutUint32(b[CurEnd:CurEnd+4], msg.Priority)
|
||||
native.PutUint32(b[CurEnd+4:CurEnd+8], msg.Index)
|
||||
b[CurEnd+8] = msg.Dir
|
||||
b[CurEnd+9] = msg.Action
|
||||
b[CurEnd+10] = msg.Flags
|
||||
b[CurEnd+11] = msg.Share
|
||||
copy(b[CurEnd+12:CurEnd+16], msg.Pad[:])
|
||||
}
|
||||
|
||||
func (msg *XfrmUserpolicyInfo) serializeSafe() []byte {
|
||||
b := make([]byte, SizeofXfrmUserpolicyInfo)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmUserpolicyInfoSafe(b []byte) *XfrmUserpolicyInfo {
|
||||
var msg = XfrmUserpolicyInfo{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofXfrmUserpolicyInfo]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmUserpolicyInfoDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofXfrmUserpolicyInfo)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeXfrmUserpolicyInfoSafe(orig)
|
||||
msg := DeserializeXfrmUserpolicyInfo(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
func (msg *XfrmUserTmpl) write(b []byte) {
|
||||
const AddrEnd = SizeofXfrmId + 4 + SizeofXfrmAddress
|
||||
native := NativeEndian()
|
||||
msg.XfrmId.write(b[0:SizeofXfrmId])
|
||||
native.PutUint16(b[SizeofXfrmId:SizeofXfrmId+2], msg.Family)
|
||||
copy(b[SizeofXfrmId+2:SizeofXfrmId+4], msg.Pad1[:])
|
||||
msg.Saddr.write(b[SizeofXfrmId+4 : AddrEnd])
|
||||
native.PutUint32(b[AddrEnd:AddrEnd+4], msg.Reqid)
|
||||
b[AddrEnd+4] = msg.Mode
|
||||
b[AddrEnd+5] = msg.Share
|
||||
b[AddrEnd+6] = msg.Optional
|
||||
b[AddrEnd+7] = msg.Pad2
|
||||
native.PutUint32(b[AddrEnd+8:AddrEnd+12], msg.Aalgos)
|
||||
native.PutUint32(b[AddrEnd+12:AddrEnd+16], msg.Ealgos)
|
||||
native.PutUint32(b[AddrEnd+16:AddrEnd+20], msg.Calgos)
|
||||
}
|
||||
|
||||
func (msg *XfrmUserTmpl) serializeSafe() []byte {
|
||||
b := make([]byte, SizeofXfrmUserTmpl)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmUserTmplSafe(b []byte) *XfrmUserTmpl {
|
||||
var msg = XfrmUserTmpl{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofXfrmUserTmpl]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmUserTmplDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofXfrmUserTmpl)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeXfrmUserTmplSafe(orig)
|
||||
msg := DeserializeXfrmUserTmpl(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_state_linux_test.go (generated, vendored; 207 lines deleted)
@@ -1,207 +0,0 @@
|
||||
package nl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func (msg *XfrmUsersaId) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
msg.Daddr.write(b[0:SizeofXfrmAddress])
|
||||
native.PutUint32(b[SizeofXfrmAddress:SizeofXfrmAddress+4], msg.Spi)
|
||||
native.PutUint16(b[SizeofXfrmAddress+4:SizeofXfrmAddress+6], msg.Family)
|
||||
b[SizeofXfrmAddress+6] = msg.Proto
|
||||
b[SizeofXfrmAddress+7] = msg.Pad
|
||||
}
|
||||
|
||||
func (msg *XfrmUsersaId) serializeSafe() []byte {
|
||||
b := make([]byte, SizeofXfrmUsersaId)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmUsersaIdSafe(b []byte) *XfrmUsersaId {
|
||||
var msg = XfrmUsersaId{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofXfrmUsersaId]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmUsersaIdDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofXfrmUsersaId)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeXfrmUsersaIdSafe(orig)
|
||||
msg := DeserializeXfrmUsersaId(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
func (msg *XfrmStats) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
native.PutUint32(b[0:4], msg.ReplayWindow)
|
||||
native.PutUint32(b[4:8], msg.Replay)
|
||||
native.PutUint32(b[8:12], msg.IntegrityFailed)
|
||||
}
|
||||
|
||||
func (msg *XfrmStats) serializeSafe() []byte {
|
||||
b := make([]byte, SizeofXfrmStats)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmStatsSafe(b []byte) *XfrmStats {
|
||||
var msg = XfrmStats{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofXfrmStats]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmStatsDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofXfrmStats)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeXfrmStatsSafe(orig)
|
||||
msg := DeserializeXfrmStats(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
func (msg *XfrmUsersaInfo) write(b []byte) {
|
||||
const IdEnd = SizeofXfrmSelector + SizeofXfrmId
|
||||
const AddressEnd = IdEnd + SizeofXfrmAddress
|
||||
const CfgEnd = AddressEnd + SizeofXfrmLifetimeCfg
|
||||
const CurEnd = CfgEnd + SizeofXfrmLifetimeCur
|
||||
const StatsEnd = CurEnd + SizeofXfrmStats
|
||||
native := NativeEndian()
|
||||
msg.Sel.write(b[0:SizeofXfrmSelector])
|
||||
msg.Id.write(b[SizeofXfrmSelector:IdEnd])
|
||||
msg.Saddr.write(b[IdEnd:AddressEnd])
|
||||
msg.Lft.write(b[AddressEnd:CfgEnd])
|
||||
msg.Curlft.write(b[CfgEnd:CurEnd])
|
||||
msg.Stats.write(b[CurEnd:StatsEnd])
|
||||
native.PutUint32(b[StatsEnd:StatsEnd+4], msg.Seq)
|
||||
native.PutUint32(b[StatsEnd+4:StatsEnd+8], msg.Reqid)
|
||||
native.PutUint16(b[StatsEnd+8:StatsEnd+10], msg.Family)
|
||||
b[StatsEnd+10] = msg.Mode
|
||||
b[StatsEnd+11] = msg.ReplayWindow
|
||||
b[StatsEnd+12] = msg.Flags
|
||||
copy(b[StatsEnd+13:StatsEnd+20], msg.Pad[:])
|
||||
}
|
||||
|
||||
func (msg *XfrmUsersaInfo) serializeSafe() []byte {
|
||||
b := make([]byte, SizeofXfrmUsersaInfo)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmUsersaInfoSafe(b []byte) *XfrmUsersaInfo {
|
||||
var msg = XfrmUsersaInfo{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofXfrmUsersaInfo]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmUsersaInfoDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofXfrmUsersaInfo)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeXfrmUsersaInfoSafe(orig)
|
||||
msg := DeserializeXfrmUsersaInfo(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
func (msg *XfrmAlgo) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
copy(b[0:64], msg.AlgName[:])
|
||||
native.PutUint32(b[64:68], msg.AlgKeyLen)
|
||||
copy(b[68:msg.Len()], msg.AlgKey[:])
|
||||
}
|
||||
|
||||
func (msg *XfrmAlgo) serializeSafe() []byte {
|
||||
b := make([]byte, msg.Len())
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmAlgoSafe(b []byte) *XfrmAlgo {
|
||||
var msg = XfrmAlgo{}
|
||||
copy(msg.AlgName[:], b[0:64])
|
||||
binary.Read(bytes.NewReader(b[64:68]), NativeEndian(), &msg.AlgKeyLen)
|
||||
msg.AlgKey = b[68:msg.Len()]
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmAlgoDeserializeSerialize(t *testing.T) {
|
||||
// use a 32 byte key len
|
||||
var orig = make([]byte, SizeofXfrmAlgo+32)
|
||||
rand.Read(orig)
|
||||
// set the key len to 256 bits
|
||||
orig[64] = 0
|
||||
orig[65] = 1
|
||||
orig[66] = 0
|
||||
orig[67] = 0
|
||||
safemsg := deserializeXfrmAlgoSafe(orig)
|
||||
msg := DeserializeXfrmAlgo(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
func (msg *XfrmAlgoAuth) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
copy(b[0:64], msg.AlgName[:])
|
||||
native.PutUint32(b[64:68], msg.AlgKeyLen)
|
||||
native.PutUint32(b[68:72], msg.AlgTruncLen)
|
||||
copy(b[72:msg.Len()], msg.AlgKey[:])
|
||||
}
|
||||
|
||||
func (msg *XfrmAlgoAuth) serializeSafe() []byte {
|
||||
b := make([]byte, msg.Len())
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmAlgoAuthSafe(b []byte) *XfrmAlgoAuth {
|
||||
var msg = XfrmAlgoAuth{}
|
||||
copy(msg.AlgName[:], b[0:64])
|
||||
binary.Read(bytes.NewReader(b[64:68]), NativeEndian(), &msg.AlgKeyLen)
|
||||
binary.Read(bytes.NewReader(b[68:72]), NativeEndian(), &msg.AlgTruncLen)
|
||||
msg.AlgKey = b[72:msg.Len()]
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmAlgoAuthDeserializeSerialize(t *testing.T) {
|
||||
// use a 32 byte key len
|
||||
var orig = make([]byte, SizeofXfrmAlgoAuth+32)
|
||||
rand.Read(orig)
|
||||
// set the key len to 256 bits
|
||||
orig[64] = 0
|
||||
orig[65] = 1
|
||||
orig[66] = 0
|
||||
orig[67] = 0
|
||||
safemsg := deserializeXfrmAlgoAuthSafe(orig)
|
||||
msg := DeserializeXfrmAlgoAuth(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
||||
|
||||
func (msg *XfrmEncapTmpl) write(b []byte) {
|
||||
native := NativeEndian()
|
||||
native.PutUint16(b[0:2], msg.EncapType)
|
||||
native.PutUint16(b[2:4], msg.EncapSport)
|
||||
native.PutUint16(b[4:6], msg.EncapDport)
|
||||
copy(b[6:8], msg.Pad[:])
|
||||
msg.EncapOa.write(b[8:SizeofXfrmAddress])
|
||||
}
|
||||
|
||||
func (msg *XfrmEncapTmpl) serializeSafe() []byte {
|
||||
b := make([]byte, SizeofXfrmEncapTmpl)
|
||||
msg.write(b)
|
||||
return b
|
||||
}
|
||||
|
||||
func deserializeXfrmEncapTmplSafe(b []byte) *XfrmEncapTmpl {
|
||||
var msg = XfrmEncapTmpl{}
|
||||
binary.Read(bytes.NewReader(b[0:SizeofXfrmEncapTmpl]), NativeEndian(), &msg)
|
||||
return &msg
|
||||
}
|
||||
|
||||
func TestXfrmEncapTmplDeserializeSerialize(t *testing.T) {
|
||||
var orig = make([]byte, SizeofXfrmEncapTmpl)
|
||||
rand.Read(orig)
|
||||
safemsg := deserializeXfrmEncapTmplSafe(orig)
|
||||
msg := DeserializeXfrmEncapTmpl(orig)
|
||||
testDeserializeSerialize(t, orig, safemsg, msg)
|
||||
}
|
Godeps/_workspace/src/github.com/vishvananda/netlink/protinfo_test.go (generated, vendored; 98 lines deleted)
@@ -1,98 +0,0 @@
|
||||
package netlink
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestProtinfo(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
master := &Bridge{LinkAttrs{Name: "foo"}}
|
||||
if err := LinkAdd(master); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
iface1 := &Dummy{LinkAttrs{Name: "bar1", MasterIndex: master.Index}}
|
||||
iface2 := &Dummy{LinkAttrs{Name: "bar2", MasterIndex: master.Index}}
|
||||
iface3 := &Dummy{LinkAttrs{Name: "bar3"}}
|
||||
|
||||
if err := LinkAdd(iface1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := LinkAdd(iface2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := LinkAdd(iface3); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
oldpi1, err := LinkGetProtinfo(iface1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
oldpi2, err := LinkGetProtinfo(iface2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := LinkSetHairpin(iface1, true); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := LinkSetRootBlock(iface1, true); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pi1, err := LinkGetProtinfo(iface1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !pi1.Hairpin {
|
||||
t.Fatalf("Hairpin mode is not enabled for %s, but should", iface1.Name)
|
||||
}
|
||||
if !pi1.RootBlock {
|
||||
t.Fatalf("RootBlock is not enabled for %s, but should", iface1.Name)
|
||||
}
|
||||
if pi1.Guard != oldpi1.Guard {
|
||||
t.Fatalf("Guard field was changed for %s but shouldn't", iface1.Name)
|
||||
}
|
||||
if pi1.FastLeave != oldpi1.FastLeave {
|
||||
t.Fatalf("FastLeave field was changed for %s but shouldn't", iface1.Name)
|
||||
}
|
||||
if pi1.Learning != oldpi1.Learning {
|
||||
t.Fatalf("Learning field was changed for %s but shouldn't", iface1.Name)
|
||||
}
|
||||
if pi1.Flood != oldpi1.Flood {
|
||||
t.Fatalf("Flood field was changed for %s but shouldn't", iface1.Name)
|
||||
}
|
||||
|
||||
if err := LinkSetGuard(iface2, true); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := LinkSetLearning(iface2, false); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
pi2, err := LinkGetProtinfo(iface2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if pi2.Hairpin {
|
||||
t.Fatalf("Hairpin mode is enabled for %s, but shouldn't", iface2.Name)
|
||||
}
|
||||
if !pi2.Guard {
|
||||
t.Fatalf("Guard is not enabled for %s, but should", iface2.Name)
|
||||
}
|
||||
if pi2.Learning {
|
||||
t.Fatalf("Learning is enabled for %s, but shouldn't", iface2.Name)
|
||||
}
|
||||
if pi2.RootBlock != oldpi2.RootBlock {
|
||||
t.Fatalf("RootBlock field was changed for %s but shouldn't", iface2.Name)
|
||||
}
|
||||
if pi2.FastLeave != oldpi2.FastLeave {
|
||||
t.Fatalf("FastLeave field was changed for %s but shouldn't", iface2.Name)
|
||||
}
|
||||
if pi2.Flood != oldpi2.Flood {
|
||||
t.Fatalf("Flood field was changed for %s but shouldn't", iface2.Name)
|
||||
}
|
||||
|
||||
if err := LinkSetHairpin(iface3, true); err == nil || err.Error() != "operation not supported" {
|
||||
t.Fatalf("Set protinfo attrs for link without master is not supported, but err: %s", err)
|
||||
}
|
||||
}
|
Godeps/_workspace/src/github.com/vishvananda/netlink/qdisc_test.go (generated, vendored; 171 lines deleted)
@@ -1,171 +0,0 @@
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestTbfAddDel(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
if err := LinkAdd(&Ifb{LinkAttrs{Name: "foo"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
link, err := LinkByName("foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := LinkSetUp(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qdisc := &Tbf{
|
||||
QdiscAttrs: QdiscAttrs{
|
||||
LinkIndex: link.Attrs().Index,
|
||||
Handle: MakeHandle(1, 0),
|
||||
Parent: HANDLE_ROOT,
|
||||
},
|
||||
Rate: 131072,
|
||||
Limit: 1220703,
|
||||
Buffer: 16793,
|
||||
}
|
||||
if err := QdiscAdd(qdisc); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qdiscs, err := QdiscList(link)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(qdiscs) != 1 {
|
||||
t.Fatal("Failed to add qdisc")
|
||||
}
|
||||
tbf, ok := qdiscs[0].(*Tbf)
|
||||
if !ok {
|
||||
t.Fatal("Qdisc is the wrong type")
|
||||
}
|
||||
if tbf.Rate != qdisc.Rate {
|
||||
t.Fatal("Rate doesn't match")
|
||||
}
|
||||
if tbf.Limit != qdisc.Limit {
|
||||
t.Fatal("Limit doesn't match")
|
||||
}
|
||||
if tbf.Buffer != qdisc.Buffer {
|
||||
t.Fatal("Buffer doesn't match")
|
||||
}
|
||||
if err := QdiscDel(qdisc); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qdiscs, err = QdiscList(link)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(qdiscs) != 0 {
|
||||
t.Fatal("Failed to remove qdisc")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHtbAddDel(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
if err := LinkAdd(&Ifb{LinkAttrs{Name: "foo"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
link, err := LinkByName("foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := LinkSetUp(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
attrs := QdiscAttrs{
|
||||
LinkIndex: link.Attrs().Index,
|
||||
Handle: MakeHandle(1, 0),
|
||||
Parent: HANDLE_ROOT,
|
||||
}
|
||||
|
||||
qdisc := NewHtb(attrs)
|
||||
qdisc.Rate2Quantum = 5
|
||||
if err := QdiscAdd(qdisc); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
/*
|
||||
cmd := exec.Command("tc", "qdisc")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err == nil {
|
||||
fmt.Printf("%s\n", out)
|
||||
}
|
||||
*/
|
||||
qdiscs, err := QdiscList(link)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(qdiscs) != 1 {
|
||||
t.Fatal("Failed to add qdisc")
|
||||
}
|
||||
htb, ok := qdiscs[0].(*Htb)
|
||||
if !ok {
|
||||
t.Fatal("Qdisc is the wrong type")
|
||||
}
|
||||
if htb.Defcls != qdisc.Defcls {
|
||||
t.Fatal("Defcls doesn't match")
|
||||
}
|
||||
if htb.Rate2Quantum != qdisc.Rate2Quantum {
|
||||
t.Fatal("Rate2Quantum doesn't match")
|
||||
}
|
||||
if htb.Debug != qdisc.Debug {
|
||||
t.Fatal("Debug doesn't match")
|
||||
}
|
||||
if err := QdiscDel(qdisc); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qdiscs, err = QdiscList(link)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(qdiscs) != 0 {
|
||||
t.Fatal("Failed to remove qdisc")
|
||||
}
|
||||
}
|
||||
func TestPrioAddDel(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
if err := LinkAdd(&Ifb{LinkAttrs{Name: "foo"}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
link, err := LinkByName("foo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := LinkSetUp(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qdisc := NewPrio(QdiscAttrs{
|
||||
LinkIndex: link.Attrs().Index,
|
||||
Handle: MakeHandle(1, 0),
|
||||
Parent: HANDLE_ROOT,
|
||||
})
|
||||
if err := QdiscAdd(qdisc); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qdiscs, err := QdiscList(link)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(qdiscs) != 1 {
|
||||
t.Fatal("Failed to add qdisc")
|
||||
}
|
||||
_, ok := qdiscs[0].(*Prio)
|
||||
if !ok {
|
||||
t.Fatal("Qdisc is the wrong type")
|
||||
}
|
||||
if err := QdiscDel(qdisc); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
qdiscs, err = QdiscList(link)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(qdiscs) != 0 {
|
||||
t.Fatal("Failed to remove qdisc")
|
||||
}
|
||||
}
|
Godeps/_workspace/src/github.com/vishvananda/netlink/route_test.go (generated, vendored; 146 lines deleted)
@@ -1,146 +0,0 @@
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"net"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestRouteAddDel(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
// get loopback interface
|
||||
link, err := LinkByName("lo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// bring the interface up
|
||||
if err = LinkSetUp(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// add a gateway route
|
||||
_, dst, err := net.ParseCIDR("192.168.0.0/24")
|
||||
|
||||
ip := net.ParseIP("127.1.1.1")
|
||||
route := Route{LinkIndex: link.Attrs().Index, Dst: dst, Src: ip}
|
||||
err = RouteAdd(&route)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
routes, err := RouteList(link, FAMILY_V4)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(routes) != 1 {
|
||||
t.Fatal("Link not added properly")
|
||||
}
|
||||
|
||||
dstIP := net.ParseIP("192.168.0.42")
|
||||
routeToDstIP, err := RouteGet(dstIP)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(routeToDstIP) == 0 {
|
||||
t.Fatal("Default route not present")
|
||||
}
|
||||
|
||||
err = RouteDel(&route)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
routes, err = RouteList(link, FAMILY_V4)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(routes) != 0 {
|
||||
t.Fatal("Route not removed properly")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestRouteAddIncomplete(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
// get loopback interface
|
||||
link, err := LinkByName("lo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// bring the interface up
|
||||
if err = LinkSetUp(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
route := Route{LinkIndex: link.Attrs().Index}
|
||||
if err := RouteAdd(&route); err == nil {
|
||||
t.Fatal("Adding incomplete route should fail")
|
||||
}
|
||||
}
|
||||
|
||||
func expectRouteUpdate(ch <-chan RouteUpdate, t uint16, dst net.IP) bool {
|
||||
for {
|
||||
timeout := time.After(time.Minute)
|
||||
select {
|
||||
case update := <-ch:
|
||||
if update.Type == t && update.Route.Dst.IP.Equal(dst) {
|
||||
return true
|
||||
}
|
||||
case <-timeout:
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRouteSubscribe(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
ch := make(chan RouteUpdate)
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
if err := RouteSubscribe(ch, done); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// get loopback interface
|
||||
link, err := LinkByName("lo")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// bring the interface up
|
||||
if err = LinkSetUp(link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// add a gateway route
|
||||
_, dst, err := net.ParseCIDR("192.168.0.0/24")
|
||||
|
||||
ip := net.ParseIP("127.1.1.1")
|
||||
route := Route{LinkIndex: link.Attrs().Index, Dst: dst, Src: ip}
|
||||
err = RouteAdd(&route)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !expectRouteUpdate(ch, syscall.RTM_NEWROUTE, dst.IP) {
|
||||
t.Fatal("Add update not received as expected")
|
||||
}
|
||||
|
||||
err = RouteDel(&route)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !expectRouteUpdate(ch, syscall.RTM_DELROUTE, dst.IP) {
|
||||
t.Fatal("Del update not received as expected")
|
||||
}
|
||||
}
|
Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_policy_test.go (generated, vendored; 49 lines deleted)
@@ -1,49 +0,0 @@
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestXfrmPolicyAddDel(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
src, _ := ParseIPNet("127.1.1.1/32")
|
||||
dst, _ := ParseIPNet("127.1.1.2/32")
|
||||
policy := XfrmPolicy{
|
||||
Src: src,
|
||||
Dst: dst,
|
||||
Dir: XFRM_DIR_OUT,
|
||||
}
|
||||
tmpl := XfrmPolicyTmpl{
|
||||
Src: net.ParseIP("127.0.0.1"),
|
||||
Dst: net.ParseIP("127.0.0.2"),
|
||||
Proto: XFRM_PROTO_ESP,
|
||||
Mode: XFRM_MODE_TUNNEL,
|
||||
}
|
||||
policy.Tmpls = append(policy.Tmpls, tmpl)
|
||||
if err := XfrmPolicyAdd(&policy); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
policies, err := XfrmPolicyList(FAMILY_ALL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(policies) != 1 {
|
||||
t.Fatal("Policy not added properly")
|
||||
}
|
||||
|
||||
if err = XfrmPolicyDel(&policy); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
policies, err = XfrmPolicyList(FAMILY_ALL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(policies) != 0 {
|
||||
t.Fatal("Policy not removed properly")
|
||||
}
|
||||
}
|
Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_state_test.go (generated, vendored; 50 lines deleted)
@@ -1,50 +0,0 @@
|
||||
package netlink
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestXfrmStateAddDel(t *testing.T) {
|
||||
tearDown := setUpNetlinkTest(t)
|
||||
defer tearDown()
|
||||
|
||||
state := XfrmState{
|
||||
Src: net.ParseIP("127.0.0.1"),
|
||||
Dst: net.ParseIP("127.0.0.2"),
|
||||
Proto: XFRM_PROTO_ESP,
|
||||
Mode: XFRM_MODE_TUNNEL,
|
||||
Spi: 1,
|
||||
Auth: &XfrmStateAlgo{
|
||||
Name: "hmac(sha256)",
|
||||
Key: []byte("abcdefghijklmnopqrstuvwzyzABCDEF"),
|
||||
},
|
||||
Crypt: &XfrmStateAlgo{
|
||||
Name: "cbc(aes)",
|
||||
Key: []byte("abcdefghijklmnopqrstuvwzyzABCDEF"),
|
||||
},
|
||||
}
|
||||
if err := XfrmStateAdd(&state); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
policies, err := XfrmStateList(FAMILY_ALL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(policies) != 1 {
|
||||
t.Fatal("State not added properly")
|
||||
}
|
||||
|
||||
if err = XfrmStateDel(&state); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
policies, err = XfrmStateList(FAMILY_ALL)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(policies) != 0 {
|
||||
t.Fatal("State not removed properly")
|
||||
}
|
||||
}
|
Godeps/_workspace/src/golang.org/x/sys/unix/creds_test.go (generated, vendored; 115 lines deleted)
@@ -1,115 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package unix_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// TestSCMCredentials tests the sending and receiving of credentials
|
||||
// (PID, UID, GID) in an ancillary message between two UNIX
|
||||
// sockets. The SO_PASSCRED socket option is enabled on the sending
|
||||
// socket for this to work.
|
||||
func TestSCMCredentials(t *testing.T) {
|
||||
fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Socketpair: %v", err)
|
||||
}
|
||||
defer unix.Close(fds[0])
|
||||
defer unix.Close(fds[1])
|
||||
|
||||
err = unix.SetsockoptInt(fds[0], unix.SOL_SOCKET, unix.SO_PASSCRED, 1)
|
||||
if err != nil {
|
||||
t.Fatalf("SetsockoptInt: %v", err)
|
||||
}
|
||||
|
||||
srvFile := os.NewFile(uintptr(fds[0]), "server")
|
||||
defer srvFile.Close()
|
||||
srv, err := net.FileConn(srvFile)
|
||||
if err != nil {
|
||||
t.Errorf("FileConn: %v", err)
|
||||
return
|
||||
}
|
||||
defer srv.Close()
|
||||
|
||||
cliFile := os.NewFile(uintptr(fds[1]), "client")
|
||||
defer cliFile.Close()
|
||||
cli, err := net.FileConn(cliFile)
|
||||
if err != nil {
|
||||
t.Errorf("FileConn: %v", err)
|
||||
return
|
||||
}
|
||||
defer cli.Close()
|
||||
|
||||
var ucred unix.Ucred
|
||||
if os.Getuid() != 0 {
|
||||
ucred.Pid = int32(os.Getpid())
|
||||
ucred.Uid = 0
|
||||
ucred.Gid = 0
|
||||
oob := unix.UnixCredentials(&ucred)
|
||||
_, _, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil)
|
||||
if err.(*net.OpError).Err != syscall.EPERM {
|
||||
t.Fatalf("WriteMsgUnix failed with %v, want EPERM", err)
|
||||
}
|
||||
}
|
||||
|
||||
ucred.Pid = int32(os.Getpid())
|
||||
ucred.Uid = uint32(os.Getuid())
|
||||
ucred.Gid = uint32(os.Getgid())
|
||||
oob := unix.UnixCredentials(&ucred)
|
||||
|
||||
// this is going to send a dummy byte
|
||||
n, oobn, err := cli.(*net.UnixConn).WriteMsgUnix(nil, oob, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("WriteMsgUnix: %v", err)
|
||||
}
|
||||
if n != 0 {
|
||||
t.Fatalf("WriteMsgUnix n = %d, want 0", n)
|
||||
}
|
||||
if oobn != len(oob) {
|
||||
t.Fatalf("WriteMsgUnix oobn = %d, want %d", oobn, len(oob))
|
||||
}
|
||||
|
||||
oob2 := make([]byte, 10*len(oob))
|
||||
n, oobn2, flags, _, err := srv.(*net.UnixConn).ReadMsgUnix(nil, oob2)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadMsgUnix: %v", err)
|
||||
}
|
||||
if flags != 0 {
|
||||
t.Fatalf("ReadMsgUnix flags = 0x%x, want 0", flags)
|
||||
}
|
||||
if n != 1 {
|
||||
t.Fatalf("ReadMsgUnix n = %d, want 1 (dummy byte)", n)
|
||||
}
|
||||
if oobn2 != oobn {
|
||||
// without SO_PASSCRED set on the socket, ReadMsgUnix will
|
||||
// return zero oob bytes
|
||||
t.Fatalf("ReadMsgUnix oobn = %d, want %d", oobn2, oobn)
|
||||
}
|
||||
oob2 = oob2[:oobn2]
|
||||
if !bytes.Equal(oob, oob2) {
|
||||
t.Fatal("ReadMsgUnix oob bytes don't match")
|
||||
}
|
||||
|
||||
scm, err := unix.ParseSocketControlMessage(oob2)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseSocketControlMessage: %v", err)
|
||||
}
|
||||
newUcred, err := unix.ParseUnixCredentials(&scm[0])
|
||||
if err != nil {
|
||||
t.Fatalf("ParseUnixCredentials: %v", err)
|
||||
}
|
||||
if *newUcred != ucred {
|
||||
t.Fatalf("ParseUnixCredentials = %+v, want %+v", newUcred, ucred)
|
||||
}
|
||||
}
|
Godeps/_workspace/src/golang.org/x/sys/unix/mmap_unix_test.go (generated, vendored; 23 lines deleted)
@@ -1,23 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd

package unix_test

import (
	"testing"

	"golang.org/x/sys/unix"
)

func TestMmap(t *testing.T) {
	b, err := unix.Mmap(-1, 0, unix.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_PRIVATE)
	if err != nil {
		t.Fatalf("Mmap: %v", err)
	}
	if err := unix.Munmap(b); err != nil {
		t.Fatalf("Munmap: %v", err)
	}
}
Godeps/_workspace/src/golang.org/x/sys/unix/syscall_bsd_test.go (generated, vendored; 35 lines deleted)
@@ -1,35 +0,0 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin dragonfly freebsd openbsd
|
||||
|
||||
package unix_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const MNT_WAIT = 1
|
||||
|
||||
func TestGetfsstat(t *testing.T) {
|
||||
n, err := unix.Getfsstat(nil, MNT_WAIT)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
data := make([]unix.Statfs_t, n)
|
||||
n, err = unix.Getfsstat(data, MNT_WAIT)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
empty := unix.Statfs_t{}
|
||||
for _, stat := range data {
|
||||
if stat == empty {
|
||||
t.Fatal("an empty Statfs_t struct was returned")
|
||||
}
|
||||
}
|
||||
}
|
Godeps/_workspace/src/golang.org/x/sys/unix/syscall_test.go (generated, vendored; 33 lines deleted)
@@ -1,33 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
|
||||
|
||||
package unix_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func testSetGetenv(t *testing.T, key, value string) {
|
||||
err := unix.Setenv(key, value)
|
||||
if err != nil {
|
||||
t.Fatalf("Setenv failed to set %q: %v", value, err)
|
||||
}
|
||||
newvalue, found := unix.Getenv(key)
|
||||
if !found {
|
||||
t.Fatalf("Getenv failed to find %v variable (want value %q)", key, value)
|
||||
}
|
||||
if newvalue != value {
|
||||
t.Fatalf("Getenv(%v) = %q; want %q", key, newvalue, value)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnv(t *testing.T) {
|
||||
testSetGetenv(t, "TESTENV", "AVALUE")
|
||||
// make sure TESTENV gets set to "", not deleted
|
||||
testSetGetenv(t, "TESTENV", "")
|
||||
}
|
Godeps/_workspace/src/golang.org/x/sys/unix/syscall_unix_test.go (generated, vendored; 318 lines deleted)
@@ -1,318 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
|
||||
|
||||
package unix_test
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Tests that below functions, structures and constants are consistent
|
||||
// on all Unix-like systems.
|
||||
func _() {
|
||||
// program scheduling priority functions and constants
|
||||
var (
|
||||
_ func(int, int, int) error = unix.Setpriority
|
||||
_ func(int, int) (int, error) = unix.Getpriority
|
||||
)
|
||||
const (
|
||||
_ int = unix.PRIO_USER
|
||||
_ int = unix.PRIO_PROCESS
|
||||
_ int = unix.PRIO_PGRP
|
||||
)
|
||||
|
||||
// termios constants
|
||||
const (
|
||||
_ int = unix.TCIFLUSH
|
||||
_ int = unix.TCIOFLUSH
|
||||
_ int = unix.TCOFLUSH
|
||||
)
|
||||
|
||||
// fcntl file locking structure and constants
|
||||
var (
|
||||
_ = unix.Flock_t{
|
||||
Type: int16(0),
|
||||
Whence: int16(0),
|
||||
Start: int64(0),
|
||||
Len: int64(0),
|
||||
Pid: int32(0),
|
||||
}
|
||||
)
|
||||
const (
|
||||
_ = unix.F_GETLK
|
||||
_ = unix.F_SETLK
|
||||
_ = unix.F_SETLKW
|
||||
)
|
||||
}
|
||||
|
||||
// TestFcntlFlock tests whether the file locking structure matches
|
||||
// the calling convention of each kernel.
|
||||
func TestFcntlFlock(t *testing.T) {
|
||||
name := filepath.Join(os.TempDir(), "TestFcntlFlock")
|
||||
fd, err := unix.Open(name, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Open failed: %v", err)
|
||||
}
|
||||
defer unix.Unlink(name)
|
||||
defer unix.Close(fd)
|
||||
flock := unix.Flock_t{
|
||||
Type: unix.F_RDLCK,
|
||||
Start: 0, Len: 0, Whence: 1,
|
||||
}
|
||||
if err := unix.FcntlFlock(uintptr(fd), unix.F_GETLK, &flock); err != nil {
|
||||
t.Fatalf("FcntlFlock failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestPassFD tests passing a file descriptor over a Unix socket.
|
||||
//
|
||||
// This test involved both a parent and child process. The parent
|
||||
// process is invoked as a normal test, with "go test", which then
|
||||
// runs the child process by running the current test binary with args
|
||||
// "-test.run=^TestPassFD$" and an environment variable used to signal
|
||||
// that the test should become the child process instead.
|
||||
func TestPassFD(t *testing.T) {
|
||||
switch runtime.GOOS {
|
||||
case "dragonfly":
|
||||
// TODO(jsing): Figure out why sendmsg is returning EINVAL.
|
||||
t.Skip("skipping test on dragonfly")
|
||||
case "solaris":
|
||||
// TODO(aram): Figure out why ReadMsgUnix is returning empty message.
|
||||
t.Skip("skipping test on solaris, see issue 7402")
|
||||
}
|
||||
if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" {
|
||||
passFDChild()
|
||||
return
|
||||
}
|
||||
|
||||
tempDir, err := ioutil.TempDir("", "TestPassFD")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Socketpair: %v", err)
|
||||
}
|
||||
defer unix.Close(fds[0])
|
||||
defer unix.Close(fds[1])
|
||||
writeFile := os.NewFile(uintptr(fds[0]), "child-writes")
|
||||
readFile := os.NewFile(uintptr(fds[1]), "parent-reads")
|
||||
defer writeFile.Close()
|
||||
defer readFile.Close()
|
||||
|
||||
cmd := exec.Command(os.Args[0], "-test.run=^TestPassFD$", "--", tempDir)
|
||||
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
|
||||
if lp := os.Getenv("LD_LIBRARY_PATH"); lp != "" {
|
||||
cmd.Env = append(cmd.Env, "LD_LIBRARY_PATH="+lp)
|
||||
}
|
||||
cmd.ExtraFiles = []*os.File{writeFile}
|
||||
|
||||
out, err := cmd.CombinedOutput()
|
||||
if len(out) > 0 || err != nil {
|
||||
t.Fatalf("child process: %q, %v", out, err)
|
||||
}
|
||||
|
||||
c, err := net.FileConn(readFile)
|
||||
if err != nil {
|
||||
t.Fatalf("FileConn: %v", err)
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
uc, ok := c.(*net.UnixConn)
|
||||
if !ok {
|
||||
t.Fatalf("unexpected FileConn type; expected UnixConn, got %T", c)
|
||||
}
|
||||
|
||||
buf := make([]byte, 32) // expect 1 byte
|
||||
oob := make([]byte, 32) // expect 24 bytes
|
||||
closeUnix := time.AfterFunc(5*time.Second, func() {
|
||||
t.Logf("timeout reading from unix socket")
|
||||
uc.Close()
|
||||
})
|
||||
_, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)
|
||||
closeUnix.Stop()
|
||||
|
||||
scms, err := unix.ParseSocketControlMessage(oob[:oobn])
|
||||
if err != nil {
|
||||
t.Fatalf("ParseSocketControlMessage: %v", err)
|
||||
}
|
||||
if len(scms) != 1 {
|
||||
t.Fatalf("expected 1 SocketControlMessage; got scms = %#v", scms)
|
||||
}
|
||||
scm := scms[0]
|
||||
gotFds, err := unix.ParseUnixRights(&scm)
|
||||
if err != nil {
|
||||
t.Fatalf("unix.ParseUnixRights: %v", err)
|
||||
}
|
||||
if len(gotFds) != 1 {
|
||||
t.Fatalf("wanted 1 fd; got %#v", gotFds)
|
||||
}
|
||||
|
||||
f := os.NewFile(uintptr(gotFds[0]), "fd-from-child")
|
||||
defer f.Close()
|
||||
|
||||
got, err := ioutil.ReadAll(f)
|
||||
want := "Hello from child process!\n"
|
||||
if string(got) != want {
|
||||
t.Errorf("child process ReadAll: %q, %v; want %q", got, err, want)
|
||||
}
|
||||
}
|
||||
|
||||
// passFDChild is the child process used by TestPassFD.
|
||||
func passFDChild() {
|
||||
defer os.Exit(0)
|
||||
|
||||
// Look for our fd. It should be fd 3, but we work around an fd leak
|
||||
// bug here (http://golang.org/issue/2603) to let it be elsewhere.
|
||||
var uc *net.UnixConn
|
||||
for fd := uintptr(3); fd <= 10; fd++ {
|
||||
f := os.NewFile(fd, "unix-conn")
|
||||
var ok bool
|
||||
netc, _ := net.FileConn(f)
|
||||
uc, ok = netc.(*net.UnixConn)
|
||||
if ok {
|
||||
break
|
||||
}
|
||||
}
|
||||
if uc == nil {
|
||||
fmt.Println("failed to find unix fd")
|
||||
return
|
||||
}
|
||||
|
||||
// Make a file f to send to our parent process on uc.
|
||||
// We make it in tempDir, which our parent will clean up.
|
||||
flag.Parse()
|
||||
tempDir := flag.Arg(0)
|
||||
f, err := ioutil.TempFile(tempDir, "")
|
||||
if err != nil {
|
||||
fmt.Printf("TempFile: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
f.Write([]byte("Hello from child process!\n"))
|
||||
f.Seek(0, 0)
|
||||
|
||||
rights := unix.UnixRights(int(f.Fd()))
|
||||
dummyByte := []byte("x")
|
||||
n, oobn, err := uc.WriteMsgUnix(dummyByte, rights, nil)
|
||||
if err != nil {
|
||||
fmt.Printf("WriteMsgUnix: %v", err)
|
||||
return
|
||||
}
|
||||
if n != 1 || oobn != len(rights) {
|
||||
fmt.Printf("WriteMsgUnix = %d, %d; want 1, %d", n, oobn, len(rights))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// TestUnixRightsRoundtrip tests that UnixRights, ParseSocketControlMessage,
|
||||
// and ParseUnixRights are able to successfully round-trip lists of file descriptors.
|
||||
func TestUnixRightsRoundtrip(t *testing.T) {
|
||||
testCases := [...][][]int{
|
||||
{{42}},
|
||||
{{1, 2}},
|
||||
{{3, 4, 5}},
|
||||
{{}},
|
||||
{{1, 2}, {3, 4, 5}, {}, {7}},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
b := []byte{}
|
||||
var n int
|
||||
for _, fds := range testCase {
|
||||
// Last assignment to n wins
|
||||
n = len(b) + unix.CmsgLen(4*len(fds))
|
||||
b = append(b, unix.UnixRights(fds...)...)
|
||||
}
|
||||
// Truncate b
|
||||
b = b[:n]
|
||||
|
||||
scms, err := unix.ParseSocketControlMessage(b)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseSocketControlMessage: %v", err)
|
||||
}
|
||||
if len(scms) != len(testCase) {
|
||||
t.Fatalf("expected %v SocketControlMessage; got scms = %#v", len(testCase), scms)
|
||||
}
|
||||
for i, scm := range scms {
|
||||
gotFds, err := unix.ParseUnixRights(&scm)
|
||||
if err != nil {
|
||||
t.Fatalf("ParseUnixRights: %v", err)
|
||||
}
|
||||
wantFds := testCase[i]
|
||||
if len(gotFds) != len(wantFds) {
|
||||
t.Fatalf("expected %v fds, got %#v", len(wantFds), gotFds)
|
||||
}
|
||||
for j, fd := range gotFds {
|
||||
if fd != wantFds[j] {
|
||||
t.Fatalf("expected fd %v, got %v", wantFds[j], fd)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRlimit(t *testing.T) {
|
||||
var rlimit, zero unix.Rlimit
|
||||
err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit)
|
||||
if err != nil {
|
||||
t.Fatalf("Getrlimit: save failed: %v", err)
|
||||
}
|
||||
if zero == rlimit {
|
||||
t.Fatalf("Getrlimit: save failed: got zero value %#v", rlimit)
|
||||
}
|
||||
set := rlimit
|
||||
set.Cur = set.Max - 1
|
||||
err = unix.Setrlimit(unix.RLIMIT_NOFILE, &set)
|
||||
if err != nil {
|
||||
t.Fatalf("Setrlimit: set failed: %#v %v", set, err)
|
||||
}
|
||||
var get unix.Rlimit
|
||||
err = unix.Getrlimit(unix.RLIMIT_NOFILE, &get)
|
||||
if err != nil {
|
||||
t.Fatalf("Getrlimit: get failed: %v", err)
|
||||
}
|
||||
set = rlimit
|
||||
set.Cur = set.Max - 1
|
||||
if set != get {
|
||||
// Seems like Darwin requires some privilege to
|
||||
// increase the soft limit of rlimit sandbox, though
|
||||
// Setrlimit never reports an error.
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
default:
|
||||
t.Fatalf("Rlimit: change failed: wanted %#v got %#v", set, get)
|
||||
}
|
||||
}
|
||||
err = unix.Setrlimit(unix.RLIMIT_NOFILE, &rlimit)
|
||||
if err != nil {
|
||||
t.Fatalf("Setrlimit: restore failed: %#v %v", rlimit, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSeekFailure(t *testing.T) {
|
||||
_, err := unix.Seek(-1, 0, 0)
|
||||
if err == nil {
|
||||
t.Fatalf("Seek(-1, 0, 0) did not fail")
|
||||
}
|
||||
str := err.Error() // used to crash on Linux
|
||||
t.Logf("Seek: %v", str)
|
||||
if str == "" {
|
||||
t.Fatalf("Seek(-1, 0, 0) return error with empty message")
|
||||
}
|
||||
}
|
@@ -20,7 +20,10 @@ Hence we are proposing this specification, along with an initial set of plugins
 ## How do I use CNI?

 ## Requirements
-CNI requires Go 1.4+ to build.
+CNI requires Go 1.5+ to build.
+
+Go 1.5 users will need to set GO15VENDOREXPERIMENT=1 to get vendored
+dependencies. This flag is set by default in 1.6.

 ## Included Plugins
 This repository includes a number of common plugins that can be found in plugins/ directory.
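A minimal build sketch for the requirement above, assuming a stock Go 1.5 toolchain and the vendor/ directory introduced by this commit; the exact commands are an illustration, not taken from the repository docs:

    # Go 1.5 only honors the vendor/ directory when the experiment flag is set;
    # Go 1.6 enables it by default, so the export is unnecessary there.
    export GO15VENDOREXPERIMENT=1
    go build ./...
    go test ./...
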
vendor/github.com/coreos/go-iptables/LICENSE (new file; generated, vendored; 191 lines added)
@@ -0,0 +1,191 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
vendor/github.com/coreos/go-systemd/LICENSE (new file; generated, vendored; 191 lines added)
@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.

"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.

2. Grant of Copyright License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.

3. Grant of Patent License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.

4. Redistribution.

You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:

You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.

5. Submission of Contributions.

Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.

6. Trademarks.

This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.

7. Disclaimer of Warranty.

Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.

8. Limitation of Liability.

In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability.

While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work

To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Some files were not shown because too many files have changed in this diff.