Moves to official vendoring solution
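The diff below replaces the legacy Godeps/_workspace layout with the top-level vendor/ directory that Go 1.6 reads natively. As a rough illustration of how such a migration is typically performed with godep v58 or newer, the commands below are an assumption for context only, not steps recorded in this commit:

    godep restore                  # assumed step: repopulate GOPATH from the old Godeps.json
    git rm -r Godeps/_workspace    # assumed step: drop the legacy workspace copies of each dependency
    godep save ./...               # assumed step: re-vendor dependencies into ./vendor and rewrite Godeps.json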
Godeps/Godeps.json (142 lines changed, generated)
@@ -1,6 +1,7 @@
{
	"ImportPath": "github.com/appc/cni",
	"GoVersion": "go1.4.2",
	"GoVersion": "go1.6",
	"GodepVersion": "v58",
	"Packages": [
		"./..."
	],
@@ -27,15 +28,154 @@
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/config",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/codelocation",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/containernode",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/failer",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/leafnodes",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/remote",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/spec",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/specrunner",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/suite",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/testingtproxy",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/writer",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/reporters",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/reporters/stenographer",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/types",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/gomega",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/format",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/gbytes",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/gexec",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/internal/assertion",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/internal/asyncassertion",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/internal/oraclematcher",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/internal/testingtsupport",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/matchers",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/edge",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/node",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/util",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/types",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/vishvananda/netlink",
			"Rev": "ecf47fd5739b3d2c3daf7c89c4b9715a2605c21b"
		},
		{
			"ImportPath": "github.com/vishvananda/netlink/nl",
			"Rev": "ecf47fd5739b3d2c3daf7c89c4b9715a2605c21b"
		},
		{
			"ImportPath": "golang.org/x/sys/unix",
			"Rev": "e11762ca30adc5b39fdbfd8c4250dabeb8e456d3"
Godeps/_workspace/.gitignore (2 lines removed, generated, vendored)
@@ -1,2 +0,0 @@
/pkg
/bin
Godeps/_workspace/src/github.com/coreos/go-iptables/iptables/iptables.go (300 lines removed, generated, vendored)
@@ -1,300 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package iptables
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Adds the output of stderr to exec.ExitError
|
||||
type Error struct {
|
||||
exec.ExitError
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e *Error) ExitStatus() int {
|
||||
return e.Sys().(syscall.WaitStatus).ExitStatus()
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("exit status %v: %v", e.ExitStatus(), e.msg)
|
||||
}
|
||||
|
||||
type IPTables struct {
|
||||
path string
|
||||
hasCheck bool
|
||||
hasWait bool
|
||||
|
||||
fmu *fileLock
|
||||
}
|
||||
|
||||
func New() (*IPTables, error) {
|
||||
path, err := exec.LookPath("iptables")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
checkPresent, waitPresent, err := getIptablesCommandSupport()
|
||||
if err != nil {
|
||||
log.Printf("Error checking iptables version, assuming version at least 1.4.20: %v", err)
|
||||
checkPresent = true
|
||||
waitPresent = true
|
||||
}
|
||||
ipt := IPTables{
|
||||
path: path,
|
||||
hasCheck: checkPresent,
|
||||
hasWait: waitPresent,
|
||||
}
|
||||
if !waitPresent {
|
||||
ipt.fmu, err = newXtablesFileLock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &ipt, nil
|
||||
}
|
||||
|
||||
// Exists checks if given rulespec in specified table/chain exists
|
||||
func (ipt *IPTables) Exists(table, chain string, rulespec ...string) (bool, error) {
|
||||
if !ipt.hasCheck {
|
||||
return ipt.existsForOldIptables(table, chain, rulespec)
|
||||
|
||||
}
|
||||
cmd := append([]string{"-t", table, "-C", chain}, rulespec...)
|
||||
err := ipt.run(cmd...)
|
||||
switch {
|
||||
case err == nil:
|
||||
return true, nil
|
||||
case err.(*Error).ExitStatus() == 1:
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// Insert inserts rulespec to specified table/chain (in specified pos)
|
||||
func (ipt *IPTables) Insert(table, chain string, pos int, rulespec ...string) error {
|
||||
cmd := append([]string{"-t", table, "-I", chain, strconv.Itoa(pos)}, rulespec...)
|
||||
return ipt.run(cmd...)
|
||||
}
|
||||
|
||||
// Append appends rulespec to specified table/chain
|
||||
func (ipt *IPTables) Append(table, chain string, rulespec ...string) error {
|
||||
cmd := append([]string{"-t", table, "-A", chain}, rulespec...)
|
||||
return ipt.run(cmd...)
|
||||
}
|
||||
|
||||
// AppendUnique acts like Append except that it won't add a duplicate
|
||||
func (ipt *IPTables) AppendUnique(table, chain string, rulespec ...string) error {
|
||||
exists, err := ipt.Exists(table, chain, rulespec...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return ipt.Append(table, chain, rulespec...)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete removes rulespec in specified table/chain
|
||||
func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error {
|
||||
cmd := append([]string{"-t", table, "-D", chain}, rulespec...)
|
||||
return ipt.run(cmd...)
|
||||
}
|
||||
|
||||
// List rules in specified table/chain
|
||||
func (ipt *IPTables) List(table, chain string) ([]string, error) {
|
||||
args := []string{"-t", table, "-S", chain}
|
||||
var stdout bytes.Buffer
|
||||
if err := ipt.runWithOutput(args, &stdout); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rules := strings.Split(stdout.String(), "\n")
|
||||
if len(rules) > 0 && rules[len(rules)-1] == "" {
|
||||
rules = rules[:len(rules)-1]
|
||||
}
|
||||
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
func (ipt *IPTables) NewChain(table, chain string) error {
|
||||
return ipt.run("-t", table, "-N", chain)
|
||||
}
|
||||
|
||||
// ClearChain flushed (deletes all rules) in the specified table/chain.
|
||||
// If the chain does not exist, a new one will be created
|
||||
func (ipt *IPTables) ClearChain(table, chain string) error {
|
||||
err := ipt.NewChain(table, chain)
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
return nil
|
||||
case err.(*Error).ExitStatus() == 1:
|
||||
// chain already exists. Flush (clear) it.
|
||||
return ipt.run("-t", table, "-F", chain)
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// RenameChain renames the old chain to the new one.
|
||||
func (ipt *IPTables) RenameChain(table, oldChain, newChain string) error {
|
||||
return ipt.run("-t", table, "-E", oldChain, newChain)
|
||||
}
|
||||
|
||||
// DeleteChain deletes the chain in the specified table.
|
||||
// The chain must be empty
|
||||
func (ipt *IPTables) DeleteChain(table, chain string) error {
|
||||
return ipt.run("-t", table, "-X", chain)
|
||||
}
|
||||
|
||||
// run runs an iptables command with the given arguments, ignoring
|
||||
// any stdout output
|
||||
func (ipt *IPTables) run(args ...string) error {
|
||||
return ipt.runWithOutput(args, nil)
|
||||
}
|
||||
|
||||
// runWithOutput runs an iptables command with the given arguments,
|
||||
// writing any stdout output to the given writer
|
||||
func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error {
|
||||
args = append([]string{ipt.path}, args...)
|
||||
if ipt.hasWait {
|
||||
args = append(args, "--wait")
|
||||
} else {
|
||||
ul, err := ipt.fmu.tryLock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ul.Unlock()
|
||||
}
|
||||
|
||||
var stderr bytes.Buffer
|
||||
cmd := exec.Cmd{
|
||||
Path: ipt.path,
|
||||
Args: args,
|
||||
Stdout: stdout,
|
||||
Stderr: &stderr,
|
||||
}
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return &Error{*(err.(*exec.ExitError)), stderr.String()}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Checks if iptables has the "-C" and "--wait" flag
|
||||
func getIptablesCommandSupport() (bool, bool, error) {
|
||||
vstring, err := getIptablesVersionString()
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
v1, v2, v3, err := extractIptablesVersion(vstring)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
return iptablesHasCheckCommand(v1, v2, v3), iptablesHasWaitCommand(v1, v2, v3), nil
|
||||
}
|
||||
|
||||
// getIptablesVersion returns the first three components of the iptables version.
|
||||
// e.g. "iptables v1.3.66" would return (1, 3, 66, nil)
|
||||
func extractIptablesVersion(str string) (int, int, int, error) {
|
||||
versionMatcher := regexp.MustCompile("v([0-9]+)\\.([0-9]+)\\.([0-9]+)")
|
||||
result := versionMatcher.FindStringSubmatch(str)
|
||||
if result == nil {
|
||||
return 0, 0, 0, fmt.Errorf("no iptables version found in string: %s", str)
|
||||
}
|
||||
|
||||
v1, err := strconv.Atoi(result[1])
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
v2, err := strconv.Atoi(result[2])
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
v3, err := strconv.Atoi(result[3])
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
return v1, v2, v3, nil
|
||||
}
|
||||
|
||||
// Runs "iptables --version" to get the version string
|
||||
func getIptablesVersionString() (string, error) {
|
||||
cmd := exec.Command("iptables", "--version")
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return out.String(), nil
|
||||
}
|
||||
|
||||
// Checks if an iptables version is after 1.4.11, when --check was added
|
||||
func iptablesHasCheckCommand(v1 int, v2 int, v3 int) bool {
|
||||
if v1 > 1 {
|
||||
return true
|
||||
}
|
||||
if v1 == 1 && v2 > 4 {
|
||||
return true
|
||||
}
|
||||
if v1 == 1 && v2 == 4 && v3 >= 11 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Checks if an iptables version is after 1.4.20, when --wait was added
|
||||
func iptablesHasWaitCommand(v1 int, v2 int, v3 int) bool {
|
||||
if v1 > 1 {
|
||||
return true
|
||||
}
|
||||
if v1 == 1 && v2 > 4 {
|
||||
return true
|
||||
}
|
||||
if v1 == 1 && v2 == 4 && v3 >= 20 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Checks if a rule specification exists for a table
|
||||
func (ipt *IPTables) existsForOldIptables(table, chain string, rulespec []string) (bool, error) {
|
||||
rs := strings.Join(append([]string{"-A", chain}, rulespec...), " ")
|
||||
args := []string{"-t", table, "-S"}
|
||||
var stdout bytes.Buffer
|
||||
err := ipt.runWithOutput(args, &stdout)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return strings.Contains(stdout.String(), rs), nil
|
||||
}
|
Godeps/_workspace/src/github.com/coreos/go-iptables/iptables/iptables_test.go (155 lines removed, generated, vendored)
@@ -1,155 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package iptables
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func randChain(t *testing.T) string {
|
||||
n, err := rand.Int(rand.Reader, big.NewInt(1000000))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to generate random chain name: %v", err)
|
||||
}
|
||||
|
||||
return "TEST-" + n.String()
|
||||
}
|
||||
|
||||
func TestChain(t *testing.T) {
|
||||
chain := randChain(t)
|
||||
|
||||
ipt, err := New()
|
||||
if err != nil {
|
||||
t.Fatalf("New failed: %v", err)
|
||||
}
|
||||
|
||||
// chain shouldn't exist, this will create new
|
||||
err = ipt.ClearChain("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("ClearChain (of missing) failed: %v", err)
|
||||
}
|
||||
|
||||
// chain now exists
|
||||
err = ipt.ClearChain("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("ClearChain (of empty) failed: %v", err)
|
||||
}
|
||||
|
||||
// put a simple rule in
|
||||
err = ipt.Append("filter", chain, "-s", "0.0.0.0/0", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("Append failed: %v", err)
|
||||
}
|
||||
|
||||
// can't delete non-empty chain
|
||||
err = ipt.DeleteChain("filter", chain)
|
||||
if err == nil {
|
||||
t.Fatalf("DeleteChain of non-empty chain did not fail")
|
||||
}
|
||||
|
||||
err = ipt.ClearChain("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("ClearChain (of non-empty) failed: %v", err)
|
||||
}
|
||||
|
||||
// rename the chain
|
||||
newChain := randChain(t)
|
||||
err = ipt.RenameChain("filter", chain, newChain)
|
||||
if err != nil {
|
||||
t.Fatalf("RenameChain failed: %v", err)
|
||||
}
|
||||
|
||||
// chain empty, should be ok
|
||||
err = ipt.DeleteChain("filter", newChain)
|
||||
if err != nil {
|
||||
t.Fatalf("DeleteChain of empty chain failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRules(t *testing.T) {
|
||||
chain := randChain(t)
|
||||
|
||||
ipt, err := New()
|
||||
if err != nil {
|
||||
t.Fatalf("New failed: %v", err)
|
||||
}
|
||||
|
||||
// chain shouldn't exist, this will create new
|
||||
err = ipt.ClearChain("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("ClearChain (of missing) failed: %v", err)
|
||||
}
|
||||
|
||||
err = ipt.Append("filter", chain, "-s", "10.1.0.0/16", "-d", "8.8.8.8/32", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("Append failed: %v", err)
|
||||
}
|
||||
|
||||
err = ipt.AppendUnique("filter", chain, "-s", "10.1.0.0/16", "-d", "8.8.8.8/32", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("AppendUnique failed: %v", err)
|
||||
}
|
||||
|
||||
err = ipt.Append("filter", chain, "-s", "10.2.0.0/16", "-d", "8.8.8.8/32", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("Append failed: %v", err)
|
||||
}
|
||||
|
||||
err = ipt.Insert("filter", chain, 2, "-s", "10.2.0.0/16", "-d", "9.9.9.9/32", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("Insert failed: %v", err)
|
||||
}
|
||||
|
||||
err = ipt.Insert("filter", chain, 1, "-s", "10.1.0.0/16", "-d", "9.9.9.9/32", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("Insert failed: %v", err)
|
||||
}
|
||||
|
||||
err = ipt.Delete("filter", chain, "-s", "10.1.0.0/16", "-d", "9.9.9.9/32", "-j", "ACCEPT")
|
||||
if err != nil {
|
||||
t.Fatalf("Delete failed: %v", err)
|
||||
}
|
||||
|
||||
rules, err := ipt.List("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("List failed: %v", err)
|
||||
}
|
||||
|
||||
expected := []string{
|
||||
"-N " + chain,
|
||||
"-A " + chain + " -s 10.1.0.0/16 -d 8.8.8.8/32 -j ACCEPT",
|
||||
"-A " + chain + " -s 10.2.0.0/16 -d 9.9.9.9/32 -j ACCEPT",
|
||||
"-A " + chain + " -s 10.2.0.0/16 -d 8.8.8.8/32 -j ACCEPT",
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(rules, expected) {
|
||||
t.Fatalf("List mismatch: \ngot %#v \nneed %#v", rules, expected)
|
||||
}
|
||||
|
||||
// Clear the chain that was created.
|
||||
err = ipt.ClearChain("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to clear test chain: %v", err)
|
||||
}
|
||||
|
||||
// Delete the chain that was created
|
||||
err = ipt.DeleteChain("filter", chain)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to delete test chain: %v", err)
|
||||
}
|
||||
}
|
Godeps/_workspace/src/github.com/coreos/go-iptables/iptables/lock.go (84 lines removed, generated, vendored)
@@ -1,84 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package iptables
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// In earlier versions of iptables, the xtables lock was implemented
|
||||
// via a Unix socket, but now flock is used via this lockfile:
|
||||
// http://git.netfilter.org/iptables/commit/?id=aa562a660d1555b13cffbac1e744033e91f82707
|
||||
// Note the LSB-conforming "/run" directory does not exist on old
|
||||
// distributions, so assume "/var" is symlinked
|
||||
xtablesLockFilePath = "/var/run/xtables.lock"
|
||||
|
||||
defaultFilePerm = 0600
|
||||
)
|
||||
|
||||
type Unlocker interface {
|
||||
Unlock() error
|
||||
}
|
||||
|
||||
type nopUnlocker struct{}
|
||||
|
||||
func (_ nopUnlocker) Unlock() error { return nil }
|
||||
|
||||
type fileLock struct {
|
||||
// mu is used to protect against concurrent invocations from within this process
|
||||
mu sync.Mutex
|
||||
fd int
|
||||
}
|
||||
|
||||
// tryLock takes an exclusive lock on the xtables lock file without blocking.
|
||||
// This is best-effort only: if the exclusive lock would block (i.e. because
|
||||
// another process already holds it), no error is returned. Otherwise, any
|
||||
// error encountered during the locking operation is returned.
|
||||
// The returned Unlocker should be used to release the lock when the caller is
|
||||
// done invoking iptables commands.
|
||||
func (l *fileLock) tryLock() (Unlocker, error) {
|
||||
l.mu.Lock()
|
||||
err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB)
|
||||
switch err {
|
||||
case syscall.EWOULDBLOCK:
|
||||
l.mu.Unlock()
|
||||
return nopUnlocker{}, nil
|
||||
case nil:
|
||||
return l, nil
|
||||
default:
|
||||
l.mu.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock closes the underlying file, which implicitly unlocks it as well. It
|
||||
// also unlocks the associated mutex.
|
||||
func (l *fileLock) Unlock() error {
|
||||
defer l.mu.Unlock()
|
||||
return syscall.Close(l.fd)
|
||||
}
|
||||
|
||||
// newXtablesFileLock opens a new lock on the xtables lockfile without
|
||||
// acquiring the lock
|
||||
func newXtablesFileLock() (*fileLock, error) {
|
||||
fd, err := syscall.Open(xtablesLockFilePath, os.O_CREATE, defaultFilePerm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &fileLock{fd: fd}, nil
|
||||
}
|
Godeps/_workspace/src/github.com/coreos/go-systemd/activation/files.go (52 lines removed, generated, vendored)
@@ -1,52 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package activation implements primitives for systemd socket activation.
|
||||
package activation
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// based on: https://gist.github.com/alberts/4640792
|
||||
const (
|
||||
listenFdsStart = 3
|
||||
)
|
||||
|
||||
func Files(unsetEnv bool) []*os.File {
|
||||
if unsetEnv {
|
||||
defer os.Unsetenv("LISTEN_PID")
|
||||
defer os.Unsetenv("LISTEN_FDS")
|
||||
}
|
||||
|
||||
pid, err := strconv.Atoi(os.Getenv("LISTEN_PID"))
|
||||
if err != nil || pid != os.Getpid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS"))
|
||||
if err != nil || nfds == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
files := make([]*os.File, 0, nfds)
|
||||
for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ {
|
||||
syscall.CloseOnExec(fd)
|
||||
files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd)))
|
||||
}
|
||||
|
||||
return files
|
||||
}
|
Godeps/_workspace/src/github.com/coreos/go-systemd/activation/files_test.go (82 lines removed, generated, vendored)
@@ -1,82 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package activation
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// correctStringWritten fails the text if the correct string wasn't written
|
||||
// to the other side of the pipe.
|
||||
func correctStringWritten(t *testing.T, r *os.File, expected string) bool {
|
||||
bytes := make([]byte, len(expected))
|
||||
io.ReadAtLeast(r, bytes, len(expected))
|
||||
|
||||
if string(bytes) != expected {
|
||||
t.Fatalf("Unexpected string %s", string(bytes))
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// TestActivation forks out a copy of activation.go example and reads back two
|
||||
// strings from the pipes that are passed in.
|
||||
func TestActivation(t *testing.T) {
|
||||
cmd := exec.Command("go", "run", "../examples/activation/activation.go")
|
||||
|
||||
r1, w1, _ := os.Pipe()
|
||||
r2, w2, _ := os.Pipe()
|
||||
cmd.ExtraFiles = []*os.File{
|
||||
w1,
|
||||
w2,
|
||||
}
|
||||
|
||||
cmd.Env = os.Environ()
|
||||
cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1")
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
correctStringWritten(t, r1, "Hello world")
|
||||
correctStringWritten(t, r2, "Goodbye world")
|
||||
}
|
||||
|
||||
func TestActivationNoFix(t *testing.T) {
|
||||
cmd := exec.Command("go", "run", "../examples/activation/activation.go")
|
||||
cmd.Env = os.Environ()
|
||||
cmd.Env = append(cmd.Env, "LISTEN_FDS=2")
|
||||
|
||||
out, _ := cmd.CombinedOutput()
|
||||
if bytes.Contains(out, []byte("No files")) == false {
|
||||
t.Fatalf("Child didn't error out as expected")
|
||||
}
|
||||
}
|
||||
|
||||
func TestActivationNoFiles(t *testing.T) {
|
||||
cmd := exec.Command("go", "run", "../examples/activation/activation.go")
|
||||
cmd.Env = os.Environ()
|
||||
cmd.Env = append(cmd.Env, "LISTEN_FDS=0", "FIX_LISTEN_PID=1")
|
||||
|
||||
out, _ := cmd.CombinedOutput()
|
||||
if bytes.Contains(out, []byte("No files")) == false {
|
||||
t.Fatalf("Child didn't error out as expected")
|
||||
}
|
||||
}
|
Godeps/_workspace/src/github.com/coreos/go-systemd/activation/listeners.go (37 lines removed, generated, vendored)
@@ -1,37 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package activation
|
||||
|
||||
import (
|
||||
"net"
|
||||
)
|
||||
|
||||
// Listeners returns a slice containing a net.Listener for each matching socket type
|
||||
// passed to this process.
|
||||
//
|
||||
// The order of the file descriptors is preserved in the returned slice.
|
||||
// Nil values are used to fill any gaps. For example if systemd were to return file descriptors
|
||||
// corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener}
|
||||
func Listeners(unsetEnv bool) ([]net.Listener, error) {
|
||||
files := Files(unsetEnv)
|
||||
listeners := make([]net.Listener, len(files))
|
||||
|
||||
for i, f := range files {
|
||||
if pc, err := net.FileListener(f); err == nil {
|
||||
listeners[i] = pc
|
||||
}
|
||||
}
|
||||
return listeners, nil
|
||||
}
|
Godeps/_workspace/src/github.com/coreos/go-systemd/activation/listeners_test.go (86 lines removed, generated, vendored)
@@ -1,86 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package activation
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// correctStringWritten fails the text if the correct string wasn't written
|
||||
// to the other side of the pipe.
|
||||
func correctStringWrittenNet(t *testing.T, r net.Conn, expected string) bool {
|
||||
bytes := make([]byte, len(expected))
|
||||
io.ReadAtLeast(r, bytes, len(expected))
|
||||
|
||||
if string(bytes) != expected {
|
||||
t.Fatalf("Unexpected string %s", string(bytes))
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// TestActivation forks out a copy of activation.go example and reads back two
|
||||
// strings from the pipes that are passed in.
|
||||
func TestListeners(t *testing.T) {
|
||||
cmd := exec.Command("go", "run", "../examples/activation/listen.go")
|
||||
|
||||
l1, err := net.Listen("tcp", ":9999")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
l2, err := net.Listen("tcp", ":1234")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
t1 := l1.(*net.TCPListener)
|
||||
t2 := l2.(*net.TCPListener)
|
||||
|
||||
f1, _ := t1.File()
|
||||
f2, _ := t2.File()
|
||||
|
||||
cmd.ExtraFiles = []*os.File{
|
||||
f1,
|
||||
f2,
|
||||
}
|
||||
|
||||
r1, err := net.Dial("tcp", "127.0.0.1:9999")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
r1.Write([]byte("Hi"))
|
||||
|
||||
r2, err := net.Dial("tcp", "127.0.0.1:1234")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
r2.Write([]byte("Hi"))
|
||||
|
||||
cmd.Env = os.Environ()
|
||||
cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1")
|
||||
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
println(string(out))
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
correctStringWrittenNet(t, r1, "Hello world")
|
||||
correctStringWrittenNet(t, r2, "Goodbye world")
|
||||
}
|
Godeps/_workspace/src/github.com/coreos/go-systemd/activation/packetconns.go (37 lines removed, generated, vendored)
@@ -1,37 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package activation
|
||||
|
||||
import (
|
||||
"net"
|
||||
)
|
||||
|
||||
// PacketConns returns a slice containing a net.PacketConn for each matching socket type
|
||||
// passed to this process.
|
||||
//
|
||||
// The order of the file descriptors is preserved in the returned slice.
|
||||
// Nil values are used to fill any gaps. For example if systemd were to return file descriptors
|
||||
// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn}
|
||||
func PacketConns(unsetEnv bool) ([]net.PacketConn, error) {
|
||||
files := Files(unsetEnv)
|
||||
conns := make([]net.PacketConn, len(files))
|
||||
|
||||
for i, f := range files {
|
||||
if pc, err := net.FilePacketConn(f); err == nil {
|
||||
conns[i] = pc
|
||||
}
|
||||
}
|
||||
return conns, nil
|
||||
}
|
Godeps/_workspace/src/github.com/coreos/go-systemd/activation/packetconns_test.go (68 lines removed, generated, vendored)
@@ -1,68 +0,0 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package activation
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestActivation forks out a copy of activation.go example and reads back two
|
||||
// strings from the pipes that are passed in.
|
||||
func TestPacketConns(t *testing.T) {
|
||||
cmd := exec.Command("go", "run", "../examples/activation/udpconn.go")
|
||||
|
||||
u1, err := net.ListenUDP("udp", &net.UDPAddr{Port: 9999})
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
u2, err := net.ListenUDP("udp", &net.UDPAddr{Port: 1234})
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
f1, _ := u1.File()
|
||||
f2, _ := u2.File()
|
||||
|
||||
cmd.ExtraFiles = []*os.File{
|
||||
f1,
|
||||
f2,
|
||||
}
|
||||
|
||||
r1, err := net.Dial("udp", "127.0.0.1:9999")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
r1.Write([]byte("Hi"))
|
||||
|
||||
r2, err := net.Dial("udp", "127.0.0.1:1234")
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
r2.Write([]byte("Hi"))
|
||||
|
||||
cmd.Env = os.Environ()
|
||||
cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1")
|
||||
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("Cmd output '%s', err: '%s'\n", out, err)
|
||||
}
|
||||
|
||||
correctStringWrittenNet(t, r1, "Hello world")
|
||||
correctStringWrittenNet(t, r2, "Goodbye world")
|
||||
}
|
Godeps/_workspace/src/github.com/d2g/dhcp4/LICENSE (27 lines removed, generated, vendored)
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2013 Skagerrak Software Limited. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Skagerrak Software Limited nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
Godeps/_workspace/src/github.com/d2g/dhcp4/README.md (5 lines removed, generated, vendored)
@@ -1,5 +0,0 @@
# DHCP4 - A DHCP library written in Go.

Warning: This library is still being developed. Function calls will change.

I've removed Server Functionality, for me this project supports the underlying DHCP format not the implementation.
Godeps/_workspace/src/github.com/d2g/dhcp4/constants.go (121 lines removed, generated, vendored)
@@ -1,121 +0,0 @@
|
||||
package dhcp4
|
||||
|
||||
// OpCodes
|
||||
const (
|
||||
BootRequest OpCode = 1 // From Client
|
||||
BootReply OpCode = 2 // From Server
|
||||
)
|
||||
|
||||
// DHCP Message Type 53
|
||||
const (
|
||||
Discover MessageType = 1 // Broadcast Packet From Client - Can I have an IP?
|
||||
Offer MessageType = 2 // Broadcast From Server - Here's an IP
|
||||
Request MessageType = 3 // Broadcast From Client - I'll take that IP (Also start for renewals)
|
||||
Decline MessageType = 4 // Broadcast From Client - Sorry I can't use that IP
|
||||
ACK MessageType = 5 // From Server, Yes you can have that IP
|
||||
NAK MessageType = 6 // From Server, No you cannot have that IP
|
||||
Release MessageType = 7 // From Client, I don't need that IP anymore
|
||||
Inform MessageType = 8 // From Client, I have this IP and there's nothing you can do about it
|
||||
)
|
||||
|
||||
// DHCP Options
|
||||
const (
|
||||
End OptionCode = 255
|
||||
Pad OptionCode = 0
|
||||
OptionSubnetMask OptionCode = 1
|
||||
OptionTimeOffset OptionCode = 2
|
||||
OptionRouter OptionCode = 3
|
||||
OptionTimeServer OptionCode = 4
|
||||
OptionNameServer OptionCode = 5
|
||||
OptionDomainNameServer OptionCode = 6
|
||||
OptionLogServer OptionCode = 7
|
||||
OptionCookieServer OptionCode = 8
|
||||
OptionLPRServer OptionCode = 9
|
||||
OptionImpressServer OptionCode = 10
|
||||
OptionResourceLocationServer OptionCode = 11
|
||||
OptionHostName OptionCode = 12
|
||||
OptionBootFileSize OptionCode = 13
|
||||
OptionMeritDumpFile OptionCode = 14
|
||||
OptionDomainName OptionCode = 15
|
||||
OptionSwapServer OptionCode = 16
|
||||
OptionRootPath OptionCode = 17
|
||||
OptionExtensionsPath OptionCode = 18
|
||||
|
||||
// IP Layer Parameters per Host
|
||||
OptionIPForwardingEnableDisable OptionCode = 19
|
||||
OptionNonLocalSourceRoutingEnableDisable OptionCode = 20
|
||||
OptionPolicyFilter OptionCode = 21
|
||||
OptionMaximumDatagramReassemblySize OptionCode = 22
|
||||
OptionDefaultIPTimeToLive OptionCode = 23
|
||||
OptionPathMTUAgingTimeout OptionCode = 24
|
||||
OptionPathMTUPlateauTable OptionCode = 25
|
||||
|
||||
// IP Layer Parameters per Interface
|
||||
OptionInterfaceMTU OptionCode = 26
|
||||
OptionAllSubnetsAreLocal OptionCode = 27
|
||||
OptionBroadcastAddress OptionCode = 28
|
||||
OptionPerformMaskDiscovery OptionCode = 29
|
||||
OptionMaskSupplier OptionCode = 30
|
||||
OptionPerformRouterDiscovery OptionCode = 31
|
||||
OptionRouterSolicitationAddress OptionCode = 32
|
||||
OptionStaticRoute OptionCode = 33
|
||||
|
||||
// Link Layer Parameters per Interface
|
||||
OptionTrailerEncapsulation OptionCode = 34
|
||||
OptionARPCacheTimeout OptionCode = 35
|
||||
OptionEthernetEncapsulation OptionCode = 36
|
||||
|
||||
// TCP Parameters
|
||||
OptionTCPDefaultTTL OptionCode = 37
|
||||
OptionTCPKeepaliveInterval OptionCode = 38
|
||||
OptionTCPKeepaliveGarbage OptionCode = 39
|
||||
|
||||
// Application and Service Parameters
|
||||
OptionNetworkInformationServiceDomain OptionCode = 40
|
||||
OptionNetworkInformationServers OptionCode = 41
|
||||
OptionNetworkTimeProtocolServers OptionCode = 42
|
||||
OptionVendorSpecificInformation OptionCode = 43
|
||||
OptionNetBIOSOverTCPIPNameServer OptionCode = 44
|
||||
OptionNetBIOSOverTCPIPDatagramDistributionServer OptionCode = 45
|
||||
OptionNetBIOSOverTCPIPNodeType OptionCode = 46
|
||||
OptionNetBIOSOverTCPIPScope OptionCode = 47
|
||||
OptionXWindowSystemFontServer OptionCode = 48
|
||||
OptionXWindowSystemDisplayManager OptionCode = 49
|
||||
OptionNetworkInformationServicePlusDomain OptionCode = 64
|
||||
OptionNetworkInformationServicePlusServers OptionCode = 65
|
||||
OptionMobileIPHomeAgent OptionCode = 68
|
||||
OptionSimpleMailTransportProtocol OptionCode = 69
|
||||
OptionPostOfficeProtocolServer OptionCode = 70
|
||||
OptionNetworkNewsTransportProtocol OptionCode = 71
|
||||
OptionDefaultWorldWideWebServer OptionCode = 72
|
||||
OptionDefaultFingerServer OptionCode = 73
|
||||
OptionDefaultInternetRelayChatServer OptionCode = 74
|
||||
OptionStreetTalkServer OptionCode = 75
|
||||
OptionStreetTalkDirectoryAssistance OptionCode = 76
|
||||
|
||||
// DHCP Extensions
|
||||
OptionRequestedIPAddress OptionCode = 50
|
||||
OptionIPAddressLeaseTime OptionCode = 51
|
||||
OptionOverload OptionCode = 52
|
||||
OptionDHCPMessageType OptionCode = 53
|
||||
OptionServerIdentifier OptionCode = 54
|
||||
OptionParameterRequestList OptionCode = 55
|
||||
OptionMessage OptionCode = 56
|
||||
OptionMaximumDHCPMessageSize OptionCode = 57
|
||||
OptionRenewalTimeValue OptionCode = 58
|
||||
OptionRebindingTimeValue OptionCode = 59
|
||||
OptionVendorClassIdentifier OptionCode = 60
|
||||
OptionClientIdentifier OptionCode = 61
|
||||
|
||||
OptionTFTPServerName OptionCode = 66
|
||||
OptionBootFileName OptionCode = 67
|
||||
|
||||
OptionUserClass OptionCode = 77
|
||||
|
||||
OptionClientArchitecture OptionCode = 93
|
||||
|
||||
OptionTZPOSIXString OptionCode = 100
|
||||
OptionTZDatabaseString OptionCode = 101
|
||||
|
||||
OptionClasslessRouteFormat OptionCode = 121
|
||||
)
|
Godeps/_workspace/src/github.com/d2g/dhcp4/helpers.go (58 lines removed, generated, vendored)
@@ -1,58 +0,0 @@
|
||||
package dhcp4
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
// IPRange returns how many ips in the ip range from start to stop (inclusive)
|
||||
func IPRange(start, stop net.IP) int {
|
||||
//return int(Uint([]byte(stop))-Uint([]byte(start))) + 1
|
||||
return int(binary.BigEndian.Uint32(stop.To4())) - int(binary.BigEndian.Uint32(start.To4())) + 1
|
||||
}
|
||||
|
||||
// IPAdd returns a copy of start + add.
|
||||
// IPAdd(net.IP{192,168,1,1},30) returns net.IP{192.168.1.31}
|
||||
func IPAdd(start net.IP, add int) net.IP { // IPv4 only
|
||||
start = start.To4()
|
||||
//v := Uvarint([]byte(start))
|
||||
result := make(net.IP, 4)
|
||||
binary.BigEndian.PutUint32(result, binary.BigEndian.Uint32(start)+uint32(add))
|
||||
//PutUint([]byte(result), v+uint64(add))
|
||||
return result
|
||||
}
|
||||
|
||||
// IPLess returns where IP a is less than IP b.
|
||||
func IPLess(a, b net.IP) bool {
|
||||
b = b.To4()
|
||||
for i, ai := range a.To4() {
|
||||
if ai != b[i] {
|
||||
return ai < b[i]
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IPInRange returns true if ip is between (inclusive) start and stop.
|
||||
func IPInRange(start, stop, ip net.IP) bool {
|
||||
return !(IPLess(ip, start) || IPLess(stop, ip))
|
||||
}
|
||||
|
||||
// OptionsLeaseTime - converts a time.Duration to a 4 byte slice, compatible
|
||||
// with OptionIPAddressLeaseTime.
|
||||
func OptionsLeaseTime(d time.Duration) []byte {
|
||||
leaseBytes := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(leaseBytes, uint32(d/time.Second))
|
||||
//PutUvarint(leaseBytes, uint64(d/time.Second))
|
||||
return leaseBytes
|
||||
}
|
||||
|
||||
// JoinIPs returns a byte slice of IP addresses, one immediately after the other
|
||||
// This may be useful for creating multiple IP options such as OptionRouter.
|
||||
func JoinIPs(ips []net.IP) (b []byte) {
|
||||
for _, v := range ips {
|
||||
b = append(b, v.To4()...)
|
||||
}
|
||||
return
|
||||
}
|
Godeps/_workspace/src/github.com/d2g/dhcp4/option.go (40 lines removed, generated, vendored)
@@ -1,40 +0,0 @@
|
||||
package dhcp4
|
||||
|
||||
type OptionCode byte
|
||||
|
||||
type Option struct {
|
||||
Code OptionCode
|
||||
Value []byte
|
||||
}
|
||||
|
||||
// Map of DHCP options
|
||||
type Options map[OptionCode][]byte
|
||||
|
||||
// SelectOrderOrAll has same functionality as SelectOrder, except if the order
|
||||
// param is nil, whereby all options are added (in arbitary order).
|
||||
func (o Options) SelectOrderOrAll(order []byte) []Option {
|
||||
if order == nil {
|
||||
opts := make([]Option, 0, len(o))
|
||||
for i, v := range o {
|
||||
opts = append(opts, Option{Code: i, Value: v})
|
||||
}
|
||||
return opts
|
||||
}
|
||||
return o.SelectOrder(order)
|
||||
}
|
||||
|
||||
// SelectOrder returns a slice of options ordered and selected by a byte array
|
||||
// usually defined by OptionParameterRequestList. This result is expected to be
|
||||
// used in ReplyPacket()'s []Option parameter.
|
||||
func (o Options) SelectOrder(order []byte) []Option {
|
||||
opts := make([]Option, 0, len(order))
|
||||
for _, v := range order {
|
||||
if data, ok := o[OptionCode(v)]; ok {
|
||||
opts = append(opts, Option{Code: OptionCode(v), Value: data})
|
||||
}
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
type OpCode byte
|
||||
type MessageType byte // Option 53
|
Godeps/_workspace/src/github.com/d2g/dhcp4/packet.go (149 lines removed, generated, vendored)
@@ -1,149 +0,0 @@
|
||||
package dhcp4
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A DHCP packet
|
||||
type Packet []byte
|
||||
|
||||
func (p Packet) OpCode() OpCode { return OpCode(p[0]) }
|
||||
func (p Packet) HType() byte { return p[1] }
|
||||
func (p Packet) HLen() byte { return p[2] }
|
||||
func (p Packet) Hops() byte { return p[3] }
|
||||
func (p Packet) XId() []byte { return p[4:8] }
|
||||
func (p Packet) Secs() []byte { return p[8:10] } // Never Used?
|
||||
func (p Packet) Flags() []byte { return p[10:12] }
|
||||
func (p Packet) CIAddr() net.IP { return net.IP(p[12:16]) }
|
||||
func (p Packet) YIAddr() net.IP { return net.IP(p[16:20]) }
|
||||
func (p Packet) SIAddr() net.IP { return net.IP(p[20:24]) }
|
||||
func (p Packet) GIAddr() net.IP { return net.IP(p[24:28]) }
|
||||
func (p Packet) CHAddr() net.HardwareAddr {
|
||||
hLen := p.HLen()
|
||||
if hLen > 16 { // Prevent chaddr exceeding p boundary
|
||||
hLen = 16
|
||||
}
|
||||
return net.HardwareAddr(p[28 : 28+hLen]) // max endPos 44
|
||||
}
|
||||
|
||||
// 192 bytes of zeros BOOTP legacy
|
||||
func (p Packet) Cookie() []byte { return p[236:240] }
|
||||
func (p Packet) Options() []byte {
|
||||
if len(p) > 240 {
|
||||
return p[240:]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p Packet) Broadcast() bool { return p.Flags()[0] > 127 }
|
||||
|
||||
func (p Packet) SetBroadcast(broadcast bool) {
|
||||
if p.Broadcast() != broadcast {
|
||||
p.Flags()[0] ^= 128
|
||||
}
|
||||
}
|
||||
|
||||
func (p Packet) SetOpCode(c OpCode) { p[0] = byte(c) }
|
||||
func (p Packet) SetCHAddr(a net.HardwareAddr) {
|
||||
copy(p[28:44], a)
|
||||
p[2] = byte(len(a))
|
||||
}
|
||||
func (p Packet) SetHType(hType byte) { p[1] = hType }
|
||||
func (p Packet) SetCookie(cookie []byte) { copy(p.Cookie(), cookie) }
|
||||
func (p Packet) SetHops(hops byte) { p[3] = hops }
|
||||
func (p Packet) SetXId(xId []byte) { copy(p.XId(), xId) }
|
||||
func (p Packet) SetSecs(secs []byte) { copy(p.Secs(), secs) }
|
||||
func (p Packet) SetFlags(flags []byte) { copy(p.Flags(), flags) }
|
||||
func (p Packet) SetCIAddr(ip net.IP) { copy(p.CIAddr(), ip.To4()) }
|
||||
func (p Packet) SetYIAddr(ip net.IP) { copy(p.YIAddr(), ip.To4()) }
|
||||
func (p Packet) SetSIAddr(ip net.IP) { copy(p.SIAddr(), ip.To4()) }
|
||||
func (p Packet) SetGIAddr(ip net.IP) { copy(p.GIAddr(), ip.To4()) }
|
||||
|
||||
// Parses the packet's options into an Options map
|
||||
func (p Packet) ParseOptions() Options {
|
||||
opts := p.Options()
|
||||
options := make(Options, 10)
|
||||
for len(opts) >= 2 && OptionCode(opts[0]) != End {
|
||||
if OptionCode(opts[0]) == Pad {
|
||||
opts = opts[1:]
|
||||
continue
|
||||
}
|
||||
size := int(opts[1])
|
||||
if len(opts) < 2+size {
|
||||
break
|
||||
}
|
||||
options[OptionCode(opts[0])] = opts[2 : 2+size]
|
||||
opts = opts[2+size:]
|
||||
}
|
||||
return options
|
||||
}
|
||||
|
||||
func NewPacket(opCode OpCode) Packet {
|
||||
p := make(Packet, 241)
|
||||
p.SetOpCode(opCode)
|
||||
p.SetHType(1) // Ethernet
|
||||
p.SetCookie([]byte{99, 130, 83, 99})
|
||||
p[240] = byte(End)
|
||||
return p
|
||||
}
|
||||
|
||||
// Appends a DHCP option to the end of a packet
|
||||
func (p *Packet) AddOption(o OptionCode, value []byte) {
|
||||
*p = append((*p)[:len(*p)-1], []byte{byte(o), byte(len(value))}...) // Strip off End, Add OptionCode and Length
|
||||
*p = append(*p, value...) // Add Option Value
|
||||
*p = append(*p, byte(End)) // Add on new End
|
||||
}
|
||||
|
||||
// Removes all options from packet.
|
||||
func (p *Packet) StripOptions() {
|
||||
*p = append((*p)[:240], byte(End))
|
||||
}
|
||||
|
||||
// Creates a request packet that a Client would send to a server.
|
||||
func RequestPacket(mt MessageType, chAddr net.HardwareAddr, cIAddr net.IP, xId []byte, broadcast bool, options []Option) Packet {
|
||||
p := NewPacket(BootRequest)
|
||||
p.SetCHAddr(chAddr)
|
||||
p.SetXId(xId)
|
||||
if cIAddr != nil {
|
||||
p.SetCIAddr(cIAddr)
|
||||
}
|
||||
p.SetBroadcast(broadcast)
|
||||
p.AddOption(OptionDHCPMessageType, []byte{byte(mt)})
|
||||
for _, o := range options {
|
||||
p.AddOption(o.Code, o.Value)
|
||||
}
|
||||
p.PadToMinSize()
|
||||
return p
|
||||
}
|
||||
|
||||
// ReplyPacket creates a reply packet that a Server would send to a client.
|
||||
// It uses the req Packet param to copy across common/necessary fields to
|
||||
// associate the reply the request.
|
||||
func ReplyPacket(req Packet, mt MessageType, serverId, yIAddr net.IP, leaseDuration time.Duration, options []Option) Packet {
|
||||
p := NewPacket(BootReply)
|
||||
p.SetXId(req.XId())
|
||||
p.SetFlags(req.Flags())
|
||||
p.SetYIAddr(yIAddr)
|
||||
p.SetGIAddr(req.GIAddr())
|
||||
p.SetCHAddr(req.CHAddr())
|
||||
p.SetSecs(req.Secs())
|
||||
p.AddOption(OptionDHCPMessageType, []byte{byte(mt)})
|
||||
p.AddOption(OptionServerIdentifier, []byte(serverId))
|
||||
p.AddOption(OptionIPAddressLeaseTime, OptionsLeaseTime(leaseDuration))
|
||||
for _, o := range options {
|
||||
p.AddOption(o.Code, o.Value)
|
||||
}
|
||||
p.PadToMinSize()
|
||||
return p
|
||||
}
|
||||
|
||||
// PadToMinSize pads a packet so that when sent over UDP, the entire packet,
|
||||
// is 300 bytes (BOOTP min), to be compatible with really old devices.
|
||||
var padder [272]byte
|
||||
|
||||
func (p *Packet) PadToMinSize() {
|
||||
if n := len(*p); n < 272 {
|
||||
*p = append(*p, padder[:272-n]...)
|
||||
}
|
||||
}
|
Godeps/_workspace/src/github.com/d2g/dhcp4client/LICENSE (354 lines removed, generated, vendored)
@@ -1,354 +0,0 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. “Contributor”
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. “Contributor Version”
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor’s Contribution.
|
||||
|
||||
1.3. “Contribution”
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. “Covered Software”
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. “Incompatible With Secondary Licenses”
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of version
|
||||
1.1 or earlier of the License, but not also under the terms of a
|
||||
Secondary License.
|
||||
|
||||
1.6. “Executable Form”
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. “Larger Work”
|
||||
|
||||
means a work that combines Covered Software with other material, in a separate
|
||||
file or files, that is not Covered Software.
|
||||
|
||||
1.8. “License”
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. “Licensable”
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether at the
|
||||
time of the initial grant or subsequently, any and all of the rights conveyed by
|
||||
this License.
|
||||
|
||||
1.10. “Modifications”
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to, deletion
|
||||
from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. “Patent Claims” of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method, process,
|
||||
and apparatus claims, in any patent Licensable by such Contributor that
|
||||
would be infringed, but for the grant of the License, by the making,
|
||||
using, selling, offering for sale, having made, import, or transfer of
|
||||
either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. “Secondary License”
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. “Source Code Form”
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. “You” (or “Your”)
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, “You” includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, “control” means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or as
|
||||
part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its Contributions
|
||||
or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution become
|
||||
effective for each Contribution on the date the Contributor first distributes
|
||||
such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under this
|
||||
License. No additional rights or licenses will be implied from the distribution
|
||||
or licensing of Covered Software under this License. Notwithstanding Section
|
||||
2.1(b) above, no patent license is granted by a Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party’s
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of its
|
||||
Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks, or
|
||||
logos of any Contributor (except as may be necessary to comply with the
|
||||
notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this License
|
||||
(see Section 10.2) or under the terms of a Secondary License (if permitted
|
||||
under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its Contributions
|
||||
are its original creation(s) or it has sufficient rights to grant the
|
||||
rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under applicable
|
||||
copyright doctrines of fair use, fair dealing, or other equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under the
|
||||
terms of this License. You must inform recipients that the Source Code Form
|
||||
of the Covered Software is governed by the terms of this License, and how
|
||||
they can obtain a copy of this License. You may not attempt to alter or
|
||||
restrict the recipients’ rights in the Source Code Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this License,
|
||||
or sublicense it under different terms, provided that the license for
|
||||
the Executable Form does not attempt to limit or alter the recipients’
|
||||
rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for the
|
||||
Covered Software. If the Larger Work is a combination of Covered Software
|
||||
with a work governed by one or more Secondary Licenses, and the Covered
|
||||
Software is not Incompatible With Secondary Licenses, this License permits
|
||||
You to additionally distribute such Covered Software under the terms of
|
||||
such Secondary License(s), so that the recipient of the Larger Work may, at
|
||||
their option, further distribute the Covered Software under the terms of
|
||||
either this License or such Secondary License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices (including
|
||||
copyright notices, patent notices, disclaimers of warranty, or limitations
|
||||
of liability) contained within the Source Code Form of the Covered
|
||||
Software, except that You may alter any license notices to the extent
|
||||
required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on behalf
|
||||
of any Contributor. You must make it absolutely clear that any such
|
||||
warranty, support, indemnity, or liability obligation is offered by You
|
||||
alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute, judicial
|
||||
order, or regulation then You must: (a) comply with the terms of this License
|
||||
to the maximum extent possible; and (b) describe the limitations and the code
|
||||
they affect. Such description must be placed in a text file included with all
|
||||
distributions of the Covered Software under this License. Except to the
|
||||
extent prohibited by statute or regulation, such description must be
|
||||
sufficiently detailed for a recipient of ordinary skill to be able to
|
||||
understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
|
||||
if such Contributor fails to notify You of the non-compliance by some
|
||||
reasonable means prior to 60 days after You have come back into compliance.
|
||||
Moreover, Your grants from a particular Contributor are reinstated on an
|
||||
ongoing basis if such Contributor notifies You of the non-compliance by
|
||||
some reasonable means, this is the first time You have received notice of
|
||||
non-compliance with this License from such Contributor, and You become
|
||||
compliant prior to 30 days after Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions, counter-claims,
|
||||
and cross-claims) alleging that a Contributor Version directly or
|
||||
indirectly infringes any patent, then the rights granted to You by any and
|
||||
all Contributors for the Covered Software under Section 2.1 of this License
|
||||
shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an “as is” basis, without
|
||||
warranty of any kind, either expressed, implied, or statutory, including,
|
||||
without limitation, warranties that the Covered Software is free of defects,
|
||||
merchantable, fit for a particular purpose or non-infringing. The entire
|
||||
risk as to the quality and performance of the Covered Software is with You.
|
||||
Should any Covered Software prove defective in any respect, You (not any
|
||||
Contributor) assume the cost of any necessary servicing, repair, or
|
||||
correction. This disclaimer of warranty constitutes an essential part of this
|
||||
License. No use of any Covered Software is authorized under this License
|
||||
except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from such
|
||||
party’s negligence to the extent applicable law prohibits such limitation.
|
||||
Some jurisdictions do not allow the exclusion or limitation of incidental or
|
||||
consequential damages, so this exclusion and limitation may not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts of
|
||||
a jurisdiction where the defendant maintains its principal place of business
|
||||
and such litigation shall be governed by laws of that jurisdiction, without
|
||||
reference to its conflict-of-law provisions. Nothing in this Section shall
|
||||
prevent a party’s ability to bring cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject matter
|
||||
hereof. If any provision of this License is held to be unenforceable, such
|
||||
provision shall be reformed only to the extent necessary to make it
|
||||
enforceable. Any law or regulation which provides that the language of a
|
||||
contract shall be construed against the drafter shall not be used to construe
|
||||
this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version of
|
||||
the License under which You originally received the Covered Software, or
|
||||
under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a modified
|
||||
version of this License if you rename the license and remove any
|
||||
references to the name of the license steward (except to note that such
|
||||
modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file, then
|
||||
You may include the notice in a location (such as a LICENSE file in a relevant
|
||||
directory) where a recipient would be likely to look for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - “Incompatible With Secondary Licenses” Notice
|
||||
|
||||
This Source Code Form is “Incompatible
|
||||
With Secondary Licenses”, as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
8
Godeps/_workspace/src/github.com/d2g/dhcp4client/README.md
generated
vendored
8
Godeps/_workspace/src/github.com/d2g/dhcp4client/README.md
generated
vendored
@ -1,8 +0,0 @@
dhcp4client [GoDoc](http://godoc.org/github.com/d2g/dhcp4client) [Coverage Status](https://coveralls.io/r/d2g/dhcp4client?branch=HEAD) [Build Status](https://codeship.com/projects/70187)
===========

DHCP Client


###### Thanks to:
@eyakubovich For AF_PACKET support.
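Since the README stops at "DHCP Client", here is a minimal, hypothetical usage sketch assembled only from the vendored API below (`New`, `HardwareAddr`, `Request`, `Close`); the MAC address is a placeholder, and the default UDP socket bound to port 68 requires root privileges:

```go
package main

import (
	"log"
	"net"

	"github.com/d2g/dhcp4client"
)

func main() {
	// Placeholder MAC address; on a real host, read it from the interface instead.
	mac, err := net.ParseMAC("08:00:27:00:a8:e8")
	if err != nil {
		log.Fatal(err)
	}

	// New() defaults to a UDP socket bound to port 68, which requires root.
	client, err := dhcp4client.New(dhcp4client.HardwareAddr(mac))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Request runs the full DISCOVER -> OFFER -> REQUEST -> ACK exchange.
	ok, ack, err := client.Request()
	if err != nil {
		log.Fatal(err)
	}
	if ok {
		log.Printf("leased IP: %v", ack.YIAddr())
	}
}
```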
366
Godeps/_workspace/src/github.com/d2g/dhcp4client/client.go
generated
vendored
366
Godeps/_workspace/src/github.com/d2g/dhcp4client/client.go
generated
vendored
@ -1,366 +0,0 @@
|
||||
package dhcp4client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/d2g/dhcp4"
|
||||
)
|
||||
|
||||
const (
|
||||
MaxDHCPLen = 576
|
||||
)
|
||||
|
||||
type Client struct {
|
||||
hardwareAddr net.HardwareAddr //The HardwareAddr to send in the request.
|
||||
ignoreServers []net.IP //List of Servers to Ignore requests from.
|
||||
timeout time.Duration //Time before we timeout.
|
||||
broadcast bool //Set the Bcast flag in BOOTP Flags
|
||||
connection connection //The Connection Method to use
|
||||
}
|
||||
|
||||
/*
|
||||
* Abstracts the type of underlying socket used
|
||||
*/
|
||||
type connection interface {
|
||||
Close() error
|
||||
Write(packet []byte) error
|
||||
ReadFrom() ([]byte, net.IP, error)
|
||||
SetReadTimeout(t time.Duration) error
|
||||
}
|
||||
|
||||
func New(options ...func(*Client) error) (*Client, error) {
|
||||
c := Client{
|
||||
timeout: time.Second * 10,
|
||||
broadcast: true,
|
||||
}
|
||||
|
||||
err := c.SetOption(options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//if connection hasn't been set as an option create the default.
|
||||
if c.connection == nil {
|
||||
conn, err := NewInetSock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.connection = conn
|
||||
}
|
||||
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
func (c *Client) SetOption(options ...func(*Client) error) error {
|
||||
for _, opt := range options {
|
||||
if err := opt(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Timeout(t time.Duration) func(*Client) error {
|
||||
return func(c *Client) error {
|
||||
c.timeout = t
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func IgnoreServers(s []net.IP) func(*Client) error {
|
||||
return func(c *Client) error {
|
||||
c.ignoreServers = s
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func HardwareAddr(h net.HardwareAddr) func(*Client) error {
|
||||
return func(c *Client) error {
|
||||
c.hardwareAddr = h
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func Broadcast(b bool) func(*Client) error {
|
||||
return func(c *Client) error {
|
||||
c.broadcast = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func Connection(conn connection) func(*Client) error {
|
||||
return func(c *Client) error {
|
||||
c.connection = conn
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Close Connections
|
||||
*/
|
||||
func (c *Client) Close() error {
|
||||
if c.connection != nil {
|
||||
return c.connection.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
* Send the Discovery Packet to the Broadcast Channel
|
||||
*/
|
||||
func (c *Client) SendDiscoverPacket() (dhcp4.Packet, error) {
|
||||
discoveryPacket := c.DiscoverPacket()
|
||||
discoveryPacket.PadToMinSize()
|
||||
|
||||
return discoveryPacket, c.SendPacket(discoveryPacket)
|
||||
}
|
||||
|
||||
/*
 * Retrieve Offer...
 * Wait for the offer for a specific Discovery Packet.
 */
|
||||
func (c *Client) GetOffer(discoverPacket *dhcp4.Packet) (dhcp4.Packet, error) {
|
||||
for {
|
||||
c.connection.SetReadTimeout(c.timeout)
|
||||
readBuffer, source, err := c.connection.ReadFrom()
|
||||
if err != nil {
|
||||
return dhcp4.Packet{}, err
|
||||
}
|
||||
|
||||
offerPacket := dhcp4.Packet(readBuffer)
|
||||
offerPacketOptions := offerPacket.ParseOptions()
|
||||
|
||||
// Ignore Servers in my Ignore list
|
||||
for _, ignoreServer := range c.ignoreServers {
|
||||
if source.Equal(ignoreServer) {
|
||||
continue
|
||||
}
|
||||
|
||||
if offerPacket.SIAddr().Equal(ignoreServer) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if len(offerPacketOptions[dhcp4.OptionDHCPMessageType]) < 1 || dhcp4.MessageType(offerPacketOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.Offer || !bytes.Equal(discoverPacket.XId(), offerPacket.XId()) {
|
||||
continue
|
||||
}
|
||||
|
||||
return offerPacket, nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
* Send Request Based On the offer Received.
|
||||
*/
|
||||
func (c *Client) SendRequest(offerPacket *dhcp4.Packet) (dhcp4.Packet, error) {
|
||||
requestPacket := c.RequestPacket(offerPacket)
|
||||
requestPacket.PadToMinSize()
|
||||
|
||||
return requestPacket, c.SendPacket(requestPacket)
|
||||
}
|
||||
|
||||
/*
 * Retrieve Acknowledgement
 * Wait for the acknowledgement for a specific Request Packet.
 */
|
||||
func (c *Client) GetAcknowledgement(requestPacket *dhcp4.Packet) (dhcp4.Packet, error) {
|
||||
for {
|
||||
c.connection.SetReadTimeout(c.timeout)
|
||||
readBuffer, source, err := c.connection.ReadFrom()
|
||||
if err != nil {
|
||||
return dhcp4.Packet{}, err
|
||||
}
|
||||
|
||||
acknowledgementPacket := dhcp4.Packet(readBuffer)
|
||||
acknowledgementPacketOptions := acknowledgementPacket.ParseOptions()
|
||||
|
||||
// Ignore Servers in my Ignore list
|
||||
for _, ignoreServer := range c.ignoreServers {
|
||||
if source.Equal(ignoreServer) {
|
||||
continue
|
||||
}
|
||||
|
||||
if acknowledgementPacket.SIAddr().Equal(ignoreServer) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !bytes.Equal(requestPacket.XId(), acknowledgementPacket.XId()) || len(acknowledgementPacketOptions[dhcp4.OptionDHCPMessageType]) < 1 || (dhcp4.MessageType(acknowledgementPacketOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.ACK && dhcp4.MessageType(acknowledgementPacketOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.NAK) {
|
||||
continue
|
||||
}
|
||||
|
||||
return acknowledgementPacket, nil
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Send a DHCP Packet.
|
||||
*/
|
||||
func (c *Client) SendPacket(packet dhcp4.Packet) error {
|
||||
return c.connection.Write(packet)
|
||||
}
|
||||
|
||||
/*
|
||||
* Create Discover Packet
|
||||
*/
|
||||
func (c *Client) DiscoverPacket() dhcp4.Packet {
|
||||
messageid := make([]byte, 4)
|
||||
if _, err := rand.Read(messageid); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
packet := dhcp4.NewPacket(dhcp4.BootRequest)
|
||||
packet.SetCHAddr(c.hardwareAddr)
|
||||
packet.SetXId(messageid)
|
||||
packet.SetBroadcast(c.broadcast)
|
||||
|
||||
packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Discover)})
|
||||
//packet.PadToMinSize()
|
||||
return packet
|
||||
}
|
||||
|
||||
/*
|
||||
* Create Request Packet
|
||||
*/
|
||||
func (c *Client) RequestPacket(offerPacket *dhcp4.Packet) dhcp4.Packet {
|
||||
offerOptions := offerPacket.ParseOptions()
|
||||
|
||||
packet := dhcp4.NewPacket(dhcp4.BootRequest)
|
||||
packet.SetCHAddr(c.hardwareAddr)
|
||||
|
||||
packet.SetXId(offerPacket.XId())
|
||||
packet.SetCIAddr(offerPacket.CIAddr())
|
||||
packet.SetSIAddr(offerPacket.SIAddr())
|
||||
|
||||
packet.SetBroadcast(c.broadcast)
|
||||
packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Request)})
|
||||
packet.AddOption(dhcp4.OptionRequestedIPAddress, (offerPacket.YIAddr()).To4())
|
||||
packet.AddOption(dhcp4.OptionServerIdentifier, offerOptions[dhcp4.OptionServerIdentifier])
|
||||
|
||||
//packet.PadToMinSize()
|
||||
return packet
|
||||
}
|
||||
|
||||
/*
|
||||
* Create Request Packet For a Renew
|
||||
*/
|
||||
func (c *Client) RenewalRequestPacket(acknowledgement *dhcp4.Packet) dhcp4.Packet {
|
||||
messageid := make([]byte, 4)
|
||||
if _, err := rand.Read(messageid); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
acknowledgementOptions := acknowledgement.ParseOptions()
|
||||
|
||||
packet := dhcp4.NewPacket(dhcp4.BootRequest)
|
||||
packet.SetCHAddr(acknowledgement.CHAddr())
|
||||
|
||||
packet.SetXId(messageid)
|
||||
packet.SetCIAddr(acknowledgement.YIAddr())
|
||||
packet.SetSIAddr(acknowledgement.SIAddr())
|
||||
|
||||
packet.SetBroadcast(c.broadcast)
|
||||
packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Request)})
|
||||
packet.AddOption(dhcp4.OptionRequestedIPAddress, (acknowledgement.YIAddr()).To4())
|
||||
packet.AddOption(dhcp4.OptionServerIdentifier, acknowledgementOptions[dhcp4.OptionServerIdentifier])
|
||||
|
||||
//packet.PadToMinSize()
|
||||
return packet
|
||||
}
|
||||
|
||||
/*
|
||||
* Create Release Packet For a Release
|
||||
*/
|
||||
func (c *Client) ReleasePacket(acknowledgement *dhcp4.Packet) dhcp4.Packet {
|
||||
messageid := make([]byte, 4)
|
||||
if _, err := rand.Read(messageid); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
acknowledgementOptions := acknowledgement.ParseOptions()
|
||||
|
||||
packet := dhcp4.NewPacket(dhcp4.BootRequest)
|
||||
packet.SetCHAddr(acknowledgement.CHAddr())
|
||||
|
||||
packet.SetXId(messageid)
|
||||
packet.SetCIAddr(acknowledgement.YIAddr())
|
||||
|
||||
packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Release)})
|
||||
packet.AddOption(dhcp4.OptionServerIdentifier, acknowledgementOptions[dhcp4.OptionServerIdentifier])
|
||||
|
||||
//packet.PadToMinSize()
|
||||
return packet
|
||||
}
|
||||
|
||||
/*
 * Let's do a Full DHCP Request.
 */
|
||||
func (c *Client) Request() (bool, dhcp4.Packet, error) {
|
||||
discoveryPacket, err := c.SendDiscoverPacket()
|
||||
if err != nil {
|
||||
return false, discoveryPacket, err
|
||||
}
|
||||
|
||||
offerPacket, err := c.GetOffer(&discoveryPacket)
|
||||
if err != nil {
|
||||
return false, offerPacket, err
|
||||
}
|
||||
|
||||
requestPacket, err := c.SendRequest(&offerPacket)
|
||||
if err != nil {
|
||||
return false, requestPacket, err
|
||||
}
|
||||
|
||||
acknowledgement, err := c.GetAcknowledgement(&requestPacket)
|
||||
if err != nil {
|
||||
return false, acknowledgement, err
|
||||
}
|
||||
|
||||
acknowledgementOptions := acknowledgement.ParseOptions()
|
||||
if dhcp4.MessageType(acknowledgementOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.ACK {
|
||||
return false, acknowledgement, nil
|
||||
}
|
||||
|
||||
return true, acknowledgement, nil
|
||||
}
|
||||
|
||||
/*
 * Renew a lease based on the Acknowledgement Packet.
 * Returns: Successful, The Acknowledgement Packet, Any Errors
 */
|
||||
func (c *Client) Renew(acknowledgement dhcp4.Packet) (bool, dhcp4.Packet, error) {
|
||||
renewRequest := c.RenewalRequestPacket(&acknowledgement)
|
||||
renewRequest.PadToMinSize()
|
||||
|
||||
err := c.SendPacket(renewRequest)
|
||||
if err != nil {
|
||||
return false, renewRequest, err
|
||||
}
|
||||
|
||||
newAcknowledgement, err := c.GetAcknowledgement(&renewRequest)
|
||||
if err != nil {
|
||||
return false, newAcknowledgement, err
|
||||
}
|
||||
|
||||
newAcknowledgementOptions := newAcknowledgement.ParseOptions()
|
||||
if dhcp4.MessageType(newAcknowledgementOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.ACK {
|
||||
return false, newAcknowledgement, nil
|
||||
}
|
||||
|
||||
return true, newAcknowledgement, nil
|
||||
}
|
||||
|
||||
/*
 * Release a lease based on the Acknowledgement Packet.
 * Returns Any Errors
 */
|
||||
func (c *Client) Release(acknowledgement dhcp4.Packet) error {
|
||||
release := c.ReleasePacket(&acknowledgement)
|
||||
release.PadToMinSize()
|
||||
|
||||
return c.SendPacket(release)
|
||||
}
|
69
Godeps/_workspace/src/github.com/d2g/dhcp4client/client_test.go
generated
vendored
69
Godeps/_workspace/src/github.com/d2g/dhcp4client/client_test.go
generated
vendored
@ -1,69 +0,0 @@
|
||||
package dhcp4client
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
/*
|
||||
* Example Client
|
||||
*/
|
||||
func Test_ExampleClient(test *testing.T) {
|
||||
var err error
|
||||
|
||||
m, err := net.ParseMAC("08-00-27-00-A8-E8")
|
||||
if err != nil {
|
||||
log.Printf("MAC Error:%v\n", err)
|
||||
}
|
||||
|
||||
//Create a connection to use
|
||||
//We need to set the connection ports to 1068 and 1067 so we don't need root access
|
||||
c, err := NewInetSock(SetLocalAddr(net.UDPAddr{IP: net.IPv4(0, 0, 0, 0), Port: 1068}), SetRemoteAddr(net.UDPAddr{IP: net.IPv4bcast, Port: 1067}))
|
||||
if err != nil {
|
||||
test.Error("Client Conection Generation:" + err.Error())
|
||||
}
|
||||
|
||||
exampleClient, err := New(HardwareAddr(m), Connection(c))
|
||||
if err != nil {
|
||||
test.Fatalf("Error:%v\n", err)
|
||||
}
|
||||
|
||||
success, acknowledgementpacket, err := exampleClient.Request()
|
||||
|
||||
test.Logf("Success:%v\n", success)
|
||||
test.Logf("Packet:%v\n", acknowledgementpacket)
|
||||
|
||||
if err != nil {
|
||||
networkError, ok := err.(*net.OpError)
|
||||
if ok && networkError.Timeout() {
|
||||
test.Log("Test Skipping as it didn't find a DHCP Server")
|
||||
test.SkipNow()
|
||||
}
|
||||
test.Fatalf("Error:%v\n", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
test.Error("We didn't sucessfully get a DHCP Lease?")
|
||||
} else {
|
||||
log.Printf("IP Received:%v\n", acknowledgementpacket.YIAddr().String())
|
||||
}
|
||||
|
||||
test.Log("Start Renewing Lease")
|
||||
success, acknowledgementpacket, err = exampleClient.Renew(acknowledgementpacket)
|
||||
if err != nil {
|
||||
networkError, ok := err.(*net.OpError)
|
||||
if ok && networkError.Timeout() {
|
||||
test.Log("Renewal Failed! Because it didn't find the DHCP server very Strange")
|
||||
test.Errorf("Error" + err.Error())
|
||||
}
|
||||
test.Fatalf("Error:%v\n", err)
|
||||
}
|
||||
|
||||
if !success {
|
||||
test.Error("We didn't sucessfully Renew a DHCP Lease?")
|
||||
} else {
|
||||
log.Printf("IP Received:%v\n", acknowledgementpacket.YIAddr().String())
|
||||
}
|
||||
|
||||
}
|
75
Godeps/_workspace/src/github.com/d2g/dhcp4client/inetsock.go
generated
vendored
75
Godeps/_workspace/src/github.com/d2g/dhcp4client/inetsock.go
generated
vendored
@ -1,75 +0,0 @@
package dhcp4client

import (
	"net"
	"time"
)

type inetSock struct {
	*net.UDPConn

	laddr net.UDPAddr
	raddr net.UDPAddr
}

func NewInetSock(options ...func(*inetSock) error) (*inetSock, error) {
	c := &inetSock{
		laddr: net.UDPAddr{IP: net.IPv4(0, 0, 0, 0), Port: 68},
		raddr: net.UDPAddr{IP: net.IPv4bcast, Port: 67},
	}

	err := c.setOption(options...)
	if err != nil {
		return nil, err
	}

	conn, err := net.ListenUDP("udp4", &c.laddr)
	if err != nil {
		return nil, err
	}

	c.UDPConn = conn
	return c, err
}

func (c *inetSock) setOption(options ...func(*inetSock) error) error {
	for _, opt := range options {
		if err := opt(c); err != nil {
			return err
		}
	}
	return nil
}

func SetLocalAddr(l net.UDPAddr) func(*inetSock) error {
	return func(c *inetSock) error {
		c.laddr = l
		return nil
	}
}

func SetRemoteAddr(r net.UDPAddr) func(*inetSock) error {
	return func(c *inetSock) error {
		c.raddr = r
		return nil
	}
}

func (c *inetSock) Write(packet []byte) error {
	_, err := c.WriteToUDP(packet, &c.raddr)
	return err
}

func (c *inetSock) ReadFrom() ([]byte, net.IP, error) {
	readBuffer := make([]byte, MaxDHCPLen)
	n, source, err := c.ReadFromUDP(readBuffer)
	if source != nil {
		return readBuffer[:n], source.IP, err
	} else {
		return readBuffer[:n], net.IP{}, err
	}
}

func (c *inetSock) SetReadTimeout(t time.Duration) error {
	return c.SetReadDeadline(time.Now().Add(t))
}
10
Godeps/_workspace/src/github.com/d2g/dhcp4client/init.go
generated
vendored
10
Godeps/_workspace/src/github.com/d2g/dhcp4client/init.go
generated
vendored
@ -1,10 +0,0 @@
package dhcp4client

import (
	"math/rand"
	"time"
)

func init() {
	rand.Seed(time.Now().Unix())
}
147
Godeps/_workspace/src/github.com/d2g/dhcp4client/pktsock_linux.go
generated
vendored
147
Godeps/_workspace/src/github.com/d2g/dhcp4client/pktsock_linux.go
generated
vendored
@ -1,147 +0,0 @@
|
||||
package dhcp4client
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
minIPHdrLen = 20
|
||||
maxIPHdrLen = 60
|
||||
udpHdrLen = 8
|
||||
ip4Ver = 0x40
|
||||
ttl = 16
|
||||
srcPort = 68
|
||||
dstPort = 67
|
||||
)
|
||||
|
||||
var (
|
||||
bcastMAC = []byte{255, 255, 255, 255, 255, 255}
|
||||
)
|
||||
|
||||
// abstracts AF_PACKET
|
||||
type packetSock struct {
|
||||
fd int
|
||||
ifindex int
|
||||
}
|
||||
|
||||
func NewPacketSock(ifindex int) (*packetSock, error) {
|
||||
fd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_DGRAM, int(swap16(unix.ETH_P_IP)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addr := unix.SockaddrLinklayer{
|
||||
Ifindex: ifindex,
|
||||
Protocol: swap16(unix.ETH_P_IP),
|
||||
}
|
||||
|
||||
if err = unix.Bind(fd, &addr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &packetSock{
|
||||
fd: fd,
|
||||
ifindex: ifindex,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (pc *packetSock) Close() error {
|
||||
return unix.Close(pc.fd)
|
||||
}
|
||||
|
||||
func (pc *packetSock) Write(packet []byte) error {
|
||||
lladdr := unix.SockaddrLinklayer{
|
||||
Ifindex: pc.ifindex,
|
||||
Protocol: swap16(unix.ETH_P_IP),
|
||||
Halen: uint8(len(bcastMAC)),
|
||||
}
|
||||
copy(lladdr.Addr[:], bcastMAC)
|
||||
|
||||
pkt := make([]byte, minIPHdrLen+udpHdrLen+len(packet))
|
||||
|
||||
fillIPHdr(pkt[0:minIPHdrLen], udpHdrLen+uint16(len(packet)))
|
||||
fillUDPHdr(pkt[minIPHdrLen:minIPHdrLen+udpHdrLen], uint16(len(packet)))
|
||||
|
||||
// payload
|
||||
copy(pkt[minIPHdrLen+udpHdrLen:len(pkt)], packet)
|
||||
|
||||
return unix.Sendto(pc.fd, pkt, 0, &lladdr)
|
||||
}
|
||||
|
||||
func (pc *packetSock) ReadFrom() ([]byte, net.IP, error) {
|
||||
pkt := make([]byte, maxIPHdrLen+udpHdrLen+MaxDHCPLen)
|
||||
n, _, err := unix.Recvfrom(pc.fd, pkt, 0)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// IP hdr len
|
||||
ihl := int(pkt[0]&0x0F) * 4
|
||||
// Source IP address
|
||||
src := net.IP(pkt[12:16])
|
||||
|
||||
return pkt[ihl+udpHdrLen : n], src, nil
|
||||
}
|
||||
|
||||
func (pc *packetSock) SetReadTimeout(t time.Duration) error {
|
||||
|
||||
tv := unix.NsecToTimeval(t.Nanoseconds())
|
||||
return unix.SetsockoptTimeval(pc.fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv)
|
||||
}
|
||||
|
||||
// computes the 1's complement checksum
|
||||
func chksum(p []byte, csum []byte) {
|
||||
cklen := len(p)
|
||||
s := uint32(0)
|
||||
for i := 0; i < (cklen - 1); i += 2 {
|
||||
s += uint32(p[i+1])<<8 | uint32(p[i])
|
||||
}
|
||||
if cklen&1 == 1 {
|
||||
s += uint32(p[cklen-1])
|
||||
}
|
||||
s = (s >> 16) + (s & 0xffff)
|
||||
s = s + (s >> 16)
|
||||
s = ^s
|
||||
|
||||
csum[0] = uint8(s & 0xff)
|
||||
csum[1] = uint8(s >> 8)
|
||||
}
|
||||
|
||||
func fillIPHdr(hdr []byte, payloadLen uint16) {
|
||||
// version + IHL
|
||||
hdr[0] = ip4Ver | (minIPHdrLen / 4)
|
||||
// total length
|
||||
binary.BigEndian.PutUint16(hdr[2:4], uint16(len(hdr))+payloadLen)
|
||||
// identification
|
||||
if _, err := rand.Read(hdr[4:5]); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// TTL
|
||||
hdr[8] = 16
|
||||
// Protocol
|
||||
hdr[9] = unix.IPPROTO_UDP
|
||||
// dst IP
|
||||
copy(hdr[16:20], net.IPv4bcast.To4())
|
||||
// compute IP hdr checksum
|
||||
chksum(hdr[0:len(hdr)], hdr[10:12])
|
||||
}
|
||||
|
||||
func fillUDPHdr(hdr []byte, payloadLen uint16) {
|
||||
// src port
|
||||
binary.BigEndian.PutUint16(hdr[0:2], srcPort)
|
||||
// dest port
|
||||
binary.BigEndian.PutUint16(hdr[2:4], dstPort)
|
||||
// length
|
||||
binary.BigEndian.PutUint16(hdr[4:6], udpHdrLen+payloadLen)
|
||||
}
|
||||
|
||||
func swap16(x uint16) uint16 {
|
||||
var b [2]byte
|
||||
binary.BigEndian.PutUint16(b[:], x)
|
||||
return binary.LittleEndian.Uint16(b[:])
|
||||
}
|
4
Godeps/_workspace/src/github.com/onsi/ginkgo/.gitignore
generated
vendored
4
Godeps/_workspace/src/github.com/onsi/ginkgo/.gitignore
generated
vendored
@ -1,4 +0,0 @@
.DS_Store
TODO
tmp/**/*
*.coverprofile
15
Godeps/_workspace/src/github.com/onsi/ginkgo/.travis.yml
generated
vendored
15
Godeps/_workspace/src/github.com/onsi/ginkgo/.travis.yml
generated
vendored
@ -1,15 +0,0 @@
language: go
go:
  - 1.3
  - 1.4
  - 1.5
  - tip

install:
  - go get -v -t ./...
  - go get golang.org/x/tools/cmd/cover
  - go get github.com/onsi/gomega
  - go install github.com/onsi/ginkgo/ginkgo
  - export PATH=$PATH:$HOME/gopath/bin

script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
136
Godeps/_workspace/src/github.com/onsi/ginkgo/CHANGELOG.md
generated
vendored
136
Godeps/_workspace/src/github.com/onsi/ginkgo/CHANGELOG.md
generated
vendored
@ -1,136 +0,0 @@
|
||||
## HEAD
|
||||
|
||||
Improvements:
|
||||
|
||||
- `Skip(message)` can be used to skip the current test.
|
||||
- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
|
||||
|
||||
Bug Fixes:
|
||||
|
||||
- Ginkgo tests now fail when you `panic(nil)` (#167)
|
||||
|
||||
## 1.2.0 5/31/2015
|
||||
|
||||
Improvements
|
||||
|
||||
- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
|
||||
- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
|
||||
- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
|
||||
|
||||
## 1.2.0-beta
|
||||
|
||||
Ginkgo now requires Go 1.4+
|
||||
|
||||
Improvements:
|
||||
|
||||
- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
|
||||
- Improved focus behavior. Now, this:
|
||||
|
||||
```golang
|
||||
FDescribe("Some describe", func() {
|
||||
It("A", func() {})
|
||||
|
||||
FIt("B", func() {})
|
||||
})
|
||||
```
|
||||
|
||||
will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
|
||||
- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
|
||||
- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
|
||||
- Improved output when an error occurs in a setup or teardown block.
|
||||
- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
|
||||
- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
|
||||
- Add support for precompiled tests:
|
||||
- `ginkgo build <path-to-package>` will now compile the package, producing a file named `package.test`
|
||||
- The compiled `package.test` file can be run directly. This runs the tests in series.
|
||||
- To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
|
||||
- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
|
||||
- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
|
||||
- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
|
||||
- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
|
||||
- `ginkgo -notify` now works on Linux
|
||||
|
||||
Bug Fixes:
|
||||
|
||||
- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
|
||||
- Fix tempfile leak when running in parallel
|
||||
- Fix incorrect failure message when a panic occurs during a parallel test run
|
||||
- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
|
||||
- Be more consistent about handling SIGTERM as well as SIGINT
|
||||
- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
|
||||
- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
|
||||
|
||||
## 1.1.0 (8/2/2014)
|
||||
|
||||
No changes, just dropping the beta.
|
||||
|
||||
## 1.1.0-beta (7/22/2014)
|
||||
New Features:
|
||||
|
||||
- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
|
||||
- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
|
||||
- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
|
||||
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
|
||||
- `ginkgo --failFast` aborts the test suite after the first failure.
|
||||
- `ginkgo generate file_1 file_2` can take multiple file arguments.
|
||||
- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
|
||||
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
|
||||
|
||||
Improvements:
|
||||
|
||||
- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
|
||||
- `ginkgo --untilItFails` no longer recompiles between attempts.
|
||||
- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
|
||||
|
||||
Bug Fixes:
|
||||
|
||||
- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
|
||||
- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
|
||||
|
||||
## 1.0.0 (5/24/2014)
|
||||
New Features:
|
||||
|
||||
- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
|
||||
|
||||
Improvements:
|
||||
|
||||
- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm open the file in your text editor.
|
||||
- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
|
||||
|
||||
Bug Fixes:
|
||||
|
||||
- `-cover` now generates a correctly combined coverprofile when running with in parallel with multiple `-node`s.
|
||||
- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
|
||||
- Fix all remaining race conditions in Ginkgo's test suite.
|
||||
|
||||
## 1.0.0-beta (4/14/2014)
|
||||
Breaking changes:
|
||||
|
||||
- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
|
||||
- Modified the Reporter interface
|
||||
- `watch` is now a subcommand, not a flag.
|
||||
|
||||
DSL changes:
|
||||
|
||||
- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
|
||||
- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
|
||||
- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
|
||||
|
||||
CLI changes:
|
||||
|
||||
- `watch` is now a subcommand, not a flag
|
||||
- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
|
||||
- Additional arguments can be passed to specs. Pass them after the `--` separator
|
||||
- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
|
||||
- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
|
||||
|
||||
Misc:
|
||||
|
||||
- Start using semantic versioning
|
||||
- Start maintaining changelog
|
||||
|
||||
Major refactor:
|
||||
|
||||
- Pull out Ginkgo's internal to `internal`
|
||||
- Rename `example` everywhere to `spec`
|
||||
- Much more!
|
20
Godeps/_workspace/src/github.com/onsi/ginkgo/LICENSE
generated
vendored
20
Godeps/_workspace/src/github.com/onsi/ginkgo/LICENSE
generated
vendored
@ -1,20 +0,0 @@
Copyright (c) 2013-2014 Onsi Fakhouri

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
115
Godeps/_workspace/src/github.com/onsi/ginkgo/README.md
generated
vendored
115
Godeps/_workspace/src/github.com/onsi/ginkgo/README.md
generated
vendored
@ -1,115 +0,0 @@
|
||||

|
||||
|
||||
[Build Status](https://travis-ci.org/onsi/ginkgo)
|
||||
|
||||
Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
|
||||
|
||||
To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
|
||||
|
||||
## Feature List
|
||||
|
||||
- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
|
||||
|
||||
- Structure your BDD-style tests expressively:
|
||||
- Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
|
||||
- [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
|
||||
- [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
|
||||
- [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
|
||||
- [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
|
||||
|
||||
- A comprehensive test runner that lets you:
|
||||
- Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
|
||||
- [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
|
||||
- Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
|
||||
- Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
|
||||
|
||||
- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
|
||||
- `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in real time
|
||||
- `ginkgo -cover` runs your tests using Golang's code coverage tool
|
||||
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
|
||||
- `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
|
||||
- `ginkgo -r` runs all test suites under the current directory
|
||||
- `ginkgo -v` prints out identifying information for each test just before it runs
|
||||
|
||||
And much more: run `ginkgo help` for details!
|
||||
|
||||
The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
|
||||
|
||||
- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
|
||||
|
||||
- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
|
||||
|
||||
- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
|
||||
|
||||
- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
|
||||
|
||||
- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
|
||||
|
||||
- A modular architecture that lets you easily:
|
||||
- Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
|
||||
- [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
|
||||
|
||||
## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
|
||||
|
||||
Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
|
||||
|
||||
## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
|
||||
|
||||
Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
|
||||
|
||||
## Set Me Up!
|
||||
|
||||
You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
|
||||
|
||||
```bash
|
||||
|
||||
go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
|
||||
go get github.com/onsi/gomega # fetches the matcher library
|
||||
|
||||
cd path/to/package/you/want/to/test
|
||||
|
||||
ginkgo bootstrap # set up a new ginkgo suite
|
||||
ginkgo generate # will create a sample test file. edit this file and add your tests then...
|
||||
|
||||
go test # to run your tests
|
||||
|
||||
ginkgo # also runs your tests
|
||||
|
||||
```
|
||||
|
||||
## I'm new to Go: What are my testing options?
|
||||
|
||||
Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
|
||||
|
||||
With that said, it's great to know what your options are :)
|
||||
|
||||
### What Golang gives you out of the box
|
||||
|
||||
Testing is a first-class citizen in Golang; however, Go's built-in testing primitives are somewhat limited: the [testing](http://golang.org/pkg/testing) package provides basic XUnit-style tests and no assertion library.
|
||||
|
||||
### Matcher libraries for Golang's XUnit style tests
|
||||
|
||||
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
|
||||
|
||||
- [testify](https://github.com/stretchr/testify)
|
||||
- [gocheck](http://labix.org/gocheck)
|
||||
|
||||
You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
|
||||
|
||||
### BDD style testing frameworks
|
||||
|
||||
There are a handful of BDD-style testing frameworks written for Golang. Here are a few:
|
||||
|
||||
- [Ginkgo](https://github.com/onsi/ginkgo) ;)
|
||||
- [GoConvey](https://github.com/smartystreets/goconvey)
|
||||
- [Goblin](https://github.com/franela/goblin)
|
||||
- [Mao](https://github.com/azer/mao)
|
||||
- [Zen](https://github.com/pranavraja/zen)
|
||||
|
||||
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries.
|
||||
|
||||
Go explore!
|
||||
|
||||
## License
|
||||
|
||||
Ginkgo is MIT-Licensed
|
170
Godeps/_workspace/src/github.com/onsi/ginkgo/config/config.go
generated
vendored
170
Godeps/_workspace/src/github.com/onsi/ginkgo/config/config.go
generated
vendored
@ -1,170 +0,0 @@
|
||||
/*
|
||||
Ginkgo accepts a number of configuration options.
|
||||
|
||||
These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
|
||||
|
||||
You can also learn more via
|
||||
|
||||
ginkgo help
|
||||
|
||||
or (I kid you not):
|
||||
|
||||
go test -asdf
|
||||
*/
|
||||
package config
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const VERSION = "1.2.0"
|
||||
|
||||
type GinkgoConfigType struct {
|
||||
RandomSeed int64
|
||||
RandomizeAllSpecs bool
|
||||
FocusString string
|
||||
SkipString string
|
||||
SkipMeasurements bool
|
||||
FailOnPending bool
|
||||
FailFast bool
|
||||
EmitSpecProgress bool
|
||||
DryRun bool
|
||||
|
||||
ParallelNode int
|
||||
ParallelTotal int
|
||||
SyncHost string
|
||||
StreamHost string
|
||||
}
|
||||
|
||||
var GinkgoConfig = GinkgoConfigType{}
|
||||
|
||||
type DefaultReporterConfigType struct {
|
||||
NoColor bool
|
||||
SlowSpecThreshold float64
|
||||
NoisyPendings bool
|
||||
Succinct bool
|
||||
Verbose bool
|
||||
FullTrace bool
|
||||
}
|
||||
|
||||
var DefaultReporterConfig = DefaultReporterConfigType{}
|
||||
|
||||
func processPrefix(prefix string) string {
|
||||
if prefix != "" {
|
||||
prefix = prefix + "."
|
||||
}
|
||||
return prefix
|
||||
}
|
||||
|
||||
func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
|
||||
prefix = processPrefix(prefix)
|
||||
flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe/Context groups.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")
|
||||
flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
|
||||
flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
|
||||
|
||||
if includeParallelFlags {
|
||||
flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
|
||||
flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
|
||||
flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
|
||||
flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
|
||||
}
|
||||
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
|
||||
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter (default: 5 seconds).")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
|
||||
}
|
||||
|
||||
func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
|
||||
prefix = processPrefix(prefix)
|
||||
result := make([]string, 0)
|
||||
|
||||
if ginkgo.RandomSeed > 0 {
|
||||
result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
|
||||
}
|
||||
|
||||
if ginkgo.RandomizeAllSpecs {
|
||||
result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.SkipMeasurements {
|
||||
result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.FailOnPending {
|
||||
result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.FailFast {
|
||||
result = append(result, fmt.Sprintf("--%sfailFast", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.DryRun {
|
||||
result = append(result, fmt.Sprintf("--%sdryRun", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.FocusString != "" {
|
||||
result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
|
||||
}
|
||||
|
||||
if ginkgo.SkipString != "" {
|
||||
result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
|
||||
}
|
||||
|
||||
if ginkgo.EmitSpecProgress {
|
||||
result = append(result, fmt.Sprintf("--%sprogress", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.ParallelNode != 0 {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
|
||||
}
|
||||
|
||||
if ginkgo.ParallelTotal != 0 {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
|
||||
}
|
||||
|
||||
if ginkgo.StreamHost != "" {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
|
||||
}
|
||||
|
||||
if ginkgo.SyncHost != "" {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
|
||||
}
|
||||
|
||||
if reporter.NoColor {
|
||||
result = append(result, fmt.Sprintf("--%snoColor", prefix))
|
||||
}
|
||||
|
||||
if reporter.SlowSpecThreshold > 0 {
|
||||
result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
|
||||
}
|
||||
|
||||
if !reporter.NoisyPendings {
|
||||
result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
|
||||
}
|
||||
|
||||
if reporter.Verbose {
|
||||
result = append(result, fmt.Sprintf("--%sv", prefix))
|
||||
}
|
||||
|
||||
if reporter.Succinct {
|
||||
result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
|
||||
}
|
||||
|
||||
if reporter.FullTrace {
|
||||
result = append(result, fmt.Sprintf("--%strace", prefix))
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
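For context, the config package shown above is consumed roughly as sketched below; this is a minimal illustration, and the FlagSet name, prefix, and flag values are chosen purely for the example rather than taken from this commit.

	package main

	import (
		"flag"
		"fmt"

		"github.com/onsi/ginkgo/config"
	)

	func main() {
		// Register the shared Ginkgo flags (seed, focus, skip, ...) on a
		// dedicated FlagSet under the "ginkgo." prefix, including the
		// parallel.* flags.
		fs := flag.NewFlagSet("example", flag.ExitOnError)
		config.Flags(fs, "ginkgo", true)
		fs.Parse([]string{"--ginkgo.focus=networking", "--ginkgo.failFast"})

		// Turn the parsed configuration back into CLI arguments, e.g. to
		// forward them to a test binary spawned by the CLI.
		args := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
		fmt.Println(args)
	}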
98
Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table.go
generated
vendored
@ -1,98 +0,0 @@
|
||||
/*
|
||||
|
||||
Table provides a simple DSL for Ginkgo-native Table-Driven Tests
|
||||
|
||||
The godoc documentation describes Table's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo#table-driven-tests
|
||||
|
||||
*/
|
||||
|
||||
package table
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
/*
|
||||
DescribeTable describes a table-driven test.
|
||||
|
||||
For example:
|
||||
|
||||
DescribeTable("a simple table",
|
||||
func(x int, y int, expected bool) {
|
||||
Ω(x > y).Should(Equal(expected))
|
||||
},
|
||||
Entry("x > y", 1, 0, true),
|
||||
Entry("x == y", 0, 0, false),
|
||||
Entry("x < y", 0, 1, false),
|
||||
)
|
||||
|
||||
The first argument to `DescribeTable` is a string description.
|
||||
The second argument is a function that will be run for each table entry. Your assertions go here - the function is equivalent to a Ginkgo It.
|
||||
The subsequent arguments must be of type `TableEntry`. We recommend using the `Entry` convenience constructors.
|
||||
|
||||
The `Entry` constructor takes a string description followed by an arbitrary set of parameters. These parameters are passed into your function.
|
||||
|
||||
Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`. Each `Entry` is turned into an `It` within the `Describe`.
|
||||
|
||||
It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run).
|
||||
|
||||
Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry). In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable.
|
||||
*/
|
||||
func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
|
||||
describeTable(description, itBody, entries, false, false)
|
||||
return true
|
||||
}
|
||||
|
||||
/*
|
||||
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
|
||||
*/
|
||||
func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
|
||||
describeTable(description, itBody, entries, false, true)
|
||||
return true
|
||||
}
|
||||
|
||||
/*
|
||||
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
|
||||
*/
|
||||
func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
|
||||
describeTable(description, itBody, entries, true, false)
|
||||
return true
|
||||
}
|
||||
|
||||
/*
|
||||
You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`.
|
||||
*/
|
||||
func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool {
|
||||
describeTable(description, itBody, entries, true, false)
|
||||
return true
|
||||
}
|
||||
|
||||
func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) {
|
||||
itBodyValue := reflect.ValueOf(itBody)
|
||||
if itBodyValue.Kind() != reflect.Func {
|
||||
panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody))
|
||||
}
|
||||
|
||||
if pending {
|
||||
ginkgo.PDescribe(description, func() {
|
||||
for _, entry := range entries {
|
||||
entry.generateIt(itBodyValue)
|
||||
}
|
||||
})
|
||||
} else if focused {
|
||||
ginkgo.FDescribe(description, func() {
|
||||
for _, entry := range entries {
|
||||
entry.generateIt(itBodyValue)
|
||||
}
|
||||
})
|
||||
} else {
|
||||
ginkgo.Describe(description, func() {
|
||||
for _, entry := range entries {
|
||||
entry.generateIt(itBodyValue)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
72
Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table_entry.go
generated
vendored
@ -1,72 +0,0 @@
|
||||
package table
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
/*
|
||||
TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
|
||||
*/
|
||||
type TableEntry struct {
|
||||
Description string
|
||||
Parameters []interface{}
|
||||
Pending bool
|
||||
Focused bool
|
||||
}
|
||||
|
||||
func (t TableEntry) generateIt(itBody reflect.Value) {
|
||||
if t.Pending {
|
||||
ginkgo.PIt(t.Description)
|
||||
return
|
||||
}
|
||||
|
||||
values := []reflect.Value{}
|
||||
for _, param := range t.Parameters {
|
||||
values = append(values, reflect.ValueOf(param))
|
||||
}
|
||||
|
||||
body := func() {
|
||||
itBody.Call(values)
|
||||
}
|
||||
|
||||
if t.Focused {
|
||||
ginkgo.FIt(t.Description, body)
|
||||
} else {
|
||||
ginkgo.It(t.Description, body)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Entry constructs a TableEntry.
|
||||
|
||||
The first argument is a required description (this becomes the content of the generated Ginkgo `It`).
|
||||
Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`.
|
||||
|
||||
Each Entry ends up generating an individual Ginkgo It.
|
||||
*/
|
||||
func Entry(description string, parameters ...interface{}) TableEntry {
|
||||
return TableEntry{description, parameters, false, false}
|
||||
}
|
||||
|
||||
/*
|
||||
You can focus a particular entry with FEntry. This is equivalent to FIt.
|
||||
*/
|
||||
func FEntry(description string, parameters ...interface{}) TableEntry {
|
||||
return TableEntry{description, parameters, false, true}
|
||||
}
|
||||
|
||||
/*
|
||||
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
|
||||
*/
|
||||
func PEntry(description string, parameters ...interface{}) TableEntry {
|
||||
return TableEntry{description, parameters, true, false}
|
||||
}
|
||||
|
||||
/*
|
||||
You can mark a particular entry as pending with XEntry. This is equivalent to XIt.
|
||||
*/
|
||||
func XEntry(description string, parameters ...interface{}) TableEntry {
|
||||
return TableEntry{description, parameters, true, false}
|
||||
}
|
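Putting the two table files above together, a spec written against this DSL looks roughly like the following; the package name, descriptions, and values are hypothetical:

	package table_example_test

	import (
		. "github.com/onsi/ginkgo"
		. "github.com/onsi/ginkgo/extensions/table"
		. "github.com/onsi/gomega"
	)

	var _ = DescribeTable("integer addition",
		func(a, b, expected int) {
			// The body runs once per Entry, just like an It.
			Expect(a + b).To(Equal(expected))
		},
		Entry("small numbers", 1, 2, 3),
		Entry("with zero", 0, 5, 5),
		PEntry("not implemented yet", 2, 2, 5), // pending: becomes a PIt
	)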
182
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/bootstrap_command.go
generated
vendored
@ -1,182 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"go/build"
|
||||
|
||||
"github.com/onsi/ginkgo/ginkgo/nodot"
|
||||
)
|
||||
|
||||
func BuildBootstrapCommand() *Command {
|
||||
var agouti, noDot bool
|
||||
flagSet := flag.NewFlagSet("bootstrap", flag.ExitOnError)
|
||||
flagSet.BoolVar(&agouti, "agouti", false, "If set, bootstrap will generate a bootstrap file for writing Agouti tests")
|
||||
flagSet.BoolVar(&noDot, "nodot", false, "If set, bootstrap will generate a bootstrap file that does not . import ginkgo and gomega")
|
||||
|
||||
return &Command{
|
||||
Name: "bootstrap",
|
||||
FlagSet: flagSet,
|
||||
UsageCommand: "ginkgo bootstrap <FLAGS>",
|
||||
Usage: []string{
|
||||
"Bootstrap a test suite for the current package",
|
||||
"Accepts the following flags:",
|
||||
},
|
||||
Command: func(args []string, additionalArgs []string) {
|
||||
generateBootstrap(agouti, noDot)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var bootstrapText = `package {{.Package}}_test
|
||||
|
||||
import (
|
||||
{{.GinkgoImport}}
|
||||
{{.GomegaImport}}
|
||||
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test{{.FormattedName}}(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "{{.FormattedName}} Suite")
|
||||
}
|
||||
`
|
||||
|
||||
var agoutiBootstrapText = `package {{.Package}}_test
|
||||
|
||||
import (
|
||||
{{.GinkgoImport}}
|
||||
{{.GomegaImport}}
|
||||
"github.com/sclevine/agouti"
|
||||
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test{{.FormattedName}}(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "{{.FormattedName}} Suite")
|
||||
}
|
||||
|
||||
var agoutiDriver *agouti.WebDriver
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
// Choose a WebDriver:
|
||||
|
||||
agoutiDriver = agouti.PhantomJS()
|
||||
// agoutiDriver = agouti.Selenium()
|
||||
// agoutiDriver = agouti.ChromeDriver()
|
||||
|
||||
Expect(agoutiDriver.Start()).To(Succeed())
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
Expect(agoutiDriver.Stop()).To(Succeed())
|
||||
})
|
||||
`
|
||||
|
||||
type bootstrapData struct {
|
||||
Package string
|
||||
FormattedName string
|
||||
GinkgoImport string
|
||||
GomegaImport string
|
||||
}
|
||||
|
||||
func getPackageAndFormattedName() (string, string, string) {
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
complainAndQuit("Could not get current working directory: \n" + err.Error())
|
||||
}
|
||||
|
||||
dirName := strings.Replace(filepath.Base(path), "-", "_", -1)
|
||||
dirName = strings.Replace(dirName, " ", "_", -1)
|
||||
|
||||
pkg, err := build.ImportDir(path, 0)
|
||||
packageName := pkg.Name
|
||||
if err != nil {
|
||||
packageName = dirName
|
||||
}
|
||||
|
||||
formattedName := prettifyPackageName(filepath.Base(path))
|
||||
return packageName, dirName, formattedName
|
||||
}
|
||||
|
||||
func prettifyPackageName(name string) string {
|
||||
name = strings.Replace(name, "-", " ", -1)
|
||||
name = strings.Replace(name, "_", " ", -1)
|
||||
name = strings.Title(name)
|
||||
name = strings.Replace(name, " ", "", -1)
|
||||
return name
|
||||
}
|
||||
|
||||
func fileExists(path string) bool {
|
||||
_, err := os.Stat(path)
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func generateBootstrap(agouti bool, noDot bool) {
|
||||
packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
|
||||
data := bootstrapData{
|
||||
Package: packageName,
|
||||
FormattedName: formattedName,
|
||||
GinkgoImport: `. "github.com/onsi/ginkgo"`,
|
||||
GomegaImport: `. "github.com/onsi/gomega"`,
|
||||
}
|
||||
|
||||
if noDot {
|
||||
data.GinkgoImport = `"github.com/onsi/ginkgo"`
|
||||
data.GomegaImport = `"github.com/onsi/gomega"`
|
||||
}
|
||||
|
||||
targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
|
||||
if fileExists(targetFile) {
|
||||
fmt.Printf("%s already exists.\n\n", targetFile)
|
||||
os.Exit(1)
|
||||
} else {
|
||||
fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
|
||||
}
|
||||
|
||||
f, err := os.Create(targetFile)
|
||||
if err != nil {
|
||||
complainAndQuit("Could not create file: " + err.Error())
|
||||
panic(err.Error())
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var templateText string
|
||||
if agouti {
|
||||
templateText = agoutiBootstrapText
|
||||
} else {
|
||||
templateText = bootstrapText
|
||||
}
|
||||
|
||||
bootstrapTemplate, err := template.New("bootstrap").Parse(templateText)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
bootstrapTemplate.Execute(buf, data)
|
||||
|
||||
if noDot {
|
||||
contents, err := nodot.ApplyNoDot(buf.Bytes())
|
||||
if err != nil {
|
||||
complainAndQuit("Failed to import nodot declarations: " + err.Error())
|
||||
}
|
||||
fmt.Println("To update the nodot declarations in the future, switch to this directory and run:\n\tginkgo nodot")
|
||||
buf = bytes.NewBuffer(contents)
|
||||
}
|
||||
|
||||
buf.WriteTo(f)
|
||||
|
||||
goFmt(targetFile)
|
||||
}
|
68
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/build_command.go
generated
vendored
@ -1,68 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||
)
|
||||
|
||||
func BuildBuildCommand() *Command {
|
||||
commandFlags := NewBuildCommandFlags(flag.NewFlagSet("build", flag.ExitOnError))
|
||||
interruptHandler := interrupthandler.NewInterruptHandler()
|
||||
builder := &SpecBuilder{
|
||||
commandFlags: commandFlags,
|
||||
interruptHandler: interruptHandler,
|
||||
}
|
||||
|
||||
return &Command{
|
||||
Name: "build",
|
||||
FlagSet: commandFlags.FlagSet,
|
||||
UsageCommand: "ginkgo build <FLAGS> <PACKAGES>",
|
||||
Usage: []string{
|
||||
"Build the passed in <PACKAGES> (or the package in the current directory if left blank).",
|
||||
"Accepts the following flags:",
|
||||
},
|
||||
Command: builder.BuildSpecs,
|
||||
}
|
||||
}
|
||||
|
||||
type SpecBuilder struct {
|
||||
commandFlags *RunWatchAndBuildCommandFlags
|
||||
interruptHandler *interrupthandler.InterruptHandler
|
||||
}
|
||||
|
||||
func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
|
||||
r.commandFlags.computeNodes()
|
||||
|
||||
suites, _ := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, false)
|
||||
|
||||
if len(suites) == 0 {
|
||||
complainAndQuit("Found no test suites")
|
||||
}
|
||||
|
||||
passed := true
|
||||
for _, suite := range suites {
|
||||
runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, nil)
|
||||
fmt.Printf("Compiling %s...\n", suite.PackageName)
|
||||
|
||||
path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
|
||||
err := runner.CompileTo(path)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
passed = false
|
||||
} else {
|
||||
fmt.Printf(" compiled %s.test\n", suite.PackageName)
|
||||
}
|
||||
|
||||
runner.CleanUp()
|
||||
}
|
||||
|
||||
if passed {
|
||||
os.Exit(0)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
123
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/ginkgo_ast_nodes.go
generated
vendored
@ -1,123 +0,0 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
/*
|
||||
* Creates a `var _ = ...` value spec node
|
||||
*/
|
||||
func createVarUnderscoreBlock() *ast.ValueSpec {
|
||||
valueSpec := &ast.ValueSpec{}
|
||||
object := &ast.Object{Kind: 4, Name: "_", Decl: valueSpec, Data: 0}
|
||||
ident := &ast.Ident{Name: "_", Obj: object}
|
||||
valueSpec.Names = append(valueSpec.Names, ident)
|
||||
return valueSpec
|
||||
}
|
||||
|
||||
/*
|
||||
* Creates a Describe("Testing with ginkgo", func() { }) node
|
||||
*/
|
||||
func createDescribeBlock() *ast.CallExpr {
|
||||
blockStatement := &ast.BlockStmt{List: []ast.Stmt{}}
|
||||
|
||||
fieldList := &ast.FieldList{}
|
||||
funcType := &ast.FuncType{Params: fieldList}
|
||||
funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
|
||||
basicLit := &ast.BasicLit{Kind: 9, Value: "\"Testing with Ginkgo\""}
|
||||
describeIdent := &ast.Ident{Name: "Describe"}
|
||||
return &ast.CallExpr{Fun: describeIdent, Args: []ast.Expr{basicLit, funcLit}}
|
||||
}
|
||||
|
||||
/*
|
||||
* Convenience function to return the name of the *testing.T param
|
||||
* for a Test function that will be rewritten. This is useful because
|
||||
* we will want to replace the usage of this named *testing.T inside the
|
||||
* body of the function with a GinkgoT.
|
||||
*/
|
||||
func namedTestingTArg(node *ast.FuncDecl) string {
|
||||
return node.Type.Params.List[0].Names[0].Name // *exhale*
|
||||
}
|
||||
|
||||
/*
|
||||
* Convenience function to return the block statement node for a Describe statement
|
||||
*/
|
||||
func blockStatementFromDescribe(desc *ast.CallExpr) *ast.BlockStmt {
|
||||
var funcLit *ast.FuncLit
|
||||
var found = false
|
||||
|
||||
for _, node := range desc.Args {
|
||||
switch node := node.(type) {
|
||||
case *ast.FuncLit:
|
||||
found = true
|
||||
funcLit = node
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
panic("Error finding ast.FuncLit inside describe statement. Somebody done goofed.")
|
||||
}
|
||||
|
||||
return funcLit.Body
|
||||
}
|
||||
|
||||
/* convenience function for creating an It("TestNameHere")
|
||||
* with all the body of the test function inside the anonymous
|
||||
* func passed to It()
|
||||
*/
|
||||
func createItStatementForTestFunc(testFunc *ast.FuncDecl) *ast.ExprStmt {
|
||||
blockStatement := &ast.BlockStmt{List: testFunc.Body.List}
|
||||
fieldList := &ast.FieldList{}
|
||||
funcType := &ast.FuncType{Params: fieldList}
|
||||
funcLit := &ast.FuncLit{Type: funcType, Body: blockStatement}
|
||||
|
||||
testName := rewriteTestName(testFunc.Name.Name)
|
||||
basicLit := &ast.BasicLit{Kind: 9, Value: fmt.Sprintf("\"%s\"", testName)}
|
||||
itBlockIdent := &ast.Ident{Name: "It"}
|
||||
callExpr := &ast.CallExpr{Fun: itBlockIdent, Args: []ast.Expr{basicLit, funcLit}}
|
||||
return &ast.ExprStmt{X: callExpr}
|
||||
}
|
||||
|
||||
/*
|
||||
* rewrite test names to be human readable
|
||||
* eg: rewrites "TestSomethingAmazing" as "something amazing"
|
||||
*/
|
||||
func rewriteTestName(testName string) string {
|
||||
nameComponents := []string{}
|
||||
currentString := ""
|
||||
indexOfTest := strings.Index(testName, "Test")
|
||||
if indexOfTest != 0 {
|
||||
return testName
|
||||
}
|
||||
|
||||
testName = strings.Replace(testName, "Test", "", 1)
|
||||
first, rest := testName[0], testName[1:]
|
||||
testName = string(unicode.ToLower(rune(first))) + rest
|
||||
|
||||
for _, rune := range testName {
|
||||
if unicode.IsUpper(rune) {
|
||||
nameComponents = append(nameComponents, currentString)
|
||||
currentString = string(unicode.ToLower(rune))
|
||||
} else {
|
||||
currentString += string(rune)
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(append(nameComponents, currentString), " ")
|
||||
}
|
||||
|
||||
func newGinkgoTFromIdent(ident *ast.Ident) *ast.CallExpr {
|
||||
return &ast.CallExpr{
|
||||
Lparen: ident.NamePos + 1,
|
||||
Rparen: ident.NamePos + 2,
|
||||
Fun: &ast.Ident{Name: "GinkgoT"},
|
||||
}
|
||||
}
|
||||
|
||||
func newGinkgoTInterface() *ast.Ident {
|
||||
return &ast.Ident{Name: "GinkgoTInterface"}
|
||||
}
|
91
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/import.go
generated
vendored
@ -1,91 +0,0 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
)
|
||||
|
||||
/*
|
||||
* Given the root node of an AST, returns the node containing the
|
||||
* import statements for the file.
|
||||
*/
|
||||
func importsForRootNode(rootNode *ast.File) (imports *ast.GenDecl, err error) {
|
||||
for _, declaration := range rootNode.Decls {
|
||||
decl, ok := declaration.(*ast.GenDecl)
|
||||
if !ok || len(decl.Specs) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
_, ok = decl.Specs[0].(*ast.ImportSpec)
|
||||
if ok {
|
||||
imports = decl
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.New(fmt.Sprintf("Could not find imports for root node:\n\t%#v\n", rootNode))
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
* Removes "testing" import, if present
|
||||
*/
|
||||
func removeTestingImport(rootNode *ast.File) {
|
||||
importDecl, err := importsForRootNode(rootNode)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
var index int
|
||||
for i, importSpec := range importDecl.Specs {
|
||||
importSpec := importSpec.(*ast.ImportSpec)
|
||||
if importSpec.Path.Value == "\"testing\"" {
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
importDecl.Specs = append(importDecl.Specs[:index], importDecl.Specs[index+1:]...)
|
||||
}
|
||||
|
||||
/*
|
||||
* Adds import statements for onsi/ginkgo, if missing
|
||||
*/
|
||||
func addGinkgoImports(rootNode *ast.File) {
|
||||
importDecl, err := importsForRootNode(rootNode)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
if len(importDecl.Specs) == 0 {
|
||||
// TODO: might need to create an import decl here
|
||||
panic("unimplemented : expected to find an imports block")
|
||||
}
|
||||
|
||||
needsGinkgo := true
|
||||
for _, importSpec := range importDecl.Specs {
|
||||
importSpec, ok := importSpec.(*ast.ImportSpec)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if importSpec.Path.Value == "\"github.com/onsi/ginkgo\"" {
|
||||
needsGinkgo = false
|
||||
}
|
||||
}
|
||||
|
||||
if needsGinkgo {
|
||||
importDecl.Specs = append(importDecl.Specs, createImport(".", "\"github.com/onsi/ginkgo\""))
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* convenience function to create an import statement
|
||||
*/
|
||||
func createImport(name, path string) *ast.ImportSpec {
|
||||
return &ast.ImportSpec{
|
||||
Name: &ast.Ident{Name: name},
|
||||
Path: &ast.BasicLit{Kind: 9, Value: path},
|
||||
}
|
||||
}
|
127
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/package_rewriter.go
generated
vendored
@ -1,127 +0,0 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
/*
|
||||
* RewritePackage takes a name (eg: my-package/tools), finds its test files using
|
||||
* Go's build package, and then rewrites them. A ginkgo test suite file will
|
||||
* also be added for this package, and all of its child packages.
|
||||
*/
|
||||
func RewritePackage(packageName string) {
|
||||
pkg, err := packageWithName(packageName)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
|
||||
}
|
||||
|
||||
for _, filename := range findTestsInPackage(pkg) {
|
||||
rewriteTestsInFile(filename)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
* Given a package, findTestsInPackage reads the test files in the directory,
|
||||
* and then recurses on each child package, returning a slice of all test files
|
||||
* found in this process.
|
||||
*/
|
||||
func findTestsInPackage(pkg *build.Package) (testfiles []string) {
|
||||
for _, file := range append(pkg.TestGoFiles, pkg.XTestGoFiles...) {
|
||||
testfiles = append(testfiles, filepath.Join(pkg.Dir, file))
|
||||
}
|
||||
|
||||
dirFiles, err := ioutil.ReadDir(pkg.Dir)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error reading dir: '%s'\n%s\n", pkg.Dir, err.Error()))
|
||||
}
|
||||
|
||||
re := regexp.MustCompile(`^[._]`)
|
||||
|
||||
for _, file := range dirFiles {
|
||||
if !file.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
if re.Match([]byte(file.Name())) {
|
||||
continue
|
||||
}
|
||||
|
||||
packageName := filepath.Join(pkg.ImportPath, file.Name())
|
||||
subPackage, err := packageWithName(packageName)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("unexpected error reading package: '%s'\n%s\n", packageName, err.Error()))
|
||||
}
|
||||
|
||||
testfiles = append(testfiles, findTestsInPackage(subPackage)...)
|
||||
}
|
||||
|
||||
addGinkgoSuiteForPackage(pkg)
|
||||
goFmtPackage(pkg)
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
* Shells out to `ginkgo bootstrap` to create a test suite file
|
||||
*/
|
||||
func addGinkgoSuiteForPackage(pkg *build.Package) {
|
||||
originalDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
suite_test_file := filepath.Join(pkg.Dir, pkg.Name+"_suite_test.go")
|
||||
|
||||
_, err = os.Stat(suite_test_file)
|
||||
if err == nil {
|
||||
return // test file already exists, this should be a no-op
|
||||
}
|
||||
|
||||
err = os.Chdir(pkg.Dir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
output, err := exec.Command("ginkgo", "bootstrap").Output()
|
||||
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("error running 'ginkgo bootstrap'.\nstdout: %s\n%s\n", output, err.Error()))
|
||||
}
|
||||
|
||||
err = os.Chdir(originalDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Shells out to `go fmt` to format the package
|
||||
*/
|
||||
func goFmtPackage(pkg *build.Package) {
|
||||
output, err := exec.Command("go", "fmt", pkg.ImportPath).Output()
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Warning: Error running 'go fmt %s'.\nstdout: %s\n%s\n", pkg.ImportPath, output, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Attempts to return a package with its test files already read.
|
||||
* The ImportMode arg to build.Import lets you specify if you want Go to read the
|
||||
* buildable go files inside the package, but it fails if the package has no go files
|
||||
*/
|
||||
func packageWithName(name string) (pkg *build.Package, err error) {
|
||||
pkg, err = build.Default.Import(name, ".", build.ImportMode(0))
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
pkg, err = build.Default.Import(name, ".", build.ImportMode(1))
|
||||
return
|
||||
}
|
56
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/test_finder.go
generated
vendored
@ -1,56 +0,0 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
/*
|
||||
* Given a root node, walks its top level statements and returns
|
||||
* pointers to function nodes to rewrite as It statements.
|
||||
* These functions, according to Go testing convention, must be named
|
||||
* TestWithCamelCasedName and receive a single *testing.T argument.
|
||||
*/
|
||||
func findTestFuncs(rootNode *ast.File) (testsToRewrite []*ast.FuncDecl) {
|
||||
testNameRegexp := regexp.MustCompile("^Test[0-9A-Z].+")
|
||||
|
||||
ast.Inspect(rootNode, func(node ast.Node) bool {
|
||||
if node == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
switch node := node.(type) {
|
||||
case *ast.FuncDecl:
|
||||
matches := testNameRegexp.MatchString(node.Name.Name)
|
||||
|
||||
if matches && receivesTestingT(node) {
|
||||
testsToRewrite = append(testsToRewrite, node)
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
* convenience function that looks at args to a function and determines if its
|
||||
* params include an argument of type *testing.T
|
||||
*/
|
||||
func receivesTestingT(node *ast.FuncDecl) bool {
|
||||
if len(node.Type.Params.List) != 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
base, ok := node.Type.Params.List[0].Type.(*ast.StarExpr)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
intermediate := base.X.(*ast.SelectorExpr)
|
||||
isTestingPackage := intermediate.X.(*ast.Ident).Name == "testing"
|
||||
isTestingT := intermediate.Sel.Name == "T"
|
||||
|
||||
return isTestingPackage && isTestingT
|
||||
}
|
163
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/testfile_rewriter.go
generated
vendored
@ -1,163 +0,0 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
/*
|
||||
* Given a file path, rewrites any tests in the Ginkgo format.
|
||||
* First, we parse the AST, and update the imports declaration.
|
||||
* Then, we walk the first child elements in the file, returning tests to rewrite.
|
||||
* A top level init func is declared, with a single Describe func inside.
|
||||
* Then the test functions to rewrite are inserted as It statements inside the Describe.
|
||||
* Finally we walk the rest of the file, replacing other usages of *testing.T
|
||||
* Once that is complete, we write the AST back out again to its file.
|
||||
*/
|
||||
func rewriteTestsInFile(pathToFile string) {
|
||||
fileSet := token.NewFileSet()
|
||||
rootNode, err := parser.ParseFile(fileSet, pathToFile, nil, 0)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error parsing test file '%s':\n%s\n", pathToFile, err.Error()))
|
||||
}
|
||||
|
||||
addGinkgoImports(rootNode)
|
||||
removeTestingImport(rootNode)
|
||||
|
||||
varUnderscoreBlock := createVarUnderscoreBlock()
|
||||
describeBlock := createDescribeBlock()
|
||||
varUnderscoreBlock.Values = []ast.Expr{describeBlock}
|
||||
|
||||
for _, testFunc := range findTestFuncs(rootNode) {
|
||||
rewriteTestFuncAsItStatement(testFunc, rootNode, describeBlock)
|
||||
}
|
||||
|
||||
underscoreDecl := &ast.GenDecl{
|
||||
Tok: 85, // gah, magic numbers are needed to make this work
|
||||
TokPos: 14, // this tricks Go into writing "var _ = Describe"
|
||||
Specs: []ast.Spec{varUnderscoreBlock},
|
||||
}
|
||||
|
||||
imports := rootNode.Decls[0]
|
||||
tail := rootNode.Decls[1:]
|
||||
rootNode.Decls = append(append([]ast.Decl{imports}, underscoreDecl), tail...)
|
||||
rewriteOtherFuncsToUseGinkgoT(rootNode.Decls)
|
||||
walkNodesInRootNodeReplacingTestingT(rootNode)
|
||||
|
||||
var buffer bytes.Buffer
|
||||
if err = format.Node(&buffer, fileSet, rootNode); err != nil {
|
||||
panic(fmt.Sprintf("Error formatting ast node after rewriting tests.\n%s\n", err.Error()))
|
||||
}
|
||||
|
||||
fileInfo, err := os.Stat(pathToFile)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error stat'ing file: %s\n", pathToFile))
|
||||
}
|
||||
|
||||
ioutil.WriteFile(pathToFile, buffer.Bytes(), fileInfo.Mode())
|
||||
return
|
||||
}
|
||||
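Concretely, the pipeline above turns a conventional Go test like this hypothetical one:

	package cache_test

	import "testing"

	func TestStoresValues(t *testing.T) {
		if got := 2 + 2; got != 4 {
			t.Fatalf("expected 4, got %d", got)
		}
	}

into roughly the following; the Describe text and the GinkgoT substitution come from the helpers in this file and in ginkgo_ast_nodes.go, while the package and test names are made up for the illustration:

	package cache_test

	import (
		. "github.com/onsi/ginkgo"
	)

	var _ = Describe("Testing with Ginkgo", func() {
		It("stores values", func() {
			if got := 2 + 2; got != 4 {
				GinkgoT().Fatalf("expected 4, got %d", got)
			}
		})
	})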
|
||||
/*
|
||||
* Given a test func named TestDoesSomethingNeat, rewrites it as
|
||||
* It("does something neat", func() { __test_body_here__ }) and adds it
|
||||
* to the Describe's list of statements
|
||||
*/
|
||||
func rewriteTestFuncAsItStatement(testFunc *ast.FuncDecl, rootNode *ast.File, describe *ast.CallExpr) {
|
||||
var funcIndex int = -1
|
||||
for index, child := range rootNode.Decls {
|
||||
if child == testFunc {
|
||||
funcIndex = index
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if funcIndex < 0 {
|
||||
panic(fmt.Sprintf("Assert failed: Error finding index for test node %s\n", testFunc.Name.Name))
|
||||
}
|
||||
|
||||
var block *ast.BlockStmt = blockStatementFromDescribe(describe)
|
||||
block.List = append(block.List, createItStatementForTestFunc(testFunc))
|
||||
replaceTestingTsWithGinkgoT(block, namedTestingTArg(testFunc))
|
||||
|
||||
// remove the old test func from the root node's declarations
|
||||
rootNode.Decls = append(rootNode.Decls[:funcIndex], rootNode.Decls[funcIndex+1:]...)
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
* walks nodes inside of a test func's statements and replaces the usage of
|
||||
* its named *testing.T param with GinkgoT
|
||||
*/
|
||||
func replaceTestingTsWithGinkgoT(statementsBlock *ast.BlockStmt, testingT string) {
|
||||
ast.Inspect(statementsBlock, func(node ast.Node) bool {
|
||||
if node == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
keyValueExpr, ok := node.(*ast.KeyValueExpr)
|
||||
if ok {
|
||||
replaceNamedTestingTsInKeyValueExpression(keyValueExpr, testingT)
|
||||
return true
|
||||
}
|
||||
|
||||
funcLiteral, ok := node.(*ast.FuncLit)
|
||||
if ok {
|
||||
replaceTypeDeclTestingTsInFuncLiteral(funcLiteral)
|
||||
return true
|
||||
}
|
||||
|
||||
callExpr, ok := node.(*ast.CallExpr)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
replaceTestingTsInArgsLists(callExpr, testingT)
|
||||
|
||||
funCall, ok := callExpr.Fun.(*ast.SelectorExpr)
|
||||
if ok {
|
||||
replaceTestingTsMethodCalls(funCall, testingT)
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
/*
|
||||
* rewrite t.Fail() or any other *testing.T method by replacing it with GinkgoT().Fail()
|
||||
* This function receives a selector expression (eg: t.Fail()) and
|
||||
* the name of the *testing.T param from the function declaration. Rewrites the
|
||||
* selector expression in place if the target was a *testing.T
|
||||
*/
|
||||
func replaceTestingTsMethodCalls(selectorExpr *ast.SelectorExpr, testingT string) {
|
||||
ident, ok := selectorExpr.X.(*ast.Ident)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if ident.Name == testingT {
|
||||
selectorExpr.X = newGinkgoTFromIdent(ident)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* replaces usages of a named *testing.T param inside of a call expression
|
||||
* with a new GinkgoT object
|
||||
*/
|
||||
func replaceTestingTsInArgsLists(callExpr *ast.CallExpr, testingT string) {
|
||||
for index, arg := range callExpr.Args {
|
||||
ident, ok := arg.(*ast.Ident)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if ident.Name == testingT {
|
||||
callExpr.Args[index] = newGinkgoTFromIdent(ident)
|
||||
}
|
||||
}
|
||||
}
|
130
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert/testing_t_rewriter.go
generated
vendored
@ -1,130 +0,0 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
)
|
||||
|
||||
/*
|
||||
* Rewrites any other top level funcs that receive a *testing.T param
|
||||
*/
|
||||
func rewriteOtherFuncsToUseGinkgoT(declarations []ast.Decl) {
|
||||
for _, decl := range declarations {
|
||||
decl, ok := decl.(*ast.FuncDecl)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, param := range decl.Type.Params.List {
|
||||
starExpr, ok := param.Type.(*ast.StarExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
xIdent, ok := selectorExpr.X.(*ast.Ident)
|
||||
if !ok || xIdent.Name != "testing" {
|
||||
continue
|
||||
}
|
||||
|
||||
if selectorExpr.Sel.Name != "T" {
|
||||
continue
|
||||
}
|
||||
|
||||
param.Type = newGinkgoTInterface()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Walks all of the nodes in the file, replacing *testing.T in struct
|
||||
* and func literal nodes. eg:
|
||||
* type foo struct { *testing.T }
|
||||
* var bar = func(t *testing.T) { }
|
||||
*/
|
||||
func walkNodesInRootNodeReplacingTestingT(rootNode *ast.File) {
|
||||
ast.Inspect(rootNode, func(node ast.Node) bool {
|
||||
if node == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
switch node := node.(type) {
|
||||
case *ast.StructType:
|
||||
replaceTestingTsInStructType(node)
|
||||
case *ast.FuncLit:
|
||||
replaceTypeDeclTestingTsInFuncLiteral(node)
|
||||
}
|
||||
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
/*
|
||||
* replaces named *testing.T inside a composite literal
|
||||
*/
|
||||
func replaceNamedTestingTsInKeyValueExpression(kve *ast.KeyValueExpr, testingT string) {
|
||||
ident, ok := kve.Value.(*ast.Ident)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if ident.Name == testingT {
|
||||
kve.Value = newGinkgoTFromIdent(ident)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* replaces *testing.T params in a func literal with GinkgoT
|
||||
*/
|
||||
func replaceTypeDeclTestingTsInFuncLiteral(functionLiteral *ast.FuncLit) {
|
||||
for _, arg := range functionLiteral.Type.Params.List {
|
||||
starExpr, ok := arg.Type.(*ast.StarExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
target, ok := selectorExpr.X.(*ast.Ident)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if target.Name == "testing" && selectorExpr.Sel.Name == "T" {
|
||||
arg.Type = newGinkgoTInterface()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Replaces *testing.T types inside of a struct declaration with a GinkgoT
|
||||
* eg: type foo struct { *testing.T }
|
||||
*/
|
||||
func replaceTestingTsInStructType(structType *ast.StructType) {
|
||||
for _, field := range structType.Fields.List {
|
||||
starExpr, ok := field.Type.(*ast.StarExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
selectorExpr, ok := starExpr.X.(*ast.SelectorExpr)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
xIdent, ok := selectorExpr.X.(*ast.Ident)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if xIdent.Name == "testing" && selectorExpr.Sel.Name == "T" {
|
||||
field.Type = newGinkgoTInterface()
|
||||
}
|
||||
}
|
||||
}
|
44
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/convert_command.go
generated
vendored
@ -1,44 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/onsi/ginkgo/ginkgo/convert"
|
||||
"os"
|
||||
)
|
||||
|
||||
func BuildConvertCommand() *Command {
|
||||
return &Command{
|
||||
Name: "convert",
|
||||
FlagSet: flag.NewFlagSet("convert", flag.ExitOnError),
|
||||
UsageCommand: "ginkgo convert /path/to/package",
|
||||
Usage: []string{
|
||||
"Convert the package at the passed in path from an XUnit-style test to a Ginkgo-style test",
|
||||
},
|
||||
Command: convertPackage,
|
||||
}
|
||||
}
|
||||
|
||||
func convertPackage(args []string, additionalArgs []string) {
|
||||
if len(args) != 1 {
|
||||
println(fmt.Sprintf("usage: ginkgo convert /path/to/your/package"))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case error:
|
||||
println(err.Error())
|
||||
case string:
|
||||
println(err)
|
||||
default:
|
||||
println(fmt.Sprintf("unexpected error: %#v", err))
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
|
||||
convert.RewritePackage(args[0])
|
||||
}
|
164
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/generate_command.go
generated
vendored
@ -1,164 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
func BuildGenerateCommand() *Command {
|
||||
var agouti, noDot bool
|
||||
flagSet := flag.NewFlagSet("generate", flag.ExitOnError)
|
||||
flagSet.BoolVar(&agouti, "agouti", false, "If set, generate will generate a test file for writing Agouti tests")
|
||||
flagSet.BoolVar(&noDot, "nodot", false, "If set, generate will generate a test file that does not . import ginkgo and gomega")
|
||||
|
||||
return &Command{
|
||||
Name: "generate",
|
||||
FlagSet: flagSet,
|
||||
UsageCommand: "ginkgo generate <filename(s)>",
|
||||
Usage: []string{
|
||||
"Generate a test file named filename_test.go",
|
||||
"If the optional <filenames> argument is omitted, a file named after the package in the current directory will be created.",
|
||||
"Accepts the following flags:",
|
||||
},
|
||||
Command: func(args []string, additionalArgs []string) {
|
||||
generateSpec(args, agouti, noDot)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var specText = `package {{.Package}}_test
|
||||
|
||||
import (
|
||||
. "{{.PackageImportPath}}"
|
||||
|
||||
{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
|
||||
{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
|
||||
)
|
||||
|
||||
var _ = Describe("{{.Subject}}", func() {
|
||||
|
||||
})
|
||||
`
|
||||
|
||||
var agoutiSpecText = `package {{.Package}}_test
|
||||
|
||||
import (
|
||||
. "{{.PackageImportPath}}"
|
||||
|
||||
{{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}}
|
||||
{{if .IncludeImports}}. "github.com/onsi/gomega"{{end}}
|
||||
. "github.com/sclevine/agouti/matchers"
|
||||
"github.com/sclevine/agouti"
|
||||
)
|
||||
|
||||
var _ = Describe("{{.Subject}}", func() {
|
||||
var page *agouti.Page
|
||||
|
||||
BeforeEach(func() {
|
||||
var err error
|
||||
page, err = agoutiDriver.NewPage()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
Expect(page.Destroy()).To(Succeed())
|
||||
})
|
||||
})
|
||||
`
|
||||
|
||||
type specData struct {
|
||||
Package string
|
||||
Subject string
|
||||
PackageImportPath string
|
||||
IncludeImports bool
|
||||
}
|
||||
|
||||
func generateSpec(args []string, agouti, noDot bool) {
|
||||
if len(args) == 0 {
|
||||
err := generateSpecForSubject("", agouti, noDot)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
fmt.Println("")
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println("")
|
||||
return
|
||||
}
|
||||
|
||||
var failed bool
|
||||
for _, arg := range args {
|
||||
err := generateSpecForSubject(arg, agouti, noDot)
|
||||
if err != nil {
|
||||
failed = true
|
||||
fmt.Println(err.Error())
|
||||
}
|
||||
}
|
||||
fmt.Println("")
|
||||
if failed {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func generateSpecForSubject(subject string, agouti, noDot bool) error {
|
||||
packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
|
||||
if subject != "" {
|
||||
subject = strings.Split(subject, ".go")[0]
|
||||
subject = strings.Split(subject, "_test")[0]
|
||||
specFilePrefix = subject
|
||||
formattedName = prettifyPackageName(subject)
|
||||
}
|
||||
|
||||
data := specData{
|
||||
Package: packageName,
|
||||
Subject: formattedName,
|
||||
PackageImportPath: getPackageImportPath(),
|
||||
IncludeImports: !noDot,
|
||||
}
|
||||
|
||||
targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
|
||||
if fileExists(targetFile) {
|
||||
return fmt.Errorf("%s already exists.", targetFile)
|
||||
} else {
|
||||
fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
|
||||
}
|
||||
|
||||
f, err := os.Create(targetFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var templateText string
|
||||
if agouti {
|
||||
templateText = agoutiSpecText
|
||||
} else {
|
||||
templateText = specText
|
||||
}
|
||||
|
||||
specTemplate, err := template.New("spec").Parse(templateText)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
specTemplate.Execute(f, data)
|
||||
goFmt(targetFile)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getPackageImportPath() string {
|
||||
workingDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
sep := string(filepath.Separator)
|
||||
paths := strings.Split(workingDir, sep+"src"+sep)
|
||||
if len(paths) == 1 {
|
||||
fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
|
||||
return "UNKNOWN_PACKAGE_PATH"
|
||||
}
|
||||
return filepath.ToSlash(paths[len(paths)-1])
|
||||
}
|
31
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/help_command.go
generated
vendored
@ -1,31 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func BuildHelpCommand() *Command {
|
||||
return &Command{
|
||||
Name: "help",
|
||||
FlagSet: flag.NewFlagSet("help", flag.ExitOnError),
|
||||
UsageCommand: "ginkgo help <COMMAND>",
|
||||
Usage: []string{
|
||||
"Print usage information. If a command is passed in, print usage information just for that command.",
|
||||
},
|
||||
Command: printHelp,
|
||||
}
|
||||
}
|
||||
|
||||
func printHelp(args []string, additionalArgs []string) {
|
||||
if len(args) == 0 {
|
||||
usage()
|
||||
} else {
|
||||
command, found := commandMatching(args[0])
|
||||
if !found {
|
||||
complainAndQuit(fmt.Sprintf("Unknown command: %s", args[0]))
|
||||
}
|
||||
|
||||
usageForCommand(command, true)
|
||||
}
|
||||
}
|
@ -1,52 +0,0 @@
|
||||
package interrupthandler
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
type InterruptHandler struct {
|
||||
interruptCount int
|
||||
lock *sync.Mutex
|
||||
C chan bool
|
||||
}
|
||||
|
||||
func NewInterruptHandler() *InterruptHandler {
|
||||
h := &InterruptHandler{
|
||||
lock: &sync.Mutex{},
|
||||
C: make(chan bool, 0),
|
||||
}
|
||||
|
||||
go h.handleInterrupt()
|
||||
SwallowSigQuit()
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *InterruptHandler) WasInterrupted() bool {
|
||||
h.lock.Lock()
|
||||
defer h.lock.Unlock()
|
||||
|
||||
return h.interruptCount > 0
|
||||
}
|
||||
|
||||
func (h *InterruptHandler) handleInterrupt() {
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
<-c
|
||||
signal.Stop(c)
|
||||
|
||||
h.lock.Lock()
|
||||
h.interruptCount++
|
||||
if h.interruptCount == 1 {
|
||||
close(h.C)
|
||||
} else if h.interruptCount > 5 {
|
||||
os.Exit(1)
|
||||
}
|
||||
h.lock.Unlock()
|
||||
|
||||
go h.handleInterrupt()
|
||||
}
|
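Callers of this handler typically select on the channel it exposes; a minimal hypothetical sketch, where cleanup and done are stand-ins for whatever the caller manages:

	handler := interrupthandler.NewInterruptHandler()

	select {
	case <-handler.C:
		// First interrupt: the channel is closed, so this fires once and
		// the caller can shut the run down gracefully.
		cleanup()
	case <-done:
		// The test run finished on its own.
	}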
@ -1,14 +0,0 @@
|
||||
// +build freebsd openbsd netbsd dragonfly darwin linux
|
||||
|
||||
package interrupthandler
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func SwallowSigQuit() {
|
||||
c := make(chan os.Signal, 1024)
|
||||
signal.Notify(c, syscall.SIGQUIT)
|
||||
}
|
@ -1,7 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package interrupthandler
|
||||
|
||||
func SwallowSigQuit() {
|
||||
//noop
|
||||
}
|
291
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/main.go
generated
vendored
@ -1,291 +0,0 @@
|
||||
/*
|
||||
The Ginkgo CLI
|
||||
|
||||
The Ginkgo CLI is fully documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
|
||||
|
||||
You can also learn more by running:
|
||||
|
||||
ginkgo help
|
||||
|
||||
Here are some of the more commonly used commands:
|
||||
|
||||
To install:
|
||||
|
||||
go install github.com/onsi/ginkgo/ginkgo
|
||||
|
||||
To run tests:
|
||||
|
||||
ginkgo
|
||||
|
||||
To run tests in all subdirectories:
|
||||
|
||||
ginkgo -r
|
||||
|
||||
To run tests in particular packages:
|
||||
|
||||
ginkgo <flags> /path/to/package /path/to/another/package
|
||||
|
||||
To pass arguments/flags to your tests:
|
||||
|
||||
ginkgo <flags> <packages> -- <pass-throughs>
|
||||
|
||||
To run tests in parallel
|
||||
|
||||
ginkgo -p
|
||||
|
||||
this will automatically detect the optimal number of nodes to use. Alternatively, you can specify the number of nodes with:
|
||||
|
||||
ginkgo -nodes=N
|
||||
|
||||
(note that you don't need to provide -p in this case).
|
||||
|
||||
By default the Ginkgo CLI will spin up a server that the individual test processes send test output to. The CLI aggregates this output and then presents coherent test output, one test at a time, as each test completes.
|
||||
An alternative is to have the parallel nodes run and stream interleaved output back. This is useful for debugging, particularly in contexts where tests hang/fail to start. To get this interleaved output:
|
||||
|
||||
ginkgo -nodes=N -stream=true
|
||||
|
||||
On windows, the default value for stream is true.
|
||||
|
||||
By default, when running multiple tests (with -r or a list of packages) Ginkgo will abort when a test fails. To have Ginkgo run subsequent test suites instead you can:
|
||||
|
||||
ginkgo -keepGoing
|
||||
|
||||
To monitor packages and rerun tests when changes occur:
|
||||
|
||||
ginkgo watch <-r> </path/to/package>
|
||||
|
||||
passing `ginkgo watch` the `-r` flag will recursively detect all test suites under the current directory and monitor them.
|
||||
`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X; tests for packages
that depend on X are not rerun.
|
||||
|
||||
[OSX & Linux only] To receive (desktop) notifications when a test run completes:
|
||||
|
||||
ginkgo -notify
|
||||
|
||||
this is particularly useful with `ginkgo watch`. Notifications are currently only supported on OS X and require that you `brew install terminal-notifier`
|
||||
|
||||
Sometimes (to suss out race conditions/flaky tests, for example) you want to keep running a test suite until it fails. You can do this with:
|
||||
|
||||
ginkgo -untilItFails
|
||||
|
||||
To bootstrap a test suite:
|
||||
|
||||
ginkgo bootstrap
|
||||
|
||||
To generate a test file:
|
||||
|
||||
ginkgo generate <test_file_name>
|
||||
|
||||
To bootstrap/generate test files without using "." imports:
|
||||
|
||||
ginkgo bootstrap --nodot
|
||||
ginkgo generate --nodot
|
||||
|
||||
this will explicitly export all the identifiers in Ginkgo and Gomega allowing you to rename them to avoid collisions. When you pull to the latest Ginkgo/Gomega you'll want to run
|
||||
|
||||
ginkgo nodot
|
||||
|
||||
to refresh this list and pull in any new identifiers. In particular, this will pull in any new Gomega matchers that get added.
|
||||
|
||||
To convert an existing XUnit style test suite to a Ginkgo-style test suite:
|
||||
|
||||
ginkgo convert .
|
||||
|
||||
To unfocus tests:
|
||||
|
||||
ginkgo unfocus
|
||||
|
||||
or
|
||||
|
||||
ginkgo blur
|
||||
|
||||
To compile a test suite:
|
||||
|
||||
ginkgo build <path-to-package>
|
||||
|
||||
will output an executable file named `package.test`. This can be run directly or by invoking
|
||||
|
||||
ginkgo <path-to-package.test>
|
||||
|
||||
To print out Ginkgo's version:
|
||||
|
||||
ginkgo version
|
||||
|
||||
To get more help:
|
||||
|
||||
ginkgo help
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
)
|
||||
|
||||
const greenColor = "\x1b[32m"
|
||||
const redColor = "\x1b[91m"
|
||||
const defaultStyle = "\x1b[0m"
|
||||
const lightGrayColor = "\x1b[37m"
|
||||
|
||||
type Command struct {
|
||||
Name string
|
||||
AltName string
|
||||
FlagSet *flag.FlagSet
|
||||
Usage []string
|
||||
UsageCommand string
|
||||
Command func(args []string, additionalArgs []string)
|
||||
SuppressFlagDocumentation bool
|
||||
FlagDocSubstitute []string
|
||||
}
|
||||
|
||||
func (c *Command) Matches(name string) bool {
|
||||
return c.Name == name || (c.AltName != "" && c.AltName == name)
|
||||
}
|
||||
|
||||
func (c *Command) Run(args []string, additionalArgs []string) {
|
||||
c.FlagSet.Parse(args)
|
||||
c.Command(c.FlagSet.Args(), additionalArgs)
|
||||
}
|
||||
|
||||
var DefaultCommand *Command
|
||||
var Commands []*Command
|
||||
|
||||
func init() {
|
||||
DefaultCommand = BuildRunCommand()
|
||||
Commands = append(Commands, BuildWatchCommand())
|
||||
Commands = append(Commands, BuildBuildCommand())
|
||||
Commands = append(Commands, BuildBootstrapCommand())
|
||||
Commands = append(Commands, BuildGenerateCommand())
|
||||
Commands = append(Commands, BuildNodotCommand())
|
||||
Commands = append(Commands, BuildConvertCommand())
|
||||
Commands = append(Commands, BuildUnfocusCommand())
|
||||
Commands = append(Commands, BuildVersionCommand())
|
||||
Commands = append(Commands, BuildHelpCommand())
|
||||
}
|
||||
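The Command struct and the init registration above are the CLI's whole plugin surface; wiring in a new subcommand would be a fragment like the following inside init(). The `lint` command here is purely hypothetical and only illustrates the fields that a Command needs:

	Commands = append(Commands, &Command{
		Name:         "lint",
		FlagSet:      flag.NewFlagSet("lint", flag.ExitOnError),
		UsageCommand: "ginkgo lint <PACKAGES>",
		Usage: []string{
			"Hypothetical example: run a linter across the named suites",
		},
		Command: func(args []string, additionalArgs []string) {
			fmt.Println("linting", args)
		},
	})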
|
||||
func main() {
|
||||
args := []string{}
|
||||
additionalArgs := []string{}
|
||||
|
||||
foundDelimiter := false
|
||||
|
||||
for _, arg := range os.Args[1:] {
|
||||
if !foundDelimiter {
|
||||
if arg == "--" {
|
||||
foundDelimiter = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if foundDelimiter {
|
||||
additionalArgs = append(additionalArgs, arg)
|
||||
} else {
|
||||
args = append(args, arg)
|
||||
}
|
||||
}
|
||||
|
||||
if len(args) > 0 {
|
||||
commandToRun, found := commandMatching(args[0])
|
||||
if found {
|
||||
commandToRun.Run(args[1:], additionalArgs)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
DefaultCommand.Run(args, additionalArgs)
|
||||
}
|
||||
|
||||
func commandMatching(name string) (*Command, bool) {
|
||||
for _, command := range Commands {
|
||||
if command.Matches(name) {
|
||||
return command, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "Ginkgo Version %s\n\n", config.VERSION)
|
||||
usageForCommand(DefaultCommand, false)
|
||||
for _, command := range Commands {
|
||||
fmt.Fprintf(os.Stderr, "\n")
|
||||
usageForCommand(command, false)
|
||||
}
|
||||
}
|
||||
|
||||
func usageForCommand(command *Command, longForm bool) {
|
||||
fmt.Fprintf(os.Stderr, "%s\n%s\n", command.UsageCommand, strings.Repeat("-", len(command.UsageCommand)))
|
||||
fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.Usage, "\n"))
|
||||
if command.SuppressFlagDocumentation && !longForm {
|
||||
fmt.Fprintf(os.Stderr, "%s\n", strings.Join(command.FlagDocSubstitute, "\n "))
|
||||
} else {
|
||||
command.FlagSet.PrintDefaults()
|
||||
}
|
||||
}
|
||||
|
||||
func complainAndQuit(complaint string) {
|
||||
fmt.Fprintf(os.Stderr, "%s\nFor usage instructions:\n\tginkgo help\n", complaint)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func findSuites(args []string, recurse bool, skipPackage string, allowPrecompiled bool) ([]testsuite.TestSuite, []string) {
|
||||
suites := []testsuite.TestSuite{}
|
||||
|
||||
if len(args) > 0 {
|
||||
for _, arg := range args {
|
||||
if allowPrecompiled {
|
||||
suite, err := testsuite.PrecompiledTestSuite(arg)
|
||||
if err == nil {
|
||||
suites = append(suites, suite)
|
||||
continue
|
||||
}
|
||||
}
|
||||
suites = append(suites, testsuite.SuitesInDir(arg, recurse)...)
|
||||
}
|
||||
} else {
|
||||
suites = testsuite.SuitesInDir(".", recurse)
|
||||
}
|
||||
|
||||
skippedPackages := []string{}
|
||||
if skipPackage != "" {
|
||||
skipFilters := strings.Split(skipPackage, ",")
|
||||
filteredSuites := []testsuite.TestSuite{}
|
||||
for _, suite := range suites {
|
||||
skip := false
|
||||
for _, skipFilter := range skipFilters {
|
||||
if strings.Contains(suite.Path, skipFilter) {
|
||||
skip = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if skip {
|
||||
skippedPackages = append(skippedPackages, suite.Path)
|
||||
} else {
|
||||
filteredSuites = append(filteredSuites, suite)
|
||||
}
|
||||
}
|
||||
suites = filteredSuites
|
||||
}
|
||||
|
||||
return suites, skippedPackages
|
||||
}
|
||||
|
||||
func goFmt(path string) {
|
||||
err := exec.Command("go", "fmt", path).Run()
|
||||
if err != nil {
|
||||
complainAndQuit("Could not fmt: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func pluralizedWord(singular, plural string, count int) string {
|
||||
if count == 1 {
|
||||
return singular
|
||||
}
|
||||
return plural
|
||||
}
|
194
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go
generated
vendored
@ -1,194 +0,0 @@
|
||||
package nodot
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func ApplyNoDot(data []byte) ([]byte, error) {
|
||||
sections, err := generateNodotSections()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, section := range sections {
|
||||
data = section.createOrUpdateIn(data)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
type nodotSection struct {
|
||||
name string
|
||||
pkg string
|
||||
declarations []string
|
||||
types []string
|
||||
}
|
||||
|
||||
func (s nodotSection) createOrUpdateIn(data []byte) []byte {
|
||||
renames := map[string]string{}
|
||||
|
||||
contents := string(data)
|
||||
|
||||
lines := strings.Split(contents, "\n")
|
||||
|
||||
comment := "// Declarations for " + s.name
|
||||
|
||||
newLines := []string{}
|
||||
for _, line := range lines {
|
||||
if line == comment {
|
||||
continue
|
||||
}
|
||||
|
||||
words := strings.Split(line, " ")
|
||||
lastWord := words[len(words)-1]
|
||||
|
||||
if s.containsDeclarationOrType(lastWord) {
|
||||
renames[lastWord] = words[1]
|
||||
continue
|
||||
}
|
||||
|
||||
newLines = append(newLines, line)
|
||||
}
|
||||
|
||||
if len(newLines[len(newLines)-1]) > 0 {
|
||||
newLines = append(newLines, "")
|
||||
}
|
||||
|
||||
newLines = append(newLines, comment)
|
||||
|
||||
for _, typ := range s.types {
|
||||
name, ok := renames[s.prefix(typ)]
|
||||
if !ok {
|
||||
name = typ
|
||||
}
|
||||
newLines = append(newLines, fmt.Sprintf("type %s %s", name, s.prefix(typ)))
|
||||
}
|
||||
|
||||
for _, decl := range s.declarations {
|
||||
name, ok := renames[s.prefix(decl)]
|
||||
if !ok {
|
||||
name = decl
|
||||
}
|
||||
newLines = append(newLines, fmt.Sprintf("var %s = %s", name, s.prefix(decl)))
|
||||
}
|
||||
|
||||
newLines = append(newLines, "")
|
||||
|
||||
newContents := strings.Join(newLines, "\n")
|
||||
|
||||
return []byte(newContents)
|
||||
}
|
||||
|
||||
func (s nodotSection) prefix(declOrType string) string {
|
||||
return s.pkg + "." + declOrType
|
||||
}
|
||||
|
||||
func (s nodotSection) containsDeclarationOrType(word string) bool {
|
||||
for _, declaration := range s.declarations {
|
||||
if s.prefix(declaration) == word {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
for _, typ := range s.types {
|
||||
if s.prefix(typ) == word {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func generateNodotSections() ([]nodotSection, error) {
|
||||
sections := []nodotSection{}
|
||||
|
||||
declarations, err := getExportedDeclerationsForPackage("github.com/onsi/ginkgo", "ginkgo_dsl.go", "GINKGO_VERSION", "GINKGO_PANIC")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sections = append(sections, nodotSection{
|
||||
name: "Ginkgo DSL",
|
||||
pkg: "ginkgo",
|
||||
declarations: declarations,
|
||||
types: []string{"Done", "Benchmarker"},
|
||||
})
|
||||
|
||||
declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "gomega_dsl.go", "GOMEGA_VERSION")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sections = append(sections, nodotSection{
|
||||
name: "Gomega DSL",
|
||||
pkg: "gomega",
|
||||
declarations: declarations,
|
||||
})
|
||||
|
||||
declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "matchers.go")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sections = append(sections, nodotSection{
|
||||
name: "Gomega Matchers",
|
||||
pkg: "gomega",
|
||||
declarations: declarations,
|
||||
})
|
||||
|
||||
return sections, nil
|
||||
}
|
||||
|
||||
func getExportedDeclerationsForPackage(pkgPath string, filename string, blacklist ...string) ([]string, error) {
|
||||
pkg, err := build.Import(pkgPath, ".", 0)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
declarations, err := getExportedDeclarationsForFile(filepath.Join(pkg.Dir, filename))
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
blacklistLookup := map[string]bool{}
|
||||
for _, declaration := range blacklist {
|
||||
blacklistLookup[declaration] = true
|
||||
}
|
||||
|
||||
filteredDeclarations := []string{}
|
||||
for _, declaration := range declarations {
|
||||
if blacklistLookup[declaration] {
|
||||
continue
|
||||
}
|
||||
filteredDeclarations = append(filteredDeclarations, declaration)
|
||||
}
|
||||
|
||||
return filteredDeclarations, nil
|
||||
}
|
||||
|
||||
func getExportedDeclarationsForFile(path string) ([]string, error) {
|
||||
fset := token.NewFileSet()
|
||||
tree, err := parser.ParseFile(fset, path, nil, 0)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
|
||||
declarations := []string{}
|
||||
ast.FileExports(tree)
|
||||
for _, decl := range tree.Decls {
|
||||
switch x := decl.(type) {
|
||||
case *ast.GenDecl:
|
||||
switch s := x.Specs[0].(type) {
|
||||
case *ast.ValueSpec:
|
||||
declarations = append(declarations, s.Names[0].Name)
|
||||
}
|
||||
case *ast.FuncDecl:
|
||||
declarations = append(declarations, x.Name.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return declarations, nil
|
||||
}
|
76
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot_command.go
generated
vendored
@ -1,76 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"flag"
|
||||
"github.com/onsi/ginkgo/ginkgo/nodot"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
func BuildNodotCommand() *Command {
|
||||
return &Command{
|
||||
Name: "nodot",
|
||||
FlagSet: flag.NewFlagSet("bootstrap", flag.ExitOnError),
|
||||
UsageCommand: "ginkgo nodot",
|
||||
Usage: []string{
|
||||
"Update the nodot declarations in your test suite",
|
||||
"Any missing declarations (from, say, a recently added matcher) will be added to your bootstrap file.",
|
||||
"If you've renamed a declaration, that name will be honored and not overwritten.",
|
||||
},
|
||||
Command: updateNodot,
|
||||
}
|
||||
}
|
||||
|
||||
func updateNodot(args []string, additionalArgs []string) {
|
||||
suiteFile, perm := findSuiteFile()
|
||||
|
||||
data, err := ioutil.ReadFile(suiteFile)
|
||||
if err != nil {
|
||||
complainAndQuit("Failed to update nodot declarations: " + err.Error())
|
||||
}
|
||||
|
||||
content, err := nodot.ApplyNoDot(data)
|
||||
if err != nil {
|
||||
complainAndQuit("Failed to update nodot declarations: " + err.Error())
|
||||
}
|
||||
ioutil.WriteFile(suiteFile, content, perm)
|
||||
|
||||
goFmt(suiteFile)
|
||||
}
|
||||
|
||||
func findSuiteFile() (string, os.FileMode) {
|
||||
workingDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
complainAndQuit("Could not find suite file for nodot: " + err.Error())
|
||||
}
|
||||
|
||||
files, err := ioutil.ReadDir(workingDir)
|
||||
if err != nil {
|
||||
complainAndQuit("Could not find suite file for nodot: " + err.Error())
|
||||
}
|
||||
|
||||
re := regexp.MustCompile(`RunSpecs\(|RunSpecsWithDefaultAndCustomReporters\(|RunSpecsWithCustomReporters\(`)
|
||||
|
||||
for _, file := range files {
|
||||
if file.IsDir() {
|
||||
continue
|
||||
}
|
||||
path := filepath.Join(workingDir, file.Name())
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
complainAndQuit("Could not find suite file for nodot: " + err.Error())
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if re.MatchReader(bufio.NewReader(f)) {
|
||||
return path, file.Mode()
|
||||
}
|
||||
}
|
||||
|
||||
complainAndQuit("Could not find a suite file for nodot: you need a bootstrap file that call's Ginkgo's RunSpecs() command.\nTry running ginkgo bootstrap first.")
|
||||
|
||||
return "", 0
|
||||
}
|
141
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/notifications.go
generated
vendored
@ -1,141 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
)
|
||||
|
||||
type Notifier struct {
|
||||
commandFlags *RunWatchAndBuildCommandFlags
|
||||
}
|
||||
|
||||
func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier {
|
||||
return &Notifier{
|
||||
commandFlags: commandFlags,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) VerifyNotificationsAreAvailable() {
|
||||
if n.commandFlags.Notify {
|
||||
onLinux := (runtime.GOOS == "linux")
|
||||
onOSX := (runtime.GOOS == "darwin")
|
||||
if onOSX {
|
||||
|
||||
_, err := exec.LookPath("terminal-notifier")
|
||||
if err != nil {
|
||||
fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
|
||||
|
||||
OSX:
|
||||
|
||||
To remedy this:
|
||||
|
||||
brew install terminal-notifier
|
||||
|
||||
To learn more about terminal-notifier:
|
||||
|
||||
https://github.com/alloy/terminal-notifier
|
||||
`)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
} else if onLinux {
|
||||
|
||||
_, err := exec.LookPath("notify-send")
|
||||
if err != nil {
|
||||
fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed.
|
||||
|
||||
Linux:
|
||||
|
||||
Download and install notify-send for your distribution
|
||||
`)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, suitePassed bool) {
|
||||
if suitePassed {
|
||||
n.SendNotification("Ginkgo [PASS]", fmt.Sprintf(`Test suite for "%s" passed.`, suite.PackageName))
|
||||
} else {
|
||||
n.SendNotification("Ginkgo [FAIL]", fmt.Sprintf(`Test suite for "%s" failed.`, suite.PackageName))
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) SendNotification(title string, subtitle string) {
|
||||
|
||||
if n.commandFlags.Notify {
|
||||
onLinux := (runtime.GOOS == "linux")
|
||||
onOSX := (runtime.GOOS == "darwin")
|
||||
|
||||
if onOSX {
|
||||
|
||||
_, err := exec.LookPath("terminal-notifier")
|
||||
if err == nil {
|
||||
args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
|
||||
terminal := os.Getenv("TERM_PROGRAM")
|
||||
if terminal == "iTerm.app" {
|
||||
args = append(args, "-activate", "com.googlecode.iterm2")
|
||||
} else if terminal == "Apple_Terminal" {
|
||||
args = append(args, "-activate", "com.apple.Terminal")
|
||||
}
|
||||
|
||||
exec.Command("terminal-notifier", args...).Run()
|
||||
}
|
||||
|
||||
} else if onLinux {
|
||||
|
||||
_, err := exec.LookPath("notify-send")
|
||||
if err == nil {
|
||||
args := []string{"-a", "ginkgo", title, subtitle}
|
||||
exec.Command("notify-send", args...).Run()
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) RunCommand(suite testsuite.TestSuite, suitePassed bool) {
|
||||
|
||||
command := n.commandFlags.AfterSuiteHook
|
||||
if command != "" {
|
||||
|
||||
// Allow for string replacement to pass input to the command
|
||||
passed := "[FAIL]"
|
||||
if suitePassed {
|
||||
passed = "[PASS]"
|
||||
}
|
||||
command = strings.Replace(command, "(ginkgo-suite-passed)", passed, -1)
|
||||
command = strings.Replace(command, "(ginkgo-suite-name)", suite.PackageName, -1)
|
||||
|
||||
// Must break command into parts
|
||||
splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
|
||||
parts := splitArgs.FindAllString(command, -1)
|
||||
|
||||
output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
|
||||
if err != nil {
|
||||
fmt.Println("Post-suite command failed:")
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
fmt.Printf("\t%s\n", output)
|
||||
} else {
|
||||
fmt.Printf("\t%s%s%s\n", redColor, string(output), defaultStyle)
|
||||
}
|
||||
n.SendNotification("Ginkgo [ERROR]", fmt.Sprintf(`After suite command "%s" failed`, n.commandFlags.AfterSuiteHook))
|
||||
} else {
|
||||
fmt.Println("Post-suite command succeeded:")
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
fmt.Printf("\t%s\n", output)
|
||||
} else {
|
||||
fmt.Printf("\t%s%s%s\n", greenColor, string(output), defaultStyle)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
192
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/run_command.go
generated
vendored
@ -1,192 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
func BuildRunCommand() *Command {
|
||||
commandFlags := NewRunCommandFlags(flag.NewFlagSet("ginkgo", flag.ExitOnError))
|
||||
notifier := NewNotifier(commandFlags)
|
||||
interruptHandler := interrupthandler.NewInterruptHandler()
|
||||
runner := &SpecRunner{
|
||||
commandFlags: commandFlags,
|
||||
notifier: notifier,
|
||||
interruptHandler: interruptHandler,
|
||||
suiteRunner: NewSuiteRunner(notifier, interruptHandler),
|
||||
}
|
||||
|
||||
return &Command{
|
||||
Name: "",
|
||||
FlagSet: commandFlags.FlagSet,
|
||||
UsageCommand: "ginkgo <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
|
||||
Usage: []string{
|
||||
"Run the tests in the passed in <PACKAGES> (or the package in the current directory if left blank).",
|
||||
"Any arguments after -- will be passed to the test.",
|
||||
"Accepts the following flags:",
|
||||
},
|
||||
Command: runner.RunSpecs,
|
||||
}
|
||||
}
|
||||
|
||||
type SpecRunner struct {
|
||||
commandFlags *RunWatchAndBuildCommandFlags
|
||||
notifier *Notifier
|
||||
interruptHandler *interrupthandler.InterruptHandler
|
||||
suiteRunner *SuiteRunner
|
||||
}
|
||||
|
||||
func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
|
||||
r.commandFlags.computeNodes()
|
||||
r.notifier.VerifyNotificationsAreAvailable()
|
||||
|
||||
suites, skippedPackages := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, true)
|
||||
if len(skippedPackages) > 0 {
|
||||
fmt.Println("Will skip:")
|
||||
for _, skippedPackage := range skippedPackages {
|
||||
fmt.Println(" " + skippedPackage)
|
||||
}
|
||||
}
|
||||
|
||||
if len(skippedPackages) > 0 && len(suites) == 0 {
|
||||
fmt.Println("All tests skipped! Exiting...")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if len(suites) == 0 {
|
||||
complainAndQuit("Found no test suites")
|
||||
}
|
||||
|
||||
r.ComputeSuccinctMode(len(suites))
|
||||
|
||||
t := time.Now()
|
||||
|
||||
runners := []*testrunner.TestRunner{}
|
||||
for _, suite := range suites {
|
||||
runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, additionalArgs))
|
||||
}
|
||||
|
||||
numSuites := 0
|
||||
runResult := testrunner.PassingRunResult()
|
||||
if r.commandFlags.UntilItFails {
|
||||
iteration := 0
|
||||
for {
|
||||
r.UpdateSeed()
|
||||
randomizedRunners := r.randomizeOrder(runners)
|
||||
runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
|
||||
iteration++
|
||||
|
||||
if r.interruptHandler.WasInterrupted() {
|
||||
break
|
||||
}
|
||||
|
||||
if runResult.Passed {
|
||||
fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration, orcMessage(iteration))
|
||||
} else {
|
||||
fmt.Printf("\nTests failed on attempt #%d\n\n", iteration)
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
randomizedRunners := r.randomizeOrder(runners)
|
||||
runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil)
|
||||
}
|
||||
|
||||
for _, runner := range runners {
|
||||
runner.CleanUp()
|
||||
}
|
||||
|
||||
fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t))
|
||||
|
||||
if runResult.Passed {
|
||||
if runResult.HasProgrammaticFocus {
|
||||
fmt.Printf("Test Suite Passed\n")
|
||||
fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
|
||||
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
|
||||
} else {
|
||||
fmt.Printf("Test Suite Passed\n")
|
||||
os.Exit(0)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Test Suite Failed\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *SpecRunner) ComputeSuccinctMode(numSuites int) {
|
||||
if config.DefaultReporterConfig.Verbose {
|
||||
config.DefaultReporterConfig.Succinct = false
|
||||
return
|
||||
}
|
||||
|
||||
if numSuites == 1 {
|
||||
return
|
||||
}
|
||||
|
||||
if numSuites > 1 && !r.commandFlags.wasSet("succinct") {
|
||||
config.DefaultReporterConfig.Succinct = true
|
||||
}
|
||||
}
|
||||
|
||||
func (r *SpecRunner) UpdateSeed() {
|
||||
if !r.commandFlags.wasSet("seed") {
|
||||
config.GinkgoConfig.RandomSeed = time.Now().Unix()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *SpecRunner) randomizeOrder(runners []*testrunner.TestRunner) []*testrunner.TestRunner {
|
||||
if !r.commandFlags.RandomizeSuites {
|
||||
return runners
|
||||
}
|
||||
|
||||
if len(runners) <= 1 {
|
||||
return runners
|
||||
}
|
||||
|
||||
randomizedRunners := make([]*testrunner.TestRunner, len(runners))
|
||||
randomizer := rand.New(rand.NewSource(config.GinkgoConfig.RandomSeed))
|
||||
permutation := randomizer.Perm(len(runners))
|
||||
for i, j := range permutation {
|
||||
randomizedRunners[i] = runners[j]
|
||||
}
|
||||
return randomizedRunners
|
||||
}
|
||||
|
||||
func orcMessage(iteration int) string {
|
||||
if iteration < 10 {
|
||||
return ""
|
||||
} else if iteration < 30 {
|
||||
return []string{
|
||||
"If at first you succeed...",
|
||||
"...try, try again.",
|
||||
"Looking good!",
|
||||
"Still good...",
|
||||
"I think your tests are fine....",
|
||||
"Yep, still passing",
|
||||
"Here we go again...",
|
||||
"Even the gophers are getting bored",
|
||||
"Did you try -race?",
|
||||
"Maybe you should stop now?",
|
||||
"I'm getting tired...",
|
||||
"What if I just made you a sandwich?",
|
||||
"Hit ^C, hit ^C, please hit ^C",
|
||||
"Make it stop. Please!",
|
||||
"Come on! Enough is enough!",
|
||||
"Dave, this conversation can serve no purpose anymore. Goodbye.",
|
||||
"Just what do you think you're doing, Dave? ",
|
||||
"I, Sisyphus",
|
||||
"Insanity: doing the same thing over and over again and expecting different results. -Einstein",
|
||||
"I guess Einstein never tried to churn butter",
|
||||
}[iteration-10] + "\n"
|
||||
} else {
|
||||
return "No, seriously... you can probably stop now.\n"
|
||||
}
|
||||
}
|
121
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go
generated
vendored
@ -1,121 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"runtime"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
)
|
||||
|
||||
type RunWatchAndBuildCommandFlags struct {
|
||||
Recurse bool
|
||||
Race bool
|
||||
Cover bool
|
||||
CoverPkg string
|
||||
SkipPackage string
|
||||
Tags string
|
||||
|
||||
//for run and watch commands
|
||||
NumCPU int
|
||||
NumCompilers int
|
||||
ParallelStream bool
|
||||
Notify bool
|
||||
AfterSuiteHook string
|
||||
AutoNodes bool
|
||||
|
||||
//only for run command
|
||||
KeepGoing bool
|
||||
UntilItFails bool
|
||||
RandomizeSuites bool
|
||||
|
||||
//only for watch command
|
||||
Depth int
|
||||
|
||||
FlagSet *flag.FlagSet
|
||||
}
|
||||
|
||||
const runMode = 1
|
||||
const watchMode = 2
|
||||
const buildMode = 3
|
||||
|
||||
func NewRunCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
|
||||
c := &RunWatchAndBuildCommandFlags{
|
||||
FlagSet: flagSet,
|
||||
}
|
||||
c.flags(runMode)
|
||||
return c
|
||||
}
|
||||
|
||||
func NewWatchCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
|
||||
c := &RunWatchAndBuildCommandFlags{
|
||||
FlagSet: flagSet,
|
||||
}
|
||||
c.flags(watchMode)
|
||||
return c
|
||||
}
|
||||
|
||||
func NewBuildCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags {
|
||||
c := &RunWatchAndBuildCommandFlags{
|
||||
FlagSet: flagSet,
|
||||
}
|
||||
c.flags(buildMode)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *RunWatchAndBuildCommandFlags) wasSet(flagName string) bool {
|
||||
wasSet := false
|
||||
c.FlagSet.Visit(func(f *flag.Flag) {
|
||||
if f.Name == flagName {
|
||||
wasSet = true
|
||||
}
|
||||
})
|
||||
|
||||
return wasSet
|
||||
}
|
||||
|
||||
func (c *RunWatchAndBuildCommandFlags) computeNodes() {
|
||||
if c.wasSet("nodes") {
|
||||
return
|
||||
}
|
||||
if c.AutoNodes {
|
||||
switch n := runtime.NumCPU(); {
|
||||
case n <= 4:
|
||||
c.NumCPU = n
|
||||
default:
|
||||
c.NumCPU = n - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
|
||||
onWindows := (runtime.GOOS == "windows")
|
||||
|
||||
c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively")
|
||||
c.FlagSet.BoolVar(&(c.Race), "race", false, "Run tests with race detection enabled")
|
||||
c.FlagSet.BoolVar(&(c.Cover), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory")
|
||||
c.FlagSet.StringVar(&(c.CoverPkg), "coverpkg", "", "Run tests with coverage on the given external modules")
|
||||
c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.")
|
||||
c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build")
|
||||
|
||||
if mode == runMode || mode == watchMode {
|
||||
config.Flags(c.FlagSet, "", false)
|
||||
c.FlagSet.IntVar(&(c.NumCPU), "nodes", 1, "The number of parallel test nodes to run")
|
||||
c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)")
|
||||
c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes")
|
||||
c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging")
|
||||
if !onWindows {
|
||||
c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
|
||||
}
|
||||
c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes")
|
||||
}
|
||||
|
||||
if mode == runMode {
|
||||
c.FlagSet.BoolVar(&(c.KeepGoing), "keepGoing", false, "When true, failures from earlier test suites do not prevent later test suites from running")
|
||||
c.FlagSet.BoolVar(&(c.UntilItFails), "untilItFails", false, "When true, Ginkgo will keep rerunning tests until a failure occurs")
|
||||
c.FlagSet.BoolVar(&(c.RandomizeSuites), "randomizeSuites", false, "When true, Ginkgo will randomize the order in which test suites run")
|
||||
}
|
||||
|
||||
if mode == watchMode {
|
||||
c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree")
|
||||
}
|
||||
}
|
172
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/suite_runner.go
generated
vendored
@ -1,172 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
)
|
||||
|
||||
type compilationInput struct {
|
||||
runner *testrunner.TestRunner
|
||||
result chan compilationOutput
|
||||
}
|
||||
|
||||
type compilationOutput struct {
|
||||
runner *testrunner.TestRunner
|
||||
err error
|
||||
}
|
||||
|
||||
type SuiteRunner struct {
|
||||
notifier *Notifier
|
||||
interruptHandler *interrupthandler.InterruptHandler
|
||||
}
|
||||
|
||||
func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner {
|
||||
return &SuiteRunner{
|
||||
notifier: notifier,
|
||||
interruptHandler: interruptHandler,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput {
|
||||
//we return this to the consumer, it will return each runner in order as it compiles
|
||||
compilationOutputs := make(chan compilationOutput, len(runners))
|
||||
|
||||
//an array of channels - the nth runner's compilation output is sent to the nth channel in this array
|
||||
//we read from these channels in order to ensure we run the suites in order
|
||||
orderedCompilationOutputs := []chan compilationOutput{}
|
||||
for _ = range runners {
|
||||
orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1))
|
||||
}
|
||||
|
||||
//we're going to spin up numCompilers compilers - they're going to run concurrently and will consume this channel
|
||||
//we prefill the channel then close it, this ensures we compile things in the correct order
|
||||
workPool := make(chan compilationInput, len(runners))
|
||||
for i, runner := range runners {
|
||||
workPool <- compilationInput{runner, orderedCompilationOutputs[i]}
|
||||
}
|
||||
close(workPool)
|
||||
|
||||
//pick a reasonable numCompilers
|
||||
if numCompilers == 0 {
|
||||
numCompilers = runtime.NumCPU()
|
||||
}
|
||||
|
||||
//a WaitGroup to help us wait for all compilers to shut down
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(numCompilers)
|
||||
|
||||
//spin up the concurrent compilers
|
||||
for i := 0; i < numCompilers; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for input := range workPool {
|
||||
if r.interruptHandler.WasInterrupted() {
|
||||
return
|
||||
}
|
||||
|
||||
if willCompile != nil {
|
||||
willCompile(input.runner.Suite)
|
||||
}
|
||||
|
||||
//We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness...
|
||||
var err error
|
||||
retries := 0
|
||||
for retries <= 5 {
|
||||
if r.interruptHandler.WasInterrupted() {
|
||||
return
|
||||
}
|
||||
if err = input.runner.Compile(); err == nil {
|
||||
break
|
||||
}
|
||||
retries++
|
||||
}
|
||||
|
||||
input.result <- compilationOutput{input.runner, err}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
//read from the compilation output channels *in order* and send them to the caller
|
||||
//close the compilationOutputs channel to tell the caller we're done
|
||||
go func() {
|
||||
defer close(compilationOutputs)
|
||||
for _, orderedCompilationOutput := range orderedCompilationOutputs {
|
||||
select {
|
||||
case compilationOutput := <-orderedCompilationOutput:
|
||||
compilationOutputs <- compilationOutput
|
||||
case <-r.interruptHandler.C:
|
||||
//interrupt detected, wait for the compilers to shut down then bail
|
||||
//this ensures we clean up after ourselves and don't leave any compilation processes running
|
||||
wg.Wait()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return compilationOutputs
|
||||
}
|
||||
|
||||
func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
|
||||
runResult := testrunner.PassingRunResult()
|
||||
|
||||
compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile)
|
||||
|
||||
numSuitesThatRan := 0
|
||||
suitesThatFailed := []testsuite.TestSuite{}
|
||||
for compilationOutput := range compilationOutputs {
|
||||
if compilationOutput.err != nil {
|
||||
fmt.Print(compilationOutput.err.Error())
|
||||
}
|
||||
numSuitesThatRan++
|
||||
suiteRunResult := testrunner.FailingRunResult()
|
||||
if compilationOutput.err == nil {
|
||||
suiteRunResult = compilationOutput.runner.Run()
|
||||
}
|
||||
r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
|
||||
r.notifier.RunCommand(compilationOutput.runner.Suite, suiteRunResult.Passed)
|
||||
runResult = runResult.Merge(suiteRunResult)
|
||||
if !suiteRunResult.Passed {
|
||||
suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)
|
||||
if !keepGoing {
|
||||
break
|
||||
}
|
||||
}
|
||||
if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct {
|
||||
fmt.Println("")
|
||||
}
|
||||
}
|
||||
|
||||
if keepGoing && !runResult.Passed {
|
||||
r.listFailedSuites(suitesThatFailed)
|
||||
}
|
||||
|
||||
return runResult, numSuitesThatRan
|
||||
}
|
||||
|
||||
func (r *SuiteRunner) listFailedSuites(suitesThatFailed []testsuite.TestSuite) {
|
||||
fmt.Println("")
|
||||
fmt.Println("There were failures detected in the following suites:")
|
||||
|
||||
maxPackageNameLength := 0
|
||||
for _, suite := range suitesThatFailed {
|
||||
if len(suite.PackageName) > maxPackageNameLength {
|
||||
maxPackageNameLength = len(suite.PackageName)
|
||||
}
|
||||
}
|
||||
|
||||
packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
|
||||
|
||||
for _, suite := range suitesThatFailed {
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
fmt.Printf("\t"+packageNameFormatter+" %s\n", suite.PackageName, suite.Path)
|
||||
} else {
|
||||
fmt.Printf("\t%s"+packageNameFormatter+"%s %s%s%s\n", redColor, suite.PackageName, defaultStyle, lightGrayColor, suite.Path, defaultStyle)
|
||||
}
|
||||
}
|
||||
}
|
52
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go
generated
vendored
@ -1,52 +0,0 @@
package testrunner

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"strings"
	"sync"
)

type logWriter struct {
	buffer *bytes.Buffer
	lock   *sync.Mutex
	log    *log.Logger
}

func newLogWriter(target io.Writer, node int) *logWriter {
	return &logWriter{
		buffer: &bytes.Buffer{},
		lock:   &sync.Mutex{},
		log:    log.New(target, fmt.Sprintf("[%d] ", node), 0),
	}
}

func (w *logWriter) Write(data []byte) (n int, err error) {
	w.lock.Lock()
	defer w.lock.Unlock()

	w.buffer.Write(data)
	contents := w.buffer.String()

	lines := strings.Split(contents, "\n")
	for _, line := range lines[0 : len(lines)-1] {
		w.log.Println(line)
	}

	w.buffer.Reset()
	w.buffer.Write([]byte(lines[len(lines)-1]))
	return len(data), nil
}

func (w *logWriter) Close() error {
	w.lock.Lock()
	defer w.lock.Unlock()

	if w.buffer.Len() > 0 {
		w.log.Println(w.buffer.String())
	}

	return nil
}
27
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go
generated
vendored
@ -1,27 +0,0 @@
package testrunner

type RunResult struct {
	Passed               bool
	HasProgrammaticFocus bool
}

func PassingRunResult() RunResult {
	return RunResult{
		Passed:               true,
		HasProgrammaticFocus: false,
	}
}

func FailingRunResult() RunResult {
	return RunResult{
		Passed:               false,
		HasProgrammaticFocus: false,
	}
}

func (r RunResult) Merge(o RunResult) RunResult {
	return RunResult{
		Passed:               r.Passed && o.Passed,
		HasProgrammaticFocus: r.HasProgrammaticFocus || o.HasProgrammaticFocus,
	}
}
460
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go
generated
vendored
@ -1,460 +0,0 @@
|
||||
package testrunner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
"github.com/onsi/ginkgo/internal/remote"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type TestRunner struct {
|
||||
Suite testsuite.TestSuite
|
||||
|
||||
compiled bool
|
||||
compilationTargetPath string
|
||||
|
||||
numCPU int
|
||||
parallelStream bool
|
||||
race bool
|
||||
cover bool
|
||||
coverPkg string
|
||||
tags string
|
||||
additionalArgs []string
|
||||
}
|
||||
|
||||
func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, coverPkg string, tags string, additionalArgs []string) *TestRunner {
|
||||
runner := &TestRunner{
|
||||
Suite: suite,
|
||||
numCPU: numCPU,
|
||||
parallelStream: parallelStream,
|
||||
race: race,
|
||||
cover: cover,
|
||||
coverPkg: coverPkg,
|
||||
tags: tags,
|
||||
additionalArgs: additionalArgs,
|
||||
}
|
||||
|
||||
if !suite.Precompiled {
|
||||
dir, err := ioutil.TempDir("", "ginkgo")
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("coulnd't create temporary directory... might be time to rm -rf:\n%s", err.Error()))
|
||||
}
|
||||
runner.compilationTargetPath = filepath.Join(dir, suite.PackageName+".test")
|
||||
}
|
||||
|
||||
return runner
|
||||
}
|
||||
|
||||
func (t *TestRunner) Compile() error {
|
||||
return t.CompileTo(t.compilationTargetPath)
|
||||
}
|
||||
|
||||
func (t *TestRunner) CompileTo(path string) error {
|
||||
if t.compiled {
|
||||
return nil
|
||||
}
|
||||
|
||||
if t.Suite.Precompiled {
|
||||
return nil
|
||||
}
|
||||
|
||||
args := []string{"test", "-c", "-i", "-o", path}
|
||||
if t.race {
|
||||
args = append(args, "-race")
|
||||
}
|
||||
if t.cover || t.coverPkg != "" {
|
||||
args = append(args, "-cover", "-covermode=atomic")
|
||||
}
|
||||
if t.coverPkg != "" {
|
||||
args = append(args, fmt.Sprintf("-coverpkg=%s", t.coverPkg))
|
||||
}
|
||||
if t.tags != "" {
|
||||
args = append(args, fmt.Sprintf("-tags=%s", t.tags))
|
||||
}
|
||||
|
||||
cmd := exec.Command("go", args...)
|
||||
|
||||
cmd.Dir = t.Suite.Path
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
if err != nil {
|
||||
fixedOutput := fixCompilationOutput(string(output), t.Suite.Path)
|
||||
if len(output) > 0 {
|
||||
return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput)
|
||||
}
|
||||
return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
|
||||
}
|
||||
|
||||
if fileExists(path) == false {
|
||||
compiledFile := filepath.Join(t.Suite.Path, t.Suite.PackageName+".test")
|
||||
if fileExists(compiledFile) {
|
||||
// seems like we are on an old go version that does not support the -o flag on go test
|
||||
// move the compiled test file to the desired location by hand
|
||||
err = os.Rename(compiledFile, path)
|
||||
if err != nil {
|
||||
// We cannot move the file, perhaps because the source and destination
|
||||
// are on different partitions. We can copy the file, however.
|
||||
err = copyFile(compiledFile, path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to copy compiled file: %s", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path)
|
||||
}
|
||||
}
|
||||
|
||||
t.compiled = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func fileExists(path string) bool {
|
||||
_, err := os.Stat(path)
|
||||
return err == nil || os.IsNotExist(err) == false
|
||||
}
|
||||
|
||||
// copyFile copies the contents of the file named src to the file named
|
||||
// by dst. The file will be created if it does not already exist. If the
|
||||
// destination file exists, all its contents will be replaced by the contents
|
||||
// of the source file.
|
||||
func copyFile(src, dst string) error {
|
||||
srcInfo, err := os.Stat(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mode := srcInfo.Mode()
|
||||
|
||||
in, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer in.Close()
|
||||
|
||||
out, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
closeErr := out.Close()
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = out.Sync()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return out.Chmod(mode)
|
||||
}
|
||||
|
||||
/*
|
||||
go test -c -i spits package.test out into the cwd. there's no way to change this.
|
||||
|
||||
to make sure it doesn't generate conflicting .test files in the cwd, Compile() must switch the cwd to the test package.
|
||||
|
||||
unfortunately, this causes go test's compile output to be expressed *relative to the test package* instead of the cwd.
|
||||
|
||||
this makes it hard to reason about what failed, and also prevents iterm's Cmd+click from working.
|
||||
|
||||
fixCompilationOutput..... rewrites the output to fix the paths.
|
||||
|
||||
yeah......
|
||||
*/
|
||||
func fixCompilationOutput(output string, relToPath string) string {
|
||||
re := regexp.MustCompile(`^(\S.*\.go)\:\d+\:`)
|
||||
lines := strings.Split(output, "\n")
|
||||
for i, line := range lines {
|
||||
indices := re.FindStringSubmatchIndex(line)
|
||||
if len(indices) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
path := line[indices[2]:indices[3]]
|
||||
path = filepath.Join(relToPath, path)
|
||||
lines[i] = path + line[indices[3]:]
|
||||
}
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
func (t *TestRunner) Run() RunResult {
|
||||
if t.Suite.IsGinkgo {
|
||||
if t.numCPU > 1 {
|
||||
if t.parallelStream {
|
||||
return t.runAndStreamParallelGinkgoSuite()
|
||||
} else {
|
||||
return t.runParallelGinkgoSuite()
|
||||
}
|
||||
} else {
|
||||
return t.runSerialGinkgoSuite()
|
||||
}
|
||||
} else {
|
||||
return t.runGoTestSuite()
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TestRunner) CleanUp() {
|
||||
if t.Suite.Precompiled {
|
||||
return
|
||||
}
|
||||
os.RemoveAll(filepath.Dir(t.compilationTargetPath))
|
||||
}
|
||||
|
||||
func (t *TestRunner) runSerialGinkgoSuite() RunResult {
|
||||
ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
|
||||
return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil)
|
||||
}
|
||||
|
||||
func (t *TestRunner) runGoTestSuite() RunResult {
|
||||
return t.run(t.cmd([]string{"-test.v"}, os.Stdout, 1), nil)
|
||||
}
|
||||
|
||||
func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
|
||||
completions := make(chan RunResult)
|
||||
writers := make([]*logWriter, t.numCPU)
|
||||
|
||||
server, err := remote.NewServer(t.numCPU)
|
||||
if err != nil {
|
||||
panic("Failed to start parallel spec server")
|
||||
}
|
||||
|
||||
server.Start()
|
||||
defer server.Close()
|
||||
|
||||
for cpu := 0; cpu < t.numCPU; cpu++ {
|
||||
config.GinkgoConfig.ParallelNode = cpu + 1
|
||||
config.GinkgoConfig.ParallelTotal = t.numCPU
|
||||
config.GinkgoConfig.SyncHost = server.Address()
|
||||
|
||||
ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
|
||||
|
||||
writers[cpu] = newLogWriter(os.Stdout, cpu+1)
|
||||
|
||||
cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
|
||||
|
||||
server.RegisterAlive(cpu+1, func() bool {
|
||||
if cmd.ProcessState == nil {
|
||||
return true
|
||||
}
|
||||
return !cmd.ProcessState.Exited()
|
||||
})
|
||||
|
||||
go t.run(cmd, completions)
|
||||
}
|
||||
|
||||
res := PassingRunResult()
|
||||
|
||||
for cpu := 0; cpu < t.numCPU; cpu++ {
|
||||
res = res.Merge(<-completions)
|
||||
}
|
||||
|
||||
for _, writer := range writers {
|
||||
writer.Close()
|
||||
}
|
||||
|
||||
os.Stdout.Sync()
|
||||
|
||||
if t.cover || t.coverPkg != "" {
|
||||
t.combineCoverprofiles()
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (t *TestRunner) runParallelGinkgoSuite() RunResult {
|
||||
result := make(chan bool)
|
||||
completions := make(chan RunResult)
|
||||
writers := make([]*logWriter, t.numCPU)
|
||||
reports := make([]*bytes.Buffer, t.numCPU)
|
||||
|
||||
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
|
||||
aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer)
|
||||
|
||||
server, err := remote.NewServer(t.numCPU)
|
||||
if err != nil {
|
||||
panic("Failed to start parallel spec server")
|
||||
}
|
||||
server.RegisterReporters(aggregator)
|
||||
server.Start()
|
||||
defer server.Close()
|
||||
|
||||
for cpu := 0; cpu < t.numCPU; cpu++ {
|
||||
config.GinkgoConfig.ParallelNode = cpu + 1
|
||||
config.GinkgoConfig.ParallelTotal = t.numCPU
|
||||
config.GinkgoConfig.SyncHost = server.Address()
|
||||
config.GinkgoConfig.StreamHost = server.Address()
|
||||
|
||||
ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
|
||||
|
||||
reports[cpu] = &bytes.Buffer{}
|
||||
writers[cpu] = newLogWriter(reports[cpu], cpu+1)
|
||||
|
||||
cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1)
|
||||
|
||||
server.RegisterAlive(cpu+1, func() bool {
|
||||
if cmd.ProcessState == nil {
|
||||
return true
|
||||
}
|
||||
return !cmd.ProcessState.Exited()
|
||||
})
|
||||
|
||||
go t.run(cmd, completions)
|
||||
}
|
||||
|
||||
res := PassingRunResult()
|
||||
|
||||
for cpu := 0; cpu < t.numCPU; cpu++ {
|
||||
res = res.Merge(<-completions)
|
||||
}
|
||||
|
||||
//all test processes are done, at this point
|
||||
//we should be able to wait for the aggregator to tell us that it's done
|
||||
|
||||
select {
|
||||
case <-result:
|
||||
fmt.Println("")
|
||||
case <-time.After(time.Second):
|
||||
//the aggregator never got back to us! something must have gone wrong
|
||||
fmt.Println(`
|
||||
-------------------------------------------------------------------
|
||||
| |
|
||||
| Ginkgo timed out waiting for all parallel nodes to report back! |
|
||||
| |
|
||||
-------------------------------------------------------------------
|
||||
`)
|
||||
|
||||
os.Stdout.Sync()
|
||||
|
||||
for _, writer := range writers {
|
||||
writer.Close()
|
||||
}
|
||||
|
||||
for _, report := range reports {
|
||||
fmt.Print(report.String())
|
||||
}
|
||||
|
||||
os.Stdout.Sync()
|
||||
}
|
||||
|
||||
if t.cover || t.coverPkg != "" {
|
||||
t.combineCoverprofiles()
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
|
||||
args := []string{"--test.timeout=24h"}
|
||||
if t.cover || t.coverPkg != "" {
|
||||
coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile"
|
||||
if t.numCPU > 1 {
|
||||
coverprofile = fmt.Sprintf("%s.%d", coverprofile, node)
|
||||
}
|
||||
args = append(args, coverprofile)
|
||||
}
|
||||
|
||||
args = append(args, ginkgoArgs...)
|
||||
args = append(args, t.additionalArgs...)
|
||||
|
||||
path := t.compilationTargetPath
|
||||
if t.Suite.Precompiled {
|
||||
path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
|
||||
}
|
||||
|
||||
cmd := exec.Command(path, args...)
|
||||
|
||||
cmd.Dir = t.Suite.Path
|
||||
cmd.Stderr = stream
|
||||
cmd.Stdout = stream
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult {
|
||||
var res RunResult
|
||||
|
||||
defer func() {
|
||||
if completions != nil {
|
||||
completions <- res
|
||||
}
|
||||
}()
|
||||
|
||||
err := cmd.Start()
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to run test suite!\n\t%s", err.Error())
|
||||
return res
|
||||
}
|
||||
|
||||
cmd.Wait()
|
||||
exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
|
||||
res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
|
||||
res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (t *TestRunner) combineCoverprofiles() {
|
||||
profiles := []string{}
|
||||
for cpu := 1; cpu <= t.numCPU; cpu++ {
|
||||
coverFile := fmt.Sprintf("%s.coverprofile.%d", t.Suite.PackageName, cpu)
|
||||
coverFile = filepath.Join(t.Suite.Path, coverFile)
|
||||
coverProfile, err := ioutil.ReadFile(coverFile)
|
||||
os.Remove(coverFile)
|
||||
|
||||
if err == nil {
|
||||
profiles = append(profiles, string(coverProfile))
|
||||
}
|
||||
}
|
||||
|
||||
if len(profiles) != t.numCPU {
|
||||
return
|
||||
}
|
||||
|
||||
lines := map[string]int{}
|
||||
lineOrder := []string{}
|
||||
for i, coverProfile := range profiles {
|
||||
for _, line := range strings.Split(string(coverProfile), "\n")[1:] {
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
components := strings.Split(line, " ")
|
||||
count, _ := strconv.Atoi(components[len(components)-1])
|
||||
prefix := strings.Join(components[0:len(components)-1], " ")
|
||||
lines[prefix] += count
|
||||
if i == 0 {
|
||||
lineOrder = append(lineOrder, prefix)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
output := []string{"mode: atomic"}
|
||||
for _, line := range lineOrder {
|
||||
output = append(output, fmt.Sprintf("%s %d", line, lines[line]))
|
||||
}
|
||||
finalOutput := strings.Join(output, "\n")
|
||||
ioutil.WriteFile(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.coverprofile", t.Suite.PackageName)), []byte(finalOutput), 0666)
|
||||
}
|
106
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go
generated
vendored
@ -1,106 +0,0 @@
|
||||
package testsuite
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type TestSuite struct {
|
||||
Path string
|
||||
PackageName string
|
||||
IsGinkgo bool
|
||||
Precompiled bool
|
||||
}
|
||||
|
||||
func PrecompiledTestSuite(path string) (TestSuite, error) {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return TestSuite{}, err
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return TestSuite{}, errors.New("this is a directory, not a file")
|
||||
}
|
||||
|
||||
if filepath.Ext(path) != ".test" {
|
||||
return TestSuite{}, errors.New("this is not a .test binary")
|
||||
}
|
||||
|
||||
if info.Mode()&0111 == 0 {
|
||||
return TestSuite{}, errors.New("this is not executable")
|
||||
}
|
||||
|
||||
dir := relPath(filepath.Dir(path))
|
||||
packageName := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path))
|
||||
|
||||
return TestSuite{
|
||||
Path: dir,
|
||||
PackageName: packageName,
|
||||
IsGinkgo: true,
|
||||
Precompiled: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func SuitesInDir(dir string, recurse bool) []TestSuite {
|
||||
suites := []TestSuite{}
|
||||
files, _ := ioutil.ReadDir(dir)
|
||||
re := regexp.MustCompile(`_test\.go$`)
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && re.Match([]byte(file.Name())) {
|
||||
suites = append(suites, New(dir, files))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if recurse {
|
||||
re = regexp.MustCompile(`^[._]`)
|
||||
for _, file := range files {
|
||||
if file.IsDir() && !re.Match([]byte(file.Name())) {
|
||||
suites = append(suites, SuitesInDir(dir+"/"+file.Name(), recurse)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return suites
|
||||
}
|
||||
|
||||
func relPath(dir string) string {
|
||||
dir, _ = filepath.Abs(dir)
|
||||
cwd, _ := os.Getwd()
|
||||
dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
|
||||
dir = "." + string(filepath.Separator) + dir
|
||||
return dir
|
||||
}
|
||||
|
||||
func New(dir string, files []os.FileInfo) TestSuite {
|
||||
return TestSuite{
|
||||
Path: relPath(dir),
|
||||
PackageName: packageNameForSuite(dir),
|
||||
IsGinkgo: filesHaveGinkgoSuite(dir, files),
|
||||
}
|
||||
}
|
||||
|
||||
func packageNameForSuite(dir string) string {
|
||||
path, _ := filepath.Abs(dir)
|
||||
return filepath.Base(path)
|
||||
}
|
||||
|
||||
func filesHaveGinkgoSuite(dir string, files []os.FileInfo) bool {
|
||||
reTestFile := regexp.MustCompile(`_test\.go$`)
|
||||
reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`)
|
||||
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && reTestFile.Match([]byte(file.Name())) {
|
||||
contents, _ := ioutil.ReadFile(dir + "/" + file.Name())
|
||||
if reGinkgo.Match(contents) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
38
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/unfocus_command.go
generated
vendored
@ -1,38 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"os/exec"
)

func BuildUnfocusCommand() *Command {
	return &Command{
		Name:         "unfocus",
		AltName:      "blur",
		FlagSet:      flag.NewFlagSet("unfocus", flag.ExitOnError),
		UsageCommand: "ginkgo unfocus (or ginkgo blur)",
		Usage: []string{
			"Recursively unfocuses any focused tests under the current directory",
		},
		Command: unfocusSpecs,
	}
}

func unfocusSpecs([]string, []string) {
	unfocus("Describe")
	unfocus("Context")
	unfocus("It")
	unfocus("Measure")
	unfocus("DescribeTable")
	unfocus("Entry")
}

func unfocus(component string) {
	fmt.Printf("Removing F%s...\n", component)
	cmd := exec.Command("gofmt", fmt.Sprintf("-r=F%s -> %s", component, component), "-w", ".")
	out, _ := cmd.CombinedOutput()
	if string(out) != "" {
		println(string(out))
	}
}
23
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/version_command.go
generated
vendored
@ -1,23 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"github.com/onsi/ginkgo/config"
)

func BuildVersionCommand() *Command {
	return &Command{
		Name:         "version",
		FlagSet:      flag.NewFlagSet("version", flag.ExitOnError),
		UsageCommand: "ginkgo version",
		Usage: []string{
			"Print Ginkgo's version",
		},
		Command: printVersion,
	}
}

func printVersion([]string, []string) {
	fmt.Printf("Ginkgo Version %s\n", config.VERSION)
}
22
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/delta.go
generated
vendored
@ -1,22 +0,0 @@
package watch

import "sort"

type Delta struct {
	ModifiedPackages []string

	NewSuites      []*Suite
	RemovedSuites  []*Suite
	modifiedSuites []*Suite
}

type DescendingByDelta []*Suite

func (a DescendingByDelta) Len() int           { return len(a) }
func (a DescendingByDelta) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }

func (d Delta) ModifiedSuites() []*Suite {
	sort.Sort(DescendingByDelta(d.modifiedSuites))
	return d.modifiedSuites
}
71
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go
generated
vendored
@ -1,71 +0,0 @@
package watch

import (
	"fmt"

	"github.com/onsi/ginkgo/ginkgo/testsuite"
)

type SuiteErrors map[testsuite.TestSuite]error

type DeltaTracker struct {
	maxDepth      int
	suites        map[string]*Suite
	packageHashes *PackageHashes
}

func NewDeltaTracker(maxDepth int) *DeltaTracker {
	return &DeltaTracker{
		maxDepth:      maxDepth,
		packageHashes: NewPackageHashes(),
		suites:        map[string]*Suite{},
	}
}

func (d *DeltaTracker) Delta(suites []testsuite.TestSuite) (delta Delta, errors SuiteErrors) {
	errors = SuiteErrors{}
	delta.ModifiedPackages = d.packageHashes.CheckForChanges()

	providedSuitePaths := map[string]bool{}
	for _, suite := range suites {
		providedSuitePaths[suite.Path] = true
	}

	d.packageHashes.StartTrackingUsage()

	for _, suite := range d.suites {
		if providedSuitePaths[suite.Suite.Path] {
			if suite.Delta() > 0 {
				delta.modifiedSuites = append(delta.modifiedSuites, suite)
			}
		} else {
			delta.RemovedSuites = append(delta.RemovedSuites, suite)
		}
	}

	d.packageHashes.StopTrackingUsageAndPrune()

	for _, suite := range suites {
		_, ok := d.suites[suite.Path]
		if !ok {
			s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
			if err != nil {
				errors[suite] = err
				continue
			}
			d.suites[suite.Path] = s
			delta.NewSuites = append(delta.NewSuites, s)
		}
	}

	return delta, errors
}

func (d *DeltaTracker) WillRun(suite testsuite.TestSuite) error {
	s, ok := d.suites[suite.Path]
	if !ok {
		return fmt.Errorf("unknown suite %s", suite.Path)
	}

	return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
}
91
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go
generated
vendored
@ -1,91 +0,0 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"go/build"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
|
||||
|
||||
type Dependencies struct {
|
||||
deps map[string]int
|
||||
}
|
||||
|
||||
func NewDependencies(path string, maxDepth int) (Dependencies, error) {
|
||||
d := Dependencies{
|
||||
deps: map[string]int{},
|
||||
}
|
||||
|
||||
if maxDepth == 0 {
|
||||
return d, nil
|
||||
}
|
||||
|
||||
err := d.seedWithDepsForPackageAtPath(path)
|
||||
if err != nil {
|
||||
return d, err
|
||||
}
|
||||
|
||||
for depth := 1; depth < maxDepth; depth++ {
|
||||
n := len(d.deps)
|
||||
d.addDepsForDepth(depth)
|
||||
if n == len(d.deps) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func (d Dependencies) Dependencies() map[string]int {
|
||||
return d.deps
|
||||
}
|
||||
|
||||
func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
|
||||
pkg, err := build.ImportDir(path, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.resolveAndAdd(pkg.Imports, 1)
|
||||
d.resolveAndAdd(pkg.TestImports, 1)
|
||||
d.resolveAndAdd(pkg.XTestImports, 1)
|
||||
|
||||
delete(d.deps, pkg.Dir)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d Dependencies) addDepsForDepth(depth int) {
|
||||
for dep, depDepth := range d.deps {
|
||||
if depDepth == depth {
|
||||
d.addDepsForDep(dep, depth+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d Dependencies) addDepsForDep(dep string, depth int) {
|
||||
pkg, err := build.ImportDir(dep, 0)
|
||||
if err != nil {
|
||||
println(err.Error())
|
||||
return
|
||||
}
|
||||
d.resolveAndAdd(pkg.Imports, depth)
|
||||
}
|
||||
|
||||
func (d Dependencies) resolveAndAdd(deps []string, depth int) {
|
||||
for _, dep := range deps {
|
||||
pkg, err := build.Import(dep, ".", 0)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if pkg.Goroot == false && !ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) {
|
||||
d.addDepIfNotPresent(pkg.Dir, depth)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
|
||||
_, ok := d.deps[dep]
|
||||
if !ok {
|
||||
d.deps[dep] = depth
|
||||
}
|
||||
}
|
103
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go
generated
vendored
@ -1,103 +0,0 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"time"
|
||||
)
|
||||
|
||||
var goRegExp = regexp.MustCompile(`\.go$`)
|
||||
var goTestRegExp = regexp.MustCompile(`_test\.go$`)
|
||||
|
||||
type PackageHash struct {
|
||||
CodeModifiedTime time.Time
|
||||
TestModifiedTime time.Time
|
||||
Deleted bool
|
||||
|
||||
path string
|
||||
codeHash string
|
||||
testHash string
|
||||
}
|
||||
|
||||
func NewPackageHash(path string) *PackageHash {
|
||||
p := &PackageHash{
|
||||
path: path,
|
||||
}
|
||||
|
||||
p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PackageHash) CheckForChanges() bool {
|
||||
codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()
|
||||
|
||||
if deleted {
|
||||
if p.Deleted == false {
|
||||
t := time.Now()
|
||||
p.CodeModifiedTime = t
|
||||
p.TestModifiedTime = t
|
||||
}
|
||||
p.Deleted = true
|
||||
return true
|
||||
}
|
||||
|
||||
modified := false
|
||||
p.Deleted = false
|
||||
|
||||
if p.codeHash != codeHash {
|
||||
p.CodeModifiedTime = codeModifiedTime
|
||||
modified = true
|
||||
}
|
||||
if p.testHash != testHash {
|
||||
p.TestModifiedTime = testModifiedTime
|
||||
modified = true
|
||||
}
|
||||
|
||||
p.codeHash = codeHash
|
||||
p.testHash = testHash
|
||||
return modified
|
||||
}
|
||||
|
||||
func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
|
||||
infos, err := ioutil.ReadDir(p.path)
|
||||
|
||||
if err != nil {
|
||||
deleted = true
|
||||
return
|
||||
}
|
||||
|
||||
for _, info := range infos {
|
||||
if info.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
if goTestRegExp.Match([]byte(info.Name())) {
|
||||
testHash += p.hashForFileInfo(info)
|
||||
if info.ModTime().After(testModifiedTime) {
|
||||
testModifiedTime = info.ModTime()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if goRegExp.Match([]byte(info.Name())) {
|
||||
codeHash += p.hashForFileInfo(info)
|
||||
if info.ModTime().After(codeModifiedTime) {
|
||||
codeModifiedTime = info.ModTime()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
testHash += codeHash
|
||||
if codeModifiedTime.After(testModifiedTime) {
|
||||
testModifiedTime = codeModifiedTime
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
|
||||
return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
|
||||
}
|
82
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go
generated
vendored
@ -1,82 +0,0 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type PackageHashes struct {
|
||||
PackageHashes map[string]*PackageHash
|
||||
usedPaths map[string]bool
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
func NewPackageHashes() *PackageHashes {
|
||||
return &PackageHashes{
|
||||
PackageHashes: map[string]*PackageHash{},
|
||||
usedPaths: nil,
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PackageHashes) CheckForChanges() []string {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
modified := []string{}
|
||||
|
||||
for _, packageHash := range p.PackageHashes {
|
||||
if packageHash.CheckForChanges() {
|
||||
modified = append(modified, packageHash.path)
|
||||
}
|
||||
}
|
||||
|
||||
return modified
|
||||
}
|
||||
|
||||
func (p *PackageHashes) Add(path string) *PackageHash {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
path, _ = filepath.Abs(path)
|
||||
_, ok := p.PackageHashes[path]
|
||||
if !ok {
|
||||
p.PackageHashes[path] = NewPackageHash(path)
|
||||
}
|
||||
|
||||
if p.usedPaths != nil {
|
||||
p.usedPaths[path] = true
|
||||
}
|
||||
return p.PackageHashes[path]
|
||||
}
|
||||
|
||||
func (p *PackageHashes) Get(path string) *PackageHash {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
path, _ = filepath.Abs(path)
|
||||
if p.usedPaths != nil {
|
||||
p.usedPaths[path] = true
|
||||
}
|
||||
return p.PackageHashes[path]
|
||||
}
|
||||
|
||||
func (p *PackageHashes) StartTrackingUsage() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
p.usedPaths = map[string]bool{}
|
||||
}
|
||||
|
||||
func (p *PackageHashes) StopTrackingUsageAndPrune() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
for path := range p.PackageHashes {
|
||||
if !p.usedPaths[path] {
|
||||
delete(p.PackageHashes, path)
|
||||
}
|
||||
}
|
||||
|
||||
p.usedPaths = nil
|
||||
}
|
87
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch/suite.go
generated
vendored
@ -1,87 +0,0 @@
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
)
|
||||
|
||||
type Suite struct {
|
||||
Suite testsuite.TestSuite
|
||||
RunTime time.Time
|
||||
Dependencies Dependencies
|
||||
|
||||
sharedPackageHashes *PackageHashes
|
||||
}
|
||||
|
||||
func NewSuite(suite testsuite.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
|
||||
deps, err := NewDependencies(suite.Path, maxDepth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sharedPackageHashes.Add(suite.Path)
|
||||
for dep := range deps.Dependencies() {
|
||||
sharedPackageHashes.Add(dep)
|
||||
}
|
||||
|
||||
return &Suite{
|
||||
Suite: suite,
|
||||
Dependencies: deps,
|
||||
|
||||
sharedPackageHashes: sharedPackageHashes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Suite) Delta() float64 {
|
||||
delta := s.delta(s.Suite.Path, true, 0) * 1000
|
||||
for dep, depth := range s.Dependencies.Dependencies() {
|
||||
delta += s.delta(dep, false, depth)
|
||||
}
|
||||
return delta
|
||||
}
|
||||
|
||||
func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
|
||||
s.RunTime = time.Now()
|
||||
|
||||
deps, err := NewDependencies(s.Suite.Path, maxDepth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.sharedPackageHashes.Add(s.Suite.Path)
|
||||
for dep := range deps.Dependencies() {
|
||||
s.sharedPackageHashes.Add(dep)
|
||||
}
|
||||
|
||||
s.Dependencies = deps
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Suite) Description() string {
|
||||
numDeps := len(s.Dependencies.Dependencies())
|
||||
pluralizer := "ies"
|
||||
if numDeps == 1 {
|
||||
pluralizer = "y"
|
||||
}
|
||||
return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
|
||||
}
|
||||
|
||||
func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
|
||||
return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
|
||||
}
|
||||
|
||||
func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
|
||||
packageHash := s.sharedPackageHashes.Get(packagePath)
|
||||
var modifiedTime time.Time
|
||||
if includeTests {
|
||||
modifiedTime = packageHash.TestModifiedTime
|
||||
} else {
|
||||
modifiedTime = packageHash.CodeModifiedTime
|
||||
}
|
||||
|
||||
return modifiedTime.Sub(s.RunTime)
|
||||
}
|
172
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch_command.go
generated
vendored
@ -1,172 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
|
||||
"github.com/onsi/ginkgo/ginkgo/testrunner"
|
||||
"github.com/onsi/ginkgo/ginkgo/testsuite"
|
||||
"github.com/onsi/ginkgo/ginkgo/watch"
|
||||
)
|
||||
|
||||
func BuildWatchCommand() *Command {
|
||||
commandFlags := NewWatchCommandFlags(flag.NewFlagSet("watch", flag.ExitOnError))
|
||||
interruptHandler := interrupthandler.NewInterruptHandler()
|
||||
notifier := NewNotifier(commandFlags)
|
||||
watcher := &SpecWatcher{
|
||||
commandFlags: commandFlags,
|
||||
notifier: notifier,
|
||||
interruptHandler: interruptHandler,
|
||||
suiteRunner: NewSuiteRunner(notifier, interruptHandler),
|
||||
}
|
||||
|
||||
return &Command{
|
||||
Name: "watch",
|
||||
FlagSet: commandFlags.FlagSet,
|
||||
UsageCommand: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>",
|
||||
Usage: []string{
|
||||
"Watches the tests in the passed in <PACKAGES> and runs them when changes occur.",
|
||||
"Any arguments after -- will be passed to the test.",
|
||||
},
|
||||
Command: watcher.WatchSpecs,
|
||||
SuppressFlagDocumentation: true,
|
||||
FlagDocSubstitute: []string{
|
||||
"Accepts all the flags that the ginkgo command accepts except for --keepGoing and --untilItFails",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type SpecWatcher struct {
|
||||
commandFlags *RunWatchAndBuildCommandFlags
|
||||
notifier *Notifier
|
||||
interruptHandler *interrupthandler.InterruptHandler
|
||||
suiteRunner *SuiteRunner
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
|
||||
w.commandFlags.computeNodes()
|
||||
w.notifier.VerifyNotificationsAreAvailable()
|
||||
|
||||
w.WatchSuites(args, additionalArgs)
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalArgs []string) []*testrunner.TestRunner {
|
||||
runners := []*testrunner.TestRunner{}
|
||||
|
||||
for _, suite := range suites {
|
||||
runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.CoverPkg, w.commandFlags.Tags, additionalArgs))
|
||||
}
|
||||
|
||||
return runners
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) {
|
||||
suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
|
||||
|
||||
if len(suites) == 0 {
|
||||
complainAndQuit("Found no test suites")
|
||||
}
|
||||
|
||||
fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), pluralizedWord("suite", "suites", len(suites)), w.commandFlags.Depth)
|
||||
deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth)
|
||||
delta, errors := deltaTracker.Delta(suites)
|
||||
|
||||
fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
|
||||
for _, suite := range delta.NewSuites {
|
||||
fmt.Println(" " + suite.Description())
|
||||
}
|
||||
|
||||
for suite, err := range errors {
|
||||
fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
|
||||
}
|
||||
|
||||
if len(suites) == 1 {
|
||||
runners := w.runnersForSuites(suites, additionalArgs)
|
||||
w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, nil)
|
||||
runners[0].CleanUp()
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(time.Second)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false)
|
||||
delta, _ := deltaTracker.Delta(suites)
|
||||
|
||||
suitesToRun := []testsuite.TestSuite{}
|
||||
|
||||
if len(delta.NewSuites) > 0 {
|
||||
fmt.Printf(greenColor+"Detected %d new %s:\n"+defaultStyle, len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites)))
|
||||
for _, suite := range delta.NewSuites {
|
||||
suitesToRun = append(suitesToRun, suite.Suite)
|
||||
fmt.Println(" " + suite.Description())
|
||||
}
|
||||
}
|
||||
|
||||
modifiedSuites := delta.ModifiedSuites()
|
||||
if len(modifiedSuites) > 0 {
|
||||
fmt.Println(greenColor + "\nDetected changes in:" + defaultStyle)
|
||||
for _, pkg := range delta.ModifiedPackages {
|
||||
fmt.Println(" " + pkg)
|
||||
}
|
||||
fmt.Printf(greenColor+"Will run %d %s:\n"+defaultStyle, len(modifiedSuites), pluralizedWord("suite", "suites", len(modifiedSuites)))
|
||||
for _, suite := range modifiedSuites {
|
||||
suitesToRun = append(suitesToRun, suite.Suite)
|
||||
fmt.Println(" " + suite.Description())
|
||||
}
|
||||
fmt.Println("")
|
||||
}
|
||||
|
||||
if len(suitesToRun) > 0 {
|
||||
w.UpdateSeed()
|
||||
w.ComputeSuccinctMode(len(suitesToRun))
|
||||
runners := w.runnersForSuites(suitesToRun, additionalArgs)
|
||||
result, _ := w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, func(suite testsuite.TestSuite) {
|
||||
deltaTracker.WillRun(suite)
|
||||
})
|
||||
for _, runner := range runners {
|
||||
runner.CleanUp()
|
||||
}
|
||||
if !w.interruptHandler.WasInterrupted() {
|
||||
color := redColor
|
||||
if result.Passed {
|
||||
color = greenColor
|
||||
}
|
||||
fmt.Println(color + "\nDone. Resuming watch..." + defaultStyle)
|
||||
}
|
||||
}
|
||||
|
||||
case <-w.interruptHandler.C:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) ComputeSuccinctMode(numSuites int) {
|
||||
if config.DefaultReporterConfig.Verbose {
|
||||
config.DefaultReporterConfig.Succinct = false
|
||||
return
|
||||
}
|
||||
|
||||
if w.commandFlags.wasSet("succinct") {
|
||||
return
|
||||
}
|
||||
|
||||
if numSuites == 1 {
|
||||
config.DefaultReporterConfig.Succinct = false
|
||||
}
|
||||
|
||||
if numSuites > 1 {
|
||||
config.DefaultReporterConfig.Succinct = true
|
||||
}
|
||||
}
|
||||
|
||||
func (w *SpecWatcher) UpdateSeed() {
|
||||
if !w.commandFlags.wasSet("seed") {
|
||||
config.GinkgoConfig.RandomSeed = time.Now().Unix()
|
||||
}
|
||||
}
|
536
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo_dsl.go
generated
vendored
@ -1,536 +0,0 @@
|
||||
/*
|
||||
Ginkgo is a BDD-style testing framework for Golang
|
||||
|
||||
The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/
|
||||
|
||||
Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)
|
||||
|
||||
Ginkgo on Github: http://github.com/onsi/ginkgo
|
||||
|
||||
Ginkgo is MIT-Licensed
|
||||
*/
|
||||
package ginkgo
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/internal/codelocation"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/internal/remote"
|
||||
"github.com/onsi/ginkgo/internal/suite"
|
||||
"github.com/onsi/ginkgo/internal/testingtproxy"
|
||||
"github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
const GINKGO_VERSION = config.VERSION
|
||||
const GINKGO_PANIC = `
|
||||
Your test failed.
|
||||
Ginkgo panics to prevent subsequent assertions from running.
|
||||
Normally Ginkgo rescues this panic so you shouldn't see it.
|
||||
|
||||
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
|
||||
To circumvent this, you should call
|
||||
|
||||
defer GinkgoRecover()
|
||||
|
||||
at the top of the goroutine that caused this panic.
|
||||
`
|
||||
const defaultTimeout = 1
|
||||
|
||||
var globalSuite *suite.Suite
|
||||
var globalFailer *failer.Failer
|
||||
|
||||
func init() {
|
||||
config.Flags(flag.CommandLine, "ginkgo", true)
|
||||
GinkgoWriter = writer.New(os.Stdout)
|
||||
globalFailer = failer.New()
|
||||
globalSuite = suite.New(globalFailer)
|
||||
}
|
||||
|
||||
//GinkgoWriter implements an io.Writer
|
||||
//When running in verbose mode any writes to GinkgoWriter will be immediately printed
|
||||
//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
|
||||
//only if the current test fails.
|
||||
var GinkgoWriter io.Writer
|
||||
|
||||
//The interface by which Ginkgo receives *testing.T
|
||||
type GinkgoTestingT interface {
|
||||
Fail()
|
||||
}
|
||||
|
||||
//GinkgoParallelNode returns the parallel node number for the current ginkgo process
|
||||
//The node number is 1-indexed
|
||||
func GinkgoParallelNode() int {
|
||||
return config.GinkgoConfig.ParallelNode
|
||||
}
|
||||
|
||||
//Some matcher libraries or legacy codebases require a *testing.T
|
||||
//GinkgoT implements an interface analogous to *testing.T and can be used if
|
||||
//the library in question accepts *testing.T through an interface
|
||||
//
|
||||
// For example, with testify:
|
||||
// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
|
||||
//
|
||||
// Or with gomock:
|
||||
// gomock.NewController(GinkgoT())
|
||||
//
|
||||
// GinkgoT() takes an optional offset argument that can be used to get the
|
||||
// correct line number associated with the failure.
|
||||
func GinkgoT(optionalOffset ...int) GinkgoTInterface {
|
||||
offset := 3
|
||||
if len(optionalOffset) > 0 {
|
||||
offset = optionalOffset[0]
|
||||
}
|
||||
return testingtproxy.New(GinkgoWriter, Fail, offset)
|
||||
}
|
||||
|
||||
//The interface returned by GinkgoT(). This covers most of the methods
|
||||
//in the testing package's T.
|
||||
type GinkgoTInterface interface {
|
||||
Fail()
|
||||
Error(args ...interface{})
|
||||
Errorf(format string, args ...interface{})
|
||||
FailNow()
|
||||
Fatal(args ...interface{})
|
||||
Fatalf(format string, args ...interface{})
|
||||
Log(args ...interface{})
|
||||
Logf(format string, args ...interface{})
|
||||
Failed() bool
|
||||
Parallel()
|
||||
Skip(args ...interface{})
|
||||
Skipf(format string, args ...interface{})
|
||||
SkipNow()
|
||||
Skipped() bool
|
||||
}
|
||||
|
||||
//Custom Ginkgo test reporters must implement the Reporter interface.
|
||||
//
|
||||
//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
|
||||
//and a SpecSummary just before a spec begins and just after a spec ends
|
||||
type Reporter reporters.Reporter
|
||||
|
||||
//Asynchronous specs are given a channel of the Done type. You must close or write to the channel
|
||||
//to tell Ginkgo that your async test is done.
|
||||
type Done chan<- interface{}
|
||||
|
||||
//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
|
||||
// FullTestText: a concatenation of ComponentTexts and the TestText
|
||||
// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
|
||||
// TestText: the text in the actual It or Measure node
|
||||
// IsMeasurement: true if the current test is a measurement
|
||||
// FileName: the name of the file containing the current test
|
||||
// LineNumber: the line number for the current test
|
||||
// Failed: if the current test has failed, this will be true (useful in an AfterEach)
|
||||
type GinkgoTestDescription struct {
|
||||
FullTestText string
|
||||
ComponentTexts []string
|
||||
TestText string
|
||||
|
||||
IsMeasurement bool
|
||||
|
||||
FileName string
|
||||
LineNumber int
|
||||
|
||||
Failed bool
|
||||
}
|
||||
|
||||
//CurrentGinkgoTestDescription returns information about the current running test.
|
||||
func CurrentGinkgoTestDescription() GinkgoTestDescription {
|
||||
summary, ok := globalSuite.CurrentRunningSpecSummary()
|
||||
if !ok {
|
||||
return GinkgoTestDescription{}
|
||||
}
|
||||
|
||||
subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
|
||||
|
||||
return GinkgoTestDescription{
|
||||
ComponentTexts: summary.ComponentTexts[1:],
|
||||
FullTestText: strings.Join(summary.ComponentTexts[1:], " "),
|
||||
TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1],
|
||||
IsMeasurement: summary.IsMeasurement,
|
||||
FileName: subjectCodeLocation.FileName,
|
||||
LineNumber: subjectCodeLocation.LineNumber,
|
||||
Failed: summary.HasFailureState(),
|
||||
}
|
||||
}
|
||||
|
||||
//Measurement tests receive a Benchmarker.
|
||||
//
|
||||
//You use the Time() function to time how long the passed in body function takes to run
|
||||
//You use the RecordValue() function to track arbitrary numerical measurements.
|
||||
//The optional info argument is passed to the test reporter and can be used to
|
||||
// provide the measurement data to a custom reporter with context.
|
||||
//
|
||||
//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
|
||||
type Benchmarker interface {
|
||||
Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
|
||||
RecordValue(name string, value float64, info ...interface{})
|
||||
}
|
||||
|
||||
//RunSpecs is the entry point for the Ginkgo test runner.
|
||||
//You must call this within a Golang testing TestX(t *testing.T) function.
|
||||
//
|
||||
//To bootstrap a test suite you can use the Ginkgo CLI:
|
||||
//
|
||||
// ginkgo bootstrap
|
||||
func RunSpecs(t GinkgoTestingT, description string) bool {
|
||||
specReporters := []Reporter{buildDefaultReporter()}
|
||||
return RunSpecsWithCustomReporters(t, description, specReporters)
|
||||
}
|
||||
|
||||
//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
|
||||
//RunSpecs() with this method.
|
||||
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
|
||||
specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...)
|
||||
return RunSpecsWithCustomReporters(t, description, specReporters)
|
||||
}
|
||||
|
||||
//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
|
||||
//RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter
|
||||
func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
|
||||
writer := GinkgoWriter.(*writer.Writer)
|
||||
writer.SetStream(config.DefaultReporterConfig.Verbose)
|
||||
reporters := make([]reporters.Reporter, len(specReporters))
|
||||
for i, reporter := range specReporters {
|
||||
reporters[i] = reporter
|
||||
}
|
||||
passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
|
||||
if passed && hasFocusedTests {
|
||||
fmt.Println("PASS | FOCUSED")
|
||||
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
|
||||
}
|
||||
return passed
|
||||
}
|
||||
|
||||
func buildDefaultReporter() Reporter {
|
||||
remoteReportingServer := config.GinkgoConfig.StreamHost
|
||||
if remoteReportingServer == "" {
|
||||
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
|
||||
return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
|
||||
} else {
|
||||
return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor())
|
||||
}
|
||||
}
|
||||
|
||||
//Skip notifies Ginkgo that the current spec should be skipped.
|
||||
func Skip(message string, callerSkip ...int) {
|
||||
skip := 0
|
||||
if len(callerSkip) > 0 {
|
||||
skip = callerSkip[0]
|
||||
}
|
||||
|
||||
globalFailer.Skip(message, codelocation.New(skip+1))
|
||||
panic(GINKGO_PANIC)
|
||||
}
|
||||
|
||||
//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
|
||||
func Fail(message string, callerSkip ...int) {
|
||||
skip := 0
|
||||
if len(callerSkip) > 0 {
|
||||
skip = callerSkip[0]
|
||||
}
|
||||
|
||||
globalFailer.Fail(message, codelocation.New(skip+1))
|
||||
panic(GINKGO_PANIC)
|
||||
}
|
||||
|
||||
//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
|
||||
//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
|
||||
//calls out to Gomega
|
||||
//
|
||||
//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
|
||||
//further assertions from running. This panic must be recovered. Ginkgo does this for you
|
||||
//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
|
||||
//
|
||||
//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
|
||||
//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
|
||||
func GinkgoRecover() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
globalFailer.Panic(codelocation.New(1), e)
|
||||
}
|
||||
}
|
||||
|
||||
//Describe blocks allow you to organize your specs. A Describe block can contain any number of
|
||||
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
|
||||
//
|
||||
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
|
||||
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
|
||||
//or method and, within that Describe, outline a number of Contexts.
|
||||
func Describe(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus the tests within a describe block using FDescribe
|
||||
func FDescribe(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using PDescribe
|
||||
func PDescribe(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using XDescribe
|
||||
func XDescribe(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//Context blocks allow you to organize your specs. A Context block can contain any number of
|
||||
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
|
||||
//
|
||||
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
|
||||
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
|
||||
//or method and, within that Describe, outline a number of Contexts.
|
||||
func Context(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus the tests within a describe block using FContext
|
||||
func FContext(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using PContext
|
||||
func PContext(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using XContext
|
||||
func XContext(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
|
||||
//within an It block.
|
||||
//
|
||||
//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
|
||||
//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
|
||||
func It(text string, body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus individual Its using FIt
|
||||
func FIt(text string, body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Its as pending using PIt
|
||||
func PIt(text string, _ ...interface{}) bool {
|
||||
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Its as pending using XIt
|
||||
func XIt(text string, _ ...interface{}) bool {
|
||||
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//By allows you to better document large Its.
|
||||
//
|
||||
//Generally you should try to keep your Its short and to the point. This is not always possible, however,
|
||||
//especially in the context of integration tests that capture a particular workflow.
|
||||
//
|
||||
//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...)
|
||||
//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
|
||||
func By(text string, callbacks ...func()) {
|
||||
preamble := "\x1b[1mSTEP\x1b[0m"
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
preamble = "STEP"
|
||||
}
|
||||
fmt.Fprintln(GinkgoWriter, preamble+": "+text)
|
||||
if len(callbacks) == 1 {
|
||||
callbacks[0]()
|
||||
}
|
||||
if len(callbacks) > 1 {
|
||||
panic("just one callback per By, please")
|
||||
}
|
||||
}
|
||||
|
||||
//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
|
||||
//and accumulate metrics provided to the Benchmarker by the body function.
|
||||
//
|
||||
//The body function must have the signature:
|
||||
// func(b Benchmarker)
|
||||
func Measure(text string, body interface{}, samples int) bool {
|
||||
globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus individual Measures using FMeasure
|
||||
func FMeasure(text string, body interface{}, samples int) bool {
|
||||
globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Measurements as pending using PMeasure
|
||||
func PMeasure(text string, _ ...interface{}) bool {
|
||||
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Measurements as pending using XMeasure
|
||||
func XMeasure(text string, _ ...interface{}) bool {
|
||||
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
|
||||
//parallel node process will call BeforeSuite.
|
||||
//
|
||||
//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
|
||||
//
|
||||
//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
|
||||
func BeforeSuite(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
|
||||
//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
|
||||
//
|
||||
//When running in parallel, each parallel node process will call AfterSuite.
|
||||
//
|
||||
//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
|
||||
//
|
||||
//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
|
||||
func AfterSuite(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
|
||||
//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that
|
||||
//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait
|
||||
//until that node is done before running.
|
||||
//
|
||||
//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
|
||||
//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
|
||||
//to the second function (on all the other nodes).
|
||||
//
|
||||
//The functions have the following signatures. The first function (which only runs on node 1) has the signature:
|
||||
//
|
||||
// func() []byte
|
||||
//
|
||||
//or, to run asynchronously:
|
||||
//
|
||||
// func(done Done) []byte
|
||||
//
|
||||
//The byte array returned by the first function is then passed to the second function, which has the signature:
|
||||
//
|
||||
// func(data []byte)
|
||||
//
|
||||
//or, to run asynchronously:
|
||||
//
|
||||
// func(data []byte, done Done)
|
||||
//
|
||||
//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
|
||||
//
|
||||
// var dbClient db.Client
|
||||
// var dbRunner db.Runner
|
||||
//
|
||||
// var _ = SynchronizedBeforeSuite(func() []byte {
|
||||
// dbRunner = db.NewRunner()
|
||||
// err := dbRunner.Start()
|
||||
// Ω(err).ShouldNot(HaveOccurred())
|
||||
// return []byte(dbRunner.URL)
|
||||
// }, func(data []byte) {
|
||||
// dbClient = db.NewClient()
|
||||
// err := dbClient.Connect(string(data))
|
||||
// Ω(err).ShouldNot(HaveOccurred())
|
||||
// })
|
||||
func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
|
||||
globalSuite.SetSynchronizedBeforeSuiteNode(
|
||||
node1Body,
|
||||
allNodesBody,
|
||||
codelocation.New(1),
|
||||
parseTimeout(timeout...),
|
||||
)
|
||||
return true
|
||||
}
|
||||
|
||||
//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
|
||||
//external singleton resources shared across nodes when running tests in parallel.
|
||||
//
|
||||
//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1
|
||||
//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until
|
||||
//all other nodes are finished.
|
||||
//
|
||||
//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
|
||||
//
|
||||
//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database
|
||||
//only after all nodes have finished:
|
||||
//
|
||||
// var _ = SynchronizedAfterSuite(func() {
|
||||
// dbClient.Cleanup()
|
||||
// }, func() {
|
||||
// dbRunner.Stop()
|
||||
// })
|
||||
func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
|
||||
globalSuite.SetSynchronizedAfterSuiteNode(
|
||||
allNodesBody,
|
||||
node1Body,
|
||||
codelocation.New(1),
|
||||
parseTimeout(timeout...),
|
||||
)
|
||||
return true
|
||||
}
|
||||
|
||||
//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested
|
||||
//Describe and Context blocks the outermost BeforeEach blocks are run first.
|
||||
//
|
||||
//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func BeforeEach(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details,
|
||||
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
|
||||
//
|
||||
//Like It blocks, JustBeforeEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func JustBeforeEach(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
|
||||
//Describe and Context blocks the innermost AfterEach blocks are run first.
|
||||
//
|
||||
//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func AfterEach(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
func parseTimeout(timeout ...float64) time.Duration {
|
||||
if len(timeout) == 0 {
|
||||
return time.Duration(defaultTimeout * int64(time.Second))
|
||||
} else {
|
||||
return time.Duration(timeout[0] * float64(time.Second))
|
||||
}
|
||||
}
|
1
Godeps/_workspace/src/github.com/onsi/ginkgo/integration/integration.go
generated
vendored
@ -1 +0,0 @
package integration
32
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/codelocation/code_location.go
generated
vendored
@ -1,32 +0,0 @@
|
||||
package codelocation
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
func New(skip int) types.CodeLocation {
|
||||
_, file, line, _ := runtime.Caller(skip + 1)
|
||||
stackTrace := PruneStack(string(debug.Stack()), skip)
|
||||
return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
|
||||
}
|
||||
|
||||
func PruneStack(fullStackTrace string, skip int) string {
|
||||
stack := strings.Split(fullStackTrace, "\n")
|
||||
if len(stack) > 2*(skip+1) {
|
||||
stack = stack[2*(skip+1):]
|
||||
}
|
||||
prunedStack := []string{}
|
||||
re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
|
||||
for i := 0; i < len(stack)/2; i++ {
|
||||
if !re.Match([]byte(stack[i*2])) {
|
||||
prunedStack = append(prunedStack, stack[i*2])
|
||||
prunedStack = append(prunedStack, stack[i*2+1])
|
||||
}
|
||||
}
|
||||
return strings.Join(prunedStack, "\n")
|
||||
}
|
151
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/containernode/container_node.go
generated
vendored
@ -1,151 +0,0 @@
|
||||
package containernode
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sort"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type subjectOrContainerNode struct {
|
||||
containerNode *ContainerNode
|
||||
subjectNode leafnodes.SubjectNode
|
||||
}
|
||||
|
||||
func (n subjectOrContainerNode) text() string {
|
||||
if n.containerNode != nil {
|
||||
return n.containerNode.Text()
|
||||
} else {
|
||||
return n.subjectNode.Text()
|
||||
}
|
||||
}
|
||||
|
||||
type CollatedNodes struct {
|
||||
Containers []*ContainerNode
|
||||
Subject leafnodes.SubjectNode
|
||||
}
|
||||
|
||||
type ContainerNode struct {
|
||||
text string
|
||||
flag types.FlagType
|
||||
codeLocation types.CodeLocation
|
||||
|
||||
setupNodes []leafnodes.BasicNode
|
||||
subjectAndContainerNodes []subjectOrContainerNode
|
||||
}
|
||||
|
||||
func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
|
||||
return &ContainerNode{
|
||||
text: text,
|
||||
flag: flag,
|
||||
codeLocation: codeLocation,
|
||||
}
|
||||
}
|
||||
|
||||
func (container *ContainerNode) Shuffle(r *rand.Rand) {
|
||||
sort.Sort(container)
|
||||
permutation := r.Perm(len(container.subjectAndContainerNodes))
|
||||
shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
|
||||
for i, j := range permutation {
|
||||
shuffledNodes[i] = container.subjectAndContainerNodes[j]
|
||||
}
|
||||
container.subjectAndContainerNodes = shuffledNodes
|
||||
}
|
||||
|
||||
func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
|
||||
if node.flag == types.FlagTypePending {
|
||||
return false
|
||||
}
|
||||
|
||||
shouldUnfocus := false
|
||||
for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
|
||||
if subjectOrContainerNode.containerNode != nil {
|
||||
shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
|
||||
} else {
|
||||
shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
|
||||
}
|
||||
}
|
||||
|
||||
if shouldUnfocus {
|
||||
if node.flag == types.FlagTypeFocused {
|
||||
node.flag = types.FlagTypeNone
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return node.flag == types.FlagTypeFocused
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Collate() []CollatedNodes {
|
||||
return node.collate([]*ContainerNode{})
|
||||
}
|
||||
|
||||
func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
|
||||
collated := make([]CollatedNodes, 0)
|
||||
|
||||
containers := make([]*ContainerNode, len(enclosingContainers))
|
||||
copy(containers, enclosingContainers)
|
||||
containers = append(containers, node)
|
||||
|
||||
for _, subjectOrContainer := range node.subjectAndContainerNodes {
|
||||
if subjectOrContainer.containerNode != nil {
|
||||
collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
|
||||
} else {
|
||||
collated = append(collated, CollatedNodes{
|
||||
Containers: containers,
|
||||
Subject: subjectOrContainer.subjectNode,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return collated
|
||||
}
|
||||
|
||||
func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
|
||||
node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
|
||||
}
|
||||
|
||||
func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
|
||||
node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
|
||||
}
|
||||
|
||||
func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
|
||||
node.setupNodes = append(node.setupNodes, setupNode)
|
||||
}
|
||||
|
||||
func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
|
||||
nodes := []leafnodes.BasicNode{}
|
||||
for _, setupNode := range node.setupNodes {
|
||||
if setupNode.Type() == nodeType {
|
||||
nodes = append(nodes, setupNode)
|
||||
}
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Text() string {
|
||||
return node.text
|
||||
}
|
||||
|
||||
func (node *ContainerNode) CodeLocation() types.CodeLocation {
|
||||
return node.codeLocation
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Flag() types.FlagType {
|
||||
return node.flag
|
||||
}
|
||||
|
||||
//sort.Interface
|
||||
|
||||
func (node *ContainerNode) Len() int {
|
||||
return len(node.subjectAndContainerNodes)
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Less(i, j int) bool {
|
||||
return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Swap(i, j int) {
|
||||
node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
|
||||
}
|
92
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/failer/failer.go
generated
vendored
@ -1,92 +0,0 @@
|
||||
package failer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type Failer struct {
|
||||
lock *sync.Mutex
|
||||
failure types.SpecFailure
|
||||
state types.SpecState
|
||||
}
|
||||
|
||||
func New() *Failer {
|
||||
return &Failer{
|
||||
lock: &sync.Mutex{},
|
||||
state: types.SpecStatePassed,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.state == types.SpecStatePassed {
|
||||
f.state = types.SpecStatePanicked
|
||||
f.failure = types.SpecFailure{
|
||||
Message: "Test Panicked",
|
||||
Location: location,
|
||||
ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Failer) Timeout(location types.CodeLocation) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.state == types.SpecStatePassed {
|
||||
f.state = types.SpecStateTimedOut
|
||||
f.failure = types.SpecFailure{
|
||||
Message: "Timed out",
|
||||
Location: location,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Failer) Fail(message string, location types.CodeLocation) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.state == types.SpecStatePassed {
|
||||
f.state = types.SpecStateFailed
|
||||
f.failure = types.SpecFailure{
|
||||
Message: message,
|
||||
Location: location,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
failure := f.failure
|
||||
outcome := f.state
|
||||
if outcome != types.SpecStatePassed {
|
||||
failure.ComponentType = componentType
|
||||
failure.ComponentIndex = componentIndex
|
||||
failure.ComponentCodeLocation = componentCodeLocation
|
||||
}
|
||||
|
||||
f.state = types.SpecStatePassed
|
||||
f.failure = types.SpecFailure{}
|
||||
|
||||
return failure, outcome
|
||||
}
|
||||
|
||||
func (f *Failer) Skip(message string, location types.CodeLocation) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.state == types.SpecStatePassed {
|
||||
f.state = types.SpecStateSkipped
|
||||
f.failure = types.SpecFailure{
|
||||
Message: message,
|
||||
Location: location,
|
||||
}
|
||||
}
|
||||
}
|
95
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
generated
vendored
@ -1,95 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type benchmarker struct {
|
||||
mu sync.Mutex
|
||||
measurements map[string]*types.SpecMeasurement
|
||||
orderCounter int
|
||||
}
|
||||
|
||||
func newBenchmarker() *benchmarker {
|
||||
return &benchmarker{
|
||||
measurements: make(map[string]*types.SpecMeasurement, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
|
||||
t := time.Now()
|
||||
body()
|
||||
elapsedTime = time.Since(t)
|
||||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", info...)
|
||||
measurement.Results = append(measurement.Results, elapsedTime.Seconds())
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
|
||||
measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", info...)
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
measurement.Results = append(measurement.Results, value)
|
||||
}
|
||||
|
||||
func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, info ...interface{}) *types.SpecMeasurement {
|
||||
measurement, ok := b.measurements[name]
|
||||
if !ok {
|
||||
var computedInfo interface{}
|
||||
computedInfo = nil
|
||||
if len(info) > 0 {
|
||||
computedInfo = info[0]
|
||||
}
|
||||
measurement = &types.SpecMeasurement{
|
||||
Name: name,
|
||||
Info: computedInfo,
|
||||
Order: b.orderCounter,
|
||||
SmallestLabel: smallestLabel,
|
||||
LargestLabel: largestLabel,
|
||||
AverageLabel: averageLabel,
|
||||
Units: units,
|
||||
Results: make([]float64, 0),
|
||||
}
|
||||
b.measurements[name] = measurement
|
||||
b.orderCounter++
|
||||
}
|
||||
|
||||
return measurement
|
||||
}
|
||||
|
||||
func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
for _, measurement := range b.measurements {
|
||||
measurement.Smallest = math.MaxFloat64
|
||||
measurement.Largest = -math.MaxFloat64
|
||||
sum := float64(0)
|
||||
sumOfSquares := float64(0)
|
||||
|
||||
for _, result := range measurement.Results {
|
||||
if result > measurement.Largest {
|
||||
measurement.Largest = result
|
||||
}
|
||||
if result < measurement.Smallest {
|
||||
measurement.Smallest = result
|
||||
}
|
||||
sum += result
|
||||
sumOfSquares += result * result
|
||||
}
|
||||
|
||||
n := float64(len(measurement.Results))
|
||||
measurement.Average = sum / n
|
||||
measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
|
||||
}
|
||||
|
||||
return b.measurements
|
||||
}
|
19
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
generated
vendored
@ -1,19 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type BasicNode interface {
|
||||
Type() types.SpecComponentType
|
||||
Run() (types.SpecState, types.SpecFailure)
|
||||
CodeLocation() types.CodeLocation
|
||||
}
|
||||
|
||||
type SubjectNode interface {
|
||||
BasicNode
|
||||
|
||||
Text() string
|
||||
Flag() types.FlagType
|
||||
Samples() int
|
||||
}
|
46
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
generated
vendored
@ -1,46 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ItNode struct {
|
||||
runner *runner
|
||||
|
||||
flag types.FlagType
|
||||
text string
|
||||
}
|
||||
|
||||
func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
|
||||
return &ItNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
|
||||
flag: flag,
|
||||
text: text,
|
||||
}
|
||||
}
|
||||
|
||||
func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
return node.runner.run()
|
||||
}
|
||||
|
||||
func (node *ItNode) Type() types.SpecComponentType {
|
||||
return types.SpecComponentTypeIt
|
||||
}
|
||||
|
||||
func (node *ItNode) Text() string {
|
||||
return node.text
|
||||
}
|
||||
|
||||
func (node *ItNode) Flag() types.FlagType {
|
||||
return node.flag
|
||||
}
|
||||
|
||||
func (node *ItNode) CodeLocation() types.CodeLocation {
|
||||
return node.runner.codeLocation
|
||||
}
|
||||
|
||||
func (node *ItNode) Samples() int {
|
||||
return 1
|
||||
}
|
61
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
generated
vendored
@ -1,61 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type MeasureNode struct {
|
||||
runner *runner
|
||||
|
||||
text string
|
||||
flag types.FlagType
|
||||
samples int
|
||||
benchmarker *benchmarker
|
||||
}
|
||||
|
||||
func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
|
||||
benchmarker := newBenchmarker()
|
||||
|
||||
wrappedBody := func() {
|
||||
reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
|
||||
}
|
||||
|
||||
return &MeasureNode{
|
||||
runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),
|
||||
|
||||
text: text,
|
||||
flag: flag,
|
||||
samples: samples,
|
||||
benchmarker: benchmarker,
|
||||
}
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
return node.runner.run()
|
||||
}
|
||||
|
||||
func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
|
||||
return node.benchmarker.measurementsReport()
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Type() types.SpecComponentType {
|
||||
return types.SpecComponentTypeMeasure
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Text() string {
|
||||
return node.text
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Flag() types.FlagType {
|
||||
return node.flag
|
||||
}
|
||||
|
||||
func (node *MeasureNode) CodeLocation() types.CodeLocation {
|
||||
return node.runner.codeLocation
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Samples() int {
|
||||
return node.samples
|
||||
}
|
113
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/runner.go
generated
vendored
@ -1,113 +0,0 @@
package leafnodes

import (
	"fmt"
	"github.com/onsi/ginkgo/internal/codelocation"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
	"reflect"
	"time"
)

type runner struct {
	isAsync          bool
	asyncFunc        func(chan<- interface{})
	syncFunc         func()
	codeLocation     types.CodeLocation
	timeoutThreshold time.Duration
	nodeType         types.SpecComponentType
	componentIndex   int
	failer           *failer.Failer
}

func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
	bodyType := reflect.TypeOf(body)
	if bodyType.Kind() != reflect.Func {
		panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
	}

	runner := &runner{
		codeLocation:     codeLocation,
		timeoutThreshold: timeout,
		failer:           failer,
		nodeType:         nodeType,
		componentIndex:   componentIndex,
	}

	switch bodyType.NumIn() {
	case 0:
		runner.syncFunc = body.(func())
		return runner
	case 1:
		if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
			panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
		}

		wrappedBody := func(done chan<- interface{}) {
			bodyValue := reflect.ValueOf(body)
			bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
		}

		runner.isAsync = true
		runner.asyncFunc = wrappedBody
		return runner
	}

	panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
}

func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
	if r.isAsync {
		return r.runAsync()
	} else {
		return r.runSync()
	}
}

func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
	done := make(chan interface{}, 1)

	go func() {
		finished := false

		defer func() {
			if e := recover(); e != nil || !finished {
				r.failer.Panic(codelocation.New(2), e)
				select {
				case <-done:
					break
				default:
					close(done)
				}
			}
		}()

		r.asyncFunc(done)
		finished = true
	}()

	select {
	case <-done:
	case <-time.After(r.timeoutThreshold):
		r.failer.Timeout(r.codeLocation)
	}

	failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
	return
}

func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
	finished := false

	defer func() {
		if e := recover(); e != nil || !finished {
			r.failer.Panic(codelocation.New(2), e)
		}

		failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
	}()

	r.syncFunc()
	finished = true

	return
}
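The interesting part of runner.go is how newRunner decides between a synchronous body and an asynchronous one that receives a Done channel, purely by inspecting the function's signature with reflect. The following is a small, self-contained sketch of that dispatch pattern; the names (wrap, the printed messages, the one-second timeout) are made up for illustration and are not part of Ginkgo:

package main

import (
	"fmt"
	"reflect"
	"time"
)

// wrap inspects a test body's signature the same way newRunner does:
// zero arguments means run it synchronously; a single channel argument means
// run it in a goroutine and wait for the channel (or a timeout).
func wrap(body interface{}, timeout time.Duration) func() {
	t := reflect.TypeOf(body)
	if t.Kind() != reflect.Func {
		panic("expected a function")
	}

	switch t.NumIn() {
	case 0:
		return body.(func())
	case 1:
		return func() {
			done := make(chan interface{}, 1)
			go reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(done)})
			select {
			case <-done:
			case <-time.After(timeout):
				fmt.Println("timed out")
			}
		}
	}
	panic("too many arguments")
}

func main() {
	wrap(func() { fmt.Println("sync body") }, time.Second)()
	wrap(func(done chan<- interface{}) {
		fmt.Println("async body")
		close(done)
	}, time.Second)()
}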
41
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
generated
vendored
@ -1,41 +0,0 @@
package leafnodes

import (
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
	"time"
)

type SetupNode struct {
	runner *runner
}

func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
	return node.runner.run()
}

func (node *SetupNode) Type() types.SpecComponentType {
	return node.runner.nodeType
}

func (node *SetupNode) CodeLocation() types.CodeLocation {
	return node.runner.codeLocation
}

func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
	}
}

func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
	}
}

func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
	return &SetupNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
	}
}
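These three constructors are what back the BeforeEach, JustBeforeEach, and AfterEach blocks of Ginkgo's public DSL. A hedged usage sketch, assuming the standard v1 DSL that this vendored tree ships with:

package mypackage_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("a stack", func() {
	var s []int

	BeforeEach(func() { s = []int{} })          // becomes a SpecComponentTypeBeforeEach SetupNode
	JustBeforeEach(func() { s = append(s, 1) }) // runs after every BeforeEach in the container chain
	AfterEach(func() { s = nil })               // unwinds after the It completes

	It("sees the setup from the surrounding containers", func() {
		Expect(s).To(Equal([]int{1}))
	})
})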
54
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
generated
vendored
@ -1,54 +0,0 @@
package leafnodes

import (
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/types"
	"time"
)

type SuiteNode interface {
	Run(parallelNode int, parallelTotal int, syncHost string) bool
	Passed() bool
	Summary() *types.SetupSummary
}

type simpleSuiteNode struct {
	runner  *runner
	outcome types.SpecState
	failure types.SpecFailure
	runTime time.Duration
}

func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
	t := time.Now()
	node.outcome, node.failure = node.runner.run()
	node.runTime = time.Since(t)

	return node.outcome == types.SpecStatePassed
}

func (node *simpleSuiteNode) Passed() bool {
	return node.outcome == types.SpecStatePassed
}

func (node *simpleSuiteNode) Summary() *types.SetupSummary {
	return &types.SetupSummary{
		ComponentType: node.runner.nodeType,
		CodeLocation:  node.runner.codeLocation,
		State:         node.outcome,
		RunTime:       node.runTime,
		Failure:       node.failure,
	}
}

func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	return &simpleSuiteNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
	}
}

func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
	return &simpleSuiteNode{
		runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
	}
}
@ -1,89 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
type synchronizedAfterSuiteNode struct {
|
||||
runnerA *runner
|
||||
runnerB *runner
|
||||
|
||||
outcome types.SpecState
|
||||
failure types.SpecFailure
|
||||
runTime time.Duration
|
||||
}
|
||||
|
||||
func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||
return &synchronizedAfterSuiteNode{
|
||||
runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||
runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||
node.outcome, node.failure = node.runnerA.run()
|
||||
|
||||
if parallelNode == 1 {
|
||||
if parallelTotal > 1 {
|
||||
node.waitUntilOtherNodesAreDone(syncHost)
|
||||
}
|
||||
|
||||
outcome, failure := node.runnerB.run()
|
||||
|
||||
if node.outcome == types.SpecStatePassed {
|
||||
node.outcome, node.failure = outcome, failure
|
||||
}
|
||||
}
|
||||
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) Passed() bool {
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
|
||||
return &types.SetupSummary{
|
||||
ComponentType: node.runnerA.nodeType,
|
||||
CodeLocation: node.runnerA.codeLocation,
|
||||
State: node.outcome,
|
||||
RunTime: node.runTime,
|
||||
Failure: node.failure,
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
|
||||
for {
|
||||
if node.canRun(syncHost) {
|
||||
return
|
||||
}
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
|
||||
resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
return false
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
afterSuiteData := types.RemoteAfterSuiteData{}
|
||||
err = json.Unmarshal(body, &afterSuiteData)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return afterSuiteData.CanRun
|
||||
}
|
@ -1,182 +0,0 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
type synchronizedBeforeSuiteNode struct {
|
||||
runnerA *runner
|
||||
runnerB *runner
|
||||
|
||||
data []byte
|
||||
|
||||
outcome types.SpecState
|
||||
failure types.SpecFailure
|
||||
runTime time.Duration
|
||||
}
|
||||
|
||||
func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||
node := &synchronizedBeforeSuiteNode{}
|
||||
|
||||
node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
|
||||
node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
|
||||
|
||||
return node
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||
t := time.Now()
|
||||
defer func() {
|
||||
node.runTime = time.Since(t)
|
||||
}()
|
||||
|
||||
if parallelNode == 1 {
|
||||
node.outcome, node.failure = node.runA(parallelTotal, syncHost)
|
||||
} else {
|
||||
node.outcome, node.failure = node.waitForA(syncHost)
|
||||
}
|
||||
|
||||
if node.outcome != types.SpecStatePassed {
|
||||
return false
|
||||
}
|
||||
node.outcome, node.failure = node.runnerB.run()
|
||||
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
|
||||
outcome, failure := node.runnerA.run()
|
||||
|
||||
if parallelTotal > 1 {
|
||||
state := types.RemoteBeforeSuiteStatePassed
|
||||
if outcome != types.SpecStatePassed {
|
||||
state = types.RemoteBeforeSuiteStateFailed
|
||||
}
|
||||
json := (types.RemoteBeforeSuiteData{
|
||||
Data: node.data,
|
||||
State: state,
|
||||
}).ToJSON()
|
||||
http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
|
||||
}
|
||||
|
||||
return outcome, failure
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
|
||||
failure := func(message string) types.SpecFailure {
|
||||
return types.SpecFailure{
|
||||
Message: message,
|
||||
Location: node.runnerA.codeLocation,
|
||||
ComponentType: node.runnerA.nodeType,
|
||||
ComponentIndex: node.runnerA.componentIndex,
|
||||
ComponentCodeLocation: node.runnerA.codeLocation,
|
||||
}
|
||||
}
|
||||
for {
|
||||
resp, err := http.Get(syncHost + "/BeforeSuiteState")
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
beforeSuiteData := types.RemoteBeforeSuiteData{}
|
||||
err = json.Unmarshal(body, &beforeSuiteData)
|
||||
if err != nil {
|
||||
return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
|
||||
}
|
||||
|
||||
switch beforeSuiteData.State {
|
||||
case types.RemoteBeforeSuiteStatePassed:
|
||||
node.data = beforeSuiteData.Data
|
||||
return types.SpecStatePassed, types.SpecFailure{}
|
||||
case types.RemoteBeforeSuiteStateFailed:
|
||||
return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
|
||||
case types.RemoteBeforeSuiteStateDisappeared:
|
||||
return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
|
||||
}
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
|
||||
return types.SpecStateFailed, failure("Shouldn't get here!")
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) Passed() bool {
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
|
||||
return &types.SetupSummary{
|
||||
ComponentType: node.runnerA.nodeType,
|
||||
CodeLocation: node.runnerA.codeLocation,
|
||||
State: node.outcome,
|
||||
RunTime: node.runTime,
|
||||
Failure: node.failure,
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
|
||||
typeA := reflect.TypeOf(bodyA)
|
||||
if typeA.Kind() != reflect.Func {
|
||||
panic("SynchronizedBeforeSuite expects a function as its first argument")
|
||||
}
|
||||
|
||||
takesNothing := typeA.NumIn() == 0
|
||||
takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
|
||||
returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8
|
||||
|
||||
if !((takesNothing || takesADoneChannel) && returnsBytes) {
|
||||
panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
|
||||
}
|
||||
|
||||
if takesADoneChannel {
|
||||
return func(done chan<- interface{}) {
|
||||
out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
|
||||
node.data = out[0].Interface().([]byte)
|
||||
}
|
||||
}
|
||||
|
||||
return func() {
|
||||
out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
|
||||
node.data = out[0].Interface().([]byte)
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
|
||||
typeB := reflect.TypeOf(bodyB)
|
||||
if typeB.Kind() != reflect.Func {
|
||||
panic("SynchronizedBeforeSuite expects a function as its second argument")
|
||||
}
|
||||
|
||||
returnsNothing := typeB.NumOut() == 0
|
||||
takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
|
||||
takesBytesAndDone := typeB.NumIn() == 2 &&
|
||||
typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
|
||||
typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface
|
||||
|
||||
if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
|
||||
panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
|
||||
}
|
||||
|
||||
if takesBytesAndDone {
|
||||
return func(done chan<- interface{}) {
|
||||
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
|
||||
}
|
||||
}
|
||||
|
||||
return func() {
|
||||
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
|
||||
}
|
||||
}
|
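The wrapA/wrapB plumbing above is what makes Ginkgo's SynchronizedBeforeSuite work under -nodes=N: node 1 runs the first function, its []byte result is POSTed to the sync server, and every node then runs the second function with that data. A hedged usage sketch, assuming the standard v1 DSL; the connection string is a hypothetical placeholder:

package mypackage_test

import (
	. "github.com/onsi/ginkgo"
)

var dbConnString string

var _ = SynchronizedBeforeSuite(func() []byte {
	// Runs on parallel node 1 only; the returned bytes are shipped to the other nodes.
	return []byte("postgres://localhost:5432/test") // hypothetical
}, func(data []byte) {
	// Runs on every node once node 1 has finished (or is reported failed/disappeared).
	dbConnString = string(data)
})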
250
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/aggregator.go
generated
vendored
@ -1,250 +0,0 @@
|
||||
/*
|
||||
|
||||
Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
|
||||
coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:
|
||||
|
||||
ginkgo -nodes=N
|
||||
|
||||
where N is the number of nodes you desire.
|
||||
*/
|
||||
package remote
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type configAndSuite struct {
|
||||
config config.GinkgoConfigType
|
||||
summary *types.SuiteSummary
|
||||
}
|
||||
|
||||
type Aggregator struct {
|
||||
nodeCount int
|
||||
config config.DefaultReporterConfigType
|
||||
stenographer stenographer.Stenographer
|
||||
result chan bool
|
||||
|
||||
suiteBeginnings chan configAndSuite
|
||||
aggregatedSuiteBeginnings []configAndSuite
|
||||
|
||||
beforeSuites chan *types.SetupSummary
|
||||
aggregatedBeforeSuites []*types.SetupSummary
|
||||
|
||||
afterSuites chan *types.SetupSummary
|
||||
aggregatedAfterSuites []*types.SetupSummary
|
||||
|
||||
specCompletions chan *types.SpecSummary
|
||||
completedSpecs []*types.SpecSummary
|
||||
|
||||
suiteEndings chan *types.SuiteSummary
|
||||
aggregatedSuiteEndings []*types.SuiteSummary
|
||||
specs []*types.SpecSummary
|
||||
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
|
||||
aggregator := &Aggregator{
|
||||
nodeCount: nodeCount,
|
||||
result: result,
|
||||
config: config,
|
||||
stenographer: stenographer,
|
||||
|
||||
suiteBeginnings: make(chan configAndSuite, 0),
|
||||
beforeSuites: make(chan *types.SetupSummary, 0),
|
||||
afterSuites: make(chan *types.SetupSummary, 0),
|
||||
specCompletions: make(chan *types.SpecSummary, 0),
|
||||
suiteEndings: make(chan *types.SuiteSummary, 0),
|
||||
}
|
||||
|
||||
go aggregator.mux()
|
||||
|
||||
return aggregator
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
aggregator.suiteBeginnings <- configAndSuite{config, summary}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
aggregator.beforeSuites <- setupSummary
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
aggregator.afterSuites <- setupSummary
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
//noop
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
aggregator.specCompletions <- specSummary
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
aggregator.suiteEndings <- summary
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) mux() {
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case configAndSuite := <-aggregator.suiteBeginnings:
|
||||
aggregator.registerSuiteBeginning(configAndSuite)
|
||||
case setupSummary := <-aggregator.beforeSuites:
|
||||
aggregator.registerBeforeSuite(setupSummary)
|
||||
case setupSummary := <-aggregator.afterSuites:
|
||||
aggregator.registerAfterSuite(setupSummary)
|
||||
case specSummary := <-aggregator.specCompletions:
|
||||
aggregator.registerSpecCompletion(specSummary)
|
||||
case suite := <-aggregator.suiteEndings:
|
||||
finished, passed := aggregator.registerSuiteEnding(suite)
|
||||
if finished {
|
||||
aggregator.result <- passed
|
||||
break loop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
|
||||
aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
|
||||
|
||||
if len(aggregator.aggregatedSuiteBeginnings) == 1 {
|
||||
aggregator.startTime = time.Now()
|
||||
}
|
||||
|
||||
if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
|
||||
return
|
||||
}
|
||||
|
||||
aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
|
||||
|
||||
numberOfSpecsToRun := 0
|
||||
totalNumberOfSpecs := 0
|
||||
for _, configAndSuite := range aggregator.aggregatedSuiteBeginnings {
|
||||
numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun
|
||||
totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs
|
||||
}
|
||||
|
||||
aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct)
|
||||
aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
|
||||
aggregator.flushCompletedSpecs()
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
|
||||
aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
|
||||
aggregator.flushCompletedSpecs()
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
|
||||
aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
|
||||
aggregator.flushCompletedSpecs()
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
|
||||
aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
|
||||
aggregator.specs = append(aggregator.specs, specSummary)
|
||||
aggregator.flushCompletedSpecs()
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) flushCompletedSpecs() {
|
||||
if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
|
||||
return
|
||||
}
|
||||
|
||||
for _, setupSummary := range aggregator.aggregatedBeforeSuites {
|
||||
aggregator.announceBeforeSuite(setupSummary)
|
||||
}
|
||||
|
||||
for _, specSummary := range aggregator.completedSpecs {
|
||||
aggregator.announceSpec(specSummary)
|
||||
}
|
||||
|
||||
for _, setupSummary := range aggregator.aggregatedAfterSuites {
|
||||
aggregator.announceAfterSuite(setupSummary)
|
||||
}
|
||||
|
||||
aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
|
||||
aggregator.completedSpecs = []*types.SpecSummary{}
|
||||
aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
|
||||
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
|
||||
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
|
||||
if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
|
||||
aggregator.stenographer.AnnounceSpecWillRun(specSummary)
|
||||
}
|
||||
|
||||
aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
|
||||
|
||||
switch specSummary.State {
|
||||
case types.SpecStatePassed:
|
||||
if specSummary.IsMeasurement {
|
||||
aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
|
||||
} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
|
||||
aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
|
||||
} else {
|
||||
aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
|
||||
}
|
||||
|
||||
case types.SpecStatePending:
|
||||
aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
|
||||
case types.SpecStateSkipped:
|
||||
aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
case types.SpecStateTimedOut:
|
||||
aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
case types.SpecStatePanicked:
|
||||
aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
case types.SpecStateFailed:
|
||||
aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
|
||||
aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
|
||||
if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
|
||||
return false, false
|
||||
}
|
||||
|
||||
aggregatedSuiteSummary := &types.SuiteSummary{}
|
||||
aggregatedSuiteSummary.SuiteSucceeded = true
|
||||
|
||||
for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
|
||||
if suiteSummary.SuiteSucceeded == false {
|
||||
aggregatedSuiteSummary.SuiteSucceeded = false
|
||||
}
|
||||
|
||||
aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
|
||||
aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
|
||||
aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
|
||||
aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
|
||||
aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
|
||||
aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
|
||||
}
|
||||
|
||||
aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
|
||||
|
||||
aggregator.stenographer.SummarizeFailures(aggregator.specs)
|
||||
aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
|
||||
|
||||
return true, aggregatedSuiteSummary.SuiteSucceeded
|
||||
}
|
90
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
generated
vendored
@ -1,90 +0,0 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
//An interface to net/http's client to allow the injection of fakes under test
|
||||
type Poster interface {
|
||||
Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
|
||||
}
|
||||
|
||||
/*
|
||||
The ForwardingReporter is a Ginkgo reporter that forwards information to
|
||||
a Ginkgo remote server.
|
||||
|
||||
When streaming parallel test output, this repoter is automatically installed by Ginkgo.
|
||||
|
||||
This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner
|
||||
detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
|
||||
in place of Ginkgo's DefaultReporter.
|
||||
*/
|
||||
|
||||
type ForwardingReporter struct {
|
||||
serverHost string
|
||||
poster Poster
|
||||
outputInterceptor OutputInterceptor
|
||||
}
|
||||
|
||||
func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter {
|
||||
return &ForwardingReporter{
|
||||
serverHost: serverHost,
|
||||
poster: poster,
|
||||
outputInterceptor: outputInterceptor,
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) post(path string, data interface{}) {
|
||||
encoded, _ := json.Marshal(data)
|
||||
buffer := bytes.NewBuffer(encoded)
|
||||
reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
data := struct {
|
||||
Config config.GinkgoConfigType `json:"config"`
|
||||
Summary *types.SuiteSummary `json:"suite-summary"`
|
||||
}{
|
||||
conf,
|
||||
summary,
|
||||
}
|
||||
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
reporter.post("/SpecSuiteWillBegin", data)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
setupSummary.CapturedOutput = output
|
||||
reporter.post("/BeforeSuiteDidRun", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
reporter.post("/SpecWillRun", specSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
specSummary.CapturedOutput = output
|
||||
reporter.post("/SpecDidComplete", specSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
setupSummary.CapturedOutput = output
|
||||
reporter.post("/AfterSuiteDidRun", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.post("/SpecSuiteDidEnd", summary)
|
||||
}
|
10
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
generated
vendored
@ -1,10 +0,0 @@
package remote

/*
The OutputInterceptor is used by the ForwardingReporter to
intercept and capture all stdout and stderr output during a test run.
*/
type OutputInterceptor interface {
	StartInterceptingOutput() error
	StopInterceptingAndReturnOutput() (string, error)
}
52
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
generated
vendored
@ -1,52 +0,0 @@
// +build freebsd openbsd netbsd dragonfly darwin linux

package remote

import (
	"errors"
	"io/ioutil"
	"os"
	"syscall"
)

func NewOutputInterceptor() OutputInterceptor {
	return &outputInterceptor{}
}

type outputInterceptor struct {
	redirectFile *os.File
	intercepting bool
}

func (interceptor *outputInterceptor) StartInterceptingOutput() error {
	if interceptor.intercepting {
		return errors.New("Already intercepting output!")
	}
	interceptor.intercepting = true

	var err error

	interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
	if err != nil {
		return err
	}

	syscall.Dup2(int(interceptor.redirectFile.Fd()), 1)
	syscall.Dup2(int(interceptor.redirectFile.Fd()), 2)

	return nil
}

func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
	if !interceptor.intercepting {
		return "", errors.New("Not intercepting output!")
	}

	interceptor.redirectFile.Close()
	output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
	os.Remove(interceptor.redirectFile.Name())

	interceptor.intercepting = false

	return string(output), err
}
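Because the redirection happens at the file-descriptor level (Dup2 on fds 1 and 2), anything the process writes to stdout or stderr is captured, not only writes that go through a Go io.Writer. A minimal standalone sketch of the same trick, Unix only and with error handling omitted; the file names and messages are illustrative, not Ginkgo's:

// +build linux darwin

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"syscall"
)

func main() {
	tmp, _ := ioutil.TempFile("", "capture")
	savedStdout, _ := syscall.Dup(1) // keep a copy of the real stdout

	syscall.Dup2(int(tmp.Fd()), 1) // fd 1 now points at the temp file
	fmt.Println("this goes to the temp file")

	syscall.Dup2(savedStdout, 1) // restore the real stdout
	tmp.Close()

	captured, _ := ioutil.ReadFile(tmp.Name())
	os.Remove(tmp.Name())
	fmt.Printf("captured: %q\n", string(captured))
}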
33
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
generated
vendored
@ -1,33 +0,0 @@
// +build windows

package remote

import (
	"errors"
)

func NewOutputInterceptor() OutputInterceptor {
	return &outputInterceptor{}
}

type outputInterceptor struct {
	intercepting bool
}

func (interceptor *outputInterceptor) StartInterceptingOutput() error {
	if interceptor.intercepting {
		return errors.New("Already intercepting output!")
	}
	interceptor.intercepting = true

	// not working on windows...

	return nil
}

func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
	// not working on windows...
	interceptor.intercepting = false

	return "", nil
}
204
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/server.go
generated
vendored
@ -1,204 +0,0 @@
|
||||
/*
|
||||
|
||||
The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
|
||||
This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser).
|
||||
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
)
|
||||
|
||||
/*
|
||||
Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
|
||||
It then forwards that communication to attached reporters.
|
||||
*/
|
||||
type Server struct {
|
||||
listener net.Listener
|
||||
reporters []reporters.Reporter
|
||||
alives []func() bool
|
||||
lock *sync.Mutex
|
||||
beforeSuiteData types.RemoteBeforeSuiteData
|
||||
parallelTotal int
|
||||
}
|
||||
|
||||
//Create a new server, automatically selecting a port
|
||||
func NewServer(parallelTotal int) (*Server, error) {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Server{
|
||||
listener: listener,
|
||||
lock: &sync.Mutex{},
|
||||
alives: make([]func() bool, parallelTotal),
|
||||
beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending},
|
||||
parallelTotal: parallelTotal,
|
||||
}, nil
|
||||
}
|
||||
|
||||
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
|
||||
func (server *Server) Start() {
|
||||
httpServer := &http.Server{}
|
||||
mux := http.NewServeMux()
|
||||
httpServer.Handler = mux
|
||||
|
||||
//streaming endpoints
|
||||
mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
|
||||
mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
|
||||
mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
|
||||
mux.HandleFunc("/SpecWillRun", server.specWillRun)
|
||||
mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
|
||||
mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
|
||||
|
||||
//synchronization endpoints
|
||||
mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
|
||||
mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
|
||||
|
||||
go httpServer.Serve(server.listener)
|
||||
}
|
||||
|
||||
//Stop the server
|
||||
func (server *Server) Close() {
|
||||
server.listener.Close()
|
||||
}
|
||||
|
||||
//The address the server can be reached it. Pass this into the `ForwardingReporter`.
|
||||
func (server *Server) Address() string {
|
||||
return "http://" + server.listener.Addr().String()
|
||||
}
|
||||
|
||||
//
|
||||
// Streaming Endpoints
|
||||
//
|
||||
|
||||
//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
|
||||
func (server *Server) readAll(request *http.Request) []byte {
|
||||
defer request.Body.Close()
|
||||
body, _ := ioutil.ReadAll(request.Body)
|
||||
return body
|
||||
}
|
||||
|
||||
func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
|
||||
server.reporters = reporters
|
||||
}
|
||||
|
||||
func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
|
||||
var data struct {
|
||||
Config config.GinkgoConfigType `json:"config"`
|
||||
Summary *types.SuiteSummary `json:"suite-summary"`
|
||||
}
|
||||
|
||||
json.Unmarshal(body, &data)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecSuiteWillBegin(data.Config, data.Summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var setupSummary *types.SetupSummary
|
||||
json.Unmarshal(body, &setupSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.BeforeSuiteDidRun(setupSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var setupSummary *types.SetupSummary
|
||||
json.Unmarshal(body, &setupSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.AfterSuiteDidRun(setupSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var specSummary *types.SpecSummary
|
||||
json.Unmarshal(body, &specSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecWillRun(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var specSummary *types.SpecSummary
|
||||
json.Unmarshal(body, &specSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecDidComplete(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var suiteSummary *types.SuiteSummary
|
||||
json.Unmarshal(body, &suiteSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecSuiteDidEnd(suiteSummary)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Synchronization Endpoints
|
||||
//
|
||||
|
||||
func (server *Server) RegisterAlive(node int, alive func() bool) {
|
||||
server.lock.Lock()
|
||||
defer server.lock.Unlock()
|
||||
server.alives[node-1] = alive
|
||||
}
|
||||
|
||||
func (server *Server) nodeIsAlive(node int) bool {
|
||||
server.lock.Lock()
|
||||
defer server.lock.Unlock()
|
||||
alive := server.alives[node-1]
|
||||
if alive == nil {
|
||||
return true
|
||||
}
|
||||
return alive()
|
||||
}
|
||||
|
||||
func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
|
||||
if request.Method == "POST" {
|
||||
dec := json.NewDecoder(request.Body)
|
||||
dec.Decode(&(server.beforeSuiteData))
|
||||
} else {
|
||||
beforeSuiteData := server.beforeSuiteData
|
||||
if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
|
||||
beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
|
||||
}
|
||||
enc := json.NewEncoder(writer)
|
||||
enc.Encode(beforeSuiteData)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
|
||||
afterSuiteData := types.RemoteAfterSuiteData{
|
||||
CanRun: true,
|
||||
}
|
||||
for i := 2; i <= server.parallelTotal; i++ {
|
||||
afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
|
||||
}
|
||||
|
||||
enc := json.NewEncoder(writer)
|
||||
enc.Encode(afterSuiteData)
|
||||
}
|
55
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/index_computer.go
generated
vendored
@ -1,55 +0,0 @@
package spec

func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
	if length == 0 {
		return 0, 0
	}

	// We have more nodes than tests. Trivial case.
	if parallelTotal >= length {
		if parallelNode > length {
			return 0, 0
		} else {
			return parallelNode - 1, 1
		}
	}

	// This is the minimum amount of tests that a node will be required to run
	minTestsPerNode := length / parallelTotal

	// This is the maximum amount of tests that a node will be required to run
	// The algorithm guarantees that this would be equal to at least the minimum amount
	// and at most one more
	maxTestsPerNode := minTestsPerNode
	if length%parallelTotal != 0 {
		maxTestsPerNode++
	}

	// Number of nodes that will have to run the maximum amount of tests per node
	numMaxLoadNodes := length % parallelTotal

	// Number of nodes that precede the current node and will have to run the maximum amount of tests per node
	var numPrecedingMaxLoadNodes int
	if parallelNode > numMaxLoadNodes {
		numPrecedingMaxLoadNodes = numMaxLoadNodes
	} else {
		numPrecedingMaxLoadNodes = parallelNode - 1
	}

	// Number of nodes that precede the current node and will have to run the minimum amount of tests per node
	var numPrecedingMinLoadNodes int
	if parallelNode <= numMaxLoadNodes {
		numPrecedingMinLoadNodes = 0
	} else {
		numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
	}

	// Evaluate the test start index and number of tests to run
	startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
	if parallelNode > numMaxLoadNodes {
		count = minTestsPerNode
	} else {
		count = maxTestsPerNode
	}
	return
}
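Worked through by hand for 10 specs split across 3 parallel nodes: 10/3 gives a minimum of 3 specs per node, 10%3 = 1 node carries one extra, so node 1 runs specs [0,4) while nodes 2 and 3 run [4,7) and [7,10). The small standalone check below mirrors that arithmetic (the function itself lives in Ginkgo's internal/spec package, so this re-states the formula rather than importing it; the trivial edge cases are omitted):

package main

import "fmt"

// parallelizedIndexRange mirrors the partitioning logic above so the split
// can be printed without importing Ginkgo's internal package.
func parallelizedIndexRange(length, parallelTotal, parallelNode int) (int, int) {
	minPerNode := length / parallelTotal
	maxPerNode := minPerNode
	if length%parallelTotal != 0 {
		maxPerNode++
	}
	numMaxLoadNodes := length % parallelTotal

	precedingMax := numMaxLoadNodes
	if parallelNode <= numMaxLoadNodes {
		precedingMax = parallelNode - 1
	}
	precedingMin := 0
	if parallelNode > numMaxLoadNodes {
		precedingMin = parallelNode - numMaxLoadNodes - 1
	}

	start := precedingMax*maxPerNode + precedingMin*minPerNode
	if parallelNode > numMaxLoadNodes {
		return start, minPerNode
	}
	return start, maxPerNode
}

func main() {
	for node := 1; node <= 3; node++ {
		start, count := parallelizedIndexRange(10, 3, node)
		fmt.Printf("node %d runs specs [%d, %d)\n", node, start, start+count)
	}
}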
197
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/spec.go
generated
vendored
@ -1,197 +0,0 @@
|
||||
package spec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/containernode"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type Spec struct {
|
||||
subject leafnodes.SubjectNode
|
||||
focused bool
|
||||
announceProgress bool
|
||||
|
||||
containers []*containernode.ContainerNode
|
||||
|
||||
state types.SpecState
|
||||
runTime time.Duration
|
||||
failure types.SpecFailure
|
||||
}
|
||||
|
||||
func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
|
||||
spec := &Spec{
|
||||
subject: subject,
|
||||
containers: containers,
|
||||
focused: subject.Flag() == types.FlagTypeFocused,
|
||||
announceProgress: announceProgress,
|
||||
}
|
||||
|
||||
spec.processFlag(subject.Flag())
|
||||
for i := len(containers) - 1; i >= 0; i-- {
|
||||
spec.processFlag(containers[i].Flag())
|
||||
}
|
||||
|
||||
return spec
|
||||
}
|
||||
|
||||
func (spec *Spec) processFlag(flag types.FlagType) {
|
||||
if flag == types.FlagTypeFocused {
|
||||
spec.focused = true
|
||||
} else if flag == types.FlagTypePending {
|
||||
spec.state = types.SpecStatePending
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) Skip() {
|
||||
spec.state = types.SpecStateSkipped
|
||||
}
|
||||
|
||||
func (spec *Spec) Failed() bool {
|
||||
return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut
|
||||
}
|
||||
|
||||
func (spec *Spec) Passed() bool {
|
||||
return spec.state == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (spec *Spec) Pending() bool {
|
||||
return spec.state == types.SpecStatePending
|
||||
}
|
||||
|
||||
func (spec *Spec) Skipped() bool {
|
||||
return spec.state == types.SpecStateSkipped
|
||||
}
|
||||
|
||||
func (spec *Spec) Focused() bool {
|
||||
return spec.focused
|
||||
}
|
||||
|
||||
func (spec *Spec) IsMeasurement() bool {
|
||||
return spec.subject.Type() == types.SpecComponentTypeMeasure
|
||||
}
|
||||
|
||||
func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
|
||||
componentTexts := make([]string, len(spec.containers)+1)
|
||||
componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)
|
||||
|
||||
for i, container := range spec.containers {
|
||||
componentTexts[i] = container.Text()
|
||||
componentCodeLocations[i] = container.CodeLocation()
|
||||
}
|
||||
|
||||
componentTexts[len(spec.containers)] = spec.subject.Text()
|
||||
componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
|
||||
|
||||
return &types.SpecSummary{
|
||||
IsMeasurement: spec.IsMeasurement(),
|
||||
NumberOfSamples: spec.subject.Samples(),
|
||||
ComponentTexts: componentTexts,
|
||||
ComponentCodeLocations: componentCodeLocations,
|
||||
State: spec.state,
|
||||
RunTime: spec.runTime,
|
||||
Failure: spec.failure,
|
||||
Measurements: spec.measurementsReport(),
|
||||
SuiteID: suiteID,
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) ConcatenatedString() string {
|
||||
s := ""
|
||||
for _, container := range spec.containers {
|
||||
s += container.Text() + " "
|
||||
}
|
||||
|
||||
return s + spec.subject.Text()
|
||||
}
|
||||
|
||||
func (spec *Spec) Run(writer io.Writer) {
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
spec.runTime = time.Since(startTime)
|
||||
}()
|
||||
|
||||
for sample := 0; sample < spec.subject.Samples(); sample++ {
|
||||
spec.runSample(sample, writer)
|
||||
|
||||
if spec.state != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) runSample(sample int, writer io.Writer) {
|
||||
spec.state = types.SpecStatePassed
|
||||
spec.failure = types.SpecFailure{}
|
||||
innerMostContainerIndexToUnwind := -1
|
||||
|
||||
defer func() {
|
||||
for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
|
||||
container := spec.containers[i]
|
||||
for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
|
||||
spec.announceSetupNode(writer, "AfterEach", container, afterEach)
|
||||
afterEachState, afterEachFailure := afterEach.Run()
|
||||
if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
|
||||
spec.state = afterEachState
|
||||
spec.failure = afterEachFailure
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for i, container := range spec.containers {
|
||||
innerMostContainerIndexToUnwind = i
|
||||
for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
|
||||
spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
|
||||
spec.state, spec.failure = beforeEach.Run()
|
||||
if spec.state != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, container := range spec.containers {
|
||||
for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
|
||||
spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
|
||||
spec.state, spec.failure = justBeforeEach.Run()
|
||||
if spec.state != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
spec.announceSubject(writer, spec.subject)
|
||||
spec.state, spec.failure = spec.subject.Run()
|
||||
}
|
||||
|
||||
func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
|
||||
if spec.announceProgress {
|
||||
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
|
||||
writer.Write([]byte(s))
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
|
||||
if spec.announceProgress {
|
||||
nodeType := ""
|
||||
switch subject.Type() {
|
||||
case types.SpecComponentTypeIt:
|
||||
nodeType = "It"
|
||||
case types.SpecComponentTypeMeasure:
|
||||
nodeType = "Measure"
|
||||
}
|
||||
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
|
||||
writer.Write([]byte(s))
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
|
||||
if !spec.IsMeasurement() || spec.Failed() {
|
||||
return map[string]*types.SpecMeasurement{}
|
||||
}
|
||||
|
||||
return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
|
||||
}
|
122
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/specs.go
generated
vendored
@ -1,122 +0,0 @@
|
||||
package spec
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"regexp"
|
||||
"sort"
|
||||
)
|
||||
|
||||
type Specs struct {
|
||||
specs []*Spec
|
||||
numberOfOriginalSpecs int
|
||||
hasProgrammaticFocus bool
|
||||
}
|
||||
|
||||
func NewSpecs(specs []*Spec) *Specs {
|
||||
return &Specs{
|
||||
specs: specs,
|
||||
numberOfOriginalSpecs: len(specs),
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) Specs() []*Spec {
|
||||
return e.specs
|
||||
}
|
||||
|
||||
func (e *Specs) NumberOfOriginalSpecs() int {
|
||||
return e.numberOfOriginalSpecs
|
||||
}
|
||||
|
||||
func (e *Specs) HasProgrammaticFocus() bool {
|
||||
return e.hasProgrammaticFocus
|
||||
}
|
||||
|
||||
func (e *Specs) Shuffle(r *rand.Rand) {
|
||||
sort.Sort(e)
|
||||
permutation := r.Perm(len(e.specs))
|
||||
shuffledSpecs := make([]*Spec, len(e.specs))
|
||||
for i, j := range permutation {
|
||||
shuffledSpecs[i] = e.specs[j]
|
||||
}
|
||||
e.specs = shuffledSpecs
|
||||
}
|
||||
|
||||
func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
|
||||
if focusString == "" && skipString == "" {
|
||||
e.applyProgrammaticFocus()
|
||||
} else {
|
||||
e.applyRegExpFocus(description, focusString, skipString)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) applyProgrammaticFocus() {
|
||||
e.hasProgrammaticFocus = false
|
||||
for _, spec := range e.specs {
|
||||
if spec.Focused() && !spec.Pending() {
|
||||
e.hasProgrammaticFocus = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if e.hasProgrammaticFocus {
|
||||
for _, spec := range e.specs {
|
||||
if !spec.Focused() {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) applyRegExpFocus(description string, focusString string, skipString string) {
|
||||
for _, spec := range e.specs {
|
||||
matchesFocus := true
|
||||
matchesSkip := false
|
||||
|
||||
toMatch := []byte(description + " " + spec.ConcatenatedString())
|
||||
|
||||
if focusString != "" {
|
||||
focusFilter := regexp.MustCompile(focusString)
|
||||
matchesFocus = focusFilter.Match([]byte(toMatch))
|
||||
}
|
||||
|
||||
if skipString != "" {
|
||||
skipFilter := regexp.MustCompile(skipString)
|
||||
matchesSkip = skipFilter.Match([]byte(toMatch))
|
||||
}
|
||||
|
||||
if !matchesFocus || matchesSkip {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) SkipMeasurements() {
|
||||
for _, spec := range e.specs {
|
||||
if spec.IsMeasurement() {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) TrimForParallelization(total int, node int) {
|
||||
startIndex, count := ParallelizedIndexRange(len(e.specs), total, node)
|
||||
if count == 0 {
|
||||
e.specs = make([]*Spec, 0)
|
||||
} else {
|
||||
e.specs = e.specs[startIndex : startIndex+count]
|
||||
}
|
||||
}
|
||||
|
||||
//sort.Interface
|
||||
|
||||
func (e *Specs) Len() int {
|
||||
return len(e.specs)
|
||||
}
|
||||
|
||||
func (e *Specs) Less(i, j int) bool {
|
||||
return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString()
|
||||
}
|
||||
|
||||
func (e *Specs) Swap(i, j int) {
|
||||
e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
|
||||
}
|
15
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/specrunner/random_id.go
generated
vendored
@ -1,15 +0,0 @@
package specrunner

import (
	"crypto/rand"
	"fmt"
)

func randomID() string {
	b := make([]byte, 8)
	_, err := rand.Read(b)
	if err != nil {
		return ""
	}
	return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
}
324
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
generated
vendored
@ -1,324 +0,0 @@
|
||||
package specrunner
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
Writer "github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
|
||||
"time"
|
||||
)
|
||||
|
||||
type SpecRunner struct {
|
||||
description string
|
||||
beforeSuiteNode leafnodes.SuiteNode
|
||||
specs *spec.Specs
|
||||
afterSuiteNode leafnodes.SuiteNode
|
||||
reporters []reporters.Reporter
|
||||
startTime time.Time
|
||||
suiteID string
|
||||
runningSpec *spec.Spec
|
||||
writer Writer.WriterInterface
|
||||
config config.GinkgoConfigType
|
||||
interrupted bool
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
|
||||
return &SpecRunner{
|
||||
description: description,
|
||||
beforeSuiteNode: beforeSuiteNode,
|
||||
specs: specs,
|
||||
afterSuiteNode: afterSuiteNode,
|
||||
reporters: reporters,
|
||||
writer: writer,
|
||||
config: config,
|
||||
suiteID: randomID(),
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) Run() bool {
|
||||
if runner.config.DryRun {
|
||||
runner.performDryRun()
|
||||
return true
|
||||
}
|
||||
|
||||
runner.reportSuiteWillBegin()
|
||||
go runner.registerForInterrupts()
|
||||
|
||||
suitePassed := runner.runBeforeSuite()
|
||||
|
||||
if suitePassed {
|
||||
suitePassed = runner.runSpecs()
|
||||
}
|
||||
|
||||
runner.blockForeverIfInterrupted()
|
||||
|
||||
suitePassed = runner.runAfterSuite() && suitePassed
|
||||
|
||||
runner.reportSuiteDidEnd(suitePassed)
|
||||
|
||||
return suitePassed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) performDryRun() {
|
||||
runner.reportSuiteWillBegin()
|
||||
|
||||
if runner.beforeSuiteNode != nil {
|
||||
summary := runner.beforeSuiteNode.Summary()
|
||||
summary.State = types.SpecStatePassed
|
||||
runner.reportBeforeSuite(summary)
|
||||
}
|
||||
|
||||
for _, spec := range runner.specs.Specs() {
|
||||
summary := spec.Summary(runner.suiteID)
|
||||
runner.reportSpecWillRun(summary)
|
||||
if summary.State == types.SpecStateInvalid {
|
||||
summary.State = types.SpecStatePassed
|
||||
}
|
||||
runner.reportSpecDidComplete(summary, false)
|
||||
}
|
||||
|
||||
if runner.afterSuiteNode != nil {
|
||||
summary := runner.afterSuiteNode.Summary()
|
||||
summary.State = types.SpecStatePassed
|
||||
runner.reportAfterSuite(summary)
|
||||
}
|
||||
|
||||
runner.reportSuiteDidEnd(true)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runBeforeSuite() bool {
|
||||
if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
|
||||
return true
|
||||
}
|
||||
|
||||
runner.writer.Truncate()
|
||||
conf := runner.config
|
||||
passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
|
||||
if !passed {
|
||||
runner.writer.DumpOut()
|
||||
}
|
||||
runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
|
||||
return passed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runAfterSuite() bool {
|
||||
if runner.afterSuiteNode == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
runner.writer.Truncate()
|
||||
conf := runner.config
|
||||
passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
|
||||
if !passed {
|
||||
runner.writer.DumpOut()
|
||||
}
|
||||
runner.reportAfterSuite(runner.afterSuiteNode.Summary())
|
||||
return passed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runSpecs() bool {
|
||||
suiteFailed := false
|
||||
skipRemainingSpecs := false
|
||||
for _, spec := range runner.specs.Specs() {
|
||||
if runner.wasInterrupted() {
|
||||
return suiteFailed
|
||||
}
|
||||
if skipRemainingSpecs {
|
||||
spec.Skip()
|
||||
}
|
||||
runner.reportSpecWillRun(spec.Summary(runner.suiteID))
|
||||
|
||||
if !spec.Skipped() && !spec.Pending() {
|
||||
runner.runningSpec = spec
|
||||
spec.Run(runner.writer)
|
||||
runner.runningSpec = nil
|
||||
if spec.Failed() {
|
||||
suiteFailed = true
|
||||
}
|
||||
} else if spec.Pending() && runner.config.FailOnPending {
|
||||
suiteFailed = true
|
||||
}
|
||||
|
||||
runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
|
||||
|
||||
if spec.Failed() && runner.config.FailFast {
|
||||
skipRemainingSpecs = true
|
||||
}
|
||||
}
|
||||
|
||||
return !suiteFailed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
|
||||
if runner.runningSpec == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return runner.runningSpec.Summary(runner.suiteID), true
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) registerForInterrupts() {
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
<-c
|
||||
signal.Stop(c)
|
||||
runner.markInterrupted()
|
||||
go runner.registerForHardInterrupts()
|
||||
runner.writer.DumpOutWithHeader(`
|
||||
Received interrupt. Emitting contents of GinkgoWriter...
|
||||
---------------------------------------------------------
|
||||
`)
|
||||
if runner.afterSuiteNode != nil {
|
||||
fmt.Fprint(os.Stderr, `
|
||||
---------------------------------------------------------
|
||||
Received interrupt. Running AfterSuite...
|
||||
^C again to terminate immediately
|
||||
`)
|
||||
runner.runAfterSuite()
|
||||
}
|
||||
runner.reportSuiteDidEnd(false)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) registerForHardInterrupts() {
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
<-c
|
||||
fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) blockForeverIfInterrupted() {
|
||||
runner.lock.Lock()
|
||||
interrupted := runner.interrupted
|
||||
runner.lock.Unlock()
|
||||
|
||||
if interrupted {
|
||||
select {}
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) markInterrupted() {
|
||||
runner.lock.Lock()
|
||||
defer runner.lock.Unlock()
|
||||
runner.interrupted = true
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) wasInterrupted() bool {
|
||||
runner.lock.Lock()
|
||||
defer runner.lock.Unlock()
|
||||
return runner.interrupted
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSuiteWillBegin() {
|
||||
runner.startTime = time.Now()
|
||||
summary := runner.summary(true)
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.SpecSuiteWillBegin(runner.config, summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.BeforeSuiteDidRun(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.AfterSuiteDidRun(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
|
||||
runner.writer.Truncate()
|
||||
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.SpecWillRun(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
|
||||
for i := len(runner.reporters) - 1; i >= 1; i-- {
|
||||
runner.reporters[i].SpecDidComplete(summary)
|
||||
}
|
||||
|
||||
if failed {
|
||||
runner.writer.DumpOut()
|
||||
}
|
||||
|
||||
runner.reporters[0].SpecDidComplete(summary)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
|
||||
summary := runner.summary(success)
|
||||
summary.RunTime = time.Since(runner.startTime)
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.SpecSuiteDidEnd(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) (count int) {
|
||||
count = 0
|
||||
|
||||
for _, spec := range runner.specs.Specs() {
|
||||
if filter(spec) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
|
||||
numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return !ex.Skipped() && !ex.Pending()
|
||||
})
|
||||
|
||||
numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Pending()
|
||||
})
|
||||
|
||||
numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Skipped()
|
||||
})
|
||||
|
||||
numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Passed()
|
||||
})
|
||||
|
||||
numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Failed()
|
||||
})
|
||||
|
||||
if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
|
||||
numberOfFailedSpecs = numberOfSpecsThatWillBeRun
|
||||
}
|
||||
|
||||
return &types.SuiteSummary{
|
||||
SuiteDescription: runner.description,
|
||||
SuiteSucceeded: success,
|
||||
SuiteID: runner.suiteID,
|
||||
|
||||
NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(),
|
||||
NumberOfTotalSpecs: len(runner.specs.Specs()),
|
||||
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun,
|
||||
NumberOfPendingSpecs: numberOfPendingSpecs,
|
||||
NumberOfSkippedSpecs: numberOfSkippedSpecs,
|
||||
NumberOfPassedSpecs: numberOfPassedSpecs,
|
||||
NumberOfFailedSpecs: numberOfFailedSpecs,
|
||||
}
|
||||
}
|
171 Godeps/_workspace/src/github.com/onsi/ginkgo/internal/suite/suite.go generated vendored
@ -1,171 +0,0 @@
package suite

import (
	"math/rand"
	"time"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/internal/containernode"
	"github.com/onsi/ginkgo/internal/failer"
	"github.com/onsi/ginkgo/internal/leafnodes"
	"github.com/onsi/ginkgo/internal/spec"
	"github.com/onsi/ginkgo/internal/specrunner"
	"github.com/onsi/ginkgo/internal/writer"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/types"
)

type ginkgoTestingT interface {
	Fail()
}

type Suite struct {
	topLevelContainer *containernode.ContainerNode
	currentContainer  *containernode.ContainerNode
	containerIndex    int
	beforeSuiteNode   leafnodes.SuiteNode
	afterSuiteNode    leafnodes.SuiteNode
	runner            *specrunner.SpecRunner
	failer            *failer.Failer
	running           bool
}

func New(failer *failer.Failer) *Suite {
	topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})

	return &Suite{
		topLevelContainer: topLevelContainer,
		currentContainer:  topLevelContainer,
		failer:            failer,
		containerIndex:    1,
	}
}

func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
	if config.ParallelTotal < 1 {
		panic("ginkgo.parallel.total must be >= 1")
	}

	if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
		panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
	}

	r := rand.New(rand.NewSource(config.RandomSeed))
	suite.topLevelContainer.Shuffle(r)
	specs := suite.generateSpecs(description, config)
	suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config)

	suite.running = true
	success := suite.runner.Run()
	if !success {
		t.Fail()
	}
	return success, specs.HasProgrammaticFocus()
}

func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs {
	specsSlice := []*spec.Spec{}
	suite.topLevelContainer.BackPropagateProgrammaticFocus()
	for _, collatedNodes := range suite.topLevelContainer.Collate() {
		specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
	}

	specs := spec.NewSpecs(specsSlice)

	if config.RandomizeAllSpecs {
		specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
	}

	specs.ApplyFocus(description, config.FocusString, config.SkipString)

	if config.SkipMeasurements {
		specs.SkipMeasurements()
	}

	if config.ParallelTotal > 1 {
		specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode)
	}

	return specs
}

func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
	return suite.runner.CurrentSpecSummary()
}

func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.beforeSuiteNode != nil {
		panic("You may only call BeforeSuite once!")
	}
	suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
}

func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.afterSuiteNode != nil {
		panic("You may only call AfterSuite once!")
	}
	suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
}

func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.beforeSuiteNode != nil {
		panic("You may only call BeforeSuite once!")
	}
	suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
}

func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.afterSuiteNode != nil {
		panic("You may only call AfterSuite once!")
	}
	suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
}

func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
	container := containernode.New(text, flag, codeLocation)
	suite.currentContainer.PushContainerNode(container)

	previousContainer := suite.currentContainer
	suite.currentContainer = container
	suite.containerIndex++

	body()

	suite.containerIndex--
	suite.currentContainer = previousContainer
}

func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call It from within a Describe or Context", codeLocation)
	}
	suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
	if suite.running {
		suite.failer.Fail("You may only call Measure from within a Describe or Context", codeLocation)
	}
	suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call BeforeEach from within a Describe or Context", codeLocation)
	}
	suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call JustBeforeEach from within a Describe or Context", codeLocation)
	}
	suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call AfterEach from within a Describe or Context", codeLocation)
	}
	suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}
76 Godeps/_workspace/src/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go generated vendored
@ -1,76 +0,0 @@
package testingtproxy

import (
	"fmt"
	"io"
)

type failFunc func(message string, callerSkip ...int)

func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy {
	return &ginkgoTestingTProxy{
		fail:   fail,
		offset: offset,
		writer: writer,
	}
}

type ginkgoTestingTProxy struct {
	fail   failFunc
	offset int
	writer io.Writer
}

func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
	t.fail(fmt.Sprintln(args...), t.offset)
}

func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
	t.fail(fmt.Sprintf(format, args...), t.offset)
}

func (t *ginkgoTestingTProxy) Fail() {
	t.fail("failed", t.offset)
}

func (t *ginkgoTestingTProxy) FailNow() {
	t.fail("failed", t.offset)
}

func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
	t.fail(fmt.Sprintln(args...), t.offset)
}

func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
	t.fail(fmt.Sprintf(format, args...), t.offset)
}

func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
	fmt.Fprintln(t.writer, args...)
}

func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
	fmt.Fprintf(t.writer, format, args...)
}

func (t *ginkgoTestingTProxy) Failed() bool {
	return false
}

func (t *ginkgoTestingTProxy) Parallel() {
}

func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
	fmt.Println(args...)
}

func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
	fmt.Printf(format, args...)
}

func (t *ginkgoTestingTProxy) SkipNow() {
}

func (t *ginkgoTestingTProxy) Skipped() bool {
	return false
}
31 Godeps/_workspace/src/github.com/onsi/ginkgo/internal/writer/fake_writer.go generated vendored
@ -1,31 +0,0 @@
package writer

type FakeGinkgoWriter struct {
	EventStream []string
}

func NewFake() *FakeGinkgoWriter {
	return &FakeGinkgoWriter{
		EventStream: []string{},
	}
}

func (writer *FakeGinkgoWriter) AddEvent(event string) {
	writer.EventStream = append(writer.EventStream, event)
}

func (writer *FakeGinkgoWriter) Truncate() {
	writer.EventStream = append(writer.EventStream, "TRUNCATE")
}

func (writer *FakeGinkgoWriter) DumpOut() {
	writer.EventStream = append(writer.EventStream, "DUMP")
}

func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
	writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
}

func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
	return 0, nil
}
71 Godeps/_workspace/src/github.com/onsi/ginkgo/internal/writer/writer.go generated vendored
@ -1,71 +0,0 @@
package writer

import (
	"bytes"
	"io"
	"sync"
)

type WriterInterface interface {
	io.Writer

	Truncate()
	DumpOut()
	DumpOutWithHeader(header string)
}

type Writer struct {
	buffer    *bytes.Buffer
	outWriter io.Writer
	lock      *sync.Mutex
	stream    bool
}

func New(outWriter io.Writer) *Writer {
	return &Writer{
		buffer:    &bytes.Buffer{},
		lock:      &sync.Mutex{},
		outWriter: outWriter,
		stream:    true,
	}
}

func (w *Writer) SetStream(stream bool) {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.stream = stream
}

func (w *Writer) Write(b []byte) (n int, err error) {
	w.lock.Lock()
	defer w.lock.Unlock()

	if w.stream {
		return w.outWriter.Write(b)
	} else {
		return w.buffer.Write(b)
	}
}

func (w *Writer) Truncate() {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.buffer.Reset()
}

func (w *Writer) DumpOut() {
	w.lock.Lock()
	defer w.lock.Unlock()
	if !w.stream {
		w.buffer.WriteTo(w.outWriter)
	}
}

func (w *Writer) DumpOutWithHeader(header string) {
	w.lock.Lock()
	defer w.lock.Unlock()
	if !w.stream && w.buffer.Len() > 0 {
		w.outWriter.Write([]byte(header))
		w.buffer.WriteTo(w.outWriter)
	}
}
83 Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/default_reporter.go generated vendored
@ -1,83 +0,0 @@
/*
Ginkgo's Default Reporter

A number of command line flags are available to tweak Ginkgo's default output.

These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
*/
package reporters

import (
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/reporters/stenographer"
	"github.com/onsi/ginkgo/types"
)

type DefaultReporter struct {
	config        config.DefaultReporterConfigType
	stenographer  stenographer.Stenographer
	specSummaries []*types.SpecSummary
}

func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
	return &DefaultReporter{
		config:       config,
		stenographer: stenographer,
	}
}

func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
	if config.ParallelTotal > 1 {
		reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct)
	}
	reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
}

func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	if setupSummary.State != types.SpecStatePassed {
		reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
	}
}

func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	if setupSummary.State != types.SpecStatePassed {
		reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
	}
}

func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
	if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
		reporter.stenographer.AnnounceSpecWillRun(specSummary)
	}
}

func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	switch specSummary.State {
	case types.SpecStatePassed:
		if specSummary.IsMeasurement {
			reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
		} else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
			reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
		} else {
			reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
		}
	case types.SpecStatePending:
		reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
	case types.SpecStateSkipped:
		reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
	case types.SpecStateTimedOut:
		reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
	case types.SpecStatePanicked:
		reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
	case types.SpecStateFailed:
		reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
	}

	reporter.specSummaries = append(reporter.specSummaries, specSummary)
}

func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	reporter.stenographer.SummarizeFailures(reporter.specSummaries)
	reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
}
59 Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/fake_reporter.go generated vendored
@ -1,59 +0,0 @@
package reporters

import (
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)

//FakeReporter is useful for testing purposes
type FakeReporter struct {
	Config config.GinkgoConfigType

	BeginSummary         *types.SuiteSummary
	BeforeSuiteSummary   *types.SetupSummary
	SpecWillRunSummaries []*types.SpecSummary
	SpecSummaries        []*types.SpecSummary
	AfterSuiteSummary    *types.SetupSummary
	EndSummary           *types.SuiteSummary

	SpecWillRunStub     func(specSummary *types.SpecSummary)
	SpecDidCompleteStub func(specSummary *types.SpecSummary)
}

func NewFakeReporter() *FakeReporter {
	return &FakeReporter{
		SpecWillRunSummaries: make([]*types.SpecSummary, 0),
		SpecSummaries:        make([]*types.SpecSummary, 0),
	}
}

func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	fakeR.Config = config
	fakeR.BeginSummary = summary
}

func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	fakeR.BeforeSuiteSummary = setupSummary
}

func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
	if fakeR.SpecWillRunStub != nil {
		fakeR.SpecWillRunStub(specSummary)
	}
	fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
}

func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	if fakeR.SpecDidCompleteStub != nil {
		fakeR.SpecDidCompleteStub(specSummary)
	}
	fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
}

func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	fakeR.AfterSuiteSummary = setupSummary
}

func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	fakeR.EndSummary = summary
}
139 Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/junit_reporter.go generated vendored
@ -1,139 +0,0 @@
/*

JUnit XML Reporter for Ginkgo

For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output

*/

package reporters

import (
	"encoding/xml"
	"fmt"
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
	"os"
	"strings"
)

type JUnitTestSuite struct {
	XMLName   xml.Name        `xml:"testsuite"`
	TestCases []JUnitTestCase `xml:"testcase"`
	Tests     int             `xml:"tests,attr"`
	Failures  int             `xml:"failures,attr"`
	Time      float64         `xml:"time,attr"`
}

type JUnitTestCase struct {
	Name           string               `xml:"name,attr"`
	ClassName      string               `xml:"classname,attr"`
	FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
	Skipped        *JUnitSkipped        `xml:"skipped,omitempty"`
	Time           float64              `xml:"time,attr"`
}

type JUnitFailureMessage struct {
	Type    string `xml:"type,attr"`
	Message string `xml:",chardata"`
}

type JUnitSkipped struct {
	XMLName xml.Name `xml:"skipped"`
}

type JUnitReporter struct {
	suite         JUnitTestSuite
	filename      string
	testSuiteName string
}

//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename.
func NewJUnitReporter(filename string) *JUnitReporter {
	return &JUnitReporter{
		filename: filename,
	}
}

func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	reporter.suite = JUnitTestSuite{
		Tests:     summary.NumberOfSpecsThatWillBeRun,
		TestCases: []JUnitTestCase{},
	}
	reporter.testSuiteName = summary.SuiteDescription
}

func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
}

func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	reporter.handleSetupSummary("BeforeSuite", setupSummary)
}

func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	reporter.handleSetupSummary("AfterSuite", setupSummary)
}

func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
	if setupSummary.State != types.SpecStatePassed {
		testCase := JUnitTestCase{
			Name:      name,
			ClassName: reporter.testSuiteName,
		}

		testCase.FailureMessage = &JUnitFailureMessage{
			Type:    reporter.failureTypeForState(setupSummary.State),
			Message: fmt.Sprintf("%s\n%s", setupSummary.Failure.ComponentCodeLocation.String(), setupSummary.Failure.Message),
		}
		testCase.Time = setupSummary.RunTime.Seconds()
		reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
	}
}

func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	testCase := JUnitTestCase{
		Name:      strings.Join(specSummary.ComponentTexts[1:], " "),
		ClassName: reporter.testSuiteName,
	}
	if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
		testCase.FailureMessage = &JUnitFailureMessage{
			Type:    reporter.failureTypeForState(specSummary.State),
			Message: fmt.Sprintf("%s\n%s", specSummary.Failure.ComponentCodeLocation.String(), specSummary.Failure.Message),
		}
	}
	if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
		testCase.Skipped = &JUnitSkipped{}
	}
	testCase.Time = specSummary.RunTime.Seconds()
	reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
}

func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	reporter.suite.Time = summary.RunTime.Seconds()
	reporter.suite.Failures = summary.NumberOfFailedSpecs
	file, err := os.Create(reporter.filename)
	if err != nil {
		fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error())
	}
	defer file.Close()
	file.WriteString(xml.Header)
	encoder := xml.NewEncoder(file)
	encoder.Indent("  ", "    ")
	err = encoder.Encode(reporter.suite)
	if err != nil {
		fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error())
	}
}

func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
	switch state {
	case types.SpecStateFailed:
		return "Failure"
	case types.SpecStateTimedOut:
		return "Timeout"
	case types.SpecStatePanicked:
		return "Panic"
	default:
		return ""
	}
}
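
Note: the usage instructions linked in the file header above wire this reporter in through the suite bootstrap. A minimal sketch of that wiring, assuming the public Ginkgo and Gomega packages are available; the test package name, suite description, and "junit.xml" output path are illustrative placeholders:

package mypackage_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/reporters"
	. "github.com/onsi/gomega"
)

// TestMyPackage runs the Ginkgo suite and additionally writes a JUnit XML
// report to junit.xml (placeholder filename) via the custom reporter hook.
func TestMyPackage(t *testing.T) {
	RegisterFailHandler(Fail)
	junitReporter := reporters.NewJUnitReporter("junit.xml")
	RunSpecsWithDefaultAndCustomReporters(t, "My Package Suite", []Reporter{junitReporter})
}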
15 Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/reporter.go generated vendored
@ -1,15 +0,0 @@
package reporters

import (
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)

type Reporter interface {
	SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
	BeforeSuiteDidRun(setupSummary *types.SetupSummary)
	SpecWillRun(specSummary *types.SpecSummary)
	SpecDidComplete(specSummary *types.SpecSummary)
	AfterSuiteDidRun(setupSummary *types.SetupSummary)
	SpecSuiteDidEnd(summary *types.SuiteSummary)
}
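
This Reporter interface is what the default, JUnit, and fake reporters above all implement. A minimal sketch of a custom implementation, assuming only the public config and types packages; the LoggingReporter name and its print format are made up for illustration:

package myreporters

import (
	"fmt"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)

// LoggingReporter is a hypothetical Reporter that prints each spec's state
// as it completes; every method below mirrors the interface shown above.
type LoggingReporter struct{}

func (r *LoggingReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	fmt.Printf("Running suite: %s\n", summary.SuiteDescription)
}

func (r *LoggingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {}

func (r *LoggingReporter) SpecWillRun(specSummary *types.SpecSummary) {}

func (r *LoggingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	fmt.Printf("  [%v] %v\n", specSummary.State, specSummary.ComponentTexts)
}

func (r *LoggingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {}

func (r *LoggingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {}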
64 Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go generated vendored
@ -1,64 +0,0 @@
package stenographer

import (
	"fmt"
	"strings"
)

func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
	var out string

	if len(args) > 0 {
		out = fmt.Sprintf(format, args...)
	} else {
		out = format
	}

	if s.color {
		return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
	} else {
		return out
	}
}

func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
	fmt.Println(text)
	fmt.Println(strings.Repeat(bannerCharacter, len(text)))
}

func (s *consoleStenographer) printNewLine() {
	fmt.Println("")
}

func (s *consoleStenographer) printDelimiter() {
	fmt.Println(s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
}

func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
	fmt.Print(s.indent(indentation, format, args...))
}

func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
	fmt.Println(s.indent(indentation, format, args...))
}

func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
	var text string

	if len(args) > 0 {
		text = fmt.Sprintf(format, args...)
	} else {
		text = format
	}

	stringArray := strings.Split(text, "\n")
	padding := ""
	if indentation >= 0 {
		padding = strings.Repeat(" ", indentation)
	}
	for i, s := range stringArray {
		stringArray[i] = fmt.Sprintf("%s%s", padding, s)
	}

	return strings.Join(stringArray, "\n")
}
Some files were not shown because too many files have changed in this diff.