Moves to official vendoring solution

This commit is contained in:
Zachary Gershman
2016-03-16 08:38:20 -07:00
parent d19044896e
commit 48ff0e472a
377 changed files with 577 additions and 8130 deletions

191
vendor/github.com/coreos/go-iptables/LICENSE generated vendored Normal file

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,300 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package iptables
import (
"bytes"
"fmt"
"io"
"log"
"os/exec"
"regexp"
"strconv"
"strings"
"syscall"
)
// Adds the output of stderr to exec.ExitError
type Error struct {
exec.ExitError
msg string
}
func (e *Error) ExitStatus() int {
return e.Sys().(syscall.WaitStatus).ExitStatus()
}
func (e *Error) Error() string {
return fmt.Sprintf("exit status %v: %v", e.ExitStatus(), e.msg)
}
type IPTables struct {
path string
hasCheck bool
hasWait bool
fmu *fileLock
}
func New() (*IPTables, error) {
path, err := exec.LookPath("iptables")
if err != nil {
return nil, err
}
checkPresent, waitPresent, err := getIptablesCommandSupport()
if err != nil {
log.Printf("Error checking iptables version, assuming version at least 1.4.20: %v", err)
checkPresent = true
waitPresent = true
}
ipt := IPTables{
path: path,
hasCheck: checkPresent,
hasWait: waitPresent,
}
if !waitPresent {
ipt.fmu, err = newXtablesFileLock()
if err != nil {
return nil, err
}
}
return &ipt, nil
}
// Exists checks if given rulespec in specified table/chain exists
func (ipt *IPTables) Exists(table, chain string, rulespec ...string) (bool, error) {
if !ipt.hasCheck {
return ipt.existsForOldIptables(table, chain, rulespec)
}
cmd := append([]string{"-t", table, "-C", chain}, rulespec...)
err := ipt.run(cmd...)
switch {
case err == nil:
return true, nil
case err.(*Error).ExitStatus() == 1:
return false, nil
default:
return false, err
}
}
// Insert inserts rulespec to specified table/chain (in specified pos)
func (ipt *IPTables) Insert(table, chain string, pos int, rulespec ...string) error {
cmd := append([]string{"-t", table, "-I", chain, strconv.Itoa(pos)}, rulespec...)
return ipt.run(cmd...)
}
// Append appends rulespec to specified table/chain
func (ipt *IPTables) Append(table, chain string, rulespec ...string) error {
cmd := append([]string{"-t", table, "-A", chain}, rulespec...)
return ipt.run(cmd...)
}
// AppendUnique acts like Append except that it won't add a duplicate
func (ipt *IPTables) AppendUnique(table, chain string, rulespec ...string) error {
exists, err := ipt.Exists(table, chain, rulespec...)
if err != nil {
return err
}
if !exists {
return ipt.Append(table, chain, rulespec...)
}
return nil
}
// Delete removes rulespec in specified table/chain
func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error {
cmd := append([]string{"-t", table, "-D", chain}, rulespec...)
return ipt.run(cmd...)
}
// List rules in specified table/chain
func (ipt *IPTables) List(table, chain string) ([]string, error) {
args := []string{"-t", table, "-S", chain}
var stdout bytes.Buffer
if err := ipt.runWithOutput(args, &stdout); err != nil {
return nil, err
}
rules := strings.Split(stdout.String(), "\n")
if len(rules) > 0 && rules[len(rules)-1] == "" {
rules = rules[:len(rules)-1]
}
return rules, nil
}
func (ipt *IPTables) NewChain(table, chain string) error {
return ipt.run("-t", table, "-N", chain)
}
// ClearChain flushes (deletes all rules in) the specified table/chain.
// If the chain does not exist, a new one will be created.
func (ipt *IPTables) ClearChain(table, chain string) error {
err := ipt.NewChain(table, chain)
switch {
case err == nil:
return nil
case err.(*Error).ExitStatus() == 1:
// chain already exists. Flush (clear) it.
return ipt.run("-t", table, "-F", chain)
default:
return err
}
}
// RenameChain renames the old chain to the new one.
func (ipt *IPTables) RenameChain(table, oldChain, newChain string) error {
return ipt.run("-t", table, "-E", oldChain, newChain)
}
// DeleteChain deletes the chain in the specified table.
// The chain must be empty
func (ipt *IPTables) DeleteChain(table, chain string) error {
return ipt.run("-t", table, "-X", chain)
}
// run runs an iptables command with the given arguments, ignoring
// any stdout output
func (ipt *IPTables) run(args ...string) error {
return ipt.runWithOutput(args, nil)
}
// runWithOutput runs an iptables command with the given arguments,
// writing any stdout output to the given writer
func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error {
args = append([]string{ipt.path}, args...)
if ipt.hasWait {
args = append(args, "--wait")
} else {
ul, err := ipt.fmu.tryLock()
if err != nil {
return err
}
defer ul.Unlock()
}
var stderr bytes.Buffer
cmd := exec.Cmd{
Path: ipt.path,
Args: args,
Stdout: stdout,
Stderr: &stderr,
}
if err := cmd.Run(); err != nil {
return &Error{*(err.(*exec.ExitError)), stderr.String()}
}
return nil
}
// Checks if iptables has the "-C" and "--wait" flags
func getIptablesCommandSupport() (bool, bool, error) {
vstring, err := getIptablesVersionString()
if err != nil {
return false, false, err
}
v1, v2, v3, err := extractIptablesVersion(vstring)
if err != nil {
return false, false, err
}
return iptablesHasCheckCommand(v1, v2, v3), iptablesHasWaitCommand(v1, v2, v3), nil
}
// extractIptablesVersion returns the first three components of the iptables version.
// e.g. "iptables v1.3.66" would return (1, 3, 66, nil)
func extractIptablesVersion(str string) (int, int, int, error) {
versionMatcher := regexp.MustCompile("v([0-9]+)\\.([0-9]+)\\.([0-9]+)")
result := versionMatcher.FindStringSubmatch(str)
if result == nil {
return 0, 0, 0, fmt.Errorf("no iptables version found in string: %s", str)
}
v1, err := strconv.Atoi(result[1])
if err != nil {
return 0, 0, 0, err
}
v2, err := strconv.Atoi(result[2])
if err != nil {
return 0, 0, 0, err
}
v3, err := strconv.Atoi(result[3])
if err != nil {
return 0, 0, 0, err
}
return v1, v2, v3, nil
}
// Runs "iptables --version" to get the version string
func getIptablesVersionString() (string, error) {
cmd := exec.Command("iptables", "--version")
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return "", err
}
return out.String(), nil
}
// Checks if an iptables version is after 1.4.11, when --check was added
func iptablesHasCheckCommand(v1 int, v2 int, v3 int) bool {
if v1 > 1 {
return true
}
if v1 == 1 && v2 > 4 {
return true
}
if v1 == 1 && v2 == 4 && v3 >= 11 {
return true
}
return false
}
// Checks if an iptables version is after 1.4.20, when --wait was added
func iptablesHasWaitCommand(v1 int, v2 int, v3 int) bool {
if v1 > 1 {
return true
}
if v1 == 1 && v2 > 4 {
return true
}
if v1 == 1 && v2 == 4 && v3 >= 20 {
return true
}
return false
}
// Checks if a rule specification exists for a table
func (ipt *IPTables) existsForOldIptables(table, chain string, rulespec []string) (bool, error) {
rs := strings.Join(append([]string{"-A", chain}, rulespec...), " ")
args := []string{"-t", table, "-S"}
var stdout bytes.Buffer
err := ipt.runWithOutput(args, &stdout)
if err != nil {
return false, err
}
return strings.Contains(stdout.String(), rs), nil
}
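
For orientation, a minimal usage sketch of the vendored API above (a sketch, not part of the vendored file). It assumes iptables is installed and the caller has privileges to modify the filter table; the chain name, port, and rule are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-iptables/iptables"
)

func main() {
	// New locates the iptables binary and probes for -C/--wait support.
	ipt, err := iptables.New()
	if err != nil {
		log.Fatal(err)
	}
	// Create (or flush) an illustrative scratch chain in the filter table.
	if err := ipt.ClearChain("filter", "EXAMPLE"); err != nil {
		log.Fatal(err)
	}
	// AppendUnique only adds the rule if an identical one is not already present.
	if err := ipt.AppendUnique("filter", "EXAMPLE", "-p", "tcp", "--dport", "8080", "-j", "ACCEPT"); err != nil {
		log.Fatal(err)
	}
	rules, err := ipt.List("filter", "EXAMPLE")
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		fmt.Println(r)
	}
	// Clean up: flush the chain again, then remove it.
	if err := ipt.ClearChain("filter", "EXAMPLE"); err != nil {
		log.Fatal(err)
	}
	if err := ipt.DeleteChain("filter", "EXAMPLE"); err != nil {
		log.Fatal(err)
	}
}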

84
vendor/github.com/coreos/go-iptables/iptables/lock.go generated vendored Normal file

@ -0,0 +1,84 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package iptables
import (
"os"
"sync"
"syscall"
)
const (
// In earlier versions of iptables, the xtables lock was implemented
// via a Unix socket, but now flock is used via this lockfile:
// http://git.netfilter.org/iptables/commit/?id=aa562a660d1555b13cffbac1e744033e91f82707
// Note the LSB-conforming "/run" directory does not exist on old
// distributions, so assume "/var" is symlinked
xtablesLockFilePath = "/var/run/xtables.lock"
defaultFilePerm = 0600
)
type Unlocker interface {
Unlock() error
}
type nopUnlocker struct{}
func (_ nopUnlocker) Unlock() error { return nil }
type fileLock struct {
// mu is used to protect against concurrent invocations from within this process
mu sync.Mutex
fd int
}
// tryLock takes an exclusive lock on the xtables lock file without blocking.
// This is best-effort only: if the exclusive lock would block (i.e. because
// another process already holds it), no error is returned. Otherwise, any
// error encountered during the locking operation is returned.
// The returned Unlocker should be used to release the lock when the caller is
// done invoking iptables commands.
func (l *fileLock) tryLock() (Unlocker, error) {
l.mu.Lock()
err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB)
switch err {
case syscall.EWOULDBLOCK:
l.mu.Unlock()
return nopUnlocker{}, nil
case nil:
return l, nil
default:
l.mu.Unlock()
return nil, err
}
}
// Unlock closes the underlying file, which implicitly unlocks it as well. It
// also unlocks the associated mutex.
func (l *fileLock) Unlock() error {
defer l.mu.Unlock()
return syscall.Close(l.fd)
}
// newXtablesFileLock opens a new lock on the xtables lockfile without
// acquiring the lock
func newXtablesFileLock() (*fileLock, error) {
fd, err := syscall.Open(xtablesLockFilePath, os.O_CREATE, defaultFilePerm)
if err != nil {
return nil, err
}
return &fileLock{fd: fd}, nil
}
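
As a reading aid, a sketch of the locking pattern that runWithOutput uses above when --wait is unavailable: take the flock around a single iptables invocation and release it afterwards. This is assumed illustration only; the runLocked name and the invokeIptables callback are hypothetical, and the real code opens the lockfile once in New() and reuses it via ipt.fmu.

package iptables

// runLocked is a sketch (not part of the vendored file) of the xtables lock
// pattern: open the lockfile, take the non-blocking, best-effort lock, run the
// command, and unlock via the returned Unlocker.
func runLocked(invokeIptables func() error) error {
	fl, err := newXtablesFileLock()
	if err != nil {
		return err
	}
	ul, err := fl.tryLock() // best-effort, non-blocking
	if err != nil {
		return err
	}
	defer ul.Unlock()
	return invokeIptables()
}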

191
vendor/github.com/coreos/go-systemd/LICENSE generated vendored Normal file

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,52 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package activation implements primitives for systemd socket activation.
package activation
import (
"os"
"strconv"
"syscall"
)
// based on: https://gist.github.com/alberts/4640792
const (
listenFdsStart = 3
)
func Files(unsetEnv bool) []*os.File {
if unsetEnv {
defer os.Unsetenv("LISTEN_PID")
defer os.Unsetenv("LISTEN_FDS")
}
pid, err := strconv.Atoi(os.Getenv("LISTEN_PID"))
if err != nil || pid != os.Getpid() {
return nil
}
nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS"))
if err != nil || nfds == 0 {
return nil
}
files := make([]*os.File, 0, nfds)
for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ {
syscall.CloseOnExec(fd)
files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd)))
}
return files
}


@ -0,0 +1,37 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package activation
import (
"net"
)
// Listeners returns a slice containing a net.Listener for each matching socket type
// passed to this process.
//
// The order of the file descriptors is preserved in the returned slice.
// Nil values are used to fill any gaps. For example if systemd were to return file descriptors
// corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener}
func Listeners(unsetEnv bool) ([]net.Listener, error) {
files := Files(unsetEnv)
listeners := make([]net.Listener, len(files))
for i, f := range files {
if pc, err := net.FileListener(f); err == nil {
listeners[i] = pc
}
}
return listeners, nil
}
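
For orientation, a minimal sketch of consuming a socket-activated listener with the function above (a sketch, not part of the vendored file). It assumes the process was started by systemd with at least one stream socket passed via LISTEN_FDS; the HTTP handler is illustrative.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/coreos/go-systemd/activation"
)

func main() {
	// true unsets LISTEN_PID/LISTEN_FDS after they have been read.
	listeners, err := activation.Listeners(true)
	if err != nil {
		log.Fatal(err)
	}
	if len(listeners) == 0 || listeners[0] == nil {
		log.Fatal("no socket-activated stream listeners were passed to this process")
	}
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello from a socket-activated server")
	})
	log.Fatal(http.Serve(listeners[0], nil))
}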


@ -0,0 +1,37 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package activation
import (
"net"
)
// PacketConns returns a slice containing a net.PacketConn for each matching socket type
// passed to this process.
//
// The order of the file descriptors is preserved in the returned slice.
// Nil values are used to fill any gaps. For example if systemd were to return file descriptors
// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn}
func PacketConns(unsetEnv bool) ([]net.PacketConn, error) {
files := Files(unsetEnv)
conns := make([]net.PacketConn, len(files))
for i, f := range files {
if pc, err := net.FilePacketConn(f); err == nil {
conns[i] = pc
}
}
return conns, nil
}

27
vendor/github.com/d2g/dhcp4/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2013 Skagerrak Software Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Skagerrak Software Limited nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

5
vendor/github.com/d2g/dhcp4/README.md generated vendored Normal file

@ -0,0 +1,5 @@
# DHCP4 - A DHCP library written in Go.
Warning: This library is still being developed. Function calls will change.
I've removed server functionality; for me, this project supports the underlying DHCP format, not the implementation.

121
vendor/github.com/d2g/dhcp4/constants.go generated vendored Normal file

@ -0,0 +1,121 @@
package dhcp4
// OpCodes
const (
BootRequest OpCode = 1 // From Client
BootReply OpCode = 2 // From Server
)
// DHCP Message Type 53
const (
Discover MessageType = 1 // Broadcast Packet From Client - Can I have an IP?
Offer MessageType = 2 // Broadcast From Server - Here's an IP
Request MessageType = 3 // Broadcast From Client - I'll take that IP (Also start for renewals)
Decline MessageType = 4 // Broadcast From Client - Sorry I can't use that IP
ACK MessageType = 5 // From Server, Yes you can have that IP
NAK MessageType = 6 // From Server, No you cannot have that IP
Release MessageType = 7 // From Client, I don't need that IP anymore
Inform MessageType = 8 // From Client, I have this IP and there's nothing you can do about it
)
// DHCP Options
const (
End OptionCode = 255
Pad OptionCode = 0
OptionSubnetMask OptionCode = 1
OptionTimeOffset OptionCode = 2
OptionRouter OptionCode = 3
OptionTimeServer OptionCode = 4
OptionNameServer OptionCode = 5
OptionDomainNameServer OptionCode = 6
OptionLogServer OptionCode = 7
OptionCookieServer OptionCode = 8
OptionLPRServer OptionCode = 9
OptionImpressServer OptionCode = 10
OptionResourceLocationServer OptionCode = 11
OptionHostName OptionCode = 12
OptionBootFileSize OptionCode = 13
OptionMeritDumpFile OptionCode = 14
OptionDomainName OptionCode = 15
OptionSwapServer OptionCode = 16
OptionRootPath OptionCode = 17
OptionExtensionsPath OptionCode = 18
// IP Layer Parameters per Host
OptionIPForwardingEnableDisable OptionCode = 19
OptionNonLocalSourceRoutingEnableDisable OptionCode = 20
OptionPolicyFilter OptionCode = 21
OptionMaximumDatagramReassemblySize OptionCode = 22
OptionDefaultIPTimeToLive OptionCode = 23
OptionPathMTUAgingTimeout OptionCode = 24
OptionPathMTUPlateauTable OptionCode = 25
// IP Layer Parameters per Interface
OptionInterfaceMTU OptionCode = 26
OptionAllSubnetsAreLocal OptionCode = 27
OptionBroadcastAddress OptionCode = 28
OptionPerformMaskDiscovery OptionCode = 29
OptionMaskSupplier OptionCode = 30
OptionPerformRouterDiscovery OptionCode = 31
OptionRouterSolicitationAddress OptionCode = 32
OptionStaticRoute OptionCode = 33
// Link Layer Parameters per Interface
OptionTrailerEncapsulation OptionCode = 34
OptionARPCacheTimeout OptionCode = 35
OptionEthernetEncapsulation OptionCode = 36
// TCP Parameters
OptionTCPDefaultTTL OptionCode = 37
OptionTCPKeepaliveInterval OptionCode = 38
OptionTCPKeepaliveGarbage OptionCode = 39
// Application and Service Parameters
OptionNetworkInformationServiceDomain OptionCode = 40
OptionNetworkInformationServers OptionCode = 41
OptionNetworkTimeProtocolServers OptionCode = 42
OptionVendorSpecificInformation OptionCode = 43
OptionNetBIOSOverTCPIPNameServer OptionCode = 44
OptionNetBIOSOverTCPIPDatagramDistributionServer OptionCode = 45
OptionNetBIOSOverTCPIPNodeType OptionCode = 46
OptionNetBIOSOverTCPIPScope OptionCode = 47
OptionXWindowSystemFontServer OptionCode = 48
OptionXWindowSystemDisplayManager OptionCode = 49
OptionNetworkInformationServicePlusDomain OptionCode = 64
OptionNetworkInformationServicePlusServers OptionCode = 65
OptionMobileIPHomeAgent OptionCode = 68
OptionSimpleMailTransportProtocol OptionCode = 69
OptionPostOfficeProtocolServer OptionCode = 70
OptionNetworkNewsTransportProtocol OptionCode = 71
OptionDefaultWorldWideWebServer OptionCode = 72
OptionDefaultFingerServer OptionCode = 73
OptionDefaultInternetRelayChatServer OptionCode = 74
OptionStreetTalkServer OptionCode = 75
OptionStreetTalkDirectoryAssistance OptionCode = 76
// DHCP Extensions
OptionRequestedIPAddress OptionCode = 50
OptionIPAddressLeaseTime OptionCode = 51
OptionOverload OptionCode = 52
OptionDHCPMessageType OptionCode = 53
OptionServerIdentifier OptionCode = 54
OptionParameterRequestList OptionCode = 55
OptionMessage OptionCode = 56
OptionMaximumDHCPMessageSize OptionCode = 57
OptionRenewalTimeValue OptionCode = 58
OptionRebindingTimeValue OptionCode = 59
OptionVendorClassIdentifier OptionCode = 60
OptionClientIdentifier OptionCode = 61
OptionTFTPServerName OptionCode = 66
OptionBootFileName OptionCode = 67
OptionUserClass OptionCode = 77
OptionClientArchitecture OptionCode = 93
OptionTZPOSIXString OptionCode = 100
OptionTZDatabaseString OptionCode = 101
OptionClasslessRouteFormat OptionCode = 121
)

58
vendor/github.com/d2g/dhcp4/helpers.go generated vendored Normal file

@ -0,0 +1,58 @@
package dhcp4
import (
"encoding/binary"
"net"
"time"
)
// IPRange returns how many IPs are in the range from start to stop (inclusive)
func IPRange(start, stop net.IP) int {
//return int(Uint([]byte(stop))-Uint([]byte(start))) + 1
return int(binary.BigEndian.Uint32(stop.To4())) - int(binary.BigEndian.Uint32(start.To4())) + 1
}
// IPAdd returns a copy of start + add.
// IPAdd(net.IP{192,168,1,1},30) returns net.IP{192.168.1.31}
func IPAdd(start net.IP, add int) net.IP { // IPv4 only
start = start.To4()
//v := Uvarint([]byte(start))
result := make(net.IP, 4)
binary.BigEndian.PutUint32(result, binary.BigEndian.Uint32(start)+uint32(add))
//PutUint([]byte(result), v+uint64(add))
return result
}
// IPLess returns whether IP a is less than IP b.
func IPLess(a, b net.IP) bool {
b = b.To4()
for i, ai := range a.To4() {
if ai != b[i] {
return ai < b[i]
}
}
return false
}
// IPInRange returns true if ip is between (inclusive) start and stop.
func IPInRange(start, stop, ip net.IP) bool {
return !(IPLess(ip, start) || IPLess(stop, ip))
}
// OptionsLeaseTime - converts a time.Duration to a 4 byte slice, compatible
// with OptionIPAddressLeaseTime.
func OptionsLeaseTime(d time.Duration) []byte {
leaseBytes := make([]byte, 4)
binary.BigEndian.PutUint32(leaseBytes, uint32(d/time.Second))
//PutUvarint(leaseBytes, uint64(d/time.Second))
return leaseBytes
}
// JoinIPs returns a byte slice of IP addresses, one immediately after the other
// This may be useful for creating multiple IP options such as OptionRouter.
func JoinIPs(ips []net.IP) (b []byte) {
for _, v := range ips {
b = append(b, v.To4()...)
}
return
}
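
A minimal sketch exercising the helpers above (a sketch, not part of the vendored file); the addresses and lease duration are illustrative.

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/d2g/dhcp4"
)

func main() {
	start := net.IPv4(192, 168, 1, 10)
	stop := net.IPv4(192, 168, 1, 20)

	fmt.Println(dhcp4.IPRange(start, stop))                          // 11 addresses, inclusive
	fmt.Println(dhcp4.IPAdd(start, 5))                               // 192.168.1.15
	fmt.Println(dhcp4.IPInRange(start, stop, dhcp4.IPAdd(start, 3))) // true

	// 4-byte big-endian lease time, as expected by OptionIPAddressLeaseTime.
	fmt.Printf("% x\n", dhcp4.OptionsLeaseTime(time.Hour))
}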

40
vendor/github.com/d2g/dhcp4/option.go generated vendored Normal file

@ -0,0 +1,40 @@
package dhcp4
type OptionCode byte
type Option struct {
Code OptionCode
Value []byte
}
// Map of DHCP options
type Options map[OptionCode][]byte
// SelectOrderOrAll has the same functionality as SelectOrder, except that if the
// order param is nil, all options are added (in arbitrary order).
func (o Options) SelectOrderOrAll(order []byte) []Option {
if order == nil {
opts := make([]Option, 0, len(o))
for i, v := range o {
opts = append(opts, Option{Code: i, Value: v})
}
return opts
}
return o.SelectOrder(order)
}
// SelectOrder returns a slice of options ordered and selected by a byte array
// usually defined by OptionParameterRequestList. This result is expected to be
// used in ReplyPacket()'s []Option parameter.
func (o Options) SelectOrder(order []byte) []Option {
opts := make([]Option, 0, len(order))
for _, v := range order {
if data, ok := o[OptionCode(v)]; ok {
opts = append(opts, Option{Code: OptionCode(v), Value: data})
}
}
return opts
}
type OpCode byte
type MessageType byte // Option 53
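
For orientation, a minimal sketch of how a responder might use SelectOrderOrAll (a sketch, not part of the vendored file). The option values are illustrative, and the order slice would normally come from a request's OptionParameterRequestList rather than being hard-coded.

package main

import (
	"fmt"

	"github.com/d2g/dhcp4"
)

func main() {
	// Options a server intends to offer, keyed by option code.
	offered := dhcp4.Options{
		dhcp4.OptionSubnetMask:       []byte{255, 255, 255, 0},
		dhcp4.OptionRouter:           []byte{192, 168, 1, 1},
		dhcp4.OptionDomainNameServer: []byte{8, 8, 8, 8},
	}

	// Normally taken from the client's OptionParameterRequestList; hard-coded here.
	requested := []byte{byte(dhcp4.OptionRouter), byte(dhcp4.OptionSubnetMask)}

	// Returns only the requested options, in the requested order.
	for _, opt := range offered.SelectOrderOrAll(requested) {
		fmt.Printf("option %d = %v\n", opt.Code, opt.Value)
	}
}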

149
vendor/github.com/d2g/dhcp4/packet.go generated vendored Normal file

@ -0,0 +1,149 @@
package dhcp4
import (
"net"
"time"
)
// A DHCP packet
type Packet []byte
func (p Packet) OpCode() OpCode { return OpCode(p[0]) }
func (p Packet) HType() byte { return p[1] }
func (p Packet) HLen() byte { return p[2] }
func (p Packet) Hops() byte { return p[3] }
func (p Packet) XId() []byte { return p[4:8] }
func (p Packet) Secs() []byte { return p[8:10] } // Never Used?
func (p Packet) Flags() []byte { return p[10:12] }
func (p Packet) CIAddr() net.IP { return net.IP(p[12:16]) }
func (p Packet) YIAddr() net.IP { return net.IP(p[16:20]) }
func (p Packet) SIAddr() net.IP { return net.IP(p[20:24]) }
func (p Packet) GIAddr() net.IP { return net.IP(p[24:28]) }
func (p Packet) CHAddr() net.HardwareAddr {
hLen := p.HLen()
if hLen > 16 { // Prevent chaddr exceeding p boundary
hLen = 16
}
return net.HardwareAddr(p[28 : 28+hLen]) // max endPos 44
}
// 192 bytes of zeros BOOTP legacy
func (p Packet) Cookie() []byte { return p[236:240] }
func (p Packet) Options() []byte {
if len(p) > 240 {
return p[240:]
}
return nil
}
func (p Packet) Broadcast() bool { return p.Flags()[0] > 127 }
func (p Packet) SetBroadcast(broadcast bool) {
if p.Broadcast() != broadcast {
p.Flags()[0] ^= 128
}
}
func (p Packet) SetOpCode(c OpCode) { p[0] = byte(c) }
func (p Packet) SetCHAddr(a net.HardwareAddr) {
copy(p[28:44], a)
p[2] = byte(len(a))
}
func (p Packet) SetHType(hType byte) { p[1] = hType }
func (p Packet) SetCookie(cookie []byte) { copy(p.Cookie(), cookie) }
func (p Packet) SetHops(hops byte) { p[3] = hops }
func (p Packet) SetXId(xId []byte) { copy(p.XId(), xId) }
func (p Packet) SetSecs(secs []byte) { copy(p.Secs(), secs) }
func (p Packet) SetFlags(flags []byte) { copy(p.Flags(), flags) }
func (p Packet) SetCIAddr(ip net.IP) { copy(p.CIAddr(), ip.To4()) }
func (p Packet) SetYIAddr(ip net.IP) { copy(p.YIAddr(), ip.To4()) }
func (p Packet) SetSIAddr(ip net.IP) { copy(p.SIAddr(), ip.To4()) }
func (p Packet) SetGIAddr(ip net.IP) { copy(p.GIAddr(), ip.To4()) }
// Parses the packet's options into an Options map
func (p Packet) ParseOptions() Options {
opts := p.Options()
options := make(Options, 10)
for len(opts) >= 2 && OptionCode(opts[0]) != End {
if OptionCode(opts[0]) == Pad {
opts = opts[1:]
continue
}
size := int(opts[1])
if len(opts) < 2+size {
break
}
options[OptionCode(opts[0])] = opts[2 : 2+size]
opts = opts[2+size:]
}
return options
}
func NewPacket(opCode OpCode) Packet {
p := make(Packet, 241)
p.SetOpCode(opCode)
p.SetHType(1) // Ethernet
p.SetCookie([]byte{99, 130, 83, 99})
p[240] = byte(End)
return p
}
// Appends a DHCP option to the end of a packet
func (p *Packet) AddOption(o OptionCode, value []byte) {
*p = append((*p)[:len(*p)-1], []byte{byte(o), byte(len(value))}...) // Strip off End, Add OptionCode and Length
*p = append(*p, value...) // Add Option Value
*p = append(*p, byte(End)) // Add on new End
}
// Removes all options from packet.
func (p *Packet) StripOptions() {
*p = append((*p)[:240], byte(End))
}
// Creates a request packet that a Client would send to a server.
func RequestPacket(mt MessageType, chAddr net.HardwareAddr, cIAddr net.IP, xId []byte, broadcast bool, options []Option) Packet {
p := NewPacket(BootRequest)
p.SetCHAddr(chAddr)
p.SetXId(xId)
if cIAddr != nil {
p.SetCIAddr(cIAddr)
}
p.SetBroadcast(broadcast)
p.AddOption(OptionDHCPMessageType, []byte{byte(mt)})
for _, o := range options {
p.AddOption(o.Code, o.Value)
}
p.PadToMinSize()
return p
}
// ReplyPacket creates a reply packet that a Server would send to a client.
// It uses the req Packet param to copy across common/necessary fields to
// associate the reply with the request.
func ReplyPacket(req Packet, mt MessageType, serverId, yIAddr net.IP, leaseDuration time.Duration, options []Option) Packet {
p := NewPacket(BootReply)
p.SetXId(req.XId())
p.SetFlags(req.Flags())
p.SetYIAddr(yIAddr)
p.SetGIAddr(req.GIAddr())
p.SetCHAddr(req.CHAddr())
p.SetSecs(req.Secs())
p.AddOption(OptionDHCPMessageType, []byte{byte(mt)})
p.AddOption(OptionServerIdentifier, []byte(serverId))
p.AddOption(OptionIPAddressLeaseTime, OptionsLeaseTime(leaseDuration))
for _, o := range options {
p.AddOption(o.Code, o.Value)
}
p.PadToMinSize()
return p
}
// PadToMinSize pads a packet so that, when sent over UDP, the entire packet
// is 300 bytes (the BOOTP minimum), to be compatible with really old devices.
var padder [272]byte
func (p *Packet) PadToMinSize() {
if n := len(*p); n < 272 {
*p = append(*p, padder[:272-n]...)
}
}
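
A minimal sketch of building and inspecting a packet with the functions above (a sketch, not part of the vendored file); the MAC address, transaction ID, and requested options are illustrative.

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/d2g/dhcp4"
)

func main() {
	mac, err := net.ParseMAC("52:54:00:12:34:56") // illustrative client hardware address
	if err != nil {
		log.Fatal(err)
	}
	xid := []byte{0xde, 0xad, 0xbe, 0xef} // illustrative transaction ID

	// Build a broadcast DHCPDISCOVER, asking the server for a few common options.
	discover := dhcp4.RequestPacket(dhcp4.Discover, mac, nil, xid, true, []dhcp4.Option{
		{Code: dhcp4.OptionParameterRequestList, Value: []byte{
			byte(dhcp4.OptionSubnetMask),
			byte(dhcp4.OptionRouter),
			byte(dhcp4.OptionDomainNameServer),
		}},
	})

	// Round-trip: parse the options back out of the raw packet bytes.
	opts := discover.ParseOptions()
	fmt.Println("message type:", dhcp4.MessageType(opts[dhcp4.OptionDHCPMessageType][0]))
	fmt.Println("length:", len(discover)) // already padded by PadToMinSize inside RequestPacket
}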

354
vendor/github.com/d2g/dhcp4client/LICENSE generated vendored Normal file

@ -0,0 +1,354 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients' rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients'
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party's negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party's ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

8
vendor/github.com/d2g/dhcp4client/README.md generated vendored Normal file
View File

@ -0,0 +1,8 @@
dhcp4client [![GoDoc](https://godoc.org/github.com/d2g/dhcp4client?status.svg)](http://godoc.org/github.com/d2g/dhcp4client) [![Coverage Status](https://coveralls.io/repos/d2g/dhcp4client/badge.svg?branch=HEAD)](https://coveralls.io/r/d2g/dhcp4client?branch=HEAD) [![Codeship Status for d2g/dhcp4client](https://codeship.com/projects/d75d9860-b364-0132-bc79-7e1d8cf367b9/status?branch=master)](https://codeship.com/projects/70187)
===========
DHCP Client
###### Thanks to:
@eyakubovich For AF_PACKET support.
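###### Example:

A minimal sketch of requesting a lease with this client (the MAC address below is a placeholder; the default socket binds UDP port 68, so this typically needs elevated privileges):

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/d2g/dhcp4client"
)

func main() {
	// Placeholder MAC; use the hardware address of the interface you want to configure.
	mac, err := net.ParseMAC("aa:bb:cc:dd:ee:ff")
	if err != nil {
		log.Fatal(err)
	}

	c, err := dhcp4client.New(dhcp4client.HardwareAddr(mac))
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Request runs the full DISCOVER -> OFFER -> REQUEST -> ACK exchange.
	ok, ack, err := c.Request()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", ok, "offered IP:", ack.YIAddr())
}
```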

366
vendor/github.com/d2g/dhcp4client/client.go generated vendored Normal file
View File

@ -0,0 +1,366 @@
package dhcp4client
import (
"bytes"
"crypto/rand"
"net"
"time"
"github.com/d2g/dhcp4"
)
const (
MaxDHCPLen = 576
)
type Client struct {
hardwareAddr net.HardwareAddr //The HardwareAddr to send in the request.
ignoreServers []net.IP //List of Servers to Ignore requests from.
timeout time.Duration //Time before we timeout.
broadcast bool //Set the Bcast flag in BOOTP Flags
connection connection //The Connection Method to use
}
/*
* Abstracts the type of underlying socket used
*/
type connection interface {
Close() error
Write(packet []byte) error
ReadFrom() ([]byte, net.IP, error)
SetReadTimeout(t time.Duration) error
}
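// An illustrative sketch: any implementation of this interface can be supplied
// via the Connection option; for example, the AF_PACKET socket defined in
// pktsock_linux.go ("ifindex" here is assumed to be the index of the network
// interface you want to use):
//
//	sock, err := NewPacketSock(ifindex)
//	if err != nil {
//		// handle the error
//	}
//	c, err := New(Connection(sock))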
func New(options ...func(*Client) error) (*Client, error) {
c := Client{
timeout: time.Second * 10,
broadcast: true,
}
err := c.SetOption(options...)
if err != nil {
return nil, err
}
//If a connection hasn't been set as an option, create the default.
if c.connection == nil {
conn, err := NewInetSock()
if err != nil {
return nil, err
}
c.connection = conn
}
return &c, nil
}
func (c *Client) SetOption(options ...func(*Client) error) error {
for _, opt := range options {
if err := opt(c); err != nil {
return err
}
}
return nil
}
func Timeout(t time.Duration) func(*Client) error {
return func(c *Client) error {
c.timeout = t
return nil
}
}
func IgnoreServers(s []net.IP) func(*Client) error {
return func(c *Client) error {
c.ignoreServers = s
return nil
}
}
func HardwareAddr(h net.HardwareAddr) func(*Client) error {
return func(c *Client) error {
c.hardwareAddr = h
return nil
}
}
func Broadcast(b bool) func(*Client) error {
return func(c *Client) error {
c.broadcast = b
return nil
}
}
func Connection(conn connection) func(*Client) error {
return func(c *Client) error {
c.connection = conn
return nil
}
}
/*
* Close Connections
*/
func (c *Client) Close() error {
if c.connection != nil {
return c.connection.Close()
}
return nil
}
/*
* Send the Discovery Packet to the Broadcast Channel
*/
func (c *Client) SendDiscoverPacket() (dhcp4.Packet, error) {
discoveryPacket := c.DiscoverPacket()
discoveryPacket.PadToMinSize()
return discoveryPacket, c.SendPacket(discoveryPacket)
}
/*
 * Retrieve Offer...
* Wait for the offer for a specific Discovery Packet.
*/
func (c *Client) GetOffer(discoverPacket *dhcp4.Packet) (dhcp4.Packet, error) {
for {
c.connection.SetReadTimeout(c.timeout)
readBuffer, source, err := c.connection.ReadFrom()
if err != nil {
return dhcp4.Packet{}, err
}
offerPacket := dhcp4.Packet(readBuffer)
offerPacketOptions := offerPacket.ParseOptions()
		// Skip offers that come from, or advertise, a server on the ignore list.
		ignored := false
		for _, ignoreServer := range c.ignoreServers {
			if source.Equal(ignoreServer) || offerPacket.SIAddr().Equal(ignoreServer) {
				ignored = true
				break
			}
		}
		if ignored {
			continue
		}
if len(offerPacketOptions[dhcp4.OptionDHCPMessageType]) < 1 || dhcp4.MessageType(offerPacketOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.Offer || !bytes.Equal(discoverPacket.XId(), offerPacket.XId()) {
continue
}
return offerPacket, nil
}
}
/*
* Send Request Based On the offer Received.
*/
func (c *Client) SendRequest(offerPacket *dhcp4.Packet) (dhcp4.Packet, error) {
requestPacket := c.RequestPacket(offerPacket)
requestPacket.PadToMinSize()
return requestPacket, c.SendPacket(requestPacket)
}
/*
 * Retrieve Acknowledgement
 * Wait for the acknowledgement for a specific Request Packet.
*/
func (c *Client) GetAcknowledgement(requestPacket *dhcp4.Packet) (dhcp4.Packet, error) {
for {
c.connection.SetReadTimeout(c.timeout)
readBuffer, source, err := c.connection.ReadFrom()
if err != nil {
return dhcp4.Packet{}, err
}
acknowledgementPacket := dhcp4.Packet(readBuffer)
acknowledgementPacketOptions := acknowledgementPacket.ParseOptions()
		// Skip acknowledgements that come from, or advertise, a server on the ignore list.
		ignored := false
		for _, ignoreServer := range c.ignoreServers {
			if source.Equal(ignoreServer) || acknowledgementPacket.SIAddr().Equal(ignoreServer) {
				ignored = true
				break
			}
		}
		if ignored {
			continue
		}
if !bytes.Equal(requestPacket.XId(), acknowledgementPacket.XId()) || len(acknowledgementPacketOptions[dhcp4.OptionDHCPMessageType]) < 1 || (dhcp4.MessageType(acknowledgementPacketOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.ACK && dhcp4.MessageType(acknowledgementPacketOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.NAK) {
continue
}
return acknowledgementPacket, nil
}
}
/*
* Send a DHCP Packet.
*/
func (c *Client) SendPacket(packet dhcp4.Packet) error {
return c.connection.Write(packet)
}
/*
* Create Discover Packet
*/
func (c *Client) DiscoverPacket() dhcp4.Packet {
messageid := make([]byte, 4)
if _, err := rand.Read(messageid); err != nil {
panic(err)
}
packet := dhcp4.NewPacket(dhcp4.BootRequest)
packet.SetCHAddr(c.hardwareAddr)
packet.SetXId(messageid)
packet.SetBroadcast(c.broadcast)
packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Discover)})
//packet.PadToMinSize()
return packet
}
/*
* Create Request Packet
*/
func (c *Client) RequestPacket(offerPacket *dhcp4.Packet) dhcp4.Packet {
offerOptions := offerPacket.ParseOptions()
packet := dhcp4.NewPacket(dhcp4.BootRequest)
packet.SetCHAddr(c.hardwareAddr)
packet.SetXId(offerPacket.XId())
packet.SetCIAddr(offerPacket.CIAddr())
packet.SetSIAddr(offerPacket.SIAddr())
packet.SetBroadcast(c.broadcast)
packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Request)})
packet.AddOption(dhcp4.OptionRequestedIPAddress, (offerPacket.YIAddr()).To4())
packet.AddOption(dhcp4.OptionServerIdentifier, offerOptions[dhcp4.OptionServerIdentifier])
//packet.PadToMinSize()
return packet
}
/*
* Create Request Packet For a Renew
*/
func (c *Client) RenewalRequestPacket(acknowledgement *dhcp4.Packet) dhcp4.Packet {
messageid := make([]byte, 4)
if _, err := rand.Read(messageid); err != nil {
panic(err)
}
acknowledgementOptions := acknowledgement.ParseOptions()
packet := dhcp4.NewPacket(dhcp4.BootRequest)
packet.SetCHAddr(acknowledgement.CHAddr())
packet.SetXId(messageid)
packet.SetCIAddr(acknowledgement.YIAddr())
packet.SetSIAddr(acknowledgement.SIAddr())
packet.SetBroadcast(c.broadcast)
packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Request)})
packet.AddOption(dhcp4.OptionRequestedIPAddress, (acknowledgement.YIAddr()).To4())
packet.AddOption(dhcp4.OptionServerIdentifier, acknowledgementOptions[dhcp4.OptionServerIdentifier])
//packet.PadToMinSize()
return packet
}
/*
* Create Release Packet For a Release
*/
func (c *Client) ReleasePacket(acknowledgement *dhcp4.Packet) dhcp4.Packet {
messageid := make([]byte, 4)
if _, err := rand.Read(messageid); err != nil {
panic(err)
}
acknowledgementOptions := acknowledgement.ParseOptions()
packet := dhcp4.NewPacket(dhcp4.BootRequest)
packet.SetCHAddr(acknowledgement.CHAddr())
packet.SetXId(messageid)
packet.SetCIAddr(acknowledgement.YIAddr())
packet.AddOption(dhcp4.OptionDHCPMessageType, []byte{byte(dhcp4.Release)})
packet.AddOption(dhcp4.OptionServerIdentifier, acknowledgementOptions[dhcp4.OptionServerIdentifier])
//packet.PadToMinSize()
return packet
}
/*
* Lets do a Full DHCP Request.
*/
func (c *Client) Request() (bool, dhcp4.Packet, error) {
discoveryPacket, err := c.SendDiscoverPacket()
if err != nil {
return false, discoveryPacket, err
}
offerPacket, err := c.GetOffer(&discoveryPacket)
if err != nil {
return false, offerPacket, err
}
requestPacket, err := c.SendRequest(&offerPacket)
if err != nil {
return false, requestPacket, err
}
acknowledgement, err := c.GetAcknowledgement(&requestPacket)
if err != nil {
return false, acknowledgement, err
}
acknowledgementOptions := acknowledgement.ParseOptions()
if dhcp4.MessageType(acknowledgementOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.ACK {
return false, acknowledgement, nil
}
return true, acknowledgement, nil
}
/*
 * Renew a lease based on the Acknowledgement Packet.
 * Returns Successful, the Acknowledgement Packet, and any Errors.
*/
func (c *Client) Renew(acknowledgement dhcp4.Packet) (bool, dhcp4.Packet, error) {
renewRequest := c.RenewalRequestPacket(&acknowledgement)
renewRequest.PadToMinSize()
err := c.SendPacket(renewRequest)
if err != nil {
return false, renewRequest, err
}
newAcknowledgement, err := c.GetAcknowledgement(&renewRequest)
if err != nil {
return false, newAcknowledgement, err
}
newAcknowledgementOptions := newAcknowledgement.ParseOptions()
if dhcp4.MessageType(newAcknowledgementOptions[dhcp4.OptionDHCPMessageType][0]) != dhcp4.ACK {
return false, newAcknowledgement, nil
}
return true, newAcknowledgement, nil
}
/*
 * Release a lease based on the Acknowledgement Packet.
 * Returns any Errors.
*/
func (c *Client) Release(acknowledgement dhcp4.Packet) error {
release := c.ReleasePacket(&acknowledgement)
release.PadToMinSize()
return c.SendPacket(release)
}
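// An illustrative lease lifecycle built from the methods above (error handling
// elided; "c" is a *Client obtained from New):
//
//	ok, ack, err := c.Request() // DISCOVER/OFFER/REQUEST/ACK exchange
//	ok, ack, err = c.Renew(ack) // later, refresh the lease using the last ACK
//	err = c.Release(ack)        // finally, hand the lease back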

75
vendor/github.com/d2g/dhcp4client/inetsock.go generated vendored Normal file
View File

@ -0,0 +1,75 @@
package dhcp4client
import (
"net"
"time"
)
type inetSock struct {
*net.UDPConn
laddr net.UDPAddr
raddr net.UDPAddr
}
func NewInetSock(options ...func(*inetSock) error) (*inetSock, error) {
c := &inetSock{
laddr: net.UDPAddr{IP: net.IPv4(0, 0, 0, 0), Port: 68},
raddr: net.UDPAddr{IP: net.IPv4bcast, Port: 67},
}
err := c.setOption(options...)
if err != nil {
return nil, err
}
conn, err := net.ListenUDP("udp4", &c.laddr)
if err != nil {
return nil, err
}
c.UDPConn = conn
return c, err
}
func (c *inetSock) setOption(options ...func(*inetSock) error) error {
for _, opt := range options {
if err := opt(c); err != nil {
return err
}
}
return nil
}
func SetLocalAddr(l net.UDPAddr) func(*inetSock) error {
return func(c *inetSock) error {
c.laddr = l
return nil
}
}
func SetRemoteAddr(r net.UDPAddr) func(*inetSock) error {
return func(c *inetSock) error {
c.raddr = r
return nil
}
}
func (c *inetSock) Write(packet []byte) error {
_, err := c.WriteToUDP(packet, &c.raddr)
return err
}
func (c *inetSock) ReadFrom() ([]byte, net.IP, error) {
readBuffer := make([]byte, MaxDHCPLen)
n, source, err := c.ReadFromUDP(readBuffer)
if source != nil {
return readBuffer[:n], source.IP, err
} else {
return readBuffer[:n], net.IP{}, err
}
}
func (c *inetSock) SetReadTimeout(t time.Duration) error {
return c.SetReadDeadline(time.Now().Add(t))
}

10
vendor/github.com/d2g/dhcp4client/init.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
package dhcp4client
import (
"math/rand"
"time"
)
func init() {
rand.Seed(time.Now().Unix())
}

147
vendor/github.com/d2g/dhcp4client/pktsock_linux.go generated vendored Normal file
View File

@ -0,0 +1,147 @@
package dhcp4client
import (
"crypto/rand"
"encoding/binary"
"net"
"time"
"golang.org/x/sys/unix"
)
const (
minIPHdrLen = 20
maxIPHdrLen = 60
udpHdrLen = 8
ip4Ver = 0x40
ttl = 16
srcPort = 68
dstPort = 67
)
var (
bcastMAC = []byte{255, 255, 255, 255, 255, 255}
)
// abstracts AF_PACKET
type packetSock struct {
fd int
ifindex int
}
func NewPacketSock(ifindex int) (*packetSock, error) {
fd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_DGRAM, int(swap16(unix.ETH_P_IP)))
if err != nil {
return nil, err
}
addr := unix.SockaddrLinklayer{
Ifindex: ifindex,
Protocol: swap16(unix.ETH_P_IP),
}
if err = unix.Bind(fd, &addr); err != nil {
return nil, err
}
return &packetSock{
fd: fd,
ifindex: ifindex,
}, nil
}
func (pc *packetSock) Close() error {
return unix.Close(pc.fd)
}
func (pc *packetSock) Write(packet []byte) error {
lladdr := unix.SockaddrLinklayer{
Ifindex: pc.ifindex,
Protocol: swap16(unix.ETH_P_IP),
Halen: uint8(len(bcastMAC)),
}
copy(lladdr.Addr[:], bcastMAC)
pkt := make([]byte, minIPHdrLen+udpHdrLen+len(packet))
fillIPHdr(pkt[0:minIPHdrLen], udpHdrLen+uint16(len(packet)))
fillUDPHdr(pkt[minIPHdrLen:minIPHdrLen+udpHdrLen], uint16(len(packet)))
// payload
copy(pkt[minIPHdrLen+udpHdrLen:len(pkt)], packet)
return unix.Sendto(pc.fd, pkt, 0, &lladdr)
}
func (pc *packetSock) ReadFrom() ([]byte, net.IP, error) {
pkt := make([]byte, maxIPHdrLen+udpHdrLen+MaxDHCPLen)
n, _, err := unix.Recvfrom(pc.fd, pkt, 0)
if err != nil {
return nil, nil, err
}
// IP hdr len
ihl := int(pkt[0]&0x0F) * 4
// Source IP address
src := net.IP(pkt[12:16])
return pkt[ihl+udpHdrLen : n], src, nil
}
func (pc *packetSock) SetReadTimeout(t time.Duration) error {
tv := unix.NsecToTimeval(t.Nanoseconds())
return unix.SetsockoptTimeval(pc.fd, unix.SOL_SOCKET, unix.SO_RCVTIMEO, &tv)
}
// computes the 1's complement checksum of p and writes it into csum
func chksum(p []byte, csum []byte) {
cklen := len(p)
s := uint32(0)
for i := 0; i < (cklen - 1); i += 2 {
s += uint32(p[i+1])<<8 | uint32(p[i])
}
if cklen&1 == 1 {
s += uint32(p[cklen-1])
}
s = (s >> 16) + (s & 0xffff)
s = s + (s >> 16)
s = ^s
csum[0] = uint8(s & 0xff)
csum[1] = uint8(s >> 8)
}
func fillIPHdr(hdr []byte, payloadLen uint16) {
// version + IHL
hdr[0] = ip4Ver | (minIPHdrLen / 4)
// total length
binary.BigEndian.PutUint16(hdr[2:4], uint16(len(hdr))+payloadLen)
// identification
if _, err := rand.Read(hdr[4:5]); err != nil {
panic(err)
}
// TTL
	hdr[8] = ttl
// Protocol
hdr[9] = unix.IPPROTO_UDP
// dst IP
copy(hdr[16:20], net.IPv4bcast.To4())
// compute IP hdr checksum
chksum(hdr[0:len(hdr)], hdr[10:12])
}
func fillUDPHdr(hdr []byte, payloadLen uint16) {
// src port
binary.BigEndian.PutUint16(hdr[0:2], srcPort)
// dest port
binary.BigEndian.PutUint16(hdr[2:4], dstPort)
// length
binary.BigEndian.PutUint16(hdr[4:6], udpHdrLen+payloadLen)
}
func swap16(x uint16) uint16 {
var b [2]byte
binary.BigEndian.PutUint16(b[:], x)
return binary.LittleEndian.Uint16(b[:])
}

4
vendor/github.com/onsi/ginkgo/.gitignore generated vendored Normal file
View File

@ -0,0 +1,4 @@
.DS_Store
TODO
tmp/**/*
*.coverprofile

15
vendor/github.com/onsi/ginkgo/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,15 @@
language: go
go:
- 1.3
- 1.4
- 1.5
- tip
install:
- go get -v -t ./...
- go get golang.org/x/tools/cmd/cover
- go get github.com/onsi/gomega
- go install github.com/onsi/ginkgo/ginkgo
- export PATH=$PATH:$HOME/gopath/bin
script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace

136
vendor/github.com/onsi/ginkgo/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,136 @@
## HEAD
Improvements:
- `Skip(message)` can be used to skip the current test.
- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
Bug Fixes:
- Ginkgo tests now fail when you `panic(nil)` (#167)
## 1.2.0 5/31/2015
Improvements
- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
## 1.2.0-beta
Ginkgo now requires Go 1.4+
Improvements:
- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
- Improved focus behavior. Now, this:
```golang
FDescribe("Some describe", func() {
It("A", func() {})
FIt("B", func() {})
})
```
will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
- Improved output when an error occurs in a setup or teardown block.
- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
- Add support for precompiled tests:
- `ginkgo build <path-to-package>` will now compile the package, producing a file named `package.test`
- The compiled `package.test` file can be run directly. This runs the tests in series.
- To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
- `ginkgo -notify` now works on Linux
Bug Fixes:
- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
- Fix tempfile leak when running in parallel
- Fix incorrect failure message when a panic occurs during a parallel test run
- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
- Be more consistent about handling SIGTERM as well as SIGINT
- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
## 1.1.0 (8/2/2014)
No changes, just dropping the beta.
## 1.1.0-beta (7/22/2014)
New Features:
- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
- `ginkgo --failFast` aborts the test suite after the first failure.
- `ginkgo generate file_1 file_2` can take multiple file arguments.
- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
Improvements:
- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
- `ginkgo --untilItFails` no longer recompiles between attempts.
- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
Bug Fixes:
- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
## 1.0.0 (5/24/2014)
New Features:
- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
Improvements:
- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm to open the file in your text editor.
- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
Bug Fixes:
- `-cover` now generates a correctly combined coverprofile when running in parallel with multiple `-node`s.
- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
- Fix all remaining race conditions in Ginkgo's test suite.
## 1.0.0-beta (4/14/2014)
Breaking changes:
- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
- Modified the Reporter interface
- `watch` is now a subcommand, not a flag.
DSL changes:
- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
CLI changes:
- `watch` is now a subcommand, not a flag
- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
- Additional arguments can be passed to specs. Pass them after the `--` separator
- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
Misc:
- Start using semantic versioning
- Start maintaining changelog
Major refactor:
- Pull out Ginkgo's internal to `internal`
- Rename `example` everywhere to `spec`
- Much more!

20
vendor/github.com/onsi/ginkgo/LICENSE generated vendored Normal file
View File

@ -0,0 +1,20 @@
Copyright (c) 2013-2014 Onsi Fakhouri
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

115
vendor/github.com/onsi/ginkgo/README.md generated vendored Normal file
View File

@ -0,0 +1,115 @@
![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
[![Build Status](https://travis-ci.org/onsi/ginkgo.png)](https://travis-ci.org/onsi/ginkgo)
Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
## Feature List
- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
- Structure your BDD-style tests expressively:
- Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
- [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
- [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
- [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
- [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
- A comprehensive test runner that lets you:
- Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
- [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
- Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
- Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
- `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in realtime
- `ginkgo -cover` runs your tests using Golang's code coverage tool
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
- `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
- `ginkgo -r` runs all tests suites under the current directory
- `ginkgo -v` prints out identifying information for each test just before it runs
And much more: run `ginkgo help` for details!
The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
- A modular architecture that lets you easily:
- Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
- [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
## Set Me Up!
You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
```bash
go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
go get github.com/onsi/gomega # fetches the matcher library
cd path/to/package/you/want/to/test
ginkgo bootstrap # set up a new ginkgo suite
ginkgo generate # will create a sample test file. edit this file and add your tests then...
go test # to run your tests
ginkgo # also runs your tests
```
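If you'd like a feel for the shape of a suite before running the generators, here is a rough sketch (the `cart_test` package, suite description, and spec contents are made up; `ginkgo bootstrap` and `ginkgo generate` produce equivalent scaffolding for you):

```go
package cart_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestCart(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Cart Suite")
}

var _ = Describe("Cart", func() {
	var items []string

	BeforeEach(func() {
		items = []string{"apple"}
	})

	It("starts with a single item", func() {
		Expect(items).To(HaveLen(1))
	})
})
```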
## I'm new to Go: What are my testing options?
Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
With that said, it's great to know what your options are :)
### What Golang gives you out of the box
Testing is a first-class citizen in Golang; however, Go's built-in testing primitives are somewhat limited: the [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
### Matcher libraries for Golang's XUnit style tests
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
- [testify](https://github.com/stretchr/testify)
- [gocheck](http://labix.org/gocheck)
You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
### BDD style testing frameworks
There are a handful of BDD-style testing frameworks written for Golang. Here are a few:
- [Ginkgo](https://github.com/onsi/ginkgo) ;)
- [GoConvey](https://github.com/smartystreets/goconvey)
- [Goblin](https://github.com/franela/goblin)
- [Mao](https://github.com/azer/mao)
- [Zen](https://github.com/pranavraja/zen)
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries.
Go explore!
## License
Ginkgo is MIT-Licensed

170
vendor/github.com/onsi/ginkgo/config/config.go generated vendored Normal file
View File

@ -0,0 +1,170 @@
/*
Ginkgo accepts a number of configuration options.
These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
You can also learn more via
ginkgo help
or (I kid you not):
go test -asdf
*/
package config
import (
"flag"
"time"
"fmt"
)
const VERSION = "1.2.0"
type GinkgoConfigType struct {
RandomSeed int64
RandomizeAllSpecs bool
FocusString string
SkipString string
SkipMeasurements bool
FailOnPending bool
FailFast bool
EmitSpecProgress bool
DryRun bool
ParallelNode int
ParallelTotal int
SyncHost string
StreamHost string
}
var GinkgoConfig = GinkgoConfigType{}
type DefaultReporterConfigType struct {
NoColor bool
SlowSpecThreshold float64
NoisyPendings bool
Succinct bool
Verbose bool
FullTrace bool
}
var DefaultReporterConfig = DefaultReporterConfigType{}
func processPrefix(prefix string) string {
if prefix != "" {
prefix = prefix + "."
}
return prefix
}
func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
prefix = processPrefix(prefix)
flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe/Context groups.")
flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")
flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
if includeParallelFlags {
flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
}
flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter (default: 5 seconds).")
flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter prints out all specs as they begin.")
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
}
func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
prefix = processPrefix(prefix)
result := make([]string, 0)
if ginkgo.RandomSeed > 0 {
result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
}
if ginkgo.RandomizeAllSpecs {
result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
}
if ginkgo.SkipMeasurements {
result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
}
if ginkgo.FailOnPending {
result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
}
if ginkgo.FailFast {
result = append(result, fmt.Sprintf("--%sfailFast", prefix))
}
if ginkgo.DryRun {
result = append(result, fmt.Sprintf("--%sdryRun", prefix))
}
if ginkgo.FocusString != "" {
result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
}
if ginkgo.SkipString != "" {
result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
}
if ginkgo.EmitSpecProgress {
result = append(result, fmt.Sprintf("--%sprogress", prefix))
}
if ginkgo.ParallelNode != 0 {
result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
}
if ginkgo.ParallelTotal != 0 {
result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
}
if ginkgo.StreamHost != "" {
result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
}
if ginkgo.SyncHost != "" {
result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
}
if reporter.NoColor {
result = append(result, fmt.Sprintf("--%snoColor", prefix))
}
if reporter.SlowSpecThreshold > 0 {
result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
}
if !reporter.NoisyPendings {
result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
}
if reporter.Verbose {
result = append(result, fmt.Sprintf("--%sv", prefix))
}
if reporter.Succinct {
result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
}
if reporter.FullTrace {
result = append(result, fmt.Sprintf("--%strace", prefix))
}
return result
}

536
vendor/github.com/onsi/ginkgo/ginkgo_dsl.go generated vendored Normal file
View File

@ -0,0 +1,536 @@
/*
Ginkgo is a BDD-style testing framework for Golang
The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/
Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)
Ginkgo on Github: http://github.com/onsi/ginkgo
Ginkgo is MIT-Licensed
*/
package ginkgo
import (
"flag"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/internal/codelocation"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/internal/remote"
"github.com/onsi/ginkgo/internal/suite"
"github.com/onsi/ginkgo/internal/testingtproxy"
"github.com/onsi/ginkgo/internal/writer"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/reporters/stenographer"
"github.com/onsi/ginkgo/types"
)
const GINKGO_VERSION = config.VERSION
const GINKGO_PANIC = `
Your test failed.
Ginkgo panics to prevent subsequent assertions from running.
Normally Ginkgo rescues this panic so you shouldn't see it.
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
To circumvent this, you should call
defer GinkgoRecover()
at the top of the goroutine that caused this panic.
`
const defaultTimeout = 1
var globalSuite *suite.Suite
var globalFailer *failer.Failer
func init() {
config.Flags(flag.CommandLine, "ginkgo", true)
GinkgoWriter = writer.New(os.Stdout)
globalFailer = failer.New()
globalSuite = suite.New(globalFailer)
}
//GinkgoWriter implements an io.Writer
//When running in verbose mode any writes to GinkgoWriter will be immediately printed
//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
//only if the current test fails.
var GinkgoWriter io.Writer
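//An illustrative use ("n" is a made-up value from your own test code):
//
//	fmt.Fprintf(GinkgoWriter, "fetched %d records\n", n)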
//The interface by which Ginkgo receives *testing.T
type GinkgoTestingT interface {
Fail()
}
//GinkgoParallelNode returns the parallel node number for the current ginkgo process
//The node number is 1-indexed
func GinkgoParallelNode() int {
return config.GinkgoConfig.ParallelNode
}
//Some matcher libraries or legacy codebases require a *testing.T
//GinkgoT implements an interface analogous to *testing.T and can be used if
//the library in question accepts *testing.T through an interface
//
// For example, with testify:
// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
//
// Or with gomock:
// gomock.NewController(GinkgoT())
//
// GinkgoT() takes an optional offset argument that can be used to get the
// correct line number associated with the failure.
func GinkgoT(optionalOffset ...int) GinkgoTInterface {
offset := 3
if len(optionalOffset) > 0 {
offset = optionalOffset[0]
}
return testingtproxy.New(GinkgoWriter, Fail, offset)
}
//The interface returned by GinkgoT(). This covers most of the methods
//in the testing package's T.
type GinkgoTInterface interface {
Fail()
Error(args ...interface{})
Errorf(format string, args ...interface{})
FailNow()
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
Log(args ...interface{})
Logf(format string, args ...interface{})
Failed() bool
Parallel()
Skip(args ...interface{})
Skipf(format string, args ...interface{})
SkipNow()
Skipped() bool
}
//Custom Ginkgo test reporters must implement the Reporter interface.
//
//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
//and a SpecSummary just before a spec begins and just after a spec ends
type Reporter reporters.Reporter
//Asynchronous specs are given a channel of the Done type. You must close or write to the channel
//to tell Ginkgo that your async test is done.
type Done chan<- interface{}
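//An illustrative asynchronous spec (the body and the 5-second timeout are made up):
//
//	It("eventually finishes background work", func(done Done) {
//		go func() {
//			defer GinkgoRecover()
//			// do work and make assertions here...
//			close(done)
//		}()
//	}, 5.0)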
//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
// FullTestText: a concatenation of ComponentTexts and the TestText
// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
// TestText: the text in the actual It or Measure node
// IsMeasurement: true if the current test is a measurement
// FileName: the name of the file containing the current test
// LineNumber: the line number for the current test
// Failed: if the current test has failed, this will be true (useful in an AfterEach)
type GinkgoTestDescription struct {
FullTestText string
ComponentTexts []string
TestText string
IsMeasurement bool
FileName string
LineNumber int
Failed bool
}
//CurrentGinkgoTestDescription returns information about the current running test.
func CurrentGinkgoTestDescription() GinkgoTestDescription {
summary, ok := globalSuite.CurrentRunningSpecSummary()
if !ok {
return GinkgoTestDescription{}
}
subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
return GinkgoTestDescription{
ComponentTexts: summary.ComponentTexts[1:],
FullTestText: strings.Join(summary.ComponentTexts[1:], " "),
TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1],
IsMeasurement: summary.IsMeasurement,
FileName: subjectCodeLocation.FileName,
LineNumber: subjectCodeLocation.LineNumber,
Failed: summary.HasFailureState(),
}
}
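//An illustrative sketch (AfterEach is part of the DSL; dumpLogs is a made-up helper):
//
//	AfterEach(func() {
//		if CurrentGinkgoTestDescription().Failed {
//			dumpLogs()
//		}
//	})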
//Measurement tests receive a Benchmarker.
//
//You use the Time() function to time how long the passed in body function takes to run
//You use the RecordValue() function to track arbitrary numerical measurements.
//The optional info argument is passed to the test reporter and can be used to
// provide the measurement data to a custom reporter with context.
//
//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
type Benchmarker interface {
Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
RecordValue(name string, value float64, info ...interface{})
}
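//An illustrative Measure spec (doSomethingExpensive and allocationsMade are made up;
//10 is an arbitrary sample count):
//
//	Measure("it does something expensive efficiently", func(b Benchmarker) {
//		b.Time("runtime", func() {
//			doSomethingExpensive()
//		})
//		b.RecordValue("allocations", float64(allocationsMade))
//	}, 10)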
//RunSpecs is the entry point for the Ginkgo test runner.
//You must call this within a Golang testing TestX(t *testing.T) function.
//
//To bootstrap a test suite you can use the Ginkgo CLI:
//
// ginkgo bootstrap
func RunSpecs(t GinkgoTestingT, description string) bool {
specReporters := []Reporter{buildDefaultReporter()}
return RunSpecsWithCustomReporters(t, description, specReporters)
}
//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
//RunSpecs() with this method.
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...)
return RunSpecsWithCustomReporters(t, description, specReporters)
}
//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
//RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter
func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
writer := GinkgoWriter.(*writer.Writer)
writer.SetStream(config.DefaultReporterConfig.Verbose)
reporters := make([]reporters.Reporter, len(specReporters))
for i, reporter := range specReporters {
reporters[i] = reporter
}
passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
if passed && hasFocusedTests {
fmt.Println("PASS | FOCUSED")
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
}
return passed
}
func buildDefaultReporter() Reporter {
remoteReportingServer := config.GinkgoConfig.StreamHost
if remoteReportingServer == "" {
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
} else {
return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor())
}
}
//Skip notifies Ginkgo that the current spec should be skipped.
func Skip(message string, callerSkip ...int) {
skip := 0
if len(callerSkip) > 0 {
skip = callerSkip[0]
}
globalFailer.Skip(message, codelocation.New(skip+1))
panic(GINKGO_PANIC)
}
//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
func Fail(message string, callerSkip ...int) {
skip := 0
if len(callerSkip) > 0 {
skip = callerSkip[0]
}
globalFailer.Fail(message, codelocation.New(skip+1))
panic(GINKGO_PANIC)
}
//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
//Since Gomega assertions call Fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
//calls out to Gomega
//
//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
//further assertions from running. This panic must be recovered. Ginkgo does this for you
//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
//
//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
func GinkgoRecover() {
e := recover()
if e != nil {
globalFailer.Panic(codelocation.New(1), e)
}
}
//Describe blocks allow you to organize your specs. A Describe block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts.
func Describe(text string, body func()) bool {
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
return true
}
//You can focus the tests within a describe block using FDescribe
func FDescribe(text string, body func()) bool {
globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
return true
}
//You can mark the tests within a describe block as pending using PDescribe
func PDescribe(text string, body func()) bool {
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
return true
}
//You can mark the tests within a describe block as pending using XDescribe
func XDescribe(text string, body func()) bool {
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
return true
}
//Context blocks allow you to organize your specs. A Context block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts.
func Context(text string, body func()) bool {
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
return true
}
//You can focus the tests within a describe block using FContext
func FContext(text string, body func()) bool {
globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
return true
}
//You can mark the tests within a describe block as pending using PContext
func PContext(text string, body func()) bool {
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
return true
}
//You can mark the tests within a describe block as pending using XContext
func XContext(text string, body func()) bool {
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
return true
}
//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
//within an It block.
//
//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
func It(text string, body interface{}, timeout ...float64) bool {
globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
return true
}
//You can focus individual Its using FIt
func FIt(text string, body interface{}, timeout ...float64) bool {
globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
return true
}
//You can mark Its as pending using PIt
func PIt(text string, _ ...interface{}) bool {
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
//You can mark Its as pending using XIt
func XIt(text string, _ ...interface{}) bool {
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
//By allows you to better document large Its.
//
//Generally you should try to keep your Its short and to the point. This is not always possible, however,
//especially in the context of integration tests that capture a particular workflow.
//
//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...)
//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
func By(text string, callbacks ...func()) {
preamble := "\x1b[1mSTEP\x1b[0m"
if config.DefaultReporterConfig.NoColor {
preamble = "STEP"
}
fmt.Fprintln(GinkgoWriter, preamble+": "+text)
if len(callbacks) == 1 {
callbacks[0]()
}
if len(callbacks) > 1 {
panic("just one callback per By, please")
}
}
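//An illustrative long-form It documented with By (cart, item, and pay are made up):
//
//	It("completes a checkout", func() {
//		By("adding an item to the cart")
//		cart.Add(item)
//
//		By("paying", func() {
//			Expect(pay(cart)).To(Succeed())
//		})
//	})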
//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
//and accumulate metrics provided to the Benchmarker by the body function.
//
//The body function must have the signature:
// func(b Benchmarker)
func Measure(text string, body interface{}, samples int) bool {
globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
return true
}
//You can focus individual Measures using FMeasure
func FMeasure(text string, body interface{}, samples int) bool {
globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
return true
}
//You can mark Measurements as pending using PMeasure
func PMeasure(text string, _ ...interface{}) bool {
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
//You can mark Measurements as pending using XMeasure
func XMeasure(text string, _ ...interface{}) bool {
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
//parallel node process will call BeforeSuite.
//
//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
//
//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
func BeforeSuite(body interface{}, timeout ...float64) bool {
globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
return true
}
//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
//
//When running in parallel, each parallel node process will call AfterSuite.
//
//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
//
//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
func AfterSuite(body interface{}, timeout ...float64) bool {
globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
return true
}
//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
//nodes when running tests in parallel. For example, say you have a shared database of which you can start only one instance and
//which must be used by all of your tests. When running in parallel, only one node should set up the database and all other nodes should wait
//until that node is done before running.
//
//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
//to the second function (on all the other nodes).
//
//The functions have the following signatures. The first function (which only runs on node 1) has the signature:
//
// func() []byte
//
//or, to run asynchronously:
//
// func(done Done) []byte
//
//The byte array returned by the first function is then passed to the second function, which has the signature:
//
// func(data []byte)
//
//or, to run asynchronously:
//
// func(data []byte, done Done)
//
//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
//
// var dbClient db.Client
// var dbRunner db.Runner
//
// var _ = SynchronizedBeforeSuite(func() []byte {
// dbRunner = db.NewRunner()
// err := dbRunner.Start()
// Ω(err).ShouldNot(HaveOccurred())
// return []byte(dbRunner.URL)
// }, func(data []byte) {
// dbClient = db.NewClient()
// err := dbClient.Connect(string(data))
// Ω(err).ShouldNot(HaveOccurred())
// })
func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
globalSuite.SetSynchronizedBeforeSuiteNode(
node1Body,
allNodesBody,
codelocation.New(1),
parseTimeout(timeout...),
)
return true
}
//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
//external singleton resources shared across nodes when running tests in parallel.
//
//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1
//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until
//all other nodes are finished.
//
//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
//
//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database
//only after all nodes have finished:
//
// var _ = SynchronizedAfterSuite(func() {
// dbClient.Cleanup()
// }, func() {
// dbRunner.Stop()
// })
func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
globalSuite.SetSynchronizedAfterSuiteNode(
allNodesBody,
node1Body,
codelocation.New(1),
parseTimeout(timeout...),
)
return true
}
//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested
//Describe and Context blocks the outermost BeforeEach blocks are run first.
//
//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
//a Done channel
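//
//An illustrative sketch of the ordering:
//
//	Describe("outer", func() {
//		BeforeEach(func() { /* runs first  */ })
//		AfterEach(func() { /* runs second */ })
//
//		Context("inner", func() {
//			BeforeEach(func() { /* runs second */ })
//			AfterEach(func() { /* runs first  */ })
//
//			It("runs after both BeforeEaches and before both AfterEaches", func() {})
//		})
//	})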
func BeforeEach(body interface{}, timeout ...float64) bool {
globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
return true
}
//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details,
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
//
//Like It blocks, JustBeforeEach blocks can be made asynchronous by providing a body function that accepts
//a Done channel
func JustBeforeEach(body interface{}, timeout ...float64) bool {
globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
return true
}
//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
//Describe and Context blocks the innermost AfterEach blocks are run first.
//
//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
//a Done channel
func AfterEach(body interface{}, timeout ...float64) bool {
globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
return true
}
func parseTimeout(timeout ...float64) time.Duration {
if len(timeout) == 0 {
return time.Duration(defaultTimeout * int64(time.Second))
} else {
return time.Duration(timeout[0] * float64(time.Second))
}
}


@ -0,0 +1,32 @@
package codelocation
import (
"regexp"
"runtime"
"runtime/debug"
"strings"
"github.com/onsi/ginkgo/types"
)
func New(skip int) types.CodeLocation {
_, file, line, _ := runtime.Caller(skip + 1)
stackTrace := PruneStack(string(debug.Stack()), skip)
return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
}
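//PruneStack trims a stack trace produced by debug.Stack(): it drops the first 2*(skip+1) lines and then
//removes pairs of lines whose first line matches Ginkgo-internal or Go runtime/testing paths, keeping only
//frames that belong to the calling test code.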
func PruneStack(fullStackTrace string, skip int) string {
stack := strings.Split(fullStackTrace, "\n")
if len(stack) > 2*(skip+1) {
stack = stack[2*(skip+1):]
}
prunedStack := []string{}
re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
for i := 0; i < len(stack)/2; i++ {
if !re.Match([]byte(stack[i*2])) {
prunedStack = append(prunedStack, stack[i*2])
prunedStack = append(prunedStack, stack[i*2+1])
}
}
return strings.Join(prunedStack, "\n")
}


@ -0,0 +1,151 @@
package containernode
import (
"math/rand"
"sort"
"github.com/onsi/ginkgo/internal/leafnodes"
"github.com/onsi/ginkgo/types"
)
type subjectOrContainerNode struct {
containerNode *ContainerNode
subjectNode leafnodes.SubjectNode
}
func (n subjectOrContainerNode) text() string {
if n.containerNode != nil {
return n.containerNode.Text()
} else {
return n.subjectNode.Text()
}
}
type CollatedNodes struct {
Containers []*ContainerNode
Subject leafnodes.SubjectNode
}
type ContainerNode struct {
text string
flag types.FlagType
codeLocation types.CodeLocation
setupNodes []leafnodes.BasicNode
subjectAndContainerNodes []subjectOrContainerNode
}
func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
return &ContainerNode{
text: text,
flag: flag,
codeLocation: codeLocation,
}
}
func (container *ContainerNode) Shuffle(r *rand.Rand) {
sort.Sort(container)
permutation := r.Perm(len(container.subjectAndContainerNodes))
shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
for i, j := range permutation {
shuffledNodes[i] = container.subjectAndContainerNodes[j]
}
container.subjectAndContainerNodes = shuffledNodes
}
func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
if node.flag == types.FlagTypePending {
return false
}
shouldUnfocus := false
for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
if subjectOrContainerNode.containerNode != nil {
shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
} else {
shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
}
}
if shouldUnfocus {
if node.flag == types.FlagTypeFocused {
node.flag = types.FlagTypeNone
}
return true
}
return node.flag == types.FlagTypeFocused
}
func (node *ContainerNode) Collate() []CollatedNodes {
return node.collate([]*ContainerNode{})
}
func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
collated := make([]CollatedNodes, 0)
containers := make([]*ContainerNode, len(enclosingContainers))
copy(containers, enclosingContainers)
containers = append(containers, node)
for _, subjectOrContainer := range node.subjectAndContainerNodes {
if subjectOrContainer.containerNode != nil {
collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
} else {
collated = append(collated, CollatedNodes{
Containers: containers,
Subject: subjectOrContainer.subjectNode,
})
}
}
return collated
}
func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
}
func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
}
func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
node.setupNodes = append(node.setupNodes, setupNode)
}
func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
nodes := []leafnodes.BasicNode{}
for _, setupNode := range node.setupNodes {
if setupNode.Type() == nodeType {
nodes = append(nodes, setupNode)
}
}
return nodes
}
func (node *ContainerNode) Text() string {
return node.text
}
func (node *ContainerNode) CodeLocation() types.CodeLocation {
return node.codeLocation
}
func (node *ContainerNode) Flag() types.FlagType {
return node.flag
}
//sort.Interface
func (node *ContainerNode) Len() int {
return len(node.subjectAndContainerNodes)
}
func (node *ContainerNode) Less(i, j int) bool {
return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
}
func (node *ContainerNode) Swap(i, j int) {
node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
}


@ -0,0 +1,92 @@
package failer
import (
"fmt"
"sync"
"github.com/onsi/ginkgo/types"
)
type Failer struct {
lock *sync.Mutex
failure types.SpecFailure
state types.SpecState
}
func New() *Failer {
return &Failer{
lock: &sync.Mutex{},
state: types.SpecStatePassed,
}
}
func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStatePanicked
f.failure = types.SpecFailure{
Message: "Test Panicked",
Location: location,
ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
}
}
}
func (f *Failer) Timeout(location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStateTimedOut
f.failure = types.SpecFailure{
Message: "Timed out",
Location: location,
}
}
}
func (f *Failer) Fail(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStateFailed
f.failure = types.SpecFailure{
Message: message,
Location: location,
}
}
}
func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
f.lock.Lock()
defer f.lock.Unlock()
failure := f.failure
outcome := f.state
if outcome != types.SpecStatePassed {
failure.ComponentType = componentType
failure.ComponentIndex = componentIndex
failure.ComponentCodeLocation = componentCodeLocation
}
f.state = types.SpecStatePassed
f.failure = types.SpecFailure{}
return failure, outcome
}
func (f *Failer) Skip(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStateSkipped
f.failure = types.SpecFailure{
Message: message,
Location: location,
}
}
}


@ -0,0 +1,95 @@
package leafnodes
import (
"math"
"time"
"sync"
"github.com/onsi/ginkgo/types"
)
type benchmarker struct {
mu sync.Mutex
measurements map[string]*types.SpecMeasurement
orderCounter int
}
func newBenchmarker() *benchmarker {
return &benchmarker{
measurements: make(map[string]*types.SpecMeasurement, 0),
}
}
func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
t := time.Now()
body()
elapsedTime = time.Since(t)
b.mu.Lock()
defer b.mu.Unlock()
measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", info...)
measurement.Results = append(measurement.Results, elapsedTime.Seconds())
return
}
func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
b.mu.Lock()
defer b.mu.Unlock()
//take the lock before touching the measurements map so concurrent RecordValue calls are safe
measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", info...)
measurement.Results = append(measurement.Results, value)
}
func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, info ...interface{}) *types.SpecMeasurement {
measurement, ok := b.measurements[name]
if !ok {
var computedInfo interface{}
computedInfo = nil
if len(info) > 0 {
computedInfo = info[0]
}
measurement = &types.SpecMeasurement{
Name: name,
Info: computedInfo,
Order: b.orderCounter,
SmallestLabel: smallestLabel,
LargestLabel: largestLabel,
AverageLabel: averageLabel,
Units: units,
Results: make([]float64, 0),
}
b.measurements[name] = measurement
b.orderCounter++
}
return measurement
}
func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
b.mu.Lock()
defer b.mu.Unlock()
for _, measurement := range b.measurements {
measurement.Smallest = math.MaxFloat64
measurement.Largest = -math.MaxFloat64
sum := float64(0)
sumOfSquares := float64(0)
for _, result := range measurement.Results {
if result > measurement.Largest {
measurement.Largest = result
}
if result < measurement.Smallest {
measurement.Smallest = result
}
sum += result
sumOfSquares += result * result
}
n := float64(len(measurement.Results))
measurement.Average = sum / n
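//the standard deviation is computed as sqrt(E[x^2] - (E[x])^2), i.e. the population standard deviation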
measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
}
return b.measurements
}


@ -0,0 +1,19 @@
package leafnodes
import (
"github.com/onsi/ginkgo/types"
)
type BasicNode interface {
Type() types.SpecComponentType
Run() (types.SpecState, types.SpecFailure)
CodeLocation() types.CodeLocation
}
type SubjectNode interface {
BasicNode
Text() string
Flag() types.FlagType
Samples() int
}


@ -0,0 +1,46 @@
package leafnodes
import (
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"time"
)
type ItNode struct {
runner *runner
flag types.FlagType
text string
}
func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
return &ItNode{
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
flag: flag,
text: text,
}
}
func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
return node.runner.run()
}
func (node *ItNode) Type() types.SpecComponentType {
return types.SpecComponentTypeIt
}
func (node *ItNode) Text() string {
return node.text
}
func (node *ItNode) Flag() types.FlagType {
return node.flag
}
func (node *ItNode) CodeLocation() types.CodeLocation {
return node.runner.codeLocation
}
func (node *ItNode) Samples() int {
return 1
}


@ -0,0 +1,61 @@
package leafnodes
import (
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"reflect"
)
type MeasureNode struct {
runner *runner
text string
flag types.FlagType
samples int
benchmarker *benchmarker
}
func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
benchmarker := newBenchmarker()
wrappedBody := func() {
reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
}
return &MeasureNode{
runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),
text: text,
flag: flag,
samples: samples,
benchmarker: benchmarker,
}
}
func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
return node.runner.run()
}
func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
return node.benchmarker.measurementsReport()
}
func (node *MeasureNode) Type() types.SpecComponentType {
return types.SpecComponentTypeMeasure
}
func (node *MeasureNode) Text() string {
return node.text
}
func (node *MeasureNode) Flag() types.FlagType {
return node.flag
}
func (node *MeasureNode) CodeLocation() types.CodeLocation {
return node.runner.codeLocation
}
func (node *MeasureNode) Samples() int {
return node.samples
}


@ -0,0 +1,113 @@
package leafnodes
import (
"fmt"
"github.com/onsi/ginkgo/internal/codelocation"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"reflect"
"time"
)
type runner struct {
isAsync bool
asyncFunc func(chan<- interface{})
syncFunc func()
codeLocation types.CodeLocation
timeoutThreshold time.Duration
nodeType types.SpecComponentType
componentIndex int
failer *failer.Failer
}
func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
bodyType := reflect.TypeOf(body)
if bodyType.Kind() != reflect.Func {
panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
}
runner := &runner{
codeLocation: codeLocation,
timeoutThreshold: timeout,
failer: failer,
nodeType: nodeType,
componentIndex: componentIndex,
}
switch bodyType.NumIn() {
case 0:
runner.syncFunc = body.(func())
return runner
case 1:
if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
}
wrappedBody := func(done chan<- interface{}) {
bodyValue := reflect.ValueOf(body)
bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
}
runner.isAsync = true
runner.asyncFunc = wrappedBody
return runner
}
panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
}
func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
if r.isAsync {
return r.runAsync()
} else {
return r.runSync()
}
}
func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
done := make(chan interface{}, 1)
go func() {
finished := false
defer func() {
if e := recover(); e != nil || !finished {
r.failer.Panic(codelocation.New(2), e)
select {
case <-done:
break
default:
close(done)
}
}
}()
r.asyncFunc(done)
finished = true
}()
select {
case <-done:
case <-time.After(r.timeoutThreshold):
r.failer.Timeout(r.codeLocation)
}
failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
return
}
func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
finished := false
defer func() {
if e := recover(); e != nil || !finished {
r.failer.Panic(codelocation.New(2), e)
}
failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
}()
r.syncFunc()
finished = true
return
}


@ -0,0 +1,41 @@
package leafnodes
import (
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"time"
)
type SetupNode struct {
runner *runner
}
func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
return node.runner.run()
}
func (node *SetupNode) Type() types.SpecComponentType {
return node.runner.nodeType
}
func (node *SetupNode) CodeLocation() types.CodeLocation {
return node.runner.codeLocation
}
func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
return &SetupNode{
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
}
}
func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
return &SetupNode{
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
}
}
func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
return &SetupNode{
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
}
}


@ -0,0 +1,54 @@
package leafnodes
import (
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"time"
)
type SuiteNode interface {
Run(parallelNode int, parallelTotal int, syncHost string) bool
Passed() bool
Summary() *types.SetupSummary
}
type simpleSuiteNode struct {
runner *runner
outcome types.SpecState
failure types.SpecFailure
runTime time.Duration
}
func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
t := time.Now()
node.outcome, node.failure = node.runner.run()
node.runTime = time.Since(t)
return node.outcome == types.SpecStatePassed
}
func (node *simpleSuiteNode) Passed() bool {
return node.outcome == types.SpecStatePassed
}
func (node *simpleSuiteNode) Summary() *types.SetupSummary {
return &types.SetupSummary{
ComponentType: node.runner.nodeType,
CodeLocation: node.runner.codeLocation,
State: node.outcome,
RunTime: node.runTime,
Failure: node.failure,
}
}
func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
return &simpleSuiteNode{
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
}
}
func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
return &simpleSuiteNode{
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
}
}


@ -0,0 +1,89 @@
package leafnodes
import (
"encoding/json"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"io/ioutil"
"net/http"
"time"
)
type synchronizedAfterSuiteNode struct {
runnerA *runner
runnerB *runner
outcome types.SpecState
failure types.SpecFailure
runTime time.Duration
}
func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
return &synchronizedAfterSuiteNode{
runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
}
}
func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
node.outcome, node.failure = node.runnerA.run()
if parallelNode == 1 {
if parallelTotal > 1 {
node.waitUntilOtherNodesAreDone(syncHost)
}
outcome, failure := node.runnerB.run()
if node.outcome == types.SpecStatePassed {
node.outcome, node.failure = outcome, failure
}
}
return node.outcome == types.SpecStatePassed
}
func (node *synchronizedAfterSuiteNode) Passed() bool {
return node.outcome == types.SpecStatePassed
}
func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
return &types.SetupSummary{
ComponentType: node.runnerA.nodeType,
CodeLocation: node.runnerA.codeLocation,
State: node.outcome,
RunTime: node.runTime,
Failure: node.failure,
}
}
func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
for {
if node.canRun(syncHost) {
return
}
time.Sleep(50 * time.Millisecond)
}
}
func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
if err != nil || resp.StatusCode != http.StatusOK {
return false
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return false
}
resp.Body.Close()
afterSuiteData := types.RemoteAfterSuiteData{}
err = json.Unmarshal(body, &afterSuiteData)
if err != nil {
return false
}
return afterSuiteData.CanRun
}


@ -0,0 +1,182 @@
package leafnodes
import (
"bytes"
"encoding/json"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"io/ioutil"
"net/http"
"reflect"
"time"
)
type synchronizedBeforeSuiteNode struct {
runnerA *runner
runnerB *runner
data []byte
outcome types.SpecState
failure types.SpecFailure
runTime time.Duration
}
func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
node := &synchronizedBeforeSuiteNode{}
node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
return node
}
func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
t := time.Now()
defer func() {
node.runTime = time.Since(t)
}()
if parallelNode == 1 {
node.outcome, node.failure = node.runA(parallelTotal, syncHost)
} else {
node.outcome, node.failure = node.waitForA(syncHost)
}
if node.outcome != types.SpecStatePassed {
return false
}
node.outcome, node.failure = node.runnerB.run()
return node.outcome == types.SpecStatePassed
}
func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
outcome, failure := node.runnerA.run()
if parallelTotal > 1 {
state := types.RemoteBeforeSuiteStatePassed
if outcome != types.SpecStatePassed {
state = types.RemoteBeforeSuiteStateFailed
}
json := (types.RemoteBeforeSuiteData{
Data: node.data,
State: state,
}).ToJSON()
http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
}
return outcome, failure
}
func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
failure := func(message string) types.SpecFailure {
return types.SpecFailure{
Message: message,
Location: node.runnerA.codeLocation,
ComponentType: node.runnerA.nodeType,
ComponentIndex: node.runnerA.componentIndex,
ComponentCodeLocation: node.runnerA.codeLocation,
}
}
for {
resp, err := http.Get(syncHost + "/BeforeSuiteState")
if err != nil || resp.StatusCode != http.StatusOK {
return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
}
resp.Body.Close()
beforeSuiteData := types.RemoteBeforeSuiteData{}
err = json.Unmarshal(body, &beforeSuiteData)
if err != nil {
return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
}
switch beforeSuiteData.State {
case types.RemoteBeforeSuiteStatePassed:
node.data = beforeSuiteData.Data
return types.SpecStatePassed, types.SpecFailure{}
case types.RemoteBeforeSuiteStateFailed:
return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
case types.RemoteBeforeSuiteStateDisappeared:
return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
}
time.Sleep(50 * time.Millisecond)
}
return types.SpecStateFailed, failure("Shouldn't get here!")
}
func (node *synchronizedBeforeSuiteNode) Passed() bool {
return node.outcome == types.SpecStatePassed
}
func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
return &types.SetupSummary{
ComponentType: node.runnerA.nodeType,
CodeLocation: node.runnerA.codeLocation,
State: node.outcome,
RunTime: node.runTime,
Failure: node.failure,
}
}
func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
typeA := reflect.TypeOf(bodyA)
if typeA.Kind() != reflect.Func {
panic("SynchronizedBeforeSuite expects a function as its first argument")
}
takesNothing := typeA.NumIn() == 0
takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8
if !((takesNothing || takesADoneChannel) && returnsBytes) {
panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
}
if takesADoneChannel {
return func(done chan<- interface{}) {
out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
node.data = out[0].Interface().([]byte)
}
}
return func() {
out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
node.data = out[0].Interface().([]byte)
}
}
func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
typeB := reflect.TypeOf(bodyB)
if typeB.Kind() != reflect.Func {
panic("SynchronizedBeforeSuite expects a function as its second argument")
}
returnsNothing := typeB.NumOut() == 0
takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
takesBytesAndDone := typeB.NumIn() == 2 &&
typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface
if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
}
if takesBytesAndDone {
return func(done chan<- interface{}) {
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
}
}
return func() {
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
}
}


@ -0,0 +1,250 @@
/*
Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:
ginkgo -nodes=N
where N is the number of nodes you desire.
*/
package remote
import (
"time"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters/stenographer"
"github.com/onsi/ginkgo/types"
)
type configAndSuite struct {
config config.GinkgoConfigType
summary *types.SuiteSummary
}
type Aggregator struct {
nodeCount int
config config.DefaultReporterConfigType
stenographer stenographer.Stenographer
result chan bool
suiteBeginnings chan configAndSuite
aggregatedSuiteBeginnings []configAndSuite
beforeSuites chan *types.SetupSummary
aggregatedBeforeSuites []*types.SetupSummary
afterSuites chan *types.SetupSummary
aggregatedAfterSuites []*types.SetupSummary
specCompletions chan *types.SpecSummary
completedSpecs []*types.SpecSummary
suiteEndings chan *types.SuiteSummary
aggregatedSuiteEndings []*types.SuiteSummary
specs []*types.SpecSummary
startTime time.Time
}
func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
aggregator := &Aggregator{
nodeCount: nodeCount,
result: result,
config: config,
stenographer: stenographer,
suiteBeginnings: make(chan configAndSuite, 0),
beforeSuites: make(chan *types.SetupSummary, 0),
afterSuites: make(chan *types.SetupSummary, 0),
specCompletions: make(chan *types.SpecSummary, 0),
suiteEndings: make(chan *types.SuiteSummary, 0),
}
go aggregator.mux()
return aggregator
}
func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
aggregator.suiteBeginnings <- configAndSuite{config, summary}
}
func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
aggregator.beforeSuites <- setupSummary
}
func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
aggregator.afterSuites <- setupSummary
}
func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
//noop
}
func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
aggregator.specCompletions <- specSummary
}
func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
aggregator.suiteEndings <- summary
}
func (aggregator *Aggregator) mux() {
loop:
for {
select {
case configAndSuite := <-aggregator.suiteBeginnings:
aggregator.registerSuiteBeginning(configAndSuite)
case setupSummary := <-aggregator.beforeSuites:
aggregator.registerBeforeSuite(setupSummary)
case setupSummary := <-aggregator.afterSuites:
aggregator.registerAfterSuite(setupSummary)
case specSummary := <-aggregator.specCompletions:
aggregator.registerSpecCompletion(specSummary)
case suite := <-aggregator.suiteEndings:
finished, passed := aggregator.registerSuiteEnding(suite)
if finished {
aggregator.result <- passed
break loop
}
}
}
}
func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
if len(aggregator.aggregatedSuiteBeginnings) == 1 {
aggregator.startTime = time.Now()
}
if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
return
}
aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
numberOfSpecsToRun := 0
totalNumberOfSpecs := 0
for _, configAndSuite := range aggregator.aggregatedSuiteBeginnings {
numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun
totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs
}
aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct)
aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
aggregator.flushCompletedSpecs()
}
func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
aggregator.flushCompletedSpecs()
}
func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
aggregator.flushCompletedSpecs()
}
func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
aggregator.specs = append(aggregator.specs, specSummary)
aggregator.flushCompletedSpecs()
}
func (aggregator *Aggregator) flushCompletedSpecs() {
if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
return
}
for _, setupSummary := range aggregator.aggregatedBeforeSuites {
aggregator.announceBeforeSuite(setupSummary)
}
for _, specSummary := range aggregator.completedSpecs {
aggregator.announceSpec(specSummary)
}
for _, setupSummary := range aggregator.aggregatedAfterSuites {
aggregator.announceAfterSuite(setupSummary)
}
aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
aggregator.completedSpecs = []*types.SpecSummary{}
aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
}
func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
if setupSummary.State != types.SpecStatePassed {
aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
}
}
func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
if setupSummary.State != types.SpecStatePassed {
aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
}
}
func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
aggregator.stenographer.AnnounceSpecWillRun(specSummary)
}
aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
switch specSummary.State {
case types.SpecStatePassed:
if specSummary.IsMeasurement {
aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
} else {
aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
}
case types.SpecStatePending:
aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
case types.SpecStateSkipped:
aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
case types.SpecStateTimedOut:
aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
case types.SpecStatePanicked:
aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
case types.SpecStateFailed:
aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
}
}
func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
return false, false
}
aggregatedSuiteSummary := &types.SuiteSummary{}
aggregatedSuiteSummary.SuiteSucceeded = true
for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
if suiteSummary.SuiteSucceeded == false {
aggregatedSuiteSummary.SuiteSucceeded = false
}
aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
}
aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
aggregator.stenographer.SummarizeFailures(aggregator.specs)
aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
return true, aggregatedSuiteSummary.SuiteSucceeded
}


@ -0,0 +1,90 @@
package remote
import (
"bytes"
"encoding/json"
"io"
"net/http"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/types"
)
//An interface to net/http's client to allow the injection of fakes under test
type Poster interface {
Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
}
/*
The ForwardingReporter is a Ginkgo reporter that forwards information to
a Ginkgo remote server.
When streaming parallel test output, this reporter is automatically installed by Ginkgo.
This is accomplished by passing the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`; the Ginkgo test runner
detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
in place of Ginkgo's DefaultReporter.
*/
type ForwardingReporter struct {
serverHost string
poster Poster
outputInterceptor OutputInterceptor
}
func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter {
return &ForwardingReporter{
serverHost: serverHost,
poster: poster,
outputInterceptor: outputInterceptor,
}
}
func (reporter *ForwardingReporter) post(path string, data interface{}) {
encoded, _ := json.Marshal(data)
buffer := bytes.NewBuffer(encoded)
reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
}
func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
data := struct {
Config config.GinkgoConfigType `json:"config"`
Summary *types.SuiteSummary `json:"suite-summary"`
}{
conf,
summary,
}
reporter.outputInterceptor.StartInterceptingOutput()
reporter.post("/SpecSuiteWillBegin", data)
}
func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
reporter.outputInterceptor.StartInterceptingOutput()
setupSummary.CapturedOutput = output
reporter.post("/BeforeSuiteDidRun", setupSummary)
}
func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
reporter.post("/SpecWillRun", specSummary)
}
func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
reporter.outputInterceptor.StartInterceptingOutput()
specSummary.CapturedOutput = output
reporter.post("/SpecDidComplete", specSummary)
}
func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
reporter.outputInterceptor.StartInterceptingOutput()
setupSummary.CapturedOutput = output
reporter.post("/AfterSuiteDidRun", setupSummary)
}
func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
reporter.outputInterceptor.StopInterceptingAndReturnOutput()
reporter.post("/SpecSuiteDidEnd", summary)
}


@ -0,0 +1,10 @@
package remote
/*
The OutputInterceptor is used by the ForwardingReporter to
intercept and capture all stdout and stderr output during a test run.
*/
type OutputInterceptor interface {
StartInterceptingOutput() error
StopInterceptingAndReturnOutput() (string, error)
}


@ -0,0 +1,52 @@
// +build freebsd openbsd netbsd dragonfly darwin linux
package remote
import (
"errors"
"io/ioutil"
"os"
"syscall"
)
func NewOutputInterceptor() OutputInterceptor {
return &outputInterceptor{}
}
type outputInterceptor struct {
redirectFile *os.File
intercepting bool
}
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
if interceptor.intercepting {
return errors.New("Already intercepting output!")
}
interceptor.intercepting = true
var err error
interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
if err != nil {
return err
}
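//redirect file descriptors 1 (stdout) and 2 (stderr) to the temp file so all output is captured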
syscall.Dup2(int(interceptor.redirectFile.Fd()), 1)
syscall.Dup2(int(interceptor.redirectFile.Fd()), 2)
return nil
}
func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
if !interceptor.intercepting {
return "", errors.New("Not intercepting output!")
}
interceptor.redirectFile.Close()
output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
os.Remove(interceptor.redirectFile.Name())
interceptor.intercepting = false
return string(output), err
}


@ -0,0 +1,33 @@
// +build windows
package remote
import (
"errors"
)
func NewOutputInterceptor() OutputInterceptor {
return &outputInterceptor{}
}
type outputInterceptor struct {
intercepting bool
}
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
if interceptor.intercepting {
return errors.New("Already intercepting output!")
}
interceptor.intercepting = true
// not working on windows...
return nil
}
func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
// not working on windows...
interceptor.intercepting = false
return "", nil
}

204
vendor/github.com/onsi/ginkgo/internal/remote/server.go generated vendored Normal file

@ -0,0 +1,204 @@
/*
The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
*/
package remote
import (
"encoding/json"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
"io/ioutil"
"net"
"net/http"
"sync"
)
/*
Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
It then forwards that communication to attached reporters.
*/
type Server struct {
listener net.Listener
reporters []reporters.Reporter
alives []func() bool
lock *sync.Mutex
beforeSuiteData types.RemoteBeforeSuiteData
parallelTotal int
}
//Create a new server, automatically selecting a port
func NewServer(parallelTotal int) (*Server, error) {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return nil, err
}
return &Server{
listener: listener,
lock: &sync.Mutex{},
alives: make([]func() bool, parallelTotal),
beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending},
parallelTotal: parallelTotal,
}, nil
}
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *Server) Start() {
httpServer := &http.Server{}
mux := http.NewServeMux()
httpServer.Handler = mux
//streaming endpoints
mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
mux.HandleFunc("/SpecWillRun", server.specWillRun)
mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
//synchronization endpoints
mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
go httpServer.Serve(server.listener)
}
//Stop the server
func (server *Server) Close() {
server.listener.Close()
}
//The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *Server) Address() string {
return "http://" + server.listener.Addr().String()
}
//
// Streaming Endpoints
//
//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
func (server *Server) readAll(request *http.Request) []byte {
defer request.Body.Close()
body, _ := ioutil.ReadAll(request.Body)
return body
}
func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
server.reporters = reporters
}
func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
body := server.readAll(request)
var data struct {
Config config.GinkgoConfigType `json:"config"`
Summary *types.SuiteSummary `json:"suite-summary"`
}
json.Unmarshal(body, &data)
for _, reporter := range server.reporters {
reporter.SpecSuiteWillBegin(data.Config, data.Summary)
}
}
func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
body := server.readAll(request)
var setupSummary *types.SetupSummary
json.Unmarshal(body, &setupSummary)
for _, reporter := range server.reporters {
reporter.BeforeSuiteDidRun(setupSummary)
}
}
func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
body := server.readAll(request)
var setupSummary *types.SetupSummary
json.Unmarshal(body, &setupSummary)
for _, reporter := range server.reporters {
reporter.AfterSuiteDidRun(setupSummary)
}
}
func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
body := server.readAll(request)
var specSummary *types.SpecSummary
json.Unmarshal(body, &specSummary)
for _, reporter := range server.reporters {
reporter.SpecWillRun(specSummary)
}
}
func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
body := server.readAll(request)
var specSummary *types.SpecSummary
json.Unmarshal(body, &specSummary)
for _, reporter := range server.reporters {
reporter.SpecDidComplete(specSummary)
}
}
func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
body := server.readAll(request)
var suiteSummary *types.SuiteSummary
json.Unmarshal(body, &suiteSummary)
for _, reporter := range server.reporters {
reporter.SpecSuiteDidEnd(suiteSummary)
}
}
//
// Synchronization Endpoints
//
func (server *Server) RegisterAlive(node int, alive func() bool) {
server.lock.Lock()
defer server.lock.Unlock()
server.alives[node-1] = alive
}
func (server *Server) nodeIsAlive(node int) bool {
server.lock.Lock()
defer server.lock.Unlock()
alive := server.alives[node-1]
if alive == nil {
return true
}
return alive()
}
func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
if request.Method == "POST" {
dec := json.NewDecoder(request.Body)
dec.Decode(&(server.beforeSuiteData))
} else {
beforeSuiteData := server.beforeSuiteData
if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
}
enc := json.NewEncoder(writer)
enc.Encode(beforeSuiteData)
}
}
func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
afterSuiteData := types.RemoteAfterSuiteData{
CanRun: true,
}
for i := 2; i <= server.parallelTotal; i++ {
afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
}
enc := json.NewEncoder(writer)
enc.Encode(afterSuiteData)
}


@ -0,0 +1,55 @@
package spec
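//ParallelizedIndexRange splits length tests as evenly as possible across parallelTotal nodes and returns
//the half-open range [startIndex, startIndex+count) assigned to parallelNode (1-indexed). Nodes that
//receive one extra test always come first.
//
//A worked example: length=10, parallelTotal=3 yields node sizes 4, 3, 3:
//node 1 gets [0,4), node 2 gets [4,7) and node 3 gets [7,10).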
func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
if length == 0 {
return 0, 0
}
// We have more nodes than tests. Trivial case.
if parallelTotal >= length {
if parallelNode > length {
return 0, 0
} else {
return parallelNode - 1, 1
}
}
// This is the minimum amount of tests that a node will be required to run
minTestsPerNode := length / parallelTotal
// This is the maximum amount of tests that a node will be required to run
// The algorithm guarantees that this would be equal to at least the minimum amount
// and at most one more
maxTestsPerNode := minTestsPerNode
if length%parallelTotal != 0 {
maxTestsPerNode++
}
// Number of nodes that will have to run the maximum amount of tests per node
numMaxLoadNodes := length % parallelTotal
// Number of nodes that precede the current node and will have to run the maximum amount of tests per node
var numPrecedingMaxLoadNodes int
if parallelNode > numMaxLoadNodes {
numPrecedingMaxLoadNodes = numMaxLoadNodes
} else {
numPrecedingMaxLoadNodes = parallelNode - 1
}
// Number of nodes that precede the current node and will have to run the minimum amount of tests per node
var numPrecedingMinLoadNodes int
if parallelNode <= numMaxLoadNodes {
numPrecedingMinLoadNodes = 0
} else {
numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
}
// Evaluate the test start index and number of tests to run
startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
if parallelNode > numMaxLoadNodes {
count = minTestsPerNode
} else {
count = maxTestsPerNode
}
return
}

197
vendor/github.com/onsi/ginkgo/internal/spec/spec.go generated vendored Normal file

@ -0,0 +1,197 @@
package spec
import (
"fmt"
"io"
"time"
"github.com/onsi/ginkgo/internal/containernode"
"github.com/onsi/ginkgo/internal/leafnodes"
"github.com/onsi/ginkgo/types"
)
type Spec struct {
subject leafnodes.SubjectNode
focused bool
announceProgress bool
containers []*containernode.ContainerNode
state types.SpecState
runTime time.Duration
failure types.SpecFailure
}
func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
spec := &Spec{
subject: subject,
containers: containers,
focused: subject.Flag() == types.FlagTypeFocused,
announceProgress: announceProgress,
}
spec.processFlag(subject.Flag())
for i := len(containers) - 1; i >= 0; i-- {
spec.processFlag(containers[i].Flag())
}
return spec
}
func (spec *Spec) processFlag(flag types.FlagType) {
if flag == types.FlagTypeFocused {
spec.focused = true
} else if flag == types.FlagTypePending {
spec.state = types.SpecStatePending
}
}
func (spec *Spec) Skip() {
spec.state = types.SpecStateSkipped
}
func (spec *Spec) Failed() bool {
return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut
}
func (spec *Spec) Passed() bool {
return spec.state == types.SpecStatePassed
}
func (spec *Spec) Pending() bool {
return spec.state == types.SpecStatePending
}
func (spec *Spec) Skipped() bool {
return spec.state == types.SpecStateSkipped
}
func (spec *Spec) Focused() bool {
return spec.focused
}
func (spec *Spec) IsMeasurement() bool {
return spec.subject.Type() == types.SpecComponentTypeMeasure
}
func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
componentTexts := make([]string, len(spec.containers)+1)
componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)
for i, container := range spec.containers {
componentTexts[i] = container.Text()
componentCodeLocations[i] = container.CodeLocation()
}
componentTexts[len(spec.containers)] = spec.subject.Text()
componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
return &types.SpecSummary{
IsMeasurement: spec.IsMeasurement(),
NumberOfSamples: spec.subject.Samples(),
ComponentTexts: componentTexts,
ComponentCodeLocations: componentCodeLocations,
State: spec.state,
RunTime: spec.runTime,
Failure: spec.failure,
Measurements: spec.measurementsReport(),
SuiteID: suiteID,
}
}
func (spec *Spec) ConcatenatedString() string {
s := ""
for _, container := range spec.containers {
s += container.Text() + " "
}
return s + spec.subject.Text()
}
func (spec *Spec) Run(writer io.Writer) {
startTime := time.Now()
defer func() {
spec.runTime = time.Since(startTime)
}()
for sample := 0; sample < spec.subject.Samples(); sample++ {
spec.runSample(sample, writer)
if spec.state != types.SpecStatePassed {
return
}
}
}
func (spec *Spec) runSample(sample int, writer io.Writer) {
spec.state = types.SpecStatePassed
spec.failure = types.SpecFailure{}
innerMostContainerIndexToUnwind := -1
defer func() {
for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
container := spec.containers[i]
for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
spec.announceSetupNode(writer, "AfterEach", container, afterEach)
afterEachState, afterEachFailure := afterEach.Run()
if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
spec.state = afterEachState
spec.failure = afterEachFailure
}
}
}
}()
for i, container := range spec.containers {
innerMostContainerIndexToUnwind = i
for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
spec.state, spec.failure = beforeEach.Run()
if spec.state != types.SpecStatePassed {
return
}
}
}
for _, container := range spec.containers {
for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
spec.state, spec.failure = justBeforeEach.Run()
if spec.state != types.SpecStatePassed {
return
}
}
}
spec.announceSubject(writer, spec.subject)
spec.state, spec.failure = spec.subject.Run()
}
func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
if spec.announceProgress {
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
writer.Write([]byte(s))
}
}
func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
if spec.announceProgress {
nodeType := ""
switch subject.Type() {
case types.SpecComponentTypeIt:
nodeType = "It"
case types.SpecComponentTypeMeasure:
nodeType = "Measure"
}
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
writer.Write([]byte(s))
}
}
func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
if !spec.IsMeasurement() || spec.Failed() {
return map[string]*types.SpecMeasurement{}
}
return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
}

122
vendor/github.com/onsi/ginkgo/internal/spec/specs.go generated vendored Normal file

@ -0,0 +1,122 @@
package spec
import (
"math/rand"
"regexp"
"sort"
)
type Specs struct {
specs []*Spec
numberOfOriginalSpecs int
hasProgrammaticFocus bool
}
func NewSpecs(specs []*Spec) *Specs {
return &Specs{
specs: specs,
numberOfOriginalSpecs: len(specs),
}
}
func (e *Specs) Specs() []*Spec {
return e.specs
}
func (e *Specs) NumberOfOriginalSpecs() int {
return e.numberOfOriginalSpecs
}
func (e *Specs) HasProgrammaticFocus() bool {
return e.hasProgrammaticFocus
}
func (e *Specs) Shuffle(r *rand.Rand) {
sort.Sort(e)
permutation := r.Perm(len(e.specs))
shuffledSpecs := make([]*Spec, len(e.specs))
for i, j := range permutation {
shuffledSpecs[i] = e.specs[j]
}
e.specs = shuffledSpecs
}
func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
if focusString == "" && skipString == "" {
e.applyProgrammaticFocus()
} else {
e.applyRegExpFocus(description, focusString, skipString)
}
}
func (e *Specs) applyProgrammaticFocus() {
e.hasProgrammaticFocus = false
for _, spec := range e.specs {
if spec.Focused() && !spec.Pending() {
e.hasProgrammaticFocus = true
break
}
}
if e.hasProgrammaticFocus {
for _, spec := range e.specs {
if !spec.Focused() {
spec.Skip()
}
}
}
}
func (e *Specs) applyRegExpFocus(description string, focusString string, skipString string) {
for _, spec := range e.specs {
matchesFocus := true
matchesSkip := false
toMatch := []byte(description + " " + spec.ConcatenatedString())
if focusString != "" {
focusFilter := regexp.MustCompile(focusString)
matchesFocus = focusFilter.Match([]byte(toMatch))
}
if skipString != "" {
skipFilter := regexp.MustCompile(skipString)
matchesSkip = skipFilter.Match([]byte(toMatch))
}
if !matchesFocus || matchesSkip {
spec.Skip()
}
}
}
func (e *Specs) SkipMeasurements() {
for _, spec := range e.specs {
if spec.IsMeasurement() {
spec.Skip()
}
}
}
func (e *Specs) TrimForParallelization(total int, node int) {
startIndex, count := ParallelizedIndexRange(len(e.specs), total, node)
if count == 0 {
e.specs = make([]*Spec, 0)
} else {
e.specs = e.specs[startIndex : startIndex+count]
}
}
//sort.Interface
func (e *Specs) Len() int {
return len(e.specs)
}
func (e *Specs) Less(i, j int) bool {
return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString()
}
func (e *Specs) Swap(i, j int) {
e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
}


@ -0,0 +1,15 @@
package specrunner
import (
"crypto/rand"
"fmt"
)
func randomID() string {
b := make([]byte, 8)
_, err := rand.Read(b)
if err != nil {
return ""
}
return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
}


@ -0,0 +1,324 @@
package specrunner
import (
"fmt"
"os"
"os/signal"
"sync"
"syscall"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/internal/leafnodes"
"github.com/onsi/ginkgo/internal/spec"
Writer "github.com/onsi/ginkgo/internal/writer"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
"time"
)
type SpecRunner struct {
description string
beforeSuiteNode leafnodes.SuiteNode
specs *spec.Specs
afterSuiteNode leafnodes.SuiteNode
reporters []reporters.Reporter
startTime time.Time
suiteID string
runningSpec *spec.Spec
writer Writer.WriterInterface
config config.GinkgoConfigType
interrupted bool
lock *sync.Mutex
}
func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
return &SpecRunner{
description: description,
beforeSuiteNode: beforeSuiteNode,
specs: specs,
afterSuiteNode: afterSuiteNode,
reporters: reporters,
writer: writer,
config: config,
suiteID: randomID(),
lock: &sync.Mutex{},
}
}
func (runner *SpecRunner) Run() bool {
if runner.config.DryRun {
runner.performDryRun()
return true
}
runner.reportSuiteWillBegin()
go runner.registerForInterrupts()
suitePassed := runner.runBeforeSuite()
if suitePassed {
suitePassed = runner.runSpecs()
}
runner.blockForeverIfInterrupted()
suitePassed = runner.runAfterSuite() && suitePassed
runner.reportSuiteDidEnd(suitePassed)
return suitePassed
}
func (runner *SpecRunner) performDryRun() {
runner.reportSuiteWillBegin()
if runner.beforeSuiteNode != nil {
summary := runner.beforeSuiteNode.Summary()
summary.State = types.SpecStatePassed
runner.reportBeforeSuite(summary)
}
for _, spec := range runner.specs.Specs() {
summary := spec.Summary(runner.suiteID)
runner.reportSpecWillRun(summary)
if summary.State == types.SpecStateInvalid {
summary.State = types.SpecStatePassed
}
runner.reportSpecDidComplete(summary, false)
}
if runner.afterSuiteNode != nil {
summary := runner.afterSuiteNode.Summary()
summary.State = types.SpecStatePassed
runner.reportAfterSuite(summary)
}
runner.reportSuiteDidEnd(true)
}
func (runner *SpecRunner) runBeforeSuite() bool {
if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
return true
}
runner.writer.Truncate()
conf := runner.config
passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
if !passed {
runner.writer.DumpOut()
}
runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
return passed
}
func (runner *SpecRunner) runAfterSuite() bool {
if runner.afterSuiteNode == nil {
return true
}
runner.writer.Truncate()
conf := runner.config
passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
if !passed {
runner.writer.DumpOut()
}
runner.reportAfterSuite(runner.afterSuiteNode.Summary())
return passed
}
func (runner *SpecRunner) runSpecs() bool {
suiteFailed := false
skipRemainingSpecs := false
for _, spec := range runner.specs.Specs() {
if runner.wasInterrupted() {
return suiteFailed
}
if skipRemainingSpecs {
spec.Skip()
}
runner.reportSpecWillRun(spec.Summary(runner.suiteID))
if !spec.Skipped() && !spec.Pending() {
runner.runningSpec = spec
spec.Run(runner.writer)
runner.runningSpec = nil
if spec.Failed() {
suiteFailed = true
}
} else if spec.Pending() && runner.config.FailOnPending {
suiteFailed = true
}
runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
if spec.Failed() && runner.config.FailFast {
skipRemainingSpecs = true
}
}
return !suiteFailed
}
func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
if runner.runningSpec == nil {
return nil, false
}
return runner.runningSpec.Summary(runner.suiteID), true
}
func (runner *SpecRunner) registerForInterrupts() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
<-c
signal.Stop(c)
runner.markInterrupted()
go runner.registerForHardInterrupts()
runner.writer.DumpOutWithHeader(`
Received interrupt. Emitting contents of GinkgoWriter...
---------------------------------------------------------
`)
if runner.afterSuiteNode != nil {
fmt.Fprint(os.Stderr, `
---------------------------------------------------------
Received interrupt. Running AfterSuite...
^C again to terminate immediately
`)
runner.runAfterSuite()
}
runner.reportSuiteDidEnd(false)
os.Exit(1)
}
func (runner *SpecRunner) registerForHardInterrupts() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
<-c
fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.")
os.Exit(1)
}
func (runner *SpecRunner) blockForeverIfInterrupted() {
runner.lock.Lock()
interrupted := runner.interrupted
runner.lock.Unlock()
if interrupted {
select {}
}
}
func (runner *SpecRunner) markInterrupted() {
runner.lock.Lock()
defer runner.lock.Unlock()
runner.interrupted = true
}
func (runner *SpecRunner) wasInterrupted() bool {
runner.lock.Lock()
defer runner.lock.Unlock()
return runner.interrupted
}
func (runner *SpecRunner) reportSuiteWillBegin() {
runner.startTime = time.Now()
summary := runner.summary(true)
for _, reporter := range runner.reporters {
reporter.SpecSuiteWillBegin(runner.config, summary)
}
}
func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
for _, reporter := range runner.reporters {
reporter.BeforeSuiteDidRun(summary)
}
}
func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
for _, reporter := range runner.reporters {
reporter.AfterSuiteDidRun(summary)
}
}
func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
runner.writer.Truncate()
for _, reporter := range runner.reporters {
reporter.SpecWillRun(summary)
}
}
func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
for i := len(runner.reporters) - 1; i >= 1; i-- {
runner.reporters[i].SpecDidComplete(summary)
}
if failed {
runner.writer.DumpOut()
}
runner.reporters[0].SpecDidComplete(summary)
}
func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
summary := runner.summary(success)
summary.RunTime = time.Since(runner.startTime)
for _, reporter := range runner.reporters {
reporter.SpecSuiteDidEnd(summary)
}
}
func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) (count int) {
count = 0
for _, spec := range runner.specs.Specs() {
if filter(spec) {
count++
}
}
return count
}
func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
return !ex.Skipped() && !ex.Pending()
})
numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
return ex.Pending()
})
numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
return ex.Skipped()
})
numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
return ex.Passed()
})
numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
return ex.Failed()
})
if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
numberOfFailedSpecs = numberOfSpecsThatWillBeRun
}
return &types.SuiteSummary{
SuiteDescription: runner.description,
SuiteSucceeded: success,
SuiteID: runner.suiteID,
NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(),
NumberOfTotalSpecs: len(runner.specs.Specs()),
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun,
NumberOfPendingSpecs: numberOfPendingSpecs,
NumberOfSkippedSpecs: numberOfSkippedSpecs,
NumberOfPassedSpecs: numberOfPassedSpecs,
NumberOfFailedSpecs: numberOfFailedSpecs,
}
}

171
vendor/github.com/onsi/ginkgo/internal/suite/suite.go generated vendored Normal file

@ -0,0 +1,171 @@
package suite
import (
"math/rand"
"time"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/internal/containernode"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/internal/leafnodes"
"github.com/onsi/ginkgo/internal/spec"
"github.com/onsi/ginkgo/internal/specrunner"
"github.com/onsi/ginkgo/internal/writer"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
)
type ginkgoTestingT interface {
Fail()
}
type Suite struct {
topLevelContainer *containernode.ContainerNode
currentContainer *containernode.ContainerNode
containerIndex int
beforeSuiteNode leafnodes.SuiteNode
afterSuiteNode leafnodes.SuiteNode
runner *specrunner.SpecRunner
failer *failer.Failer
running bool
}
func New(failer *failer.Failer) *Suite {
topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})
return &Suite{
topLevelContainer: topLevelContainer,
currentContainer: topLevelContainer,
failer: failer,
containerIndex: 1,
}
}
func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
if config.ParallelTotal < 1 {
panic("ginkgo.parallel.total must be >= 1")
}
if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
}
r := rand.New(rand.NewSource(config.RandomSeed))
suite.topLevelContainer.Shuffle(r)
specs := suite.generateSpecs(description, config)
suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config)
suite.running = true
success := suite.runner.Run()
if !success {
t.Fail()
}
return success, specs.HasProgrammaticFocus()
}
func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs {
specsSlice := []*spec.Spec{}
suite.topLevelContainer.BackPropagateProgrammaticFocus()
for _, collatedNodes := range suite.topLevelContainer.Collate() {
specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
}
specs := spec.NewSpecs(specsSlice)
if config.RandomizeAllSpecs {
specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
}
specs.ApplyFocus(description, config.FocusString, config.SkipString)
if config.SkipMeasurements {
specs.SkipMeasurements()
}
if config.ParallelTotal > 1 {
specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode)
}
return specs
}
func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
return suite.runner.CurrentSpecSummary()
}
func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.beforeSuiteNode != nil {
panic("You may only call BeforeSuite once!")
}
suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
}
func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.afterSuiteNode != nil {
panic("You may only call AfterSuite once!")
}
suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
}
func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.beforeSuiteNode != nil {
panic("You may only call BeforeSuite once!")
}
suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
}
func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.afterSuiteNode != nil {
panic("You may only call AfterSuite once!")
}
suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
}
func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
container := containernode.New(text, flag, codeLocation)
suite.currentContainer.PushContainerNode(container)
previousContainer := suite.currentContainer
suite.currentContainer = container
suite.containerIndex++
body()
suite.containerIndex--
suite.currentContainer = previousContainer
}
func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.running {
suite.failer.Fail("You may only call It from within a Describe or Context", codeLocation)
}
suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
}
func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
if suite.running {
suite.failer.Fail("You may only call Measure from within a Describe or Context", codeLocation)
}
suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
}
func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.running {
suite.failer.Fail("You may only call BeforeEach from within a Describe or Context", codeLocation)
}
suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}
func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.running {
suite.failer.Fail("You may only call JustBeforeEach from within a Describe or Context", codeLocation)
}
suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}
func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.running {
suite.failer.Fail("You may only call AfterEach from within a Describe or Context", codeLocation)
}
suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}


@ -0,0 +1,76 @@
package testingtproxy
import (
"fmt"
"io"
)
type failFunc func(message string, callerSkip ...int)
func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy {
return &ginkgoTestingTProxy{
fail: fail,
offset: offset,
writer: writer,
}
}
type ginkgoTestingTProxy struct {
fail failFunc
offset int
writer io.Writer
}
func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
t.fail(fmt.Sprintln(args...), t.offset)
}
func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
t.fail(fmt.Sprintf(format, args...), t.offset)
}
func (t *ginkgoTestingTProxy) Fail() {
t.fail("failed", t.offset)
}
func (t *ginkgoTestingTProxy) FailNow() {
t.fail("failed", t.offset)
}
func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
t.fail(fmt.Sprintln(args...), t.offset)
}
func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
t.fail(fmt.Sprintf(format, args...), t.offset)
}
func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
fmt.Fprintln(t.writer, args...)
}
func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
fmt.Fprintf(t.writer, format, args...)
}
func (t *ginkgoTestingTProxy) Failed() bool {
return false
}
func (t *ginkgoTestingTProxy) Parallel() {
}
func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
fmt.Println(args...)
}
func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
fmt.Printf(format, args...)
}
func (t *ginkgoTestingTProxy) SkipNow() {
}
func (t *ginkgoTestingTProxy) Skipped() bool {
return false
}


@ -0,0 +1,31 @@
package writer
type FakeGinkgoWriter struct {
EventStream []string
}
func NewFake() *FakeGinkgoWriter {
return &FakeGinkgoWriter{
EventStream: []string{},
}
}
func (writer *FakeGinkgoWriter) AddEvent(event string) {
writer.EventStream = append(writer.EventStream, event)
}
func (writer *FakeGinkgoWriter) Truncate() {
writer.EventStream = append(writer.EventStream, "TRUNCATE")
}
func (writer *FakeGinkgoWriter) DumpOut() {
writer.EventStream = append(writer.EventStream, "DUMP")
}
func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
}
func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
return 0, nil
}


@ -0,0 +1,71 @@
package writer
import (
"bytes"
"io"
"sync"
)
type WriterInterface interface {
io.Writer
Truncate()
DumpOut()
DumpOutWithHeader(header string)
}
type Writer struct {
buffer *bytes.Buffer
outWriter io.Writer
lock *sync.Mutex
stream bool
}
func New(outWriter io.Writer) *Writer {
return &Writer{
buffer: &bytes.Buffer{},
lock: &sync.Mutex{},
outWriter: outWriter,
stream: true,
}
}
func (w *Writer) SetStream(stream bool) {
w.lock.Lock()
defer w.lock.Unlock()
w.stream = stream
}
func (w *Writer) Write(b []byte) (n int, err error) {
w.lock.Lock()
defer w.lock.Unlock()
if w.stream {
return w.outWriter.Write(b)
} else {
return w.buffer.Write(b)
}
}
func (w *Writer) Truncate() {
w.lock.Lock()
defer w.lock.Unlock()
w.buffer.Reset()
}
func (w *Writer) DumpOut() {
w.lock.Lock()
defer w.lock.Unlock()
if !w.stream {
w.buffer.WriteTo(w.outWriter)
}
}
func (w *Writer) DumpOutWithHeader(header string) {
w.lock.Lock()
defer w.lock.Unlock()
if !w.stream && w.buffer.Len() > 0 {
w.outWriter.Write([]byte(header))
w.buffer.WriteTo(w.outWriter)
}
}


@ -0,0 +1,83 @@
/*
Ginkgo's Default Reporter
A number of command line flags are available to tweak Ginkgo's default output.
These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
*/
package reporters
import (
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters/stenographer"
"github.com/onsi/ginkgo/types"
)
type DefaultReporter struct {
config config.DefaultReporterConfigType
stenographer stenographer.Stenographer
specSummaries []*types.SpecSummary
}
func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
return &DefaultReporter{
config: config,
stenographer: stenographer,
}
}
func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
if config.ParallelTotal > 1 {
reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct)
}
reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
}
func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
if setupSummary.State != types.SpecStatePassed {
reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
}
}
func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
if setupSummary.State != types.SpecStatePassed {
reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
}
}
func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
reporter.stenographer.AnnounceSpecWillRun(specSummary)
}
}
func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
switch specSummary.State {
case types.SpecStatePassed:
if specSummary.IsMeasurement {
reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
} else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
} else {
reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
}
case types.SpecStatePending:
reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
case types.SpecStateSkipped:
reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
case types.SpecStateTimedOut:
reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
case types.SpecStatePanicked:
reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
case types.SpecStateFailed:
reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
}
reporter.specSummaries = append(reporter.specSummaries, specSummary)
}
func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
reporter.stenographer.SummarizeFailures(reporter.specSummaries)
reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
}
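A rough sketch (not part of the vendored source) of how such a reporter is typically constructed, assuming the parsed flag values live in the package-level config.DefaultReporterConfig:
package main
import (
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/reporters"
	"github.com/onsi/ginkgo/reporters/stenographer"
)
// newConsoleReporter builds a console reporter that honors the parsed --noColor flag.
func newConsoleReporter() *reporters.DefaultReporter {
	steno := stenographer.New(!config.DefaultReporterConfig.NoColor)
	return reporters.NewDefaultReporter(config.DefaultReporterConfig, steno)
}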


@ -0,0 +1,59 @@
package reporters
import (
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/types"
)
//FakeReporter is useful for testing purposes
type FakeReporter struct {
Config config.GinkgoConfigType
BeginSummary *types.SuiteSummary
BeforeSuiteSummary *types.SetupSummary
SpecWillRunSummaries []*types.SpecSummary
SpecSummaries []*types.SpecSummary
AfterSuiteSummary *types.SetupSummary
EndSummary *types.SuiteSummary
SpecWillRunStub func(specSummary *types.SpecSummary)
SpecDidCompleteStub func(specSummary *types.SpecSummary)
}
func NewFakeReporter() *FakeReporter {
return &FakeReporter{
SpecWillRunSummaries: make([]*types.SpecSummary, 0),
SpecSummaries: make([]*types.SpecSummary, 0),
}
}
func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
fakeR.Config = config
fakeR.BeginSummary = summary
}
func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
fakeR.BeforeSuiteSummary = setupSummary
}
func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
if fakeR.SpecWillRunStub != nil {
fakeR.SpecWillRunStub(specSummary)
}
fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
}
func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
if fakeR.SpecDidCompleteStub != nil {
fakeR.SpecDidCompleteStub(specSummary)
}
fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
}
func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
fakeR.AfterSuiteSummary = setupSummary
}
func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
fakeR.EndSummary = summary
}


@ -0,0 +1,139 @@
/*
JUnit XML Reporter for Ginkgo
For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
*/
package reporters
import (
"encoding/xml"
"fmt"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/types"
"os"
"strings"
)
type JUnitTestSuite struct {
XMLName xml.Name `xml:"testsuite"`
TestCases []JUnitTestCase `xml:"testcase"`
Tests int `xml:"tests,attr"`
Failures int `xml:"failures,attr"`
Time float64 `xml:"time,attr"`
}
type JUnitTestCase struct {
Name string `xml:"name,attr"`
ClassName string `xml:"classname,attr"`
FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
Skipped *JUnitSkipped `xml:"skipped,omitempty"`
Time float64 `xml:"time,attr"`
}
type JUnitFailureMessage struct {
Type string `xml:"type,attr"`
Message string `xml:",chardata"`
}
type JUnitSkipped struct {
XMLName xml.Name `xml:"skipped"`
}
type JUnitReporter struct {
suite JUnitTestSuite
filename string
testSuiteName string
}
//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename.
func NewJUnitReporter(filename string) *JUnitReporter {
return &JUnitReporter{
filename: filename,
}
}
func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
reporter.suite = JUnitTestSuite{
Tests: summary.NumberOfSpecsThatWillBeRun,
TestCases: []JUnitTestCase{},
}
reporter.testSuiteName = summary.SuiteDescription
}
func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
}
func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
reporter.handleSetupSummary("BeforeSuite", setupSummary)
}
func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
reporter.handleSetupSummary("AfterSuite", setupSummary)
}
func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
if setupSummary.State != types.SpecStatePassed {
testCase := JUnitTestCase{
Name: name,
ClassName: reporter.testSuiteName,
}
testCase.FailureMessage = &JUnitFailureMessage{
Type: reporter.failureTypeForState(setupSummary.State),
Message: fmt.Sprintf("%s\n%s", setupSummary.Failure.ComponentCodeLocation.String(), setupSummary.Failure.Message),
}
testCase.Time = setupSummary.RunTime.Seconds()
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
}
}
func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
testCase := JUnitTestCase{
Name: strings.Join(specSummary.ComponentTexts[1:], " "),
ClassName: reporter.testSuiteName,
}
if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
testCase.FailureMessage = &JUnitFailureMessage{
Type: reporter.failureTypeForState(specSummary.State),
Message: fmt.Sprintf("%s\n%s", specSummary.Failure.ComponentCodeLocation.String(), specSummary.Failure.Message),
}
}
if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
testCase.Skipped = &JUnitSkipped{}
}
testCase.Time = specSummary.RunTime.Seconds()
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
}
func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
reporter.suite.Time = summary.RunTime.Seconds()
reporter.suite.Failures = summary.NumberOfFailedSpecs
file, err := os.Create(reporter.filename)
if err != nil {
fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error())
}
defer file.Close()
file.WriteString(xml.Header)
encoder := xml.NewEncoder(file)
encoder.Indent(" ", " ")
err = encoder.Encode(reporter.suite)
if err != nil {
fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error())
}
}
func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
switch state {
case types.SpecStateFailed:
return "Failure"
case types.SpecStateTimedOut:
return "Timeout"
case types.SpecStatePanicked:
return "Panic"
default:
return ""
}
}
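For orientation only (not part of the vendored source): the documented way to attach this reporter is in the suite bootstrap via RunSpecsWithDefaultAndCustomReporters; the suite name and output filename below are placeholders:
package books_test
import (
	"testing"
	. "github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/reporters"
	. "github.com/onsi/gomega"
)
func TestBooks(t *testing.T) {
	RegisterFailHandler(Fail)
	// Emit junit.xml in addition to the normal console output.
	junitReporter := reporters.NewJUnitReporter("junit.xml")
	RunSpecsWithDefaultAndCustomReporters(t, "Books Suite", []Reporter{junitReporter})
}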

15
vendor/github.com/onsi/ginkgo/reporters/reporter.go generated vendored Normal file

@ -0,0 +1,15 @@
package reporters
import (
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/types"
)
type Reporter interface {
SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
BeforeSuiteDidRun(setupSummary *types.SetupSummary)
SpecWillRun(specSummary *types.SpecSummary)
SpecDidComplete(specSummary *types.SpecSummary)
AfterSuiteDidRun(setupSummary *types.SetupSummary)
SpecSuiteDidEnd(summary *types.SuiteSummary)
}
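A custom reporter only needs to satisfy this interface; a minimal no-op sketch (the type and package names are hypothetical) might look like:
package myreporter
import (
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)
// quietReporter implements the Reporter interface but deliberately emits nothing.
type quietReporter struct{}
func (quietReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {}
func (quietReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary)                           {}
func (quietReporter) SpecWillRun(specSummary *types.SpecSummary)                                   {}
func (quietReporter) SpecDidComplete(specSummary *types.SpecSummary)                               {}
func (quietReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary)                            {}
func (quietReporter) SpecSuiteDidEnd(summary *types.SuiteSummary)                                  {}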


@ -0,0 +1,64 @@
package stenographer
import (
"fmt"
"strings"
)
func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
var out string
if len(args) > 0 {
out = fmt.Sprintf(format, args...)
} else {
out = format
}
if s.color {
return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
} else {
return out
}
}
func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
fmt.Println(text)
fmt.Println(strings.Repeat(bannerCharacter, len(text)))
}
func (s *consoleStenographer) printNewLine() {
fmt.Println("")
}
func (s *consoleStenographer) printDelimiter() {
fmt.Println(s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
}
func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
fmt.Print(s.indent(indentation, format, args...))
}
func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
fmt.Println(s.indent(indentation, format, args...))
}
func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
var text string
if len(args) > 0 {
text = fmt.Sprintf(format, args...)
} else {
text = format
}
stringArray := strings.Split(text, "\n")
padding := ""
if indentation >= 0 {
padding = strings.Repeat(" ", indentation)
}
for i, s := range stringArray {
stringArray[i] = fmt.Sprintf("%s%s", padding, s)
}
return strings.Join(stringArray, "\n")
}


@ -0,0 +1,138 @@
package stenographer
import (
"sync"
"github.com/onsi/ginkgo/types"
)
func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
return FakeStenographerCall{
Method: method,
Args: args,
}
}
type FakeStenographer struct {
calls []FakeStenographerCall
lock *sync.Mutex
}
type FakeStenographerCall struct {
Method string
Args []interface{}
}
func NewFakeStenographer() *FakeStenographer {
stenographer := &FakeStenographer{
lock: &sync.Mutex{},
}
stenographer.Reset()
return stenographer
}
func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
stenographer.lock.Lock()
defer stenographer.lock.Unlock()
return stenographer.calls
}
func (stenographer *FakeStenographer) Reset() {
stenographer.lock.Lock()
defer stenographer.lock.Unlock()
stenographer.calls = make([]FakeStenographerCall, 0)
}
func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
stenographer.lock.Lock()
defer stenographer.lock.Unlock()
results := make([]FakeStenographerCall, 0)
for _, call := range stenographer.calls {
if call.Method == method {
results = append(results, call)
}
}
return results
}
func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
stenographer.lock.Lock()
defer stenographer.lock.Unlock()
stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
}
func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
}
func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
}
func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
stenographer.registerCall("AnnounceParallelRun", node, nodes, specsToRun, totalSpecs, succinct)
}
func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
}
func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
}
func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
stenographer.registerCall("AnnounceSpecWillRun", spec)
}
func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
}
func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
}
func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
stenographer.registerCall("AnnounceCapturedOutput", output)
}
func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
stenographer.registerCall("AnnounceSuccesfulSpec", spec)
}
func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct)
}
func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct)
}
func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
}
func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
}
func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
}
func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
}
func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
}
func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
stenographer.registerCall("SummarizeFailures", summaries)
}


@ -0,0 +1,549 @@
/*
The stenographer is used by Ginkgo's reporters to generate output.
Move along, nothing to see here.
*/
package stenographer
import (
"fmt"
"runtime"
"strings"
"github.com/onsi/ginkgo/types"
)
const defaultStyle = "\x1b[0m"
const boldStyle = "\x1b[1m"
const redColor = "\x1b[91m"
const greenColor = "\x1b[32m"
const yellowColor = "\x1b[33m"
const cyanColor = "\x1b[36m"
const grayColor = "\x1b[90m"
const lightGrayColor = "\x1b[37m"
type cursorStateType int
const (
cursorStateTop cursorStateType = iota
cursorStateStreaming
cursorStateMidBlock
cursorStateEndBlock
)
type Stenographer interface {
AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
AnnounceAggregatedParallelRun(nodes int, succinct bool)
AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool)
AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)
AnnounceSpecWillRun(spec *types.SpecSummary)
AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
AnnounceCapturedOutput(output string)
AnnounceSuccesfulSpec(spec *types.SpecSummary)
AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool)
AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)
AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)
SummarizeFailures(summaries []*types.SpecSummary)
}
func New(color bool) Stenographer {
denoter := "•"
if runtime.GOOS == "windows" {
denoter = "+"
}
return &consoleStenographer{
color: color,
denoter: denoter,
cursorState: cursorStateTop,
}
}
type consoleStenographer struct {
color bool
denoter string
cursorState cursorStateType
}
var alternatingColors = []string{defaultStyle, grayColor}
func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
if succinct {
s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
return
}
s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
if randomizingAll {
s.print(0, " - Will randomize all specs")
}
s.printNewLine()
}
func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
if succinct {
s.print(0, "- node #%d ", node)
return
}
s.println(0,
"Parallel test node %s/%s. Assigned %s of %s specs.",
s.colorize(boldStyle, "%d", node),
s.colorize(boldStyle, "%d", nodes),
s.colorize(boldStyle, "%d", specsToRun),
s.colorize(boldStyle, "%d", totalSpecs),
)
s.printNewLine()
}
func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
if succinct {
s.print(0, "- %d nodes ", nodes)
return
}
s.println(0,
"Running in parallel across %s nodes",
s.colorize(boldStyle, "%d", nodes),
)
s.printNewLine()
}
func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
if succinct {
s.print(0, "- %d/%d specs ", specsToRun, total)
s.stream()
return
}
s.println(0,
"Will run %s of %s specs",
s.colorize(boldStyle, "%d", specsToRun),
s.colorize(boldStyle, "%d", total),
)
s.printNewLine()
}
func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
if succinct && summary.SuiteSucceeded {
s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
return
}
s.printNewLine()
color := greenColor
if !summary.SuiteSucceeded {
color = redColor
}
s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))
status := ""
if summary.SuiteSucceeded {
status = s.colorize(boldStyle+greenColor, "SUCCESS!")
} else {
status = s.colorize(boldStyle+redColor, "FAIL!")
}
s.print(0,
"%s -- %s | %s | %s | %s ",
status,
s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs),
s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
)
}
func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
s.startBlock()
for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
}
indentation := 0
if len(spec.ComponentTexts) > 2 {
indentation = 1
s.printNewLine()
}
index := len(spec.ComponentTexts) - 1
s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
s.printNewLine()
s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
s.printNewLine()
s.midBlock()
}
func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
}
func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
}
func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
s.startBlock()
var message string
switch summary.State {
case types.SpecStateFailed:
message = "Failure"
case types.SpecStatePanicked:
message = "Panic"
case types.SpecStateTimedOut:
message = "Timeout"
}
s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))
indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)
s.printNewLine()
s.printFailure(indentation, summary.State, summary.Failure, fullTrace)
s.endBlock()
}
func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
if output == "" {
return
}
s.startBlock()
s.println(0, output)
s.midBlock()
}
func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
s.print(0, s.colorize(greenColor, s.denoter))
s.stream()
}
func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
s.printBlockWithMessage(
s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
"",
spec,
succinct,
)
}
func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
s.printBlockWithMessage(
s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
s.measurementReport(spec, succinct),
spec,
succinct,
)
}
func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
if noisy {
s.printBlockWithMessage(
s.colorize(yellowColor, "P [PENDING]"),
"",
spec,
false,
)
} else {
s.print(0, s.colorize(yellowColor, "P"))
s.stream()
}
}
func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
// Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
if succinct || spec.Failure == (types.SpecFailure{}) {
s.print(0, s.colorize(cyanColor, "S"))
s.stream()
} else {
s.startBlock()
s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
s.printNewLine()
s.printSkip(indentation, spec.Failure)
s.endBlock()
}
}
func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace)
}
func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace)
}
func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace)
}
func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
failingSpecs := []*types.SpecSummary{}
for _, summary := range summaries {
if summary.HasFailureState() {
failingSpecs = append(failingSpecs, summary)
}
}
if len(failingSpecs) == 0 {
return
}
s.printNewLine()
s.printNewLine()
plural := "s"
if len(failingSpecs) == 1 {
plural = ""
}
s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
for _, summary := range failingSpecs {
s.printNewLine()
if summary.HasFailureState() {
if summary.TimedOut() {
s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
} else if summary.Panicked() {
s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
} else if summary.Failed() {
s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
}
s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
s.printNewLine()
s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
}
}
}
func (s *consoleStenographer) startBlock() {
if s.cursorState == cursorStateStreaming {
s.printNewLine()
s.printDelimiter()
} else if s.cursorState == cursorStateMidBlock {
s.printNewLine()
}
}
func (s *consoleStenographer) midBlock() {
s.cursorState = cursorStateMidBlock
}
func (s *consoleStenographer) endBlock() {
s.printDelimiter()
s.cursorState = cursorStateEndBlock
}
func (s *consoleStenographer) stream() {
s.cursorState = cursorStateStreaming
}
func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
s.startBlock()
s.println(0, header)
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)
if message != "" {
s.printNewLine()
s.println(indentation, message)
}
s.endBlock()
}
func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
s.startBlock()
s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
s.printNewLine()
s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
s.endBlock()
}
func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
switch failedComponentType {
case types.SpecComponentTypeBeforeSuite:
return " in Suite Setup (BeforeSuite)"
case types.SpecComponentTypeAfterSuite:
return " in Suite Teardown (AfterSuite)"
case types.SpecComponentTypeBeforeEach:
return " in Spec Setup (BeforeEach)"
case types.SpecComponentTypeJustBeforeEach:
return " in Spec Setup (JustBeforeEach)"
case types.SpecComponentTypeAfterEach:
return " in Spec Teardown (AfterEach)"
}
return ""
}
func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
s.println(indentation, s.colorize(cyanColor, spec.Message))
s.printNewLine()
s.println(indentation, spec.Location.String())
}
func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
if state == types.SpecStatePanicked {
s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
s.println(indentation, failure.Location.String())
s.printNewLine()
s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
s.println(indentation, failure.Location.FullStackTrace)
} else {
s.println(indentation, s.colorize(redColor, failure.Message))
s.printNewLine()
s.println(indentation, failure.Location.String())
if fullTrace {
s.printNewLine()
s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
s.println(indentation, failure.Location.FullStackTrace)
}
}
}
func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
startIndex := 1
indentation := 0
if len(componentTexts) == 1 {
startIndex = 0
}
for i := startIndex; i < len(componentTexts); i++ {
if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
color := redColor
if state == types.SpecStateSkipped {
color = cyanColor
}
blockType := ""
switch failedComponentType {
case types.SpecComponentTypeBeforeSuite:
blockType = "BeforeSuite"
case types.SpecComponentTypeAfterSuite:
blockType = "AfterSuite"
case types.SpecComponentTypeBeforeEach:
blockType = "BeforeEach"
case types.SpecComponentTypeJustBeforeEach:
blockType = "JustBeforeEach"
case types.SpecComponentTypeAfterEach:
blockType = "AfterEach"
case types.SpecComponentTypeIt:
blockType = "It"
case types.SpecComponentTypeMeasure:
blockType = "Measurement"
}
if succinct {
s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
} else {
s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
}
} else {
if succinct {
s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
} else {
s.println(indentation, componentTexts[i])
s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
}
}
indentation++
}
return indentation
}
func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)
if succinct {
if len(componentTexts) > 0 {
s.printNewLine()
s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
}
s.printNewLine()
indentation = 1
} else {
indentation--
}
return indentation
}
func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
orderedKeys := make([]string, len(measurements))
for key, measurement := range measurements {
orderedKeys[measurement.Order] = key
}
return orderedKeys
}
func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
if len(spec.Measurements) == 0 {
return "Found no measurements"
}
message := []string{}
orderedKeys := s.orderedMeasurementKeys(spec.Measurements)
if succinct {
message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
for _, key := range orderedKeys {
measurement := spec.Measurements[key]
message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
s.colorize(boldStyle, "%s", measurement.Name),
measurement.SmallestLabel,
s.colorize(greenColor, "%.3f", measurement.Smallest),
measurement.Units,
measurement.AverageLabel,
s.colorize(cyanColor, "%.3f", measurement.Average),
measurement.Units,
s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
measurement.Units,
measurement.LargestLabel,
s.colorize(redColor, "%.3f", measurement.Largest),
measurement.Units,
))
}
} else {
message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
for _, key := range orderedKeys {
measurement := spec.Measurements[key]
info := ""
if measurement.Info != nil {
message = append(message, fmt.Sprintf("%v", measurement.Info))
}
message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s",
s.colorize(boldStyle, "%s", measurement.Name),
info,
measurement.SmallestLabel,
s.colorize(greenColor, "%.3f", measurement.Smallest),
measurement.Units,
measurement.LargestLabel,
s.colorize(redColor, "%.3f", measurement.Largest),
measurement.Units,
measurement.AverageLabel,
s.colorize(cyanColor, "%.3f", measurement.Average),
measurement.Units,
s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
measurement.Units,
))
}
}
return strings.Join(message, "\n")
}


@ -0,0 +1,92 @@
/*
TeamCity Reporter for Ginkgo
Makes use of TeamCity's support for Service Messages
http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
*/
package reporters
import (
"fmt"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/types"
"io"
"strings"
)
const (
messageId = "##teamcity"
)
type TeamCityReporter struct {
writer io.Writer
testSuiteName string
}
func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
return &TeamCityReporter{
writer: writer,
}
}
func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
reporter.testSuiteName = escape(summary.SuiteDescription)
fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName)
}
func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
reporter.handleSetupSummary("BeforeSuite", setupSummary)
}
func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
reporter.handleSetupSummary("AfterSuite", setupSummary)
}
func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
if setupSummary.State != types.SpecStatePassed {
testName := escape(name)
fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
message := escape(setupSummary.Failure.ComponentCodeLocation.String())
details := escape(setupSummary.Failure.Message)
fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000
fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
}
}
func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) {
testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
}
func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) {
testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
message := escape(specSummary.Failure.ComponentCodeLocation.String())
details := escape(specSummary.Failure.Message)
fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
}
if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName)
}
durationInMilliseconds := specSummary.RunTime.Seconds() * 1000
fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
}
func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName)
}
func escape(output string) string {
output = strings.Replace(output, "|", "||", -1)
output = strings.Replace(output, "'", "|'", -1)
output = strings.Replace(output, "\n", "|n", -1)
output = strings.Replace(output, "\r", "|r", -1)
output = strings.Replace(output, "[", "|[", -1)
output = strings.Replace(output, "]", "|]", -1)
return output
}
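As with the JUnit reporter above, this one is attached in the suite bootstrap (sketch only; the suite name is a placeholder), usually with os.Stdout so TeamCity can pick the service messages out of the build log:
package books_test
import (
	"os"
	"testing"
	. "github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/reporters"
	. "github.com/onsi/gomega"
)
func TestBooks(t *testing.T) {
	RegisterFailHandler(Fail)
	// Stream TeamCity service messages to the build log.
	teamcityReporter := reporters.NewTeamCityReporter(os.Stdout)
	RunSpecsWithDefaultAndCustomReporters(t, "Books Suite", []Reporter{teamcityReporter})
}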

15
vendor/github.com/onsi/ginkgo/types/code_location.go generated vendored Normal file

@ -0,0 +1,15 @@
package types
import (
"fmt"
)
type CodeLocation struct {
FileName string
LineNumber int
FullStackTrace string
}
func (codeLocation CodeLocation) String() string {
return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
}

30
vendor/github.com/onsi/ginkgo/types/synchronization.go generated vendored Normal file

@ -0,0 +1,30 @@
package types
import (
"encoding/json"
)
type RemoteBeforeSuiteState int
const (
RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota
RemoteBeforeSuiteStatePending
RemoteBeforeSuiteStatePassed
RemoteBeforeSuiteStateFailed
RemoteBeforeSuiteStateDisappeared
)
type RemoteBeforeSuiteData struct {
Data []byte
State RemoteBeforeSuiteState
}
func (r RemoteBeforeSuiteData) ToJSON() []byte {
data, _ := json.Marshal(r)
return data
}
type RemoteAfterSuiteData struct {
CanRun bool
}

143
vendor/github.com/onsi/ginkgo/types/types.go generated vendored Normal file

@ -0,0 +1,143 @@
package types
import "time"
const GINKGO_FOCUS_EXIT_CODE = 197
type SuiteSummary struct {
SuiteDescription string
SuiteSucceeded bool
SuiteID string
NumberOfSpecsBeforeParallelization int
NumberOfTotalSpecs int
NumberOfSpecsThatWillBeRun int
NumberOfPendingSpecs int
NumberOfSkippedSpecs int
NumberOfPassedSpecs int
NumberOfFailedSpecs int
RunTime time.Duration
}
type SpecSummary struct {
ComponentTexts []string
ComponentCodeLocations []CodeLocation
State SpecState
RunTime time.Duration
Failure SpecFailure
IsMeasurement bool
NumberOfSamples int
Measurements map[string]*SpecMeasurement
CapturedOutput string
SuiteID string
}
func (s SpecSummary) HasFailureState() bool {
return s.State.IsFailure()
}
func (s SpecSummary) TimedOut() bool {
return s.State == SpecStateTimedOut
}
func (s SpecSummary) Panicked() bool {
return s.State == SpecStatePanicked
}
func (s SpecSummary) Failed() bool {
return s.State == SpecStateFailed
}
func (s SpecSummary) Passed() bool {
return s.State == SpecStatePassed
}
func (s SpecSummary) Skipped() bool {
return s.State == SpecStateSkipped
}
func (s SpecSummary) Pending() bool {
return s.State == SpecStatePending
}
type SetupSummary struct {
ComponentType SpecComponentType
CodeLocation CodeLocation
State SpecState
RunTime time.Duration
Failure SpecFailure
CapturedOutput string
SuiteID string
}
type SpecFailure struct {
Message string
Location CodeLocation
ForwardedPanic string
ComponentIndex int
ComponentType SpecComponentType
ComponentCodeLocation CodeLocation
}
type SpecMeasurement struct {
Name string
Info interface{}
Order int
Results []float64
Smallest float64
Largest float64
Average float64
StdDeviation float64
SmallestLabel string
LargestLabel string
AverageLabel string
Units string
}
type SpecState uint
const (
SpecStateInvalid SpecState = iota
SpecStatePending
SpecStateSkipped
SpecStatePassed
SpecStateFailed
SpecStatePanicked
SpecStateTimedOut
)
func (state SpecState) IsFailure() bool {
return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed
}
type SpecComponentType uint
const (
SpecComponentTypeInvalid SpecComponentType = iota
SpecComponentTypeContainer
SpecComponentTypeBeforeSuite
SpecComponentTypeAfterSuite
SpecComponentTypeBeforeEach
SpecComponentTypeJustBeforeEach
SpecComponentTypeAfterEach
SpecComponentTypeIt
SpecComponentTypeMeasure
)
type FlagType uint
const (
FlagTypeNone FlagType = iota
FlagTypeFocused
FlagTypePending
)

3
vendor/github.com/onsi/gomega/.gitignore generated vendored Normal file
View File

@ -0,0 +1,3 @@
.DS_Store
*.test
.

11
vendor/github.com/onsi/gomega/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,11 @@
language: go
go:
- 1.4
- 1.5
install:
- go get -v ./...
- go get github.com/onsi/ginkgo
- go install github.com/onsi/ginkgo/ginkgo
script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race

70
vendor/github.com/onsi/gomega/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,70 @@
## HEAD
Improvements:
- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout.
- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests.
- `Receive()` no longer errors when passed a closed channel; it's perfectly fine to attempt to read from a closed channel, so Ω(c).Should(Receive()) always fails and Ω(c).ShouldNot(Receive()) always passes with a closed channel.
- Added `HavePrefix` and `HaveSuffix` matchers.
- `ghttp` can now handle concurrent requests.
- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
- Improved `ghttp`'s behavior around failing assertions and panics:
- If a registered handler makes a failing assertion `ghttp` will return `500`.
- If a registered handler panics, `ghttp` will return `500` *and* fail the test. This is new behavior that may cause existing code to break. This code is almost certainly incorrect and creating a false positive.
- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives.
- Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher
- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers
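
A hedged sketch of two of the additions above (illustrative only, not part of the upstream changelog; it assumes a spec with a registered fail handler):

    c := make(chan string) // unbuffered, so a bare send would block
    go func() { fmt.Println(<-c) }()
    Eventually(c).Should(BeSent("hello")) // sends with Eventually's timeout instead of blocking forever

    Ω("gomega").Should(And(HavePrefix("go"), HaveSuffix("mega"))) // composite matcher
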
Bug Fixes:
- gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure.
- `ContainElement` no longer bails if a passed-in matcher errors.
## 1.0 (8/2/2014)
No changes. Dropping "beta" from the version number.
## 1.0.0-beta (7/8/2014)
Breaking Changes:
- Changed OmegaMatcher interface. Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead.
- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher. Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher
New Test-Support Features:
- `ghttp`: supports testing http clients
- Provides a flexible fake http server
- Provides a collection of chainable http handlers that perform assertions.
- `gbytes`: supports making ordered assertions against streams of data
- Provides a `gbytes.Buffer`
- Provides a `Say` matcher to perform ordered assertions against output data
- `gexec`: supports testing external processes
- Provides support for building Go binaries
- Wraps and starts `exec.Cmd` commands
- Makes it easy to assert against stdout and stderr
- Makes it easy to send signals and wait for processes to exit
- Provides an `Exit` matcher to assert against exit code.
DSL Changes:
- `Eventually` and `Consistently` can accept `time.Duration` interval and polling inputs.
- The default timeouts for `Eventually` and `Consistently` are now configurable.
New Matchers:
- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map.
- `BeTemporally`: like `BeNumerically` but for `time.Time`
- `HaveKeyWithValue`: asserts a map has a given key with the given value.
Updated Matchers:
- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher.
- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed.
Misc:
- Start using semantic versioning
- Start maintaining changelog
Major refactor:
- Pull out Gomega's internals to `internal`

20
vendor/github.com/onsi/gomega/LICENSE generated vendored Normal file
View File

@ -0,0 +1,20 @@
Copyright (c) 2013-2014 Onsi Fakhouri
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

17
vendor/github.com/onsi/gomega/README.md generated vendored Normal file
View File

@ -0,0 +1,17 @@
![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png)
[![Build Status](https://travis-ci.org/onsi/gomega.png)](https://travis-ci.org/onsi/gomega)
Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers).
To discuss Gomega and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang
Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/)
## License
Gomega is MIT-Licensed
The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution. goraph has an MIT license.

276
vendor/github.com/onsi/gomega/format/format.go generated vendored Normal file
View File

@ -0,0 +1,276 @@
/*
Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information.
*/
package format
import (
"fmt"
"reflect"
"strings"
)
// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects
var MaxDepth = uint(10)
/*
By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output.
Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead.
Note that GoString and String don't always have all the information you need to understand why a test failed!
*/
var UseStringerRepresentation = false
//The default indentation string emitted by the format package
var Indent = " "
var longFormThreshold = 20
/*
Generates a formatted matcher success/failure message of the form:
Expected
<pretty printed actual>
<message>
<pretty printed expected>
If expected is omitted, then the message looks like:
Expected
<pretty printed actual>
<message>
*/
func Message(actual interface{}, message string, expected ...interface{}) string {
if len(expected) == 0 {
return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
} else {
return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1))
}
}
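// Illustrative sketch (editor's addition, not vendored source): format.Message(42, "to equal", 41)
// produces a string of the form
//
//	Expected
//	    <int>: 42
//	to equal
//	    <int>: 41
//
// where each object line is indented by format.Indent and annotated with its type.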
/*
Pretty prints the passed in object at the passed in indentation level.
Object recurses into deeply nested objects emitting pretty-printed representations of their components.
Modify format.MaxDepth to control how deep the recursion is allowed to go
Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of
recursing into the object.
*/
func Object(object interface{}, indentation uint) string {
indent := strings.Repeat(Indent, int(indentation))
value := reflect.ValueOf(object)
return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation))
}
/*
IndentString takes a string and indents each line by the specified amount.
*/
func IndentString(s string, indentation uint) string {
components := strings.Split(s, "\n")
result := ""
indent := strings.Repeat(Indent, int(indentation))
for i, component := range components {
result += indent + component
if i < len(components)-1 {
result += "\n"
}
}
return result
}
func formatType(object interface{}) string {
t := reflect.TypeOf(object)
if t == nil {
return "nil"
}
switch t.Kind() {
case reflect.Chan:
v := reflect.ValueOf(object)
return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
case reflect.Ptr:
return fmt.Sprintf("%T | %p", object, object)
case reflect.Slice:
v := reflect.ValueOf(object)
return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
case reflect.Map:
v := reflect.ValueOf(object)
return fmt.Sprintf("%T | len:%d", object, v.Len())
default:
return fmt.Sprintf("%T", object)
}
}
func formatValue(value reflect.Value, indentation uint) string {
if indentation > MaxDepth {
return "..."
}
if isNilValue(value) {
return "nil"
}
if UseStringerRepresentation {
if value.CanInterface() {
obj := value.Interface()
switch x := obj.(type) {
case fmt.GoStringer:
return x.GoString()
case fmt.Stringer:
return x.String()
}
}
}
switch value.Kind() {
case reflect.Bool:
return fmt.Sprintf("%v", value.Bool())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return fmt.Sprintf("%v", value.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return fmt.Sprintf("%v", value.Uint())
case reflect.Uintptr:
return fmt.Sprintf("0x%x", value.Uint())
case reflect.Float32, reflect.Float64:
return fmt.Sprintf("%v", value.Float())
case reflect.Complex64, reflect.Complex128:
return fmt.Sprintf("%v", value.Complex())
case reflect.Chan:
return fmt.Sprintf("0x%x", value.Pointer())
case reflect.Func:
return fmt.Sprintf("0x%x", value.Pointer())
case reflect.Ptr:
return formatValue(value.Elem(), indentation)
case reflect.Slice:
if value.Type().Elem().Kind() == reflect.Uint8 {
return formatString(value.Bytes(), indentation)
}
return formatSlice(value, indentation)
case reflect.String:
return formatString(value.String(), indentation)
case reflect.Array:
return formatSlice(value, indentation)
case reflect.Map:
return formatMap(value, indentation)
case reflect.Struct:
return formatStruct(value, indentation)
case reflect.Interface:
return formatValue(value.Elem(), indentation)
default:
if value.CanInterface() {
return fmt.Sprintf("%#v", value.Interface())
} else {
return fmt.Sprintf("%#v", value)
}
}
}
func formatString(object interface{}, indentation uint) string {
if indentation == 1 {
s := fmt.Sprintf("%s", object)
components := strings.Split(s, "\n")
result := ""
for i, component := range components {
if i == 0 {
result += component
} else {
result += Indent + component
}
if i < len(components)-1 {
result += "\n"
}
}
return fmt.Sprintf("%s", result)
} else {
return fmt.Sprintf("%q", object)
}
}
func formatSlice(v reflect.Value, indentation uint) string {
l := v.Len()
result := make([]string, l)
longest := 0
for i := 0; i < l; i++ {
result[i] = formatValue(v.Index(i), indentation+1)
if len(result[i]) > longest {
longest = len(result[i])
}
}
if longest > longFormThreshold {
indenter := strings.Repeat(Indent, int(indentation))
return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
} else {
return fmt.Sprintf("[%s]", strings.Join(result, ", "))
}
}
func formatMap(v reflect.Value, indentation uint) string {
l := v.Len()
result := make([]string, l)
longest := 0
for i, key := range v.MapKeys() {
value := v.MapIndex(key)
result[i] = fmt.Sprintf("%s: %s", formatValue(key, 0), formatValue(value, indentation+1))
if len(result[i]) > longest {
longest = len(result[i])
}
}
if longest > longFormThreshold {
indenter := strings.Repeat(Indent, int(indentation))
return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
} else {
return fmt.Sprintf("{%s}", strings.Join(result, ", "))
}
}
func formatStruct(v reflect.Value, indentation uint) string {
t := v.Type()
l := v.NumField()
result := []string{}
longest := 0
for i := 0; i < l; i++ {
structField := t.Field(i)
fieldEntry := v.Field(i)
representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1))
result = append(result, representation)
if len(representation) > longest {
longest = len(representation)
}
}
if longest > longFormThreshold {
indenter := strings.Repeat(Indent, int(indentation))
return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
} else {
return fmt.Sprintf("{%s}", strings.Join(result, ", "))
}
}
func isNilValue(a reflect.Value) bool {
switch a.Kind() {
case reflect.Invalid:
return true
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return a.IsNil()
}
return false
}
func isNil(a interface{}) bool {
if a == nil {
return true
}
switch reflect.TypeOf(a).Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return reflect.ValueOf(a).IsNil()
}
return false
}

229
vendor/github.com/onsi/gomega/gbytes/buffer.go generated vendored Normal file
View File

@ -0,0 +1,229 @@
/*
Package gbytes provides a buffer that supports incrementally detecting input.
You use gbytes.Buffer with the gbytes.Say matcher. When Say finds a match, it fast-forwards the buffer's read cursor to the end of that match.
Subsequent matches against the buffer will only operate against data that appears *after* the read cursor.
The read cursor is an opaque implementation detail that you cannot access. You should use the Say matcher to sift through the buffer. You can always
access the entire buffer's contents with Contents().
*/
package gbytes
import (
"errors"
"fmt"
"io"
"regexp"
"sync"
"time"
)
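// Illustrative sketch (editor's addition, not part of the vendored source): the read-cursor
// behaviour described in the package comment, assuming a test with a registered fail handler.
//
//	buffer := gbytes.NewBuffer()
//	fmt.Fprint(buffer, "first second")
//	Ω(buffer).Should(gbytes.Say("first"))    // matches; the cursor moves past "first"
//	Ω(buffer).ShouldNot(gbytes.Say("first")) // already consumed by the read cursor
//	Ω(buffer).Should(gbytes.Say("second"))   // unread data is still matchable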
/*
gbytes.Buffer implements an io.Writer and can be used with the gbytes.Say matcher.
You should only use a gbytes.Buffer in test code. It stores all writes in an in-memory buffer - behavior that is inappropriate for production code!
*/
type Buffer struct {
contents []byte
readCursor uint64
lock *sync.Mutex
detectCloser chan interface{}
closed bool
}
/*
NewBuffer returns a new gbytes.Buffer
*/
func NewBuffer() *Buffer {
return &Buffer{
lock: &sync.Mutex{},
}
}
/*
BufferWithBytes returns a new gbytes.Buffer seeded with the passed in bytes
*/
func BufferWithBytes(bytes []byte) *Buffer {
return &Buffer{
lock: &sync.Mutex{},
contents: bytes,
}
}
/*
Write implements the io.Writer interface
*/
func (b *Buffer) Write(p []byte) (n int, err error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.closed {
return 0, errors.New("attempt to write to closed buffer")
}
b.contents = append(b.contents, p...)
return len(p), nil
}
/*
Read implements the io.Reader interface. It advances the
cursor as it reads.
Returns an error if called after Close.
*/
func (b *Buffer) Read(d []byte) (int, error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.closed {
return 0, errors.New("attempt to read from closed buffer")
}
if uint64(len(b.contents)) <= b.readCursor {
return 0, io.EOF
}
n := copy(d, b.contents[b.readCursor:])
b.readCursor += uint64(n)
return n, nil
}
/*
Close signifies that the buffer will no longer be written to
*/
func (b *Buffer) Close() error {
b.lock.Lock()
defer b.lock.Unlock()
b.closed = true
return nil
}
/*
Closed returns true if the buffer has been closed
*/
func (b *Buffer) Closed() bool {
b.lock.Lock()
defer b.lock.Unlock()
return b.closed
}
/*
Contents returns all data ever written to the buffer.
*/
func (b *Buffer) Contents() []byte {
b.lock.Lock()
defer b.lock.Unlock()
contents := make([]byte, len(b.contents))
copy(contents, b.contents)
return contents
}
/*
Detect takes a regular expression and returns a channel.
The channel will receive true the first time data matching the regular expression is written to the buffer.
The channel is subsequently closed and the buffer's read-cursor is fast-forwarded to just after the matching region.
You typically don't need to use Detect and should use the gbytes.Say matcher instead. Detect is useful, however, in cases where your code must
branch and handle different outputs written to the buffer.
For example, consider a buffer hooked up to the stdout of a client library. You may (or may not, depending on state outside of your control) need to authenticate the client library.
You could do something like:
select {
case <-buffer.Detect("You are not logged in"):
//log in
case <-buffer.Detect("Success"):
//carry on
case <-time.After(time.Second):
//welp
}
buffer.CancelDetects()
You should always call CancelDetects after using Detect. This will close any channels that have not detected and clean up the goroutines that were spawned to support them.
Finally, you can pass Detect a format string followed by variadic arguments. This will construct the regexp using fmt.Sprintf.
*/
func (b *Buffer) Detect(desired string, args ...interface{}) chan bool {
formattedRegexp := desired
if len(args) > 0 {
formattedRegexp = fmt.Sprintf(desired, args...)
}
re := regexp.MustCompile(formattedRegexp)
b.lock.Lock()
defer b.lock.Unlock()
if b.detectCloser == nil {
b.detectCloser = make(chan interface{})
}
closer := b.detectCloser
response := make(chan bool)
go func() {
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
defer close(response)
for {
select {
case <-ticker.C:
b.lock.Lock()
data, cursor := b.contents[b.readCursor:], b.readCursor
loc := re.FindIndex(data)
b.lock.Unlock()
if loc != nil {
response <- true
b.lock.Lock()
newCursorPosition := cursor + uint64(loc[1])
if newCursorPosition >= b.readCursor {
b.readCursor = newCursorPosition
}
b.lock.Unlock()
return
}
case <-closer:
return
}
}
}()
return response
}
/*
CancelDetects cancels any pending detects and cleans up their goroutines. You should always call this when you're done with a set of Detect channels.
*/
func (b *Buffer) CancelDetects() {
b.lock.Lock()
defer b.lock.Unlock()
close(b.detectCloser)
b.detectCloser = nil
}
func (b *Buffer) didSay(re *regexp.Regexp) (bool, []byte) {
b.lock.Lock()
defer b.lock.Unlock()
unreadBytes := b.contents[b.readCursor:]
copyOfUnreadBytes := make([]byte, len(unreadBytes))
copy(copyOfUnreadBytes, unreadBytes)
loc := re.FindIndex(unreadBytes)
if loc != nil {
b.readCursor += uint64(loc[1])
return true, copyOfUnreadBytes
} else {
return false, copyOfUnreadBytes
}
}

105
vendor/github.com/onsi/gomega/gbytes/say_matcher.go generated vendored Normal file
View File

@ -0,0 +1,105 @@
package gbytes
import (
"fmt"
"regexp"
"github.com/onsi/gomega/format"
)
//Objects satisfying the BufferProvider can be used with the Say matcher.
type BufferProvider interface {
Buffer() *Buffer
}
/*
Say is a Gomega matcher that operates on gbytes.Buffers:
Ω(buffer).Should(Say("something"))
will succeed if the unread portion of the buffer matches the regular expression "something".
When Say succeeds, it fast-forwards the gbytes.Buffer's read cursor to just after the successful match.
Thus, subsequent calls to Say will only match against the unread portion of the buffer.
Say pairs very well with Eventually. To assert that a buffer eventually receives data matching "[123]-star" within 3 seconds you can:
Eventually(buffer, 3).Should(Say("[123]-star"))
Ditto with Consistently. To assert that a buffer does not receive data matching "never-see-this" for 1 second you can:
Consistently(buffer, 1).ShouldNot(Say("never-see-this"))
In addition to *gbytes.Buffers, Say can operate on objects that implement the gbytes.BufferProvider interface.
In such cases, Say simply operates on the *gbytes.Buffer returned by Buffer()
If the buffer is closed, the Say matcher will tell Eventually to abort.
*/
func Say(expected string, args ...interface{}) *sayMatcher {
formattedRegexp := expected
if len(args) > 0 {
formattedRegexp = fmt.Sprintf(expected, args...)
}
return &sayMatcher{
re: regexp.MustCompile(formattedRegexp),
}
}
type sayMatcher struct {
re *regexp.Regexp
receivedSayings []byte
}
func (m *sayMatcher) buffer(actual interface{}) (*Buffer, bool) {
var buffer *Buffer
switch x := actual.(type) {
case *Buffer:
buffer = x
case BufferProvider:
buffer = x.Buffer()
default:
return nil, false
}
return buffer, true
}
func (m *sayMatcher) Match(actual interface{}) (success bool, err error) {
buffer, ok := m.buffer(actual)
if !ok {
return false, fmt.Errorf("Say must be passed a *gbytes.Buffer or BufferProvider. Got:\n%s", format.Object(actual, 1))
}
didSay, sayings := buffer.didSay(m.re)
m.receivedSayings = sayings
return didSay, nil
}
func (m *sayMatcher) FailureMessage(actual interface{}) (message string) {
return fmt.Sprintf(
"Got stuck at:\n%s\nWaiting for:\n%s",
format.IndentString(string(m.receivedSayings), 1),
format.IndentString(m.re.String(), 1),
)
}
func (m *sayMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return fmt.Sprintf(
"Saw:\n%s\nWhich matches the unexpected:\n%s",
format.IndentString(string(m.receivedSayings), 1),
format.IndentString(m.re.String(), 1),
)
}
func (m *sayMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
switch x := actual.(type) {
case *Buffer:
return !x.Closed()
case BufferProvider:
return !x.Buffer().Closed()
default:
return true
}
}

78
vendor/github.com/onsi/gomega/gexec/build.go generated vendored Normal file
View File

@ -0,0 +1,78 @@
package gexec
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
)
var tmpDir string
/*
Build uses go build to compile the package at packagePath. The resulting binary is saved off in a temporary directory.
A path pointing to this binary is returned.
Build uses the $GOPATH set in your environment. It passes the variadic args on to `go build`.
*/
func Build(packagePath string, args ...string) (compiledPath string, err error) {
return BuildIn(os.Getenv("GOPATH"), packagePath, args...)
}
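// Illustrative sketch (editor's addition, not vendored source): typical use from a Ginkgo
// suite; the import path below is hypothetical and GinkgoWriter comes from the ginkgo DSL.
//
//	pathToCLI, err := gexec.Build("github.com/example/mycli")
//	Ω(err).ShouldNot(HaveOccurred())
//	command := exec.Command(pathToCLI, "--help")
//	session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
//	Ω(err).ShouldNot(HaveOccurred())
//	Eventually(session).Should(gexec.Exit(0))
//	gexec.CleanupBuildArtifacts() // typically done once, in an AfterSuite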
/*
BuildIn is identical to Build but allows you to specify a custom $GOPATH (the first argument).
*/
func BuildIn(gopath string, packagePath string, args ...string) (compiledPath string, err error) {
tmpDir, err := temporaryDirectory()
if err != nil {
return "", err
}
if len(gopath) == 0 {
return "", errors.New("$GOPATH not provided when building " + packagePath)
}
executable := filepath.Join(tmpDir, path.Base(packagePath))
if runtime.GOOS == "windows" {
executable = executable + ".exe"
}
cmdArgs := append([]string{"build"}, args...)
cmdArgs = append(cmdArgs, "-o", executable, packagePath)
build := exec.Command("go", cmdArgs...)
build.Env = append([]string{"GOPATH=" + gopath}, os.Environ()...)
output, err := build.CombinedOutput()
if err != nil {
return "", fmt.Errorf("Failed to build %s:\n\nError:\n%s\n\nOutput:\n%s", packagePath, err, string(output))
}
return executable, nil
}
/*
You should call CleanupBuildArtifacts before your test ends to clean up any temporary artifacts generated by
gexec. In Ginkgo this is typically done in an AfterSuite callback.
*/
func CleanupBuildArtifacts() {
if tmpDir != "" {
os.RemoveAll(tmpDir)
}
}
func temporaryDirectory() (string, error) {
var err error
if tmpDir == "" {
tmpDir, err = ioutil.TempDir("", "gexec_artifacts")
if err != nil {
return "", err
}
}
return ioutil.TempDir(tmpDir, "g")
}

88
vendor/github.com/onsi/gomega/gexec/exit_matcher.go generated vendored Normal file
View File

@ -0,0 +1,88 @@
package gexec
import (
"fmt"
"github.com/onsi/gomega/format"
)
/*
The Exit matcher operates on a session:
Ω(session).Should(Exit(<optional status code>))
Exit passes if the session has already exited.
If no status code is provided, then Exit will succeed if the session has exited regardless of exit code.
Otherwise, Exit will only succeed if the process has exited with the provided status code.
Note that the process must have already exited. To wait for a process to exit, use Eventually:
Eventually(session, 3).Should(Exit(0))
*/
func Exit(optionalExitCode ...int) *exitMatcher {
exitCode := -1
if len(optionalExitCode) > 0 {
exitCode = optionalExitCode[0]
}
return &exitMatcher{
exitCode: exitCode,
}
}
type exitMatcher struct {
exitCode int
didExit bool
actualExitCode int
}
type Exiter interface {
ExitCode() int
}
func (m *exitMatcher) Match(actual interface{}) (success bool, err error) {
exiter, ok := actual.(Exiter)
if !ok {
return false, fmt.Errorf("Exit must be passed a gexec.Exiter (Missing method ExitCode() int) Got:\n%s", format.Object(actual, 1))
}
m.actualExitCode = exiter.ExitCode()
if m.actualExitCode == -1 {
return false, nil
}
if m.exitCode == -1 {
return true, nil
}
return m.exitCode == m.actualExitCode, nil
}
func (m *exitMatcher) FailureMessage(actual interface{}) (message string) {
if m.actualExitCode == -1 {
return "Expected process to exit. It did not."
} else {
return format.Message(m.actualExitCode, "to match exit code:", m.exitCode)
}
}
func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) {
if m.actualExitCode == -1 {
return "you really shouldn't be able to see this!"
} else {
if m.exitCode == -1 {
return "Expected process not to exit. It did."
} else {
return format.Message(m.actualExitCode, "not to match exit code:", m.exitCode)
}
}
}
func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
session, ok := actual.(*Session)
if ok {
return session.ExitCode() == -1
}
return true
}

53
vendor/github.com/onsi/gomega/gexec/prefixed_writer.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
package gexec
import (
"io"
"sync"
)
/*
PrefixedWriter wraps an io.Writer, emitting the passed-in prefix at the beginning of each new line.
This can be useful when running multiple gexec.Sessions concurrently - you can prefix the log output of each
session by passing in a PrefixedWriter:
gexec.Start(cmd, NewPrefixedWriter("[my-cmd] ", GinkgoWriter), NewPrefixedWriter("[my-cmd] ", GinkgoWriter))
*/
type PrefixedWriter struct {
prefix []byte
writer io.Writer
lock *sync.Mutex
atStartOfLine bool
}
func NewPrefixedWriter(prefix string, writer io.Writer) *PrefixedWriter {
return &PrefixedWriter{
prefix: []byte(prefix),
writer: writer,
lock: &sync.Mutex{},
atStartOfLine: true,
}
}
func (w *PrefixedWriter) Write(b []byte) (int, error) {
w.lock.Lock()
defer w.lock.Unlock()
toWrite := []byte{}
for _, c := range b {
if w.atStartOfLine {
toWrite = append(toWrite, w.prefix...)
}
toWrite = append(toWrite, c)
w.atStartOfLine = c == '\n'
}
_, err := w.writer.Write(toWrite)
if err != nil {
return 0, err
}
return len(b), nil
}

214
vendor/github.com/onsi/gomega/gexec/session.go generated vendored Normal file
View File

@ -0,0 +1,214 @@
/*
Package gexec provides support for testing external processes.
*/
package gexec
import (
"io"
"os"
"os/exec"
"reflect"
"sync"
"syscall"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
)
const INVALID_EXIT_CODE = 254
type Session struct {
//The wrapped command
Command *exec.Cmd
//A *gbytes.Buffer connected to the command's stdout
Out *gbytes.Buffer
//A *gbytes.Buffer connected to the command's stderr
Err *gbytes.Buffer
//A channel that will close when the command exits
Exited <-chan struct{}
lock *sync.Mutex
exitCode int
}
/*
Start starts the passed-in *exec.Cmd command. It wraps the command in a *gexec.Session.
The session pipes the command's stdout and stderr to two *gbytes.Buffers available as properties on the session: session.Out and session.Err.
These buffers can be used with the gbytes.Say matcher to match against unread output:
Ω(session.Out).Should(gbytes.Say("foo-out"))
Ω(session.Err).Should(gbytes.Say("foo-err"))
In addition, Session satisfies the gbytes.BufferProvider interface and provides the stdout *gbytes.Buffer. This allows you to replace the first line, above, with:
Ω(session).Should(gbytes.Say("foo-out"))
When outWriter and/or errWriter are non-nil, the session will pipe stdout and/or stderr output both into the session *gbytes.Buffers and to the passed-in outWriter/errWriter.
This is useful for capturing the process's output or logging it to screen. In particular, when using Ginkgo it can be convenient to direct output to the GinkgoWriter:
session, err := Start(command, GinkgoWriter, GinkgoWriter)
This will log output when running tests in verbose mode, but - otherwise - will only log output when a test fails.
The session wrapper is responsible for waiting on the *exec.Cmd command. You *should not* call command.Wait() yourself.
Instead, to assert that the command has exited you can use the gexec.Exit matcher:
Ω(session).Should(gexec.Exit())
When the session exits it closes the stdout and stderr gbytes buffers. This will short circuit any
Eventuallys waiting for the buffers to Say something.
*/
func Start(command *exec.Cmd, outWriter io.Writer, errWriter io.Writer) (*Session, error) {
exited := make(chan struct{})
session := &Session{
Command: command,
Out: gbytes.NewBuffer(),
Err: gbytes.NewBuffer(),
Exited: exited,
lock: &sync.Mutex{},
exitCode: -1,
}
var commandOut, commandErr io.Writer
commandOut, commandErr = session.Out, session.Err
if outWriter != nil && !reflect.ValueOf(outWriter).IsNil() {
commandOut = io.MultiWriter(commandOut, outWriter)
}
if errWriter != nil && !reflect.ValueOf(errWriter).IsNil() {
commandErr = io.MultiWriter(commandErr, errWriter)
}
command.Stdout = commandOut
command.Stderr = commandErr
err := command.Start()
if err == nil {
go session.monitorForExit(exited)
}
return session, err
}
/*
Buffer implements the gbytes.BufferProvider interface and returns s.Out
This allows you to make gbytes.Say matcher assertions against stdout without having to reference .Out:
Eventually(session).Should(gbytes.Say("foo"))
*/
func (s *Session) Buffer() *gbytes.Buffer {
return s.Out
}
/*
ExitCode returns the wrapped command's exit code. If the command hasn't exited yet, ExitCode returns -1.
To assert that the command has exited it is more convenient to use the Exit matcher:
Eventually(s).Should(gexec.Exit())
When the process exits because it has received a particular signal, the exit code will be 128+signal-value
(See http://www.tldp.org/LDP/abs/html/exitcodes.html and http://man7.org/linux/man-pages/man7/signal.7.html)
*/
func (s *Session) ExitCode() int {
s.lock.Lock()
defer s.lock.Unlock()
return s.exitCode
}
/*
Wait waits until the wrapped command exits. It can be passed an optional timeout.
If the command does not exit within the timeout, Wait will trigger a test failure.
Wait returns the session, making it possible to chain:
session.Wait().Out.Contents()
will wait for the command to exit then return the entirety of Out's contents.
Wait uses eventually under the hood and accepts the same timeout/polling intervals that eventually does.
*/
func (s *Session) Wait(timeout ...interface{}) *Session {
EventuallyWithOffset(1, s, timeout...).Should(Exit())
return s
}
/*
Kill sends the running command a SIGKILL signal. It does not wait for the process to exit.
If the command has already exited, Kill returns silently.
The session is returned to enable chaining.
*/
func (s *Session) Kill() *Session {
if s.ExitCode() != -1 {
return s
}
s.Command.Process.Kill()
return s
}
/*
Interrupt sends the running command a SIGINT signal. It does not wait for the process to exit.
If the command has already exited, Interrupt returns silently.
The session is returned to enable chaining.
*/
func (s *Session) Interrupt() *Session {
return s.Signal(syscall.SIGINT)
}
/*
Terminate sends the running command a SIGTERM signal. It does not wait for the process to exit.
If the command has already exited, Terminate returns silently.
The session is returned to enable chaining.
*/
func (s *Session) Terminate() *Session {
return s.Signal(syscall.SIGTERM)
}
/*
Signal sends the running command the passed-in signal. It does not wait for the process to exit.
If the command has already exited, Signal returns silently.
The session is returned to enable chaining.
*/
func (s *Session) Signal(signal os.Signal) *Session {
if s.ExitCode() != -1 {
return s
}
s.Command.Process.Signal(signal)
return s
}
func (s *Session) monitorForExit(exited chan<- struct{}) {
err := s.Command.Wait()
s.lock.Lock()
s.Out.Close()
s.Err.Close()
status := s.Command.ProcessState.Sys().(syscall.WaitStatus)
if status.Signaled() {
s.exitCode = 128 + int(status.Signal())
} else {
exitStatus := status.ExitStatus()
if exitStatus == -1 && err != nil {
s.exitCode = INVALID_EXIT_CODE
} else {
s.exitCode = exitStatus
}
}
s.lock.Unlock()
close(exited)
}

335
vendor/github.com/onsi/gomega/gomega_dsl.go generated vendored Normal file
View File

@ -0,0 +1,335 @@
/*
Gomega is the Ginkgo BDD-style testing framework's preferred matcher library.
The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/
Gomega on Github: http://github.com/onsi/gomega
Learn more about Ginkgo online: http://onsi.github.io/ginkgo
Ginkgo on Github: http://github.com/onsi/ginkgo
Gomega is MIT-Licensed
*/
package gomega
import (
"fmt"
"reflect"
"time"
"github.com/onsi/gomega/internal/assertion"
"github.com/onsi/gomega/internal/asyncassertion"
"github.com/onsi/gomega/internal/testingtsupport"
"github.com/onsi/gomega/types"
)
const GOMEGA_VERSION = "1.0"
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT().
`
var globalFailHandler types.GomegaFailHandler
var defaultEventuallyTimeout = time.Second
var defaultEventuallyPollingInterval = 10 * time.Millisecond
var defaultConsistentlyDuration = 100 * time.Millisecond
var defaultConsistentlyPollingInterval = 10 * time.Millisecond
//RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
//the fail handler passed into RegisterFailHandler is called.
func RegisterFailHandler(handler types.GomegaFailHandler) {
globalFailHandler = handler
}
//RegisterTestingT connects Gomega to Golang's XUnit style
//Testing.T tests. You'll need to call this at the top of each XUnit style test:
//
// func TestFarmHasCow(t *testing.T) {
// RegisterTestingT(t)
//
// f := farm.New([]string{"Cow", "Horse"})
// Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
// }
//
// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to
// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests
// in parallel as the global fail handler cannot point to more than one testing.T at a time.
//
// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*).
func RegisterTestingT(t types.GomegaTestingT) {
RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t))
}
//InterceptGomegaHandlers runs a given callback and returns an array of
//failure messages generated by any Gomega assertions within the callback.
//
//This is accomplished by temporarily replacing the *global* fail handler
//with a fail handler that simply annotates failures. The original fail handler
//is reset when InterceptGomegaFailures returns.
//
//This is most useful when testing custom matchers, but can also be used to check
//on a value using a Gomega assertion without causing a test failure.
func InterceptGomegaFailures(f func()) []string {
originalHandler := globalFailHandler
failures := []string{}
RegisterFailHandler(func(message string, callerSkip ...int) {
failures = append(failures, message)
})
f()
RegisterFailHandler(originalHandler)
return failures
}
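// Illustrative sketch (editor's addition, not vendored source): capturing failure messages
// without failing the enclosing test, e.g. when testing a custom matcher.
//
//	failures := InterceptGomegaFailures(func() {
//		Ω(2).Should(Equal(3))
//	})
//	Ω(failures).Should(HaveLen(1))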
//Ω wraps an actual value allowing assertions to be made on it:
// Ω("foo").Should(Equal("foo"))
//
//If Ω is passed more than one argument it will pass the *first* argument to the matcher.
//All subsequent arguments will be required to be nil/zero.
//
//This is convenient if you want to make an assertion on a method/function that returns
//a value and an error - a common pattern in Go.
//
//For example, given a function with signature:
// func MyAmazingThing() (int, error)
//
//Then:
// Ω(MyAmazingThing()).Should(Equal(3))
//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
//Ω and Expect are identical
func Ω(actual interface{}, extra ...interface{}) GomegaAssertion {
return ExpectWithOffset(0, actual, extra...)
}
//Expect wraps an actual value allowing assertions to be made on it:
// Expect("foo").To(Equal("foo"))
//
//If Expect is passed more than one argument it will pass the *first* argument to the matcher.
//All subsequent arguments will be required to be nil/zero.
//
//This is convenient if you want to make an assertion on a method/function that returns
//a value and an error - a common pattern in Go.
//
//For example, given a function with signature:
// func MyAmazingThing() (int, error)
//
//Then:
// Expect(MyAmazingThing()).Should(Equal(3))
//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
//Expect and Ω are identical
func Expect(actual interface{}, extra ...interface{}) GomegaAssertion {
return ExpectWithOffset(0, actual, extra...)
}
//ExpectWithOffset wraps an actual value allowing assertions to be made on it:
// ExpectWithOffset(1, "foo").To(Equal("foo"))
//
//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument
//this is used to modify the call-stack offset when computing line numbers.
//
//This is most useful in helper functions that make assertions. If you want Gomega's
//error message to refer to the calling line in the test (as opposed to the line in the helper function)
//set the first argument of `ExpectWithOffset` appropriately.
func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion {
if globalFailHandler == nil {
panic(nilFailHandlerPanic)
}
return assertion.New(actual, globalFailHandler, offset, extra...)
}
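// Illustrative sketch (editor's addition, not vendored source): a test helper that reports
// failures at its caller's line by passing an offset of 1.
//
//	func ExpectNoError(err error) {
//		ExpectWithOffset(1, err).ShouldNot(HaveOccurred())
//	}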
//Eventually wraps an actual value allowing assertions to be made on it.
//The assertion is tried periodically until it passes or a timeout occurs.
//
//Both the timeout and polling interval are configurable as optional arguments:
//The first optional argument is the timeout
//The second optional argument is the polling interval
//
//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
//last case they are interpreted as seconds.
//
//If Eventually is passed an actual that is a function taking no arguments and returning at least one value,
//then Eventually will call the function periodically and try the matcher against the function's first return value.
//
//Example:
//
// Eventually(func() int {
// return thingImPolling.Count()
// }).Should(BeNumerically(">=", 17))
//
//Note that this example could be rewritten:
//
// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17))
//
//If the function returns more than one value, then Eventually will pass the first value to the matcher and
//assert that all other values are nil/zero.
//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
//
//For example, consider a method that returns a value and an error:
// func FetchFromDB() (string, error)
//
//Then
// Eventually(FetchFromDB).Should(Equal("hasselhoff"))
//
//Will pass only if the returned error is nil and the returned string passes the matcher.
//
//Eventually's default timeout is 1 second, and its default polling interval is 10ms
func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
return EventuallyWithOffset(0, actual, intervals...)
}
//EventuallyWithOffset operates like Eventually but takes an additional
//initial argument to indicate an offset in the call stack. This is useful when building helper
//functions that contain matchers. To learn more, read about `ExpectWithOffset`.
func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
if globalFailHandler == nil {
panic(nilFailHandlerPanic)
}
timeoutInterval := defaultEventuallyTimeout
pollingInterval := defaultEventuallyPollingInterval
if len(intervals) > 0 {
timeoutInterval = toDuration(intervals[0])
}
if len(intervals) > 1 {
pollingInterval = toDuration(intervals[1])
}
return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailHandler, timeoutInterval, pollingInterval, offset)
}
//Consistently wraps an actual value allowing assertions to be made on it.
//The assertion is tried periodically and is required to pass for a period of time.
//
//Both the total time and polling interval are configurable as optional arguments:
//The first optional argument is the duration that Consistently will run for
//The second optional argument is the polling interval
//
//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
//last case they are interpreted as seconds.
//
//If Consistently is passed an actual that is a function taking no arguments and returning at least one value,
//then Consistently will call the function periodically and try the matcher against the function's first return value.
//
//If the function returns more than one value, then Consistently will pass the first value to the matcher and
//assert that all other values are nil/zero.
//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go.
//
//Consistently is useful in cases where you want to assert that something *does not happen* over a period of time.
//For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could:
//
// Consistently(channel).ShouldNot(Receive())
//
//Consistently's default duration is 100ms, and its default polling interval is 10ms
func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
return ConsistentlyWithOffset(0, actual, intervals...)
}
//ConsistentlyWithOffset operates like Consistently but takes an additional
//initial argument to indicate an offset in the call stack. This is useful when building helper
//functions that contain matchers. To learn more, read about `ExpectWithOffset`.
func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
if globalFailHandler == nil {
panic(nilFailHandlerPanic)
}
timeoutInterval := defaultConsistentlyDuration
pollingInterval := defaultConsistentlyPollingInterval
if len(intervals) > 0 {
timeoutInterval = toDuration(intervals[0])
}
if len(intervals) > 1 {
pollingInterval = toDuration(intervals[1])
}
return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailHandler, timeoutInterval, pollingInterval, offset)
}
//Set the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
func SetDefaultEventuallyTimeout(t time.Duration) {
defaultEventuallyTimeout = t
}
//Set the default polling interval for Eventually.
func SetDefaultEventuallyPollingInterval(t time.Duration) {
defaultEventuallyPollingInterval = t
}
//Set the default duration for Consistently. Consistently will verify that your condition is satisfied for this long.
func SetDefaultConsistentlyDuration(t time.Duration) {
defaultConsistentlyDuration = t
}
//Set the default polling interval for Consistently.
func SetDefaultConsistentlyPollingInterval(t time.Duration) {
defaultConsistentlyPollingInterval = t
}
//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
//the matcher passed to the Should and ShouldNot methods.
//
//Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to
//fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more
//descriptive
//
//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed.
//
//Example:
//
// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.")
type GomegaAsyncAssertion interface {
Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
}
//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher
//passed to the Should/ShouldNot and To/ToNot/NotTo methods.
//
//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect
//though this is not enforced.
//
//All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf()
//and is used to annotate failure messages.
//
//All methods return a bool that is true if the assertion passed and false if it failed.
//
//Example:
//
// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
type GomegaAssertion interface {
Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
}
//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it
type OmegaMatcher types.GomegaMatcher
func toDuration(input interface{}) time.Duration {
duration, ok := input.(time.Duration)
if ok {
return duration
}
value := reflect.ValueOf(input)
kind := reflect.TypeOf(input).Kind()
if reflect.Int <= kind && kind <= reflect.Int64 {
return time.Duration(value.Int()) * time.Second
} else if reflect.Uint <= kind && kind <= reflect.Uint64 {
return time.Duration(value.Uint()) * time.Second
} else if reflect.Float32 <= kind && kind <= reflect.Float64 {
return time.Duration(value.Float() * float64(time.Second))
} else if reflect.String == kind {
duration, err := time.ParseDuration(value.String())
if err != nil {
panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input))
}
return duration
}
panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input))
}

View File

@ -0,0 +1,98 @@
package assertion
import (
"fmt"
"reflect"
"github.com/onsi/gomega/types"
)
type Assertion struct {
actualInput interface{}
fail types.GomegaFailHandler
offset int
extra []interface{}
}
func New(actualInput interface{}, fail types.GomegaFailHandler, offset int, extra ...interface{}) *Assertion {
return &Assertion{
actualInput: actualInput,
fail: fail,
offset: offset,
extra: extra,
}
}
func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
switch len(optionalDescription) {
case 0:
return ""
default:
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
}
}
func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
matches, err := matcher.Match(assertion.actualInput)
description := assertion.buildDescription(optionalDescription...)
if err != nil {
assertion.fail(description+err.Error(), 2+assertion.offset)
return false
}
if matches != desiredMatch {
var message string
if desiredMatch {
message = matcher.FailureMessage(assertion.actualInput)
} else {
message = matcher.NegatedFailureMessage(assertion.actualInput)
}
assertion.fail(description+message, 2+assertion.offset)
return false
}
return true
}
func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool {
success, message := vetExtras(assertion.extra)
if success {
return true
}
description := assertion.buildDescription(optionalDescription...)
assertion.fail(description+message, 2+assertion.offset)
return false
}
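// vetExtras reports whether every extra value (the return values after the first one handed
// to Ω/Expect) is nil or a zero value; if not, it returns false together with a message
// naming the offending argument and its index.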
func vetExtras(extras []interface{}) (bool, string) {
for i, extra := range extras {
if extra != nil {
zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
if !reflect.DeepEqual(zeroValue, extra) {
message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
return false, message
}
}
}
return true, ""
}

View File

@ -0,0 +1,189 @@
package asyncassertion
import (
"errors"
"fmt"
"reflect"
"time"
"github.com/onsi/gomega/internal/oraclematcher"
"github.com/onsi/gomega/types"
)
type AsyncAssertionType uint
const (
AsyncAssertionTypeEventually AsyncAssertionType = iota
AsyncAssertionTypeConsistently
)
type AsyncAssertion struct {
asyncType AsyncAssertionType
actualInput interface{}
timeoutInterval time.Duration
pollingInterval time.Duration
fail types.GomegaFailHandler
offset int
}
func New(asyncType AsyncAssertionType, actualInput interface{}, fail types.GomegaFailHandler, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion {
actualType := reflect.TypeOf(actualInput)
if actualType.Kind() == reflect.Func {
if actualType.NumIn() != 0 || actualType.NumOut() == 0 {
panic("Expected a function with no arguments and one or more return values.")
}
}
return &AsyncAssertion{
asyncType: asyncType,
actualInput: actualInput,
fail: fail,
timeoutInterval: timeoutInterval,
pollingInterval: pollingInterval,
offset: offset,
}
}
func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
return assertion.match(matcher, true, optionalDescription...)
}
func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
return assertion.match(matcher, false, optionalDescription...)
}
func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string {
switch len(optionalDescription) {
case 0:
return ""
default:
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
}
}
func (assertion *AsyncAssertion) actualInputIsAFunction() bool {
actualType := reflect.TypeOf(assertion.actualInput)
return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0
}
func (assertion *AsyncAssertion) pollActual() (interface{}, error) {
if assertion.actualInputIsAFunction() {
values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{})
extras := []interface{}{}
for _, value := range values[1:] {
extras = append(extras, value.Interface())
}
success, message := vetExtras(extras)
if !success {
return nil, errors.New(message)
}
return values[0].Interface(), nil
}
return assertion.actualInput, nil
}
func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool {
if assertion.actualInputIsAFunction() {
return true
}
return oraclematcher.MatchMayChangeInTheFuture(matcher, value)
}
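// match polls the actual value until the timeout fires. For Eventually it succeeds as soon as
// the matcher agrees with desiredMatch and bails out early once the matcher reports that no
// future change is possible; for Consistently it fails as soon as the matcher disagrees and
// succeeds when the timeout elapses (or when no future change is possible).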
func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
timer := time.Now()
timeout := time.After(assertion.timeoutInterval)
description := assertion.buildDescription(optionalDescription...)
var matches bool
var err error
mayChange := true
value, err := assertion.pollActual()
if err == nil {
mayChange = assertion.matcherMayChange(matcher, value)
matches, err = matcher.Match(value)
}
fail := func(preamble string) {
errMsg := ""
message := ""
if err != nil {
errMsg = "Error: " + err.Error()
} else {
if desiredMatch {
message = matcher.FailureMessage(value)
} else {
message = matcher.NegatedFailureMessage(value)
}
}
assertion.fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset)
}
if assertion.asyncType == AsyncAssertionTypeEventually {
for {
if err == nil && matches == desiredMatch {
return true
}
if !mayChange {
fail("No future change is possible. Bailing out early")
return false
}
select {
case <-time.After(assertion.pollingInterval):
value, err = assertion.pollActual()
if err == nil {
mayChange = assertion.matcherMayChange(matcher, value)
matches, err = matcher.Match(value)
}
case <-timeout:
fail("Timed out")
return false
}
}
} else if assertion.asyncType == AsyncAssertionTypeConsistently {
for {
if !(err == nil && matches == desiredMatch) {
fail("Failed")
return false
}
if !mayChange {
return true
}
select {
case <-time.After(assertion.pollingInterval):
value, err = assertion.pollActual()
if err == nil {
mayChange = assertion.matcherMayChange(matcher, value)
matches, err = matcher.Match(value)
}
case <-timeout:
return true
}
}
}
return false
}
func vetExtras(extras []interface{}) (bool, string) {
for i, extra := range extras {
if extra != nil {
zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
if !reflect.DeepEqual(zeroValue, extra) {
message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
return false, message
}
}
}
return true, ""
}

View File

@ -0,0 +1,25 @@
package oraclematcher
import "github.com/onsi/gomega/types"
/*
GomegaMatchers that also match the OracleMatcher interface can convey information about
whether or not their result will change upon future attempts.
This allows `Eventually` and `Consistently` to short circuit if success becomes impossible.
For example, a process' exit code can never change. So, gexec's Exit matcher returns `true`
for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
*/
type OracleMatcher interface {
MatchMayChangeInTheFuture(actual interface{}) bool
}
func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool {
oracleMatcher, ok := matcher.(OracleMatcher)
if !ok {
return true
}
return oracleMatcher.MatchMayChangeInTheFuture(value)
}
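// Illustrative sketch (an addition, not part of the vendored source): a custom
// matcher opts in to this short-circuiting by implementing OracleMatcher. For
// example, a hypothetical exit-code matcher could report that no further change
// is possible once the process has exited:
//
//	func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
//		return !m.processHasExited // once exited, the exit code is fixed forever
//	}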

View File

@ -0,0 +1,40 @@
package testingtsupport
import (
"regexp"
"runtime/debug"
"strings"
"github.com/onsi/gomega/types"
)
type gomegaTestingT interface {
Errorf(format string, args ...interface{})
}
func BuildTestingTGomegaFailHandler(t gomegaTestingT) types.GomegaFailHandler {
return func(message string, callerSkip ...int) {
skip := 1
if len(callerSkip) > 0 {
skip = callerSkip[0]
}
stackTrace := pruneStack(string(debug.Stack()), skip)
t.Errorf("\n%s\n%s", stackTrace, message)
}
}
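// pruneStack trims the innermost 2*(skip+1) lines of the trace produced by
// debug.Stack (each frame spans two lines: function, then file:line), and filters
// out frames originating from ginkgo or the Go testing/runtime packages so the
// reported failure points at the user's test code.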
func pruneStack(fullStackTrace string, skip int) string {
stack := strings.Split(fullStackTrace, "\n")
if len(stack) > 2*(skip+1) {
stack = stack[2*(skip+1):]
}
prunedStack := []string{}
re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
for i := 0; i < len(stack)/2; i++ {
if !re.Match([]byte(stack[i*2])) {
prunedStack = append(prunedStack, stack[i*2])
prunedStack = append(prunedStack, stack[i*2+1])
}
}
return strings.Join(prunedStack, "\n")
}
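// Illustrative sketch (an addition, not part of the vendored source): this fail
// handler is what lets Gomega report assertion failures through a standard
// *testing.T. Assuming the usual gomega entry points, a test might wire it up as:
//
//	func TestSomething(t *testing.T) {
//		gomega.RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t))
//		gomega.Expect(1 + 1).To(gomega.Equal(2))
//	}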

393
vendor/github.com/onsi/gomega/matchers.go generated vendored Normal file
View File

@ -0,0 +1,393 @@
package gomega
import (
"time"
"github.com/onsi/gomega/matchers"
"github.com/onsi/gomega/types"
)
//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about
//types when performing comparisons.
//It is an error for both actual and expected to be nil. Use BeNil() instead.
func Equal(expected interface{}) types.GomegaMatcher {
return &matchers.EqualMatcher{
Expected: expected,
}
}
//BeEquivalentTo is more lax than Equal, allowing equality between different types.
//This is done by converting actual to have the type of expected before
//attempting equality with reflect.DeepEqual.
//It is an error for actual and expected to be nil. Use BeNil() instead.
func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
return &matchers.BeEquivalentToMatcher{
Expected: expected,
}
}
//BeNil succeeds if actual is nil
func BeNil() types.GomegaMatcher {
return &matchers.BeNilMatcher{}
}
//BeTrue succeeds if actual is true
func BeTrue() types.GomegaMatcher {
return &matchers.BeTrueMatcher{}
}
//BeFalse succeeds if actual is false
func BeFalse() types.GomegaMatcher {
return &matchers.BeFalseMatcher{}
}
//HaveOccurred succeeds if actual is a non-nil error
//The typical Go error checking pattern looks like:
// err := SomethingThatMightFail()
// Ω(err).ShouldNot(HaveOccurred())
func HaveOccurred() types.GomegaMatcher {
return &matchers.HaveOccurredMatcher{}
}
//Succeed passes if actual is a nil error
//Succeed is intended to be used with functions that return a single error value. Instead of
// err := SomethingThatMightFail()
// Ω(err).ShouldNot(HaveOccurred())
//
//You can write:
// Ω(SomethingThatMightFail()).Should(Succeed())
//
//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect
//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil.
//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass.
func Succeed() types.GomegaMatcher {
return &matchers.SucceedMatcher{}
}
//MatchError succeeds if actual is a non-nil error that matches the passed in string/error.
//
//These are valid use-cases:
// Ω(err).Should(MatchError("an error")) //asserts that err.Error() == "an error"
// Ω(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual)
//
//It is an error for err to be nil or an object that does not implement the Error interface
func MatchError(expected interface{}) types.GomegaMatcher {
return &matchers.MatchErrorMatcher{
Expected: expected,
}
}
//BeClosed succeeds if actual is a closed channel.
//It is an error to pass a non-channel to BeClosed; it is also an error to pass nil
//
//In order to check whether or not the channel is closed, Gomega must try to read from the channel
//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about
//values coming down the channel.
//
//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read).
//
//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
func BeClosed() types.GomegaMatcher {
return &matchers.BeClosedMatcher{}
}
//Receive succeeds if there is a value to be received on actual.
//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
//
//Receive returns immediately and never blocks:
//
//- If there is nothing on the channel `c` then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
//
//- If the channel `c` is closed then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
//
//- If there is something on the channel `c` ready to be read, then Ω(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
//
//If you have a go-routine running in the background that will write to channel `c` you can:
// Eventually(c).Should(Receive())
//
//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
//
//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
// Consistently(c).ShouldNot(Receive())
//
//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
// Ω(c).Should(Receive(Equal("foo")))
//
//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
//
//Passing Receive a matcher is especially useful when paired with Eventually:
//
// Eventually(c).Should(Receive(ContainSubstring("bar")))
//
//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
//
//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
// var myThing thing
// Eventually(thingChan).Should(Receive(&myThing))
// Ω(myThing.Sprocket).Should(Equal("foo"))
// Ω(myThing.IsValid()).Should(BeTrue())
func Receive(args ...interface{}) types.GomegaMatcher {
var arg interface{}
if len(args) > 0 {
arg = args[0]
}
return &matchers.ReceiveMatcher{
Arg: arg,
}
}
//BeSent succeeds if a value can be sent to actual.
//Actual must be a channel (and cannot be a receive-only channel) to which values of the type passed into BeSent can be sent -- anything else is an error.
//In addition, actual must not be closed.
//
//BeSent never blocks:
//
//- If the channel `c` is not ready to receive then Ω(c).Should(BeSent("foo")) will fail immediately
//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed, presuming the channel becomes ready to receive before Eventually's timeout
//- If the channel `c` is closed then Ω(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
//
//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
func BeSent(arg interface{}) types.GomegaMatcher {
return &matchers.BeSentMatcher{
Arg: arg,
}
}
//MatchRegexp succeeds if actual is a string or stringer that matches the
//passed-in regexp. Optional arguments can be provided to construct a regexp
//via fmt.Sprintf().
func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
return &matchers.MatchRegexpMatcher{
Regexp: regexp,
Args: args,
}
}
//ContainSubstring succeeds if actual is a string or stringer that contains the
//passed-in substring. Optional arguments can be provided to construct the substring
//via fmt.Sprintf().
func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
return &matchers.ContainSubstringMatcher{
Substr: substr,
Args: args,
}
}
//HavePrefix succeeds if actual is a string or stringer that has the
//passed-in string as a prefix. Optional arguments can be provided to construct
//the prefix via fmt.Sprintf().
func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
return &matchers.HavePrefixMatcher{
Prefix: prefix,
Args: args,
}
}
//HaveSuffix succeeds if actual is a string or stringer that has the
//passed-in string as a suffix. Optional arguments can be provided to construct
//the suffix via fmt.Sprintf().
func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
return &matchers.HaveSuffixMatcher{
Suffix: suffix,
Args: args,
}
}
//MatchJSON succeeds if actual is a string or stringer of JSON that matches
//the expected JSON. The JSONs are decoded and the resulting objects are compared via
//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
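//An illustrative example (an addition, not part of the original comment):
//	Ω(`{"a": 1, "b": 2}`).Should(MatchJSON(`{"b": 2, "a": 1}`)) //succeeds: compared structurally, not textually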
func MatchJSON(json interface{}) types.GomegaMatcher {
return &matchers.MatchJSONMatcher{
JSONToMatch: json,
}
}
//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
func BeEmpty() types.GomegaMatcher {
return &matchers.BeEmptyMatcher{}
}
//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
func HaveLen(count int) types.GomegaMatcher {
return &matchers.HaveLenMatcher{
Count: count,
}
}
//BeZero succeeds if actual is the zero value for its type or if actual is nil.
func BeZero() types.GomegaMatcher {
return &matchers.BeZeroMatcher{}
}
//ContainElement succeeds if actual contains the passed in element.
//By default ContainElement() uses Equal() to perform the match, however a
//matcher can be passed in instead:
// Ω([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
//
//Actual must be an array, slice or map.
//For maps, ContainElement searches through the map's values.
func ContainElement(element interface{}) types.GomegaMatcher {
return &matchers.ContainElementMatcher{
Element: element,
}
}
//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
//
// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
//
//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
//
//You typically pass variadic arguments to ConsistOf (as in the examples above). However, you may instead pass in a slice,
//provided that it is the only argument passed to ConsistOf:
//
// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
//
//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
func ConsistOf(elements ...interface{}) types.GomegaMatcher {
return &matchers.ConsistOfMatcher{
Elements: elements,
}
}
//HaveKey succeeds if actual is a map with the passed in key.
//By default HaveKey uses Equal() to perform the match, however a
//matcher can be passed in instead:
// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
func HaveKey(key interface{}) types.GomegaMatcher {
return &matchers.HaveKeyMatcher{
Key: key,
}
}
//HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
//By default HaveKeyWithValue uses Equal() to perform the match, however a
//matcher can be passed in instead:
// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
return &matchers.HaveKeyWithValueMatcher{
Key: key,
Value: value,
}
}
//BeNumerically performs numerical assertions in a type-agnostic way.
//Actual and expected should be numbers, though the specific type of
//number is irrelevant (float32, float64, uint8, etc...).
//
//There are six, self-explanatory, supported comparators:
// Ω(1.0).Should(BeNumerically("==", 1))
// Ω(1.0).Should(BeNumerically("~", 0.999, 0.01))
// Ω(1.0).Should(BeNumerically(">", 0.9))
// Ω(1.0).Should(BeNumerically(">=", 1.0))
// Ω(1.0).Should(BeNumerically("<", 3))
// Ω(1.0).Should(BeNumerically("<=", 1.0))
func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
return &matchers.BeNumericallyMatcher{
Comparator: comparator,
CompareTo: compareTo,
}
}
//BeTemporally compares time.Time values, analogously to BeNumerically.
//Actual and expected must be time.Time. The comparators are the same as for BeNumerically.
// Ω(time.Now()).Should(BeTemporally(">", time.Time{}))
// Ω(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher {
return &matchers.BeTemporallyMatcher{
Comparator: comparator,
CompareTo: compareTo,
Threshold: threshold,
}
}
//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
//It will return an error when one of the values is nil.
// Ω(0).Should(BeAssignableToTypeOf(0)) // Same values
// Ω(5).Should(BeAssignableToTypeOf(-1)) // different values same type
// Ω("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
// Ω(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
return &matchers.AssignableToTypeOfMatcher{
Expected: expected,
}
}
//Panic succeeds if actual is a function that, when invoked, panics.
//Actual must be a function that takes no arguments and returns no results.
func Panic() types.GomegaMatcher {
return &matchers.PanicMatcher{}
}
//BeAnExistingFile succeeds if a file exists.
//Actual must be a string representing the abs path to the file being checked.
func BeAnExistingFile() types.GomegaMatcher {
return &matchers.BeAnExistingFileMatcher{}
}
//BeARegularFile succeeds iff a file exists and is a regular file.
//Actual must be a string representing the abs path to the file being checked.
func BeARegularFile() types.GomegaMatcher {
return &matchers.BeARegularFileMatcher{}
}
//BeADirectory succeeds iff a file exists and is a directory.
//Actual must be a string representing the abs path to the file being checked.
func BeADirectory() types.GomegaMatcher {
return &matchers.BeADirectoryMatcher{}
}
//And succeeds only if all of the given matchers succeed.
//The matchers are tried in order, and will fail-fast if one doesn't succeed.
// Expect("hi").To(And(HaveLen(2), Equal("hi")))
//
//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func And(ms ...types.GomegaMatcher) types.GomegaMatcher {
return &matchers.AndMatcher{Matchers: ms}
}
//SatisfyAll is an alias for And().
// Ω("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher {
return And(matchers...)
}
//Or succeeds if any of the given matchers succeed.
//The matchers are tried in order and will return immediately upon the first successful match.
// Expect("hi").To(Or(HaveLen(3), HaveLen(2)))
//
//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func Or(ms ...types.GomegaMatcher) types.GomegaMatcher {
return &matchers.OrMatcher{Matchers: ms}
}
//SatisfyAny is an alias for Or().
// Expect("hi").To(SatisfyAny(HaveLen(3), HaveLen(2)))
func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher {
return Or(matchers...)
}
//Not negates the given matcher; it succeeds if the given matcher fails.
// Expect(1).To(Not(Equal(2)))
//
//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
return &matchers.NotMatcher{Matcher: matcher}
}
//WithTransform applies the `transform` to the actual value and matches it against `matcher`.
//The given transform must be a function of one parameter that returns one value.
// var plus1 = func(i int) int { return i + 1 }
// Expect(1).To(WithTransform(plus1, Equal(2)))
//
//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
return matchers.NewWithTransformMatcher(transform, matcher)
}

64
vendor/github.com/onsi/gomega/matchers/and.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
package matchers
import (
"fmt"
"github.com/onsi/gomega/format"
"github.com/onsi/gomega/internal/oraclematcher"
"github.com/onsi/gomega/types"
)
type AndMatcher struct {
Matchers []types.GomegaMatcher
// state
firstFailedMatcher types.GomegaMatcher
}
func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
m.firstFailedMatcher = nil
for _, matcher := range m.Matchers {
success, err := matcher.Match(actual)
if !success || err != nil {
m.firstFailedMatcher = matcher
return false, err
}
}
return true, nil
}
func (m *AndMatcher) FailureMessage(actual interface{}) (message string) {
return m.firstFailedMatcher.FailureMessage(actual)
}
func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) {
// not the most beautiful list of matchers, but not bad either...
return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers))
}
func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
/*
Example with 3 matchers: A, B, C
Match evaluates them: T, F, <?> => F
So match is currently F, what should MatchMayChangeInTheFuture() return?
Seems like it only depends on B, since currently B MUST change to allow the result to become T
Match eval: T, T, T => T
So match is currently T, what should MatchMayChangeInTheFuture() return?
Seems to depend on ANY of them being able to change to F.
*/
if m.firstFailedMatcher == nil {
// so all matchers succeeded.. Any one of them changing would change the result.
for _, matcher := range m.Matchers {
if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) {
return true
}
}
return false // none of them were going to change
} else {
// one of the matchers failed.. it must be able to change in order to affect the result
return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual)
}
}

View File

@ -0,0 +1,31 @@
package matchers
import (
"fmt"
"reflect"
"github.com/onsi/gomega/format"
)
type AssignableToTypeOfMatcher struct {
Expected interface{}
}
func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) {
if actual == nil || matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
}
actualType := reflect.TypeOf(actual)
expectedType := reflect.TypeOf(matcher.Expected)
return actualType.AssignableTo(expectedType), nil
}
func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string {
return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected))
}
func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string {
return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected))
}

View File

@ -0,0 +1,54 @@
package matchers
import (
"fmt"
"os"
"github.com/onsi/gomega/format"
)
type notADirectoryError struct {
os.FileInfo
}
func (t notADirectoryError) Error() string {
fileInfo := os.FileInfo(t)
switch {
case fileInfo.Mode().IsRegular():
return "file is a regular file"
default:
return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String())
}
}
type BeADirectoryMatcher struct {
expected interface{}
err error
}
func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) {
actualFilename, ok := actual.(string)
if !ok {
return false, fmt.Errorf("BeADirectory matcher expects a file path")
}
fileInfo, err := os.Stat(actualFilename)
if err != nil {
matcher.err = err
return false, nil
}
if !fileInfo.Mode().IsDir() {
matcher.err = notADirectoryError{fileInfo}
return false, nil
}
return true, nil
}
func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err))
}
func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to be a directory")
}

View File

@ -0,0 +1,54 @@
package matchers
import (
"fmt"
"os"
"github.com/onsi/gomega/format"
)
type notARegularFileError struct {
os.FileInfo
}
func (t notARegularFileError) Error() string {
fileInfo := os.FileInfo(t)
switch {
case fileInfo.IsDir():
return "file is a directory"
default:
return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String())
}
}
type BeARegularFileMatcher struct {
expected interface{}
err error
}
func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) {
actualFilename, ok := actual.(string)
if !ok {
return false, fmt.Errorf("BeARegularFile matcher expects a file path")
}
fileInfo, err := os.Stat(actualFilename)
if err != nil {
matcher.err = err
return false, nil
}
if !fileInfo.Mode().IsRegular() {
matcher.err = notARegularFileError{fileInfo}
return false, nil
}
return true, nil
}
func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err))
}
func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to be a regular file")
}

View File

@ -0,0 +1,38 @@
package matchers
import (
"fmt"
"os"
"github.com/onsi/gomega/format"
)
type BeAnExistingFileMatcher struct {
expected interface{}
}
func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) {
actualFilename, ok := actual.(string)
if !ok {
return false, fmt.Errorf("BeAnExistingFile matcher expects a file path")
}
if _, err = os.Stat(actualFilename); err != nil {
switch {
case os.IsNotExist(err):
return false, nil
default:
return false, err
}
}
return true, nil
}
func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to exist")
}
func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to exist")
}

View File

@ -0,0 +1,45 @@
package matchers
import (
"fmt"
"github.com/onsi/gomega/format"
"reflect"
)
type BeClosedMatcher struct {
}
func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) {
if !isChan(actual) {
return false, fmt.Errorf("BeClosed matcher expects a channel. Got:\n%s", format.Object(actual, 1))
}
channelType := reflect.TypeOf(actual)
channelValue := reflect.ValueOf(actual)
if channelType.ChanDir() == reflect.SendDir {
return false, fmt.Errorf("BeClosed matcher cannot determine if a send-only channel is closed or open. Got:\n%s", format.Object(actual, 1))
}
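// Perform a non-blocking receive: the default case guarantees reflect.Select
// returns immediately. If the receive case wins with open == false the channel is
// closed; note that a successful receive may consume a buffered value (see the
// BeClosed documentation).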
winnerIndex, _, open := reflect.Select([]reflect.SelectCase{
reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue},
reflect.SelectCase{Dir: reflect.SelectDefault},
})
var closed bool
if winnerIndex == 0 {
closed = !open
} else if winnerIndex == 1 {
closed = false
}
return closed, nil
}
func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to be closed")
}
func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to be open")
}

View File

@ -0,0 +1,26 @@
package matchers
import (
"fmt"
"github.com/onsi/gomega/format"
)
type BeEmptyMatcher struct {
}
func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) {
length, ok := lengthOf(actual)
if !ok {
return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1))
}
return length == 0, nil
}
func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to be empty")
}
func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to be empty")
}

View File

@ -0,0 +1,33 @@
package matchers
import (
"fmt"
"github.com/onsi/gomega/format"
"reflect"
)
type BeEquivalentToMatcher struct {
Expected interface{}
}
func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Both actual and expected must not be nil.")
}
convertedActual := actual
if actual != nil && matcher.Expected != nil && reflect.TypeOf(actual).ConvertibleTo(reflect.TypeOf(matcher.Expected)) {
convertedActual = reflect.ValueOf(actual).Convert(reflect.TypeOf(matcher.Expected)).Interface()
}
return reflect.DeepEqual(convertedActual, matcher.Expected), nil
}
func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to be equivalent to", matcher.Expected)
}
func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to be equivalent to", matcher.Expected)
}

View File

@ -0,0 +1,25 @@
package matchers
import (
"fmt"
"github.com/onsi/gomega/format"
)
type BeFalseMatcher struct {
}
func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) {
if !isBool(actual) {
return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
}
return actual == false, nil
}
func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to be false")
}
func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to be false")
}

View File

@ -0,0 +1,18 @@
package matchers
import "github.com/onsi/gomega/format"
type BeNilMatcher struct {
}
func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) {
return isNil(actual), nil
}
func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to be nil")
}
func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to be nil")
}

View File

@ -0,0 +1,119 @@
package matchers
import (
"fmt"
"github.com/onsi/gomega/format"
"math"
)
type BeNumericallyMatcher struct {
Comparator string
CompareTo []interface{}
}
func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo[0])
}
func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo[0])
}
func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) {
if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 {
return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1))
}
if !isNumber(actual) {
return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(actual, 1))
}
if !isNumber(matcher.CompareTo[0]) {
return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1))
}
if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) {
return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[1], 1))
}
switch matcher.Comparator {
case "==", "~", ">", ">=", "<", "<=":
default:
return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator)
}
if isFloat(actual) || isFloat(matcher.CompareTo[0]) {
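// For float comparisons the "~" comparator uses an absolute tolerance, which
// defaults to 1e-8 unless an explicit threshold is supplied as the second
// CompareTo argument.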
var secondOperand float64 = 1e-8
if len(matcher.CompareTo) == 2 {
secondOperand = toFloat(matcher.CompareTo[1])
}
success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand)
} else if isInteger(actual) {
var secondOperand int64 = 0
if len(matcher.CompareTo) == 2 {
secondOperand = toInteger(matcher.CompareTo[1])
}
success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand)
} else if isUnsignedInteger(actual) {
var secondOperand uint64 = 0
if len(matcher.CompareTo) == 2 {
secondOperand = toUnsignedInteger(matcher.CompareTo[1])
}
success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand)
} else {
return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1))
}
return success, nil
}
func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) {
switch matcher.Comparator {
case "==", "~":
diff := actual - compareTo
return -threshold <= diff && diff <= threshold
case ">":
return (actual > compareTo)
case ">=":
return (actual >= compareTo)
case "<":
return (actual < compareTo)
case "<=":
return (actual <= compareTo)
}
return false
}
func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) {
switch matcher.Comparator {
case "==", "~":
if actual < compareTo {
actual, compareTo = compareTo, actual
}
return actual-compareTo <= threshold
case ">":
return (actual > compareTo)
case ">=":
return (actual >= compareTo)
case "<":
return (actual < compareTo)
case "<=":
return (actual <= compareTo)
}
return false
}
func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) {
switch matcher.Comparator {
case "~":
return math.Abs(actual-compareTo) <= threshold
case "==":
return (actual == compareTo)
case ">":
return (actual > compareTo)
case ">=":
return (actual >= compareTo)
case "<":
return (actual < compareTo)
case "<=":
return (actual <= compareTo)
}
return false
}

View File

@ -0,0 +1,71 @@
package matchers
import (
"fmt"
"reflect"
"github.com/onsi/gomega/format"
)
type BeSentMatcher struct {
Arg interface{}
channelClosed bool
}
func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) {
if !isChan(actual) {
return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1))
}
channelType := reflect.TypeOf(actual)
channelValue := reflect.ValueOf(actual)
if channelType.ChanDir() == reflect.RecvDir {
return false, fmt.Errorf("BeSent matcher cannot be passed a receive-only channel. Got:\n%s", format.Object(actual, 1))
}
argType := reflect.TypeOf(matcher.Arg)
assignable := argType.AssignableTo(channelType.Elem())
if !assignable {
return false, fmt.Errorf("Cannot pass:\n%s to the channel:\n%s\nThe types don't match.", format.Object(matcher.Arg, 1), format.Object(actual, 1))
}
argValue := reflect.ValueOf(matcher.Arg)
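// Sending on a closed channel panics; the deferred recover converts that panic
// into a matcher error and records that the channel is closed so
// MatchMayChangeInTheFuture can report that no further change is possible.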
defer func() {
if e := recover(); e != nil {
success = false
err = fmt.Errorf("Cannot send to a closed channel")
matcher.channelClosed = true
}
}()
winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{
reflect.SelectCase{Dir: reflect.SelectSend, Chan: channelValue, Send: argValue},
reflect.SelectCase{Dir: reflect.SelectDefault},
})
var didSend bool
if winnerIndex == 0 {
didSend = true
}
return didSend, nil
}
func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to send:", matcher.Arg)
}
func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to send:", matcher.Arg)
}
func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
if !isChan(actual) {
return false
}
return !matcher.channelClosed
}

View File

@ -0,0 +1,65 @@
package matchers
import (
"fmt"
"github.com/onsi/gomega/format"
"time"
)
type BeTemporallyMatcher struct {
Comparator string
CompareTo time.Time
Threshold []time.Duration
}
func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo)
}
func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo)
}
func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) {
// predicate to test for time.Time type
isTime := func(t interface{}) bool {
_, ok := t.(time.Time)
return ok
}
if !isTime(actual) {
return false, fmt.Errorf("Expected a time.Time. Got:\n%s", format.Object(actual, 1))
}
switch matcher.Comparator {
case "==", "~", ">", ">=", "<", "<=":
default:
return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator)
}
var threshold = time.Millisecond
if len(matcher.Threshold) == 1 {
threshold = matcher.Threshold[0]
}
return matcher.matchTimes(actual.(time.Time), matcher.CompareTo, threshold), nil
}
func (matcher *BeTemporallyMatcher) matchTimes(actual, compareTo time.Time, threshold time.Duration) (success bool) {
switch matcher.Comparator {
case "==":
return actual.Equal(compareTo)
case "~":
diff := actual.Sub(compareTo)
return -threshold <= diff && diff <= threshold
case ">":
return actual.After(compareTo)
case ">=":
return !actual.Before(compareTo)
case "<":
return actual.Before(compareTo)
case "<=":
return !actual.After(compareTo)
}
return false
}

View File

@ -0,0 +1,25 @@
package matchers
import (
"fmt"
"github.com/onsi/gomega/format"
)
type BeTrueMatcher struct {
}
func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) {
if !isBool(actual) {
return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
}
return actual.(bool), nil
}
func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to be true")
}
func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to be true")
}

View File

@ -0,0 +1,27 @@
package matchers
import (
"github.com/onsi/gomega/format"
"reflect"
)
type BeZeroMatcher struct {
}
func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) {
if actual == nil {
return true, nil
}
zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface()
return reflect.DeepEqual(zeroValue, actual), nil
}
func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to be zero-valued")
}
func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to be zero-valued")
}

80
vendor/github.com/onsi/gomega/matchers/consist_of.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
package matchers
import (
"fmt"
"reflect"
"github.com/onsi/gomega/format"
"github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
)
type ConsistOfMatcher struct {
Elements []interface{}
}
func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) {
if !isArrayOrSlice(actual) && !isMap(actual) {
return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
}
elements := matcher.Elements
if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) {
elements = []interface{}{}
value := reflect.ValueOf(matcher.Elements[0])
for i := 0; i < value.Len(); i++ {
elements = append(elements, value.Index(i).Interface())
}
}
matchers := []interface{}{}
for _, element := range elements {
matcher, isMatcher := element.(omegaMatcher)
if !isMatcher {
matcher = &EqualMatcher{Expected: element}
}
matchers = append(matchers, matcher)
}
values := matcher.valuesOf(actual)
if len(values) != len(matchers) {
return false, nil
}
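// Build a bipartite graph between actual values and expected matchers, with an
// edge wherever a matcher accepts a value. ConsistOf succeeds only if the largest
// matching pairs every value with a distinct matcher.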
neighbours := func(v, m interface{}) (bool, error) {
match, err := m.(omegaMatcher).Match(v)
return match && err == nil, nil
}
bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours)
if err != nil {
return false, err
}
return len(bipartiteGraph.LargestMatching()) == len(values), nil
}
func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} {
value := reflect.ValueOf(actual)
values := []interface{}{}
if isMap(actual) {
keys := value.MapKeys()
for i := 0; i < value.Len(); i++ {
values = append(values, value.MapIndex(keys[i]).Interface())
}
} else {
for i := 0; i < value.Len(); i++ {
values = append(values, value.Index(i).Interface())
}
}
return values
}
func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to consist of", matcher.Elements)
}
func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to consist of", matcher.Elements)
}

View File

@ -0,0 +1,56 @@
package matchers
import (
"fmt"
"reflect"
"github.com/onsi/gomega/format"
)
type ContainElementMatcher struct {
Element interface{}
}
func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
if !isArrayOrSlice(actual) && !isMap(actual) {
return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
}
elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
if !elementIsMatcher {
elemMatcher = &EqualMatcher{Expected: matcher.Element}
}
value := reflect.ValueOf(actual)
var keys []reflect.Value
if isMap(actual) {
keys = value.MapKeys()
}
var lastError error
for i := 0; i < value.Len(); i++ {
var success bool
var err error
if isMap(actual) {
success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface())
} else {
success, err = elemMatcher.Match(value.Index(i).Interface())
}
if err != nil {
lastError = err
continue
}
if success {
return true, nil
}
}
return false, lastError
}
func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to contain element matching", matcher.Element)
}
func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to contain element matching", matcher.Element)
}

View File

@ -0,0 +1,37 @@
package matchers
import (
"fmt"
"github.com/onsi/gomega/format"
"strings"
)
type ContainSubstringMatcher struct {
Substr string
Args []interface{}
}
func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
}
return strings.Contains(actualString, matcher.stringToMatch()), nil
}
func (matcher *ContainSubstringMatcher) stringToMatch() string {
stringToMatch := matcher.Substr
if len(matcher.Args) > 0 {
stringToMatch = fmt.Sprintf(matcher.Substr, matcher.Args...)
}
return stringToMatch
}
func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, "to contain substring", matcher.stringToMatch())
}
func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not to contain substring", matcher.stringToMatch())
}

Some files were not shown because too many files have changed in this diff.