vendor: bump ginkgo, gover

Signed-off-by: Casey Callendrello <cdc@redhat.com>
Casey Callendrello 2020-12-08 14:51:48 +01:00
parent 509d645ee9
commit b47d178ae0
408 changed files with 194680 additions and 1961 deletions

8
go.mod

@@ -15,17 +15,15 @@ require (
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4 // indirect
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c
github.com/golang/protobuf v1.3.1 // indirect
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56
github.com/mattn/go-shellwords v1.0.3
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5 // indirect
github.com/onsi/ginkgo v1.12.1
github.com/onsi/gomega v1.10.3
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8
github.com/sirupsen/logrus v1.0.6 // indirect
github.com/stretchr/testify v1.3.0 // indirect
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941 // indirect
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1 // indirect
golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637
gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
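
This bump moves ginkgo and gomega from 2015-era pseudo-versions to the tagged releases v1.12.1 and v1.10.3, and adds gover (a tool that merges Go coverage profiles) as an indirect dependency. As a rough sketch of what the bumped test stack looks like in use, a minimal ginkgo v1 test file might read as follows; the package and suite names are illustrative and not taken from this repository:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// TestExample hooks the ginkgo runner into the standard "go test" machinery.
func TestExample(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Example Suite")
}

var _ = Describe("an example spec", func() {
	It("uses gomega matchers", func() {
		Expect(1 + 1).To(Equal(2))
	})
})
```

With the dot imports, Fail, RunSpecs, Describe and It come from ginkgo, while RegisterFailHandler, Expect and Equal come from gomega.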

56
go.sum

@@ -22,18 +22,41 @@ github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4 h1:itqmmf1PFpC4n5
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c h1:RBUpb2b14UnmRHNd2uHz20ZHLDK+SW5Us/vWF5IHRaY=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56 h1:742eGXur0715JMq73aD95/FU0XpVKXqNuTnEfXsLOYQ=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/mattn/go-shellwords v1.0.3 h1:K/VxK7SZ+cvuPgFSLKi5QPI9Vr/ipOf4C1gN+ntueUk=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5 h1:8Q0qkMVC/MmWkpIdlvZgcv2o2jrlF6zqVOh7W5YHdMA=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b h1:Ey6yH0acn50T/v6CB75bGP4EMJqnv9WvnjN7oZaj+xE=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a h1:KfNOeFvoAssuZLT7IntKZElKwi/5LRuxY71k+t6rfaM=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U=
@@ -49,13 +72,46 @@ github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3C
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941 h1:qBTHLajHecfu+xzRI9PqVDcqx7SdHj9d4B+EzSn3tAc=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1 h1:Y/KGZSOdz/2r0WJ9Mkmz6NJBusp0kiNx1Cn82lzJQ6w=
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 h1:wBouT66WTYFXdxfVdz9sVWARVd/2vfGcmI45D2gj45M=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637 h1:O5hKNaGxIT4A8OTMnuh6UpmBdI3SAPxlZ3g0olDrJVM=
golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

5
vendor/github.com/fsnotify/fsnotify/.editorconfig generated vendored Normal file

@@ -0,0 +1,5 @@
root = true
[*]
indent_style = tab
indent_size = 4

6
vendor/github.com/fsnotify/fsnotify/.gitignore generated vendored Normal file

@@ -0,0 +1,6 @@
# Setup a Global .gitignore for OS and editor generated files:
# https://help.github.com/articles/ignoring-files
# git config --global core.excludesfile ~/.gitignore_global
.vagrant
*.sublime-project

30
vendor/github.com/fsnotify/fsnotify/.travis.yml generated vendored Normal file

@@ -0,0 +1,30 @@
sudo: false
language: go
go:
- 1.8.x
- 1.9.x
- tip
matrix:
allow_failures:
- go: tip
fast_finish: true
before_script:
- go get -u github.com/golang/lint/golint
script:
- go test -v --race ./...
after_script:
- test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
- test -z "$(golint ./... | tee /dev/stderr)"
- go vet ./...
os:
- linux
- osx
notifications:
email: false

52
vendor/github.com/fsnotify/fsnotify/AUTHORS generated vendored Normal file

@@ -0,0 +1,52 @@
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# You can update this list using the following command:
#
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
# Please keep the list sorted.
Aaron L <aaron@bettercoder.net>
Adrien Bustany <adrien@bustany.org>
Amit Krishnan <amit.krishnan@oracle.com>
Anmol Sethi <me@anmol.io>
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Caleb Spare <cespare@gmail.com>
Case Nelson <case@teammating.com>
Chris Howey <chris@howey.me> <howeyc@gmail.com>
Christoffer Buchholz <christoffer.buchholz@gmail.com>
Daniel Wagner-Hall <dawagner@gmail.com>
Dave Cheney <dave@cheney.net>
Evan Phoenix <evan@fallingsnow.net>
Francisco Souza <f@souza.cc>
Hari haran <hariharan.uno@gmail.com>
John C Barstow
Kelvin Fo <vmirage@gmail.com>
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
Matt Layher <mdlayher@gmail.com>
Nathan Youngman <git@nathany.com>
Nickolai Zeldovich <nickolai@csail.mit.edu>
Patrick <patrick@dropbox.com>
Paul Hammond <paul@paulhammond.org>
Pawel Knap <pawelknap88@gmail.com>
Pieter Droogendijk <pieter@binky.org.uk>
Pursuit92 <JoshChase@techpursuit.net>
Riku Voipio <riku.voipio@linaro.org>
Rob Figueiredo <robfig@gmail.com>
Rodrigo Chiossi <rodrigochiossi@gmail.com>
Slawek Ligus <root@ooz.ie>
Soge Zhang <zhssoge@gmail.com>
Tiffany Jernigan <tiffany.jernigan@intel.com>
Tilak Sharma <tilaks@google.com>
Tom Payne <twpayne@gmail.com>
Travis Cline <travis.cline@gmail.com>
Tudor Golubenco <tudor.g@gmail.com>
Vahe Khachikyan <vahe@live.ca>
Yukang <moorekang@gmail.com>
bronze1man <bronze1man@gmail.com>
debrando <denis.brandolini@gmail.com>
henrikedwards <henrik.edwards@gmail.com>
铁哥 <guotie.9@gmail.com>

317
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md generated vendored Normal file

@@ -0,0 +1,317 @@
# Changelog
## v1.4.7 / 2018-01-09
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
* Tests: Fix missing verb on format string (thanks @rchiossi)
* Linux: Fix deadlock in Remove (thanks @aarondl)
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
* Docs: Moved FAQ into the README (thanks @vahe)
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
* Docs: replace references to OS X with macOS
## v1.4.2 / 2016-10-10
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
## v1.4.1 / 2016-10-04
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
## v1.4.0 / 2016-10-01
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
## v1.3.1 / 2016-06-28
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
## v1.3.0 / 2016-04-19
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
## v1.2.10 / 2016-03-02
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
## v1.2.9 / 2016-01-13
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
## v1.2.8 / 2015-12-17
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
* inotify: fix race in test
* enable race detection for continuous integration (Linux, Mac, Windows)
## v1.2.5 / 2015-10-17
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
## v1.2.1 / 2015-10-14
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
## v1.2.0 / 2015-02-08
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
## v1.1.1 / 2015-02-05
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
## v1.1.0 / 2014-12-12
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
* add low-level functions
* only need to store flags on directories
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
* done can be an unbuffered channel
* remove calls to os.NewSyscallError
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
## v1.0.4 / 2014-09-07
* kqueue: add dragonfly to the build tags.
* Rename source code files, rearrange code so exported APIs are at the top.
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
## v1.0.3 / 2014-08-19
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
## v1.0.2 / 2014-08-17
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
## v1.0.0 / 2014-08-15
* [API] Remove AddWatch on Windows, use Add.
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
* Minor updates based on feedback from golint.
## dev / 2014-07-09
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
## dev / 2014-07-04
* kqueue: fix incorrect mutex used in Close()
* Update example to demonstrate usage of Op.
## dev / 2014-06-28
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
* Fix for String() method on Event (thanks Alex Brainman)
* Don't build on Plan 9 or Solaris (thanks @4ad)
## dev / 2014-06-21
* Events channel of type Event rather than *Event.
* [internal] use syscall constants directly for inotify and kqueue.
* [internal] kqueue: rename events to kevents and fileEvent to event.
## dev / 2014-06-19
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
* [internal] remove cookie from Event struct (unused).
* [internal] Event struct has the same definition across every OS.
* [internal] remove internal watch and removeWatch methods.
## dev / 2014-06-12
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
* [API] Pluralized channel names: Events and Errors.
* [API] Renamed FileEvent struct to Event.
* [API] Op constants replace methods like IsCreate().
## dev / 2014-06-12
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
## dev / 2014-05-23
* [API] Remove current implementation of WatchFlags.
* current implementation doesn't take advantage of OS for efficiency
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
* no tests for the current implementation
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
## v0.9.3 / 2014-12-31
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
## v0.9.2 / 2014-08-17
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
## v0.9.1 / 2014-06-12
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
## v0.9.0 / 2014-01-17
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
## v0.8.12 / 2013-11-13
* [API] Remove FD_SET and friends from Linux adapter
## v0.8.11 / 2013-11-02
* [Doc] Add Changelog [#72][] (thanks @nathany)
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
## v0.8.10 / 2013-10-19
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
* [Doc] specify OS-specific limits in README (thanks @debrando)
## v0.8.9 / 2013-09-08
* [Doc] Contributing (thanks @nathany)
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
* [Doc] GoCI badge in README (Linux only) [#60][]
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
## v0.8.8 / 2013-06-17
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
## v0.8.7 / 2013-06-03
* [API] Make syscall flags internal
* [Fix] inotify: ignore event changes
* [Fix] race in symlink test [#45][] (reported by @srid)
* [Fix] tests on Windows
* lower case error messages
## v0.8.6 / 2013-05-23
* kqueue: Use EVT_ONLY flag on Darwin
* [Doc] Update README with full example
## v0.8.5 / 2013-05-09
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
## v0.8.4 / 2013-04-07
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
## v0.8.3 / 2013-03-13
* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
## v0.8.2 / 2013-02-07
* [Doc] add Authors
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
## v0.8.1 / 2013-01-09
* [Fix] Windows path separators
* [Doc] BSD License
## v0.8.0 / 2012-11-09
* kqueue: directory watching improvements (thanks @vmirage)
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
## v0.7.4 / 2012-10-09
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
* [Fix] kqueue: modify after recreation of file
## v0.7.3 / 2012-09-27
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
* [Fix] kqueue: no longer get duplicate CREATE events
## v0.7.2 / 2012-09-01
* kqueue: events for created directories
## v0.7.1 / 2012-07-14
* [Fix] for renaming files
## v0.7.0 / 2012-07-02
* [Feature] FSNotify flags
* [Fix] inotify: Added file name back to event path
## v0.6.0 / 2012-06-06
* kqueue: watch files after directory created (thanks @tmc)
## v0.5.1 / 2012-05-22
* [Fix] inotify: remove all watches before Close()
## v0.5.0 / 2012-05-03
* [API] kqueue: return errors during watch instead of sending over channel
* kqueue: match symlink behavior on Linux
* inotify: add `DELETE_SELF` (requested by @taralx)
* [Fix] kqueue: handle EINTR (reported by @robfig)
* [Doc] Godoc example [#1][] (thanks @davecheney)
## v0.4.0 / 2012-03-30
* Go 1 released: build with go tool
* [Feature] Windows support using winfsnotify
* Windows does not have attribute change notifications
* Roll attribute notifications into IsModify
## v0.3.0 / 2012-02-19
* kqueue: add files when watch directory
## v0.2.0 / 2011-12-30
* update to latest Go weekly code
## v0.1.0 / 2011-10-19
* kqueue: add watch on file creation to match inotify
* kqueue: create file event
* inotify: ignore `IN_IGNORED` events
* event String()
* linux: common FileEvent functions
* initial commit
[#79]: https://github.com/howeyc/fsnotify/pull/79
[#77]: https://github.com/howeyc/fsnotify/pull/77
[#72]: https://github.com/howeyc/fsnotify/issues/72
[#71]: https://github.com/howeyc/fsnotify/issues/71
[#70]: https://github.com/howeyc/fsnotify/issues/70
[#63]: https://github.com/howeyc/fsnotify/issues/63
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#60]: https://github.com/howeyc/fsnotify/issues/60
[#59]: https://github.com/howeyc/fsnotify/issues/59
[#49]: https://github.com/howeyc/fsnotify/issues/49
[#45]: https://github.com/howeyc/fsnotify/issues/45
[#40]: https://github.com/howeyc/fsnotify/issues/40
[#36]: https://github.com/howeyc/fsnotify/issues/36
[#33]: https://github.com/howeyc/fsnotify/issues/33
[#29]: https://github.com/howeyc/fsnotify/issues/29
[#25]: https://github.com/howeyc/fsnotify/issues/25
[#24]: https://github.com/howeyc/fsnotify/issues/24
[#21]: https://github.com/howeyc/fsnotify/issues/21

77
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md generated vendored Normal file

@@ -0,0 +1,77 @@
# Contributing
## Issues
* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
* Please indicate the platform you are using fsnotify on.
* A code example to reproduce the problem is appreciated.
## Pull Requests
### Contributor License Agreement
fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
Please indicate that you have signed the CLA in your pull request.
### How fsnotify is Developed
* Development is done on feature branches.
* Tests are run on BSD, Linux, macOS and Windows.
* Pull requests are reviewed and [applied to master][am] using [hub][].
* Maintainers may modify or squash commits rather than asking contributors to.
* To issue a new release, the maintainers will:
* Update the CHANGELOG
* Tag a version, which will become available through gopkg.in.
### How to Fork
For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Ensure everything works and the tests pass (see below)
4. Commit your changes (`git commit -am 'Add some feature'`)
Contribute upstream:
1. Fork fsnotify on GitHub
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
3. Push to the branch (`git push fork my-new-feature`)
4. Create a new Pull Request on GitHub
This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
### Testing
fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
* When you're done, you will want to halt or destroy the Vagrant boxes.
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
### Maintainers
Help maintaining fsnotify is welcome. To be a maintainer:
* Submit a pull request and sign the CLA as above.
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
All code changes should be internal pull requests.
Releases are tagged using [Semantic Versioning](http://semver.org/).
[hub]: https://github.com/github/hub
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs

28
vendor/github.com/fsnotify/fsnotify/LICENSE generated vendored Normal file

@@ -0,0 +1,28 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2012 fsnotify Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

79
vendor/github.com/fsnotify/fsnotify/README.md generated vendored Normal file

@@ -0,0 +1,79 @@
# File system notifications for Go
[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
```console
go get -u golang.org/x/sys/...
```
Cross platform: Windows, Linux, BSD and macOS.
|Adapter |OS |Status |
|----------|----------|----------|
|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|fanotify |Linux 2.6.37+ | |
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
\* Android and iOS are untested.
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
## API stability
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
## Contributing
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
## Example
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
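For orientation, a minimal usage sketch in the spirit of the upstream example_test.go; the watched path and logging are illustrative:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	done := make(chan struct{})
	// Events and Errors are drained from their own goroutine (see the FAQ below).
	go func() {
		defer close(done)
		for {
			select {
			case event, ok := <-watcher.Events:
				if !ok {
					return
				}
				log.Println("event:", event)
				if event.Op&fsnotify.Write == fsnotify.Write {
					log.Println("modified file:", event.Name)
				}
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				log.Println("error:", err)
			}
		}
	}()

	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	<-done
}
```

Both the inotify and kqueue backends create Events and Errors as unbuffered channels, which is why the FAQ below says they currently have to be read from a separate goroutine.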
## FAQ
**When a file is moved to another directory is it still being watched?**
No (it shouldn't be, unless you are watching where it was moved to).
**When I watch a directory, are all subdirectories watched as well?**
No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
**Do I have to watch the Error and Event channels in a separate goroutine?**
As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
**Why am I receiving multiple events for the same file on OS X?**
Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
**How many files can be watched at once?**
There are OS-specific limits as to how many watches can be created:
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#18]: https://github.com/fsnotify/fsnotify/issues/18
[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#7]: https://github.com/howeyc/fsnotify/issues/7
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
## Related Projects
* [notify](https://github.com/rjeczalik/notify)
* [fsevents](https://github.com/fsnotify/fsevents)

37
vendor/github.com/fsnotify/fsnotify/fen.go generated vendored Normal file

@@ -0,0 +1,37 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build solaris
package fsnotify
import (
"errors"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
return nil
}

66
vendor/github.com/fsnotify/fsnotify/fsnotify.go generated vendored Normal file

@@ -0,0 +1,66 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
// Package fsnotify provides a platform-independent interface for file system notifications.
package fsnotify
import (
"bytes"
"errors"
"fmt"
)
// Event represents a single file system notification.
type Event struct {
Name string // Relative path to the file or directory.
Op Op // File operation that triggered the event.
}
// Op describes a set of file operations.
type Op uint32
// These are the generalized file operations that can trigger a notification.
const (
Create Op = 1 << iota
Write
Remove
Rename
Chmod
)
func (op Op) String() string {
// Use a buffer for efficient string concatenation
var buffer bytes.Buffer
if op&Create == Create {
buffer.WriteString("|CREATE")
}
if op&Remove == Remove {
buffer.WriteString("|REMOVE")
}
if op&Write == Write {
buffer.WriteString("|WRITE")
}
if op&Rename == Rename {
buffer.WriteString("|RENAME")
}
if op&Chmod == Chmod {
buffer.WriteString("|CHMOD")
}
if buffer.Len() == 0 {
return ""
}
return buffer.String()[1:] // Strip leading pipe
}
// String returns a string representation of the event in the form
// "file: REMOVE|WRITE|..."
func (e Event) String() string {
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
}
// Common errors that can be reported by a watcher
var ErrEventOverflow = errors.New("fsnotify queue overflow")

337
vendor/github.com/fsnotify/fsnotify/inotify.go generated vendored Normal file

@@ -0,0 +1,337 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"unsafe"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
mu sync.Mutex // Map access
fd int
poller *fdPoller
watches map[string]*watch // Map of inotify watches (key: path)
paths map[int]string // Map of watched paths (key: watch descriptor)
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
doneResp chan struct{} // Channel to respond to Close
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
// Create inotify fd
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
if fd == -1 {
return nil, errno
}
// Create epoll
poller, err := newFdPoller(fd)
if err != nil {
unix.Close(fd)
return nil, err
}
w := &Watcher{
fd: fd,
poller: poller,
watches: make(map[string]*watch),
paths: make(map[int]string),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
go w.readEvents()
return w, nil
}
func (w *Watcher) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
if w.isClosed() {
return nil
}
// Send 'close' signal to goroutine, and set the Watcher to closed.
close(w.done)
// Wake up goroutine
w.poller.wake()
// Wait for goroutine to close
<-w.doneResp
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
name = filepath.Clean(name)
if w.isClosed() {
return errors.New("inotify instance already closed")
}
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
var flags uint32 = agnosticEvents
w.mu.Lock()
defer w.mu.Unlock()
watchEntry := w.watches[name]
if watchEntry != nil {
flags |= watchEntry.flags | unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
if watchEntry == nil {
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
w.paths[wd] = name
} else {
watchEntry.wd = uint32(wd)
watchEntry.flags = flags
}
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
// Fetch the watch.
w.mu.Lock()
defer w.mu.Unlock()
watch, ok := w.watches[name]
// Remove it from inotify.
if !ok {
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
}
// We successfully removed the watch if InotifyRmWatch doesn't return an
// error, we need to clean up our internal state to ensure it matches
// inotify's kernel state.
delete(w.paths, int(watch.wd))
delete(w.watches, name)
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify will already have been removed.
// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
// by another thread and we have not received IN_IGNORE event.
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
if success == -1 {
// TODO: Perhaps it's not helpful to return an error here in every case.
// the only two possible errors are:
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
return errno
}
return nil
}
type watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *Watcher) readEvents() {
var (
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
n int // Number of bytes read with read()
errno error // Syscall errno
ok bool // For poller.wait
)
defer close(w.doneResp)
defer close(w.Errors)
defer close(w.Events)
defer unix.Close(w.fd)
defer w.poller.close()
for {
// See if we have been closed.
if w.isClosed() {
return
}
ok, errno = w.poller.wait()
if errno != nil {
select {
case w.Errors <- errno:
case <-w.done:
return
}
continue
}
if !ok {
continue
}
n, errno = unix.Read(w.fd, buf[:])
// If a signal interrupted execution, see if we've been asked to close, and try again.
// http://man7.org/linux/man-pages/man7/signal.7.html :
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
if errno == unix.EINTR {
continue
}
// unix.Read might have been woken up by Close. If so, we're done.
if w.isClosed() {
return
}
if n < unix.SizeofInotifyEvent {
var err error
if n == 0 {
// If EOF is received, which should really never happen.
err = io.EOF
} else if n < 0 {
// If an error occurred while reading.
err = errno
} else {
// Read was too short.
err = errors.New("notify: short read in readEvents()")
}
select {
case w.Errors <- err:
case <-w.done:
return
}
continue
}
var offset uint32
// We don't know how many events we just read into the buffer
// While the offset points to at least one whole event...
for offset <= uint32(n-unix.SizeofInotifyEvent) {
// Point "raw" to the event in the buffer
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
mask := uint32(raw.Mask)
nameLen := uint32(raw.Len)
if mask&unix.IN_Q_OVERFLOW != 0 {
select {
case w.Errors <- ErrEventOverflow:
case <-w.done:
return
}
}
// If the event happened to the watched directory or the watched file, the kernel
// doesn't append the filename to the event, but we would like to always fill the
// "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
name, ok := w.paths[int(raw.Wd)]
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
// This is a sign to clean up the maps, otherwise we are no longer in sync
// with the inotify kernel state which has already deleted the watch
// automatically.
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
delete(w.paths, int(raw.Wd))
delete(w.watches, name)
}
w.mu.Unlock()
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}
event := newEvent(name, mask)
// Send the events that are not ignored on the events channel
if !event.ignoreLinux(mask) {
select {
case w.Events <- event:
case <-w.done:
return
}
}
// Move to the next event in the buffer
offset += unix.SizeofInotifyEvent + nameLen
}
}
}
// Certain types of events can be "ignored" and not sent over the Events
// channel. Such as events marked ignore by the kernel, or MODIFY events
// against files that do not exist.
func (e *Event) ignoreLinux(mask uint32) bool {
// Ignore anything the inotify API says to ignore
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
return true
}
// If the event is not a DELETE or RENAME, the file must exist.
// Otherwise the event is ignored.
// *Note*: this was put in place because it was seen that a MODIFY
// event was sent after the DELETE. This ignores that MODIFY and
// assumes a DELETE will come or has come if the file doesn't exist.
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
_, statErr := os.Lstat(e.Name)
return os.IsNotExist(statErr)
}
return false
}
// newEvent returns a platform-independent Event based on an inotify mask.
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
e.Op |= Create
}
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
e.Op |= Remove
}
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
e.Op |= Write
}
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
e.Op |= Rename
}
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
e.Op |= Chmod
}
return e
}

187
vendor/github.com/fsnotify/fsnotify/inotify_poller.go generated vendored Normal file

@@ -0,0 +1,187 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"golang.org/x/sys/unix"
)
type fdPoller struct {
fd int // File descriptor (as returned by the inotify_init() syscall)
epfd int // Epoll file descriptor
pipe [2]int // Pipe for waking up
}
func emptyPoller(fd int) *fdPoller {
poller := new(fdPoller)
poller.fd = fd
poller.epfd = -1
poller.pipe[0] = -1
poller.pipe[1] = -1
return poller
}
// Create a new inotify poller.
// This creates an inotify handler, and an epoll handler.
func newFdPoller(fd int) (*fdPoller, error) {
var errno error
poller := emptyPoller(fd)
defer func() {
if errno != nil {
poller.close()
}
}()
poller.fd = fd
// Create epoll fd
poller.epfd, errno = unix.EpollCreate1(0)
if poller.epfd == -1 {
return nil, errno
}
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
if errno != nil {
return nil, errno
}
// Register inotify fd with epoll
event := unix.EpollEvent{
Fd: int32(poller.fd),
Events: unix.EPOLLIN,
}
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
if errno != nil {
return nil, errno
}
// Register pipe fd with epoll
event = unix.EpollEvent{
Fd: int32(poller.pipe[0]),
Events: unix.EPOLLIN,
}
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
if errno != nil {
return nil, errno
}
return poller, nil
}
// Wait using epoll.
// Returns true if something is ready to be read,
// false if there is not.
func (poller *fdPoller) wait() (bool, error) {
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
// I don't know whether epoll_wait returns the number of events returned,
// or the total number of events ready.
// I decided to catch both by making the buffer one larger than the maximum.
events := make([]unix.EpollEvent, 7)
for {
n, errno := unix.EpollWait(poller.epfd, events, -1)
if n == -1 {
if errno == unix.EINTR {
continue
}
return false, errno
}
if n == 0 {
// If there are no events, try again.
continue
}
if n > 6 {
// This should never happen. More events were returned than should be possible.
return false, errors.New("epoll_wait returned more events than I know what to do with")
}
ready := events[:n]
epollhup := false
epollerr := false
epollin := false
for _, event := range ready {
if event.Fd == int32(poller.fd) {
if event.Events&unix.EPOLLHUP != 0 {
// This should not happen, but if it does, treat it as a wakeup.
epollhup = true
}
if event.Events&unix.EPOLLERR != 0 {
// If an error is waiting on the file descriptor, we should pretend
// something is ready to read, and let unix.Read pick up the error.
epollerr = true
}
if event.Events&unix.EPOLLIN != 0 {
// There is data to read.
epollin = true
}
}
if event.Fd == int32(poller.pipe[0]) {
if event.Events&unix.EPOLLHUP != 0 {
// Write pipe descriptor was closed, by us. This means we're closing down the
// watcher, and we should wake up.
}
if event.Events&unix.EPOLLERR != 0 {
// If an error is waiting on the pipe file descriptor.
// This is an absolute mystery, and should never ever happen.
return false, errors.New("Error on the pipe descriptor.")
}
if event.Events&unix.EPOLLIN != 0 {
// This is a regular wakeup, so we have to clear the buffer.
err := poller.clearWake()
if err != nil {
return false, err
}
}
}
}
if epollhup || epollerr || epollin {
return true, nil
}
return false, nil
}
}
// Close the write end of the poller.
func (poller *fdPoller) wake() error {
buf := make([]byte, 1)
n, errno := unix.Write(poller.pipe[1], buf)
if n == -1 {
if errno == unix.EAGAIN {
// Buffer is full, poller will wake.
return nil
}
return errno
}
return nil
}
func (poller *fdPoller) clearWake() error {
// You have to be woken up a LOT in order to get to 100!
buf := make([]byte, 100)
n, errno := unix.Read(poller.pipe[0], buf)
if n == -1 {
if errno == unix.EAGAIN {
// Buffer is empty, someone else cleared our wake.
return nil
}
return errno
}
return nil
}
// Close all poller file descriptors, but not the one passed to it.
func (poller *fdPoller) close() {
if poller.pipe[1] != -1 {
unix.Close(poller.pipe[1])
}
if poller.pipe[0] != -1 {
unix.Close(poller.pipe[0])
}
if poller.epfd != -1 {
unix.Close(poller.epfd)
}
}

521
vendor/github.com/fsnotify/fsnotify/kqueue.go generated vendored Normal file

@@ -0,0 +1,521 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly darwin
package fsnotify
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
kq int // File descriptor (as returned by the kqueue() syscall).
mu sync.Mutex // Protects access to watcher data
watches map[string]int // Map of watched file descriptors (key: path).
externalWatches map[string]bool // Map of watches added by user of the library.
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
isClosed bool // Set to true when Close() is first called
}
type pathInfo struct {
name string
isDir bool
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
kq, err := kqueue()
if err != nil {
return nil, err
}
w := &Watcher{
kq: kq,
watches: make(map[string]int),
dirFlags: make(map[string]uint32),
paths: make(map[int]pathInfo),
fileExists: make(map[string]bool),
externalWatches: make(map[string]bool),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan struct{}),
}
go w.readEvents()
return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return nil
}
w.isClosed = true
// copy paths to remove while locked
var pathsToRemove = make([]string, 0, len(w.watches))
for name := range w.watches {
pathsToRemove = append(pathsToRemove, name)
}
w.mu.Unlock()
// unlock before calling Remove, which also locks
for _, name := range pathsToRemove {
w.Remove(name)
}
// send a "quit" message to the reader goroutine
close(w.done)
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
w.mu.Lock()
w.externalWatches[name] = true
w.mu.Unlock()
_, err := w.addWatch(name, noteAllEvents)
return err
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
w.mu.Lock()
watchfd, ok := w.watches[name]
w.mu.Unlock()
if !ok {
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
}
const registerRemove = unix.EV_DELETE
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
return err
}
unix.Close(watchfd)
w.mu.Lock()
isDir := w.paths[watchfd].isDir
delete(w.watches, name)
delete(w.paths, watchfd)
delete(w.dirFlags, name)
w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
if isDir {
var pathsToRemove []string
w.mu.Lock()
for _, path := range w.paths {
wdir, _ := filepath.Split(path.name)
if filepath.Clean(wdir) == name {
if !w.externalWatches[path.name] {
pathsToRemove = append(pathsToRemove, path.name)
}
}
}
w.mu.Unlock()
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error
// to the user, as that will just confuse them with an error about
// a path they did not explicitly watch themselves.
w.Remove(name)
}
}
return nil
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
// keventWaitTime to block on each read from kevent
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
// addWatch adds name to the watched file set.
// The flags are interpreted as described in kevent(2).
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
var isDir bool
// Make ./name and name equivalent
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return "", errors.New("kevent instance already closed")
}
watchfd, alreadyWatching := w.watches[name]
// We already have a watch, but we can still override flags.
if alreadyWatching {
isDir = w.paths[watchfd].isDir
}
w.mu.Unlock()
if !alreadyWatching {
fi, err := os.Lstat(name)
if err != nil {
return "", err
}
// Don't watch sockets.
if fi.Mode()&os.ModeSocket == os.ModeSocket {
return "", nil
}
// Don't watch named pipes.
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
return "", nil
}
// Follow Symlinks
// Unfortunately, Linux can add bogus symlinks to watch list without
// issue, and Windows can't do symlinks period (AFAIK). To maintain
// consistency, we will act like everything is fine. There will simply
// be no file events for broken symlinks.
// Hence the returns of nil on errors.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
name, err = filepath.EvalSymlinks(name)
if err != nil {
return "", nil
}
w.mu.Lock()
_, alreadyWatching = w.watches[name]
w.mu.Unlock()
if alreadyWatching {
return name, nil
}
fi, err = os.Lstat(name)
if err != nil {
return "", nil
}
}
watchfd, err = unix.Open(name, openMode, 0700)
if watchfd == -1 {
return "", err
}
isDir = fi.IsDir()
}
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
unix.Close(watchfd)
return "", err
}
if !alreadyWatching {
w.mu.Lock()
w.watches[name] = watchfd
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
w.mu.Unlock()
}
if isDir {
// Watch the directory if it has not been watched before,
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
w.mu.Lock()
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
// Store flags so this watch can be updated later
w.dirFlags[name] = flags
w.mu.Unlock()
if watchDir {
if err := w.watchDirectoryFiles(name); err != nil {
return "", err
}
}
}
return name, nil
}
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
func (w *Watcher) readEvents() {
eventBuffer := make([]unix.Kevent_t, 10)
loop:
for {
// See if there is a message on the "done" channel
select {
case <-w.done:
break loop
default:
}
// Get new events
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
select {
case w.Errors <- err:
case <-w.done:
break loop
}
continue
}
// Flush the events we received to the Events channel
for len(kevents) > 0 {
kevent := &kevents[0]
watchfd := int(kevent.Ident)
mask := uint32(kevent.Fflags)
w.mu.Lock()
path := w.paths[watchfd]
w.mu.Unlock()
event := newEvent(path.name, mask)
if path.isDir && !(event.Op&Remove == Remove) {
// Double check to make sure the directory exists. This can happen when
// we do a rm -fr on a recursively watched folder and we receive a
// modification event first, but the folder has been deleted and we later
// receive the delete event.
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
// mark it as a delete event
event.Op |= Remove
}
}
if event.Op&Rename == Rename || event.Op&Remove == Remove {
w.Remove(event.Name)
w.mu.Lock()
delete(w.fileExists, event.Name)
w.mu.Unlock()
}
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
w.sendDirectoryChangeEvents(event.Name)
} else {
// Send the event on the Events channel.
select {
case w.Events <- event:
case <-w.done:
break loop
}
}
if event.Op&Remove == Remove {
// Look for a file that may have overwritten this.
// For example, mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
w.mu.Lock()
_, found := w.watches[fileDir]
w.mu.Unlock()
if found {
// make sure the directory exists before we watch for changes. When we
// do a recursive watch and perform rm -fr, the parent directory might
// have gone missing, ignore the missing directory and let the
// upcoming delete event remove the watch from the parent directory.
if _, err := os.Lstat(fileDir); err == nil {
w.sendDirectoryChangeEvents(fileDir)
}
}
} else {
filePath := filepath.Clean(event.Name)
if fileInfo, err := os.Lstat(filePath); err == nil {
w.sendFileCreatedEventIfNew(filePath, fileInfo)
}
}
}
// Move to next event
kevents = kevents[1:]
}
}
// cleanup
err := unix.Close(w.kq)
if err != nil {
// The only way the previous loop breaks is if w.done was closed, so we need a non-blocking send to w.Errors.
select {
case w.Errors <- err:
default:
}
}
close(w.Events)
close(w.Errors)
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
e.Op |= Remove
}
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
e.Op |= Write
}
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
e.Op |= Rename
}
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
return e
}
func newCreateEvent(name string) Event {
return Event{Name: name, Op: Create}
}
// watchDirectoryFiles watches all files in a directory to mimic inotify when adding a watch on a directory
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
return err
}
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = true
w.mu.Unlock()
}
return nil
}
// sendDirectoryChangeEvents searches the directory for newly created files
// and sends them over the event channel. This functionality is to have
// the BSD version of fsnotify match Linux inotify which provides a
// create event for files created in a watched directory.
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
select {
case w.Errors <- err:
case <-w.done:
return
}
}
// Search for new files
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
if err != nil {
return
}
}
}
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
w.mu.Lock()
_, doesExist := w.fileExists[filePath]
w.mu.Unlock()
if !doesExist {
// Send create event
select {
case w.Events <- newCreateEvent(filePath):
case <-w.done:
return
}
}
// like watchDirectoryFiles (but without doing another ReadDir)
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = true
w.mu.Unlock()
return nil
}
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
if fileInfo.IsDir() {
// mimic Linux providing delete events for subdirectories
// but preserve the flags used if currently watching subdirectory
w.mu.Lock()
flags := w.dirFlags[name]
w.mu.Unlock()
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
return w.addWatch(name, flags)
}
// watch file to mimic Linux inotify
return w.addWatch(name, noteAllEvents)
}
// kqueue creates a new kernel event queue and returns a descriptor.
func kqueue() (kq int, err error) {
kq, err = unix.Kqueue()
if kq == -1 {
return kq, err
}
return kq, nil
}
// register events with the queue
func register(kq int, fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types:
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
changes[i].Fflags = fflags
}
// register the events
success, err := unix.Kevent(kq, changes, nil, nil)
if success == -1 {
return err
}
return nil
}
// read retrieves pending events, or waits until an event occurs.
// A timeout of nil blocks indefinitely, while 0 polls the queue.
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
n, err := unix.Kevent(kq, nil, events, timeout)
if err != nil {
return nil, err
}
return events[0:n], nil
}
// durationToTimespec prepares a timeout value
func durationToTimespec(d time.Duration) unix.Timespec {
return unix.NsecToTimespec(d.Nanoseconds())
}

11
vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly
package fsnotify
import "golang.org/x/sys/unix"
const openMode = unix.O_NONBLOCK | unix.O_RDONLY

12
vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin
package fsnotify
import "golang.org/x/sys/unix"
// note: this constant is not defined on BSD
const openMode = unix.O_EVTONLY

561
vendor/github.com/fsnotify/fsnotify/windows.go generated vendored Normal file
View File

@ -0,0 +1,561 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"syscall"
"unsafe"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
isClosed bool // Set to true when Close() is first called
mu sync.Mutex // Map access
port syscall.Handle // Handle to completion port
watches watchMap // Map of watches (key: i-number)
input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
if e != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
}
w := &Watcher{
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
Events: make(chan Event, 50),
Errors: make(chan error),
quit: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
if w.isClosed {
return nil
}
w.isClosed = true
// Send "quit" message to the reader goroutine
ch := make(chan error)
w.quit <- ch
if err := w.wakeupReader(); err != nil {
return err
}
return <-ch
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
if w.isClosed {
return errors.New("watcher already closed")
}
in := &input{
op: opAddWatch,
path: filepath.Clean(name),
flags: sysFSALLEVENTS,
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
in := &input{
op: opRemoveWatch,
path: filepath.Clean(name),
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
const (
// Options for AddWatch
sysFSONESHOT = 0x80000000
sysFSONLYDIR = 0x1000000
// Events
sysFSACCESS = 0x1
sysFSALLEVENTS = 0xfff
sysFSATTRIB = 0x4
sysFSCLOSE = 0x18
sysFSCREATE = 0x100
sysFSDELETE = 0x200
sysFSDELETESELF = 0x400
sysFSMODIFY = 0x2
sysFSMOVE = 0xc0
sysFSMOVEDFROM = 0x40
sysFSMOVEDTO = 0x80
sysFSMOVESELF = 0x800
// Special events
sysFSIGNORED = 0x8000
sysFSQOVERFLOW = 0x4000
)
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
e.Op |= Create
}
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
e.Op |= Remove
}
if mask&sysFSMODIFY == sysFSMODIFY {
e.Op |= Write
}
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename
}
if mask&sysFSATTRIB == sysFSATTRIB {
e.Op |= Chmod
}
return e
}
const (
opAddWatch = iota
opRemoveWatch
)
const (
provisional uint64 = 1 << (32 + iota)
)
type input struct {
op int
path string
flags uint32
reply chan error
}
type inode struct {
handle syscall.Handle
volume uint32
index uint64
}
type watch struct {
ov syscall.Overlapped
ino *inode // i-number
path string // Directory path
mask uint64 // Directory itself is being watched with these notify flags
names map[string]uint64 // Map of names being watched and their notify flags
rename string // Remembers the old name while renaming a file
buf [4096]byte
}
type indexMap map[uint64]*watch
type watchMap map[uint32]indexMap
func (w *Watcher) wakeupReader() error {
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if e != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", e)
}
return nil
}
func getDir(pathname string) (dir string, err error) {
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
if e != nil {
return "", os.NewSyscallError("GetFileAttributes", e)
}
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
dir = pathname
} else {
dir, _ = filepath.Split(pathname)
dir = filepath.Clean(dir)
}
return
}
func getIno(path string) (ino *inode, err error) {
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
syscall.FILE_LIST_DIRECTORY,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
nil, syscall.OPEN_EXISTING,
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
if e != nil {
return nil, os.NewSyscallError("CreateFile", e)
}
var fi syscall.ByHandleFileInformation
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
syscall.CloseHandle(h)
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
}
ino = &inode{
handle: h,
volume: fi.VolumeSerialNumber,
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
}
return ino, nil
}
// Must run within the I/O thread.
func (m watchMap) get(ino *inode) *watch {
if i := m[ino.volume]; i != nil {
return i[ino.index]
}
return nil
}
// Must run within the I/O thread.
func (m watchMap) set(ino *inode, watch *watch) {
i := m[ino.volume]
if i == nil {
i = make(indexMap)
m[ino.volume] = i
}
i[ino.index] = watch
}
// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
if flags&sysFSONLYDIR != 0 && pathname != dir {
return nil
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watchEntry := w.watches.get(ino)
w.mu.Unlock()
if watchEntry == nil {
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
syscall.CloseHandle(ino.handle)
return os.NewSyscallError("CreateIoCompletionPort", e)
}
watchEntry = &watch{
ino: ino,
path: dir,
names: make(map[string]uint64),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
w.mu.Unlock()
flags |= provisional
} else {
syscall.CloseHandle(ino.handle)
}
if pathname == dir {
watchEntry.mask |= flags
} else {
watchEntry.names[filepath.Base(pathname)] |= flags
}
if err = w.startRead(watchEntry); err != nil {
return err
}
if pathname == dir {
watchEntry.mask &= ^provisional
} else {
watchEntry.names[filepath.Base(pathname)] &= ^provisional
}
return nil
}
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watch := w.watches.get(ino)
w.mu.Unlock()
if watch == nil {
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
}
if pathname == dir {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
return w.startRead(watch)
}
// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
func (w *Watcher) startRead(watch *watch) error {
if e := syscall.CancelIo(watch.ino.handle); e != nil {
w.Errors <- os.NewSyscallError("CancelIo", e)
w.deleteWatch(watch)
}
mask := toWindowsFlags(watch.mask)
for _, m := range watch.names {
mask |= toWindowsFlags(m)
}
if mask == 0 {
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
w.Errors <- os.NewSyscallError("CloseHandle", e)
}
w.mu.Lock()
delete(w.watches[watch.ino.volume], watch.ino.index)
w.mu.Unlock()
return nil
}
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
if e != nil {
err := os.NewSyscallError("ReadDirectoryChanges", e)
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
if watch.mask&sysFSONESHOT != 0 {
watch.mask = 0
}
}
err = nil
}
w.deleteWatch(watch)
w.startRead(watch)
return err
}
return nil
}
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
func (w *Watcher) readEvents() {
var (
n, key uint32
ov *syscall.Overlapped
)
runtime.LockOSThread()
for {
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
case ch := <-w.quit:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {
indexes = append(indexes, index)
}
w.mu.Unlock()
for _, index := range indexes {
for _, watch := range index {
w.deleteWatch(watch)
w.startRead(watch)
}
}
var err error
if e := syscall.CloseHandle(w.port); e != nil {
err = os.NewSyscallError("CloseHandle", e)
}
close(w.Events)
close(w.Errors)
ch <- err
return
case in := <-w.input:
switch in.op {
case opAddWatch:
in.reply <- w.addWatch(in.path, uint64(in.flags))
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
default:
}
continue
}
switch e {
case syscall.ERROR_MORE_DATA:
if watch == nil {
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
} else {
// The i/o succeeded but the buffer is full.
// In theory we should be building up a full packet.
// In practice we can get away with just carrying on.
n = uint32(unsafe.Sizeof(watch.buf))
}
case syscall.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
case syscall.ERROR_OPERATION_ABORTED:
// CancelIo was called on this handle
continue
default:
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
continue
case nil:
}
var offset uint32
for {
if n == 0 {
w.Events <- newEvent("", sysFSQOVERFLOW)
w.Errors <- errors.New("short read in readEvents()")
break
}
// Point "raw" to the event in the buffer
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
fullname := filepath.Join(watch.path, name)
var mask uint64
switch raw.Action {
case syscall.FILE_ACTION_REMOVED:
mask = sysFSDELETESELF
case syscall.FILE_ACTION_MODIFIED:
mask = sysFSMODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
watch.rename = name
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
if watch.names[watch.rename] != 0 {
watch.names[name] |= watch.names[watch.rename]
delete(watch.names, watch.rename)
mask = sysFSMOVESELF
}
}
sendNameEvent := func() {
if w.sendEvent(fullname, watch.names[name]&mask) {
if watch.names[name]&sysFSONESHOT != 0 {
delete(watch.names, name)
}
}
}
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
sendNameEvent()
}
if raw.Action == syscall.FILE_ACTION_REMOVED {
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
if watch.mask&sysFSONESHOT != 0 {
watch.mask = 0
}
}
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
fullname = filepath.Join(watch.path, watch.rename)
sendNameEvent()
}
// Move to the next event in the buffer
if raw.NextEntryOffset == 0 {
break
}
offset += raw.NextEntryOffset
// Error!
if offset >= n {
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
break
}
}
if err := w.startRead(watch); err != nil {
w.Errors <- err
}
}
}
func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 {
return false
}
event := newEvent(name, uint32(mask))
select {
case ch := <-w.quit:
w.quit <- ch
case w.Events <- event:
}
return true
}
func toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sysFSACCESS != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
}
if mask&sysFSMODIFY != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
}
if mask&sysFSATTRIB != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
}
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
}
return m
}
func toFSnotifyFlags(action uint32) uint64 {
switch action {
case syscall.FILE_ACTION_ADDED:
return sysFSCREATE
case syscall.FILE_ACTION_REMOVED:
return sysFSDELETE
case syscall.FILE_ACTION_MODIFIED:
return sysFSMODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
return sysFSMOVEDFROM
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
return sysFSMOVEDTO
}
return 0
}

2
vendor/github.com/nxadm/tail/.gitignore generated vendored Normal file
View File

@ -0,0 +1,2 @@
.idea/
.test/

16
vendor/github.com/nxadm/tail/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,16 @@
language: go
script:
- go test -race -v ./...
go:
- "1.9"
- "1.10"
- "1.11"
- "1.12"
- "1.13"
- tip
matrix:
allow_failures:
- go: tip

46
vendor/github.com/nxadm/tail/CHANGES.md generated vendored Normal file
View File

@ -0,0 +1,46 @@
# Version v1.4.4
* Fix of checksum problem because of forced tag. No changes to the code.
# Version v1.4.1
* Incorporated PR 162 by Mohammed902: "Simplify non-Windows build tag".
# Version v1.4.0
* Incorporated PR 9 by mschneider82: "Added seekinfo to Tail".
# Version v1.3.1
* Incorporated PR 7: "Fix deadlock when stopping on non-empty file/buffer",
fixes upstream issue 93.
# Version v1.3.0
* Incorporated changes of unmerged upstream PR 149 by mezzi: "added line num
to Line struct".
# Version v1.2.1
* Incorporated changes of unmerged upstream PR 128 by jadekler: "Compile-able
code in readme".
* Incorporated changes of unmerged upstream PR 130 by fgeller: "small change
to comment wording".
* Incorporated changes of unmerged upstream PR 133 by sm3142: "removed
spurious newlines from log messages".
# Version v1.2.0
* Incorporated changes of unmerged upstream PR 126 by Code-Hex: "Solved the
problem for never return the last line if it's not followed by a newline".
* Incorporated changes of unmerged upstream PR 131 by StoicPerlman: "Remove
deprecated os.SEEK consts". The changes bumped the minimal supported Go
release to 1.9.
# Version v1.1.0
* migration to go modules.
* release of master branch of the dormant upstream, because it contains
fixes and improvements not present in the tagged release.

19
vendor/github.com/nxadm/tail/Dockerfile generated vendored Normal file
View File

@ -0,0 +1,19 @@
FROM golang
RUN mkdir -p $GOPATH/src/github.com/nxadm/tail/
ADD . $GOPATH/src/github.com/nxadm/tail/
# expecting to fetch dependencies successfully.
RUN go get -v github.com/nxadm/tail
# expecting to run the test successfully.
RUN go test -v github.com/nxadm/tail
# expecting to install successfully
RUN go install -v github.com/nxadm/tail
RUN go install -v github.com/nxadm/tail/cmd/gotail
RUN $GOPATH/bin/gotail -h || true
ENV PATH $GOPATH/bin:$PATH
CMD ["gotail"]

21
vendor/github.com/nxadm/tail/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
# The MIT License (MIT)
# © Copyright 2015 Hewlett Packard Enterprise Development LP
Copyright (c) 2014 ActiveState
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

36
vendor/github.com/nxadm/tail/README.md generated vendored Normal file
View File

@ -0,0 +1,36 @@
[![Build Status](https://travis-ci.org/nxadm/tail.svg?branch=master)](https://travis-ci.org/nxadm/tail)
This repo is forked from the dormant upstream repo at
[hpcloud](https://github.com/hpcloud/tail). This fork adds support for go
modules, updates the dependencies, adds features and fixes bugs. Go 1.9 is
the oldest compiler release supported.
# Go package for tail-ing files
A Go package striving to emulate the features of the BSD `tail` program.
```Go
t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
if err != nil {
panic(err)
}
for line := range t.Lines {
fmt.Println(line.Text)
}
```
See [API documentation](http://godoc.org/github.com/nxadm/tail).
## Log rotation
Tail comes with full support for truncation/move detection as it is
designed to work with log rotation tools.
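For example, a minimal sketch (the log path is illustrative) of following a file that is rotated by an external tool; combining `Follow` with `ReOpen` makes the tailer re-attach to the recreated file, similar to `tail -F`:

```Go
t, err := tail.TailFile("/var/log/app.log", tail.Config{Follow: true, ReOpen: true})
if err != nil {
    panic(err)
}
for line := range t.Lines {
    fmt.Println(line.Text)
}
```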
## Installing
go get github.com/nxadm/tail/...
## Windows support
This package [needs assistance](https://github.com/nxadm/tail/labels/Windows) for full Windows support.

11
vendor/github.com/nxadm/tail/appveyor.yml generated vendored Normal file
View File

@ -0,0 +1,11 @@
version: 0.{build}
skip_tags: true
cache: C:\Users\appveyor\AppData\Local\NuGet\Cache
build_script:
- SET GOPATH=c:\workspace
- go test -v -race ./...
test: off
clone_folder: c:\workspace\src\github.com\nxadm\tail
branches:
only:
- master

9
vendor/github.com/nxadm/tail/go.mod generated vendored Normal file
View File

@ -0,0 +1,9 @@
module github.com/nxadm/tail
go 1.13
require (
github.com/fsnotify/fsnotify v1.4.7
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
)

6
vendor/github.com/nxadm/tail/go.sum generated vendored Normal file
View File

@ -0,0 +1,6 @@
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=

7
vendor/github.com/nxadm/tail/ratelimiter/Licence generated vendored Normal file
View File

@ -0,0 +1,7 @@
Copyright (C) 2013 99designs
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

97
vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go generated vendored Normal file
View File

@ -0,0 +1,97 @@
// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
package ratelimiter
import (
"time"
)
type LeakyBucket struct {
Size uint16
Fill float64
LeakInterval time.Duration // time.Duration for 1 unit of size to leak
Lastupdate time.Time
Now func() time.Time
}
func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
bucket := LeakyBucket{
Size: size,
Fill: 0,
LeakInterval: leakInterval,
Now: time.Now,
Lastupdate: time.Now(),
}
return &bucket
}
func (b *LeakyBucket) updateFill() {
now := b.Now()
if b.Fill > 0 {
elapsed := now.Sub(b.Lastupdate)
b.Fill -= float64(elapsed) / float64(b.LeakInterval)
if b.Fill < 0 {
b.Fill = 0
}
}
b.Lastupdate = now
}
func (b *LeakyBucket) Pour(amount uint16) bool {
b.updateFill()
var newfill float64 = b.Fill + float64(amount)
if newfill > float64(b.Size) {
return false
}
b.Fill = newfill
return true
}
// The time at which this bucket will be completely drained
func (b *LeakyBucket) DrainedAt() time.Time {
return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
}
// The duration until this bucket is completely drained
func (b *LeakyBucket) TimeToDrain() time.Duration {
return b.DrainedAt().Sub(b.Now())
}
func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
return b.Now().Sub(b.Lastupdate)
}
type LeakyBucketSer struct {
Size uint16
Fill float64
LeakInterval time.Duration // time.Duration for 1 unit of size to leak
Lastupdate time.Time
}
func (b *LeakyBucket) Serialise() *LeakyBucketSer {
bucket := LeakyBucketSer{
Size: b.Size,
Fill: b.Fill,
LeakInterval: b.LeakInterval,
Lastupdate: b.Lastupdate,
}
return &bucket
}
func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
bucket := LeakyBucket{
Size: b.Size,
Fill: b.Fill,
LeakInterval: b.LeakInterval,
Lastupdate: b.Lastupdate,
Now: time.Now,
}
return &bucket
}
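// The sketch below (an editor's illustration, not part of the upstream
// package) shows the intended use: each call pours one unit into a bucket of
// capacity 10 that leaks one unit every 100ms; a false return from Pour means
// the caller has exceeded the allowed rate and should back off.
func examplePour() {
	bucket := NewLeakyBucket(10, 100*time.Millisecond)
	for i := 0; i < 20; i++ {
		if !bucket.Pour(1) {
			// Bucket is full: the caller has exceeded the allowed rate.
			return
		}
	}
}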

60
vendor/github.com/nxadm/tail/ratelimiter/memory.go generated vendored Normal file
View File

@ -0,0 +1,60 @@
package ratelimiter
import (
"errors"
"time"
)
const (
GC_SIZE int = 100
GC_PERIOD time.Duration = 60 * time.Second
)
type Memory struct {
store map[string]LeakyBucket
lastGCCollected time.Time
}
func NewMemory() *Memory {
m := new(Memory)
m.store = make(map[string]LeakyBucket)
m.lastGCCollected = time.Now()
return m
}
func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
bucket, ok := m.store[key]
if !ok {
return nil, errors.New("miss")
}
return &bucket, nil
}
func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
if len(m.store) > GC_SIZE {
m.GarbageCollect()
}
m.store[key] = bucket
return nil
}
func (m *Memory) GarbageCollect() {
now := time.Now()
// rate limit GC to once per minute
if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() {
for key, bucket := range m.store {
// if the bucket is drained, then GC
if bucket.DrainedAt().Unix() < now.Unix() {
delete(m.store, key)
}
}
m.lastGCCollected = now
}
}

6
vendor/github.com/nxadm/tail/ratelimiter/storage.go generated vendored Normal file
View File

@ -0,0 +1,6 @@
package ratelimiter
type Storage interface {
GetBucketFor(string) (*LeakyBucket, error)
SetBucketFor(string, LeakyBucket) error
}

440
vendor/github.com/nxadm/tail/tail.go generated vendored Normal file
View File

@ -0,0 +1,440 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package tail
import (
"bufio"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
"sync"
"time"
"github.com/nxadm/tail/ratelimiter"
"github.com/nxadm/tail/util"
"github.com/nxadm/tail/watch"
"gopkg.in/tomb.v1"
)
var (
ErrStop = errors.New("tail should now stop")
)
type Line struct {
Text string
Num int
SeekInfo SeekInfo
Time time.Time
Err error // Error from tail
}
// NewLine returns a Line with present time.
func NewLine(text string, lineNum int) *Line {
return &Line{text, lineNum, SeekInfo{}, time.Now(), nil}
}
// SeekInfo represents arguments to `io.Seek`
type SeekInfo struct {
Offset int64
Whence int // io.Seek*
}
type logger interface {
Fatal(v ...interface{})
Fatalf(format string, v ...interface{})
Fatalln(v ...interface{})
Panic(v ...interface{})
Panicf(format string, v ...interface{})
Panicln(v ...interface{})
Print(v ...interface{})
Printf(format string, v ...interface{})
Println(v ...interface{})
}
// Config is used to specify how a file must be tailed.
type Config struct {
// File-specific
Location *SeekInfo // Seek to this location before tailing
ReOpen bool // Reopen recreated files (tail -F)
MustExist bool // Fail early if the file does not exist
Poll bool // Poll for file changes instead of using inotify
Pipe bool // Is a named pipe (mkfifo)
RateLimiter *ratelimiter.LeakyBucket
// Generic IO
Follow bool // Continue looking for new lines (tail -f)
MaxLineSize int // If non-zero, split longer lines into multiple lines
// Logger, when nil, is set to tail.DefaultLogger
// To disable logging: set field to tail.DiscardingLogger
Logger logger
}
type Tail struct {
Filename string
Lines chan *Line
Config
file *os.File
reader *bufio.Reader
lineNum int
watcher watch.FileWatcher
changes *watch.FileChanges
tomb.Tomb // provides: Done, Kill, Dying
lk sync.Mutex
}
var (
// DefaultLogger is used when Config.Logger == nil
DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
// DiscardingLogger can be used to disable logging output
DiscardingLogger = log.New(ioutil.Discard, "", 0)
)
// TailFile begins tailing the file. Output stream is made available
// via the `Tail.Lines` channel. To handle errors during tailing,
// invoke the `Wait` or `Err` method after finishing reading from the
// `Lines` channel.
func TailFile(filename string, config Config) (*Tail, error) {
if config.ReOpen && !config.Follow {
util.Fatal("cannot set ReOpen without Follow.")
}
t := &Tail{
Filename: filename,
Lines: make(chan *Line),
Config: config,
}
// when Logger was not specified in config, use default logger
if t.Logger == nil {
t.Logger = DefaultLogger
}
if t.Poll {
t.watcher = watch.NewPollingFileWatcher(filename)
} else {
t.watcher = watch.NewInotifyFileWatcher(filename)
}
if t.MustExist {
var err error
t.file, err = OpenFile(t.Filename)
if err != nil {
return nil, err
}
}
go t.tailFileSync()
return t, nil
}
// Tell returns the file's current position, like stdio's ftell().
// Note that the value is not exact: one line may already have been read
// from the Lines channel, so the reported position can be off by one line.
func (tail *Tail) Tell() (offset int64, err error) {
if tail.file == nil {
return
}
offset, err = tail.file.Seek(0, io.SeekCurrent)
if err != nil {
return
}
tail.lk.Lock()
defer tail.lk.Unlock()
if tail.reader == nil {
return
}
offset -= int64(tail.reader.Buffered())
return
}
// Stop stops the tailing activity.
func (tail *Tail) Stop() error {
tail.Kill(nil)
return tail.Wait()
}
// StopAtEOF stops tailing as soon as the end of the file is reached.
func (tail *Tail) StopAtEOF() error {
tail.Kill(errStopAtEOF)
return tail.Wait()
}
var errStopAtEOF = errors.New("tail: stop at eof")
func (tail *Tail) close() {
close(tail.Lines)
tail.closeFile()
}
func (tail *Tail) closeFile() {
if tail.file != nil {
tail.file.Close()
tail.file = nil
}
}
func (tail *Tail) reopen() error {
tail.closeFile()
tail.lineNum = 0
for {
var err error
tail.file, err = OpenFile(tail.Filename)
if err != nil {
if os.IsNotExist(err) {
tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
if err == tomb.ErrDying {
return err
}
return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
}
continue
}
return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
}
break
}
return nil
}
func (tail *Tail) readLine() (string, error) {
tail.lk.Lock()
line, err := tail.reader.ReadString('\n')
tail.lk.Unlock()
if err != nil {
// Note ReadString "returns the data read before the error" in
// case of an error, including EOF, so we return it as is. The
// caller is expected to process it if err is EOF.
return line, err
}
line = strings.TrimRight(line, "\n")
return line, err
}
func (tail *Tail) tailFileSync() {
defer tail.Done()
defer tail.close()
if !tail.MustExist {
// deferred first open.
err := tail.reopen()
if err != nil {
if err != tomb.ErrDying {
tail.Kill(err)
}
return
}
}
// Seek to requested location on first open of the file.
if tail.Location != nil {
_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
if err != nil {
tail.Killf("Seek error on %s: %s", tail.Filename, err)
return
}
}
tail.openReader()
// Read line by line.
for {
// do not seek in named pipes
if !tail.Pipe {
// grab the position in case we need to back up in the event of a half-line
if _, err := tail.Tell(); err != nil {
tail.Kill(err)
return
}
}
line, err := tail.readLine()
// Process `line` even if err is EOF.
if err == nil {
cooloff := !tail.sendLine(line)
if cooloff {
// Wait a second before seeking till the end of
// file when rate limit is reached.
msg := ("Too much log activity; waiting a second before resuming tailing")
offset, _ := tail.Tell()
tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)}
select {
case <-time.After(time.Second):
case <-tail.Dying():
return
}
if err := tail.seekEnd(); err != nil {
tail.Kill(err)
return
}
}
} else if err == io.EOF {
if !tail.Follow {
if line != "" {
tail.sendLine(line)
}
return
}
if tail.Follow && line != "" {
tail.sendLine(line)
if err := tail.seekEnd(); err != nil {
tail.Kill(err)
return
}
}
// When EOF is reached, wait for more data to become
// available. Wait strategy is based on the `tail.watcher`
// implementation (inotify or polling).
err := tail.waitForChanges()
if err != nil {
if err != ErrStop {
tail.Kill(err)
}
return
}
} else {
// non-EOF error
tail.Killf("Error reading %s: %s", tail.Filename, err)
return
}
select {
case <-tail.Dying():
if tail.Err() == errStopAtEOF {
continue
}
return
default:
}
}
}
// waitForChanges waits until the file has been appended to, deleted,
// moved or truncated. When the file is moved or deleted, it will be
// reopened if ReOpen is true. Truncated files are always reopened.
func (tail *Tail) waitForChanges() error {
if tail.changes == nil {
pos, err := tail.file.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
if err != nil {
return err
}
}
select {
case <-tail.changes.Modified:
return nil
case <-tail.changes.Deleted:
tail.changes = nil
if tail.ReOpen {
// XXX: we must not log from a library.
tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
if err := tail.reopen(); err != nil {
return err
}
tail.Logger.Printf("Successfully reopened %s", tail.Filename)
tail.openReader()
return nil
}
tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
return ErrStop
case <-tail.changes.Truncated:
// Always reopen truncated files (Follow is true)
tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
if err := tail.reopen(); err != nil {
return err
}
tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
tail.openReader()
return nil
case <-tail.Dying():
return ErrStop
}
}
func (tail *Tail) openReader() {
tail.lk.Lock()
if tail.MaxLineSize > 0 {
// add 2 to account for newline characters
tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
} else {
tail.reader = bufio.NewReader(tail.file)
}
tail.lk.Unlock()
}
func (tail *Tail) seekEnd() error {
return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd})
}
func (tail *Tail) seekTo(pos SeekInfo) error {
_, err := tail.file.Seek(pos.Offset, pos.Whence)
if err != nil {
return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
}
// Reset the read buffer whenever the file is re-seek'ed
tail.reader.Reset(tail.file)
return nil
}
// sendLine sends the line(s) to Lines channel, splitting longer lines
// if necessary. Return false if rate limit is reached.
func (tail *Tail) sendLine(line string) bool {
now := time.Now()
lines := []string{line}
// Split longer lines
if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
lines = util.PartitionString(line, tail.MaxLineSize)
}
for _, line := range lines {
tail.lineNum++
offset, _ := tail.Tell()
select {
case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}:
case <-tail.Dying():
return true
}
}
if tail.Config.RateLimiter != nil {
ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
if !ok {
tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.",
tail.Filename)
return false
}
}
return true
}
// Cleanup removes inotify watches added by the tail package. This function is
// meant to be invoked from a process's exit handler. The Linux kernel may not
// automatically remove inotify watches after the process exits.
func (tail *Tail) Cleanup() {
watch.Cleanup(tail.Filename)
}

11
vendor/github.com/nxadm/tail/tail_posix.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// +build !windows
package tail
import (
"os"
)
func OpenFile(name string) (file *os.File, err error) {
return os.Open(name)
}

12
vendor/github.com/nxadm/tail/tail_windows.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
// +build windows
package tail
import (
"github.com/nxadm/tail/winfile"
"os"
)
func OpenFile(name string) (file *os.File, err error) {
return winfile.OpenFile(name, os.O_RDONLY, 0)
}

48
vendor/github.com/nxadm/tail/util/util.go generated vendored Normal file
View File

@ -0,0 +1,48 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package util
import (
"fmt"
"log"
"os"
"runtime/debug"
)
type Logger struct {
*log.Logger
}
var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
// Fatal is like panic except it displays only the current goroutine's stack.
func Fatal(format string, v ...interface{}) {
// https://github.com/nxadm/log/blob/master/log.go#L45
LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
os.Exit(1)
}
// PartitionString partitions the string into chunks of given size,
// with the last chunk of variable size.
func PartitionString(s string, chunkSize int) []string {
if chunkSize <= 0 {
panic("invalid chunkSize")
}
length := len(s)
chunks := 1 + length/chunkSize
start := 0
end := chunkSize
parts := make([]string, 0, chunks)
for {
if end > length {
end = length
}
parts = append(parts, s[start:end])
if end == length {
break
}
start, end = end, end+chunkSize
}
return parts
}

36
vendor/github.com/nxadm/tail/watch/filechanges.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
package watch
type FileChanges struct {
Modified chan bool // Channel to get notified of modifications
Truncated chan bool // Channel to get notified of truncations
Deleted chan bool // Channel to get notified of deletions/renames
}
func NewFileChanges() *FileChanges {
return &FileChanges{
make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)}
}
func (fc *FileChanges) NotifyModified() {
sendOnlyIfEmpty(fc.Modified)
}
func (fc *FileChanges) NotifyTruncated() {
sendOnlyIfEmpty(fc.Truncated)
}
func (fc *FileChanges) NotifyDeleted() {
sendOnlyIfEmpty(fc.Deleted)
}
// sendOnlyIfEmpty sends on a bool channel only if the channel has no
// backlog to be read by other goroutines. This concurrency pattern
// can be used to notify other goroutines if and only if they are
// looking for it (i.e., subsequent notifications can be compressed
// into one).
func sendOnlyIfEmpty(ch chan bool) {
select {
case ch <- true:
default:
}
}
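// The sketch below (an editor's illustration, not part of the upstream file)
// shows why the pattern above is safe for producers: notifications never
// block, and a consumer that has not yet drained the channel simply sees
// repeated notifications coalesced into one.
func exampleCoalescedNotify(fc *FileChanges) {
	// Three rapid modifications produce at most one pending notification.
	fc.NotifyModified()
	fc.NotifyModified()
	fc.NotifyModified()
	select {
	case <-fc.Modified:
		// One signal is enough to know the file needs to be re-read.
	default:
	}
}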

135
vendor/github.com/nxadm/tail/watch/inotify.go generated vendored Normal file
View File

@ -0,0 +1,135 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package watch
import (
"fmt"
"os"
"path/filepath"
"github.com/nxadm/tail/util"
"github.com/fsnotify/fsnotify"
"gopkg.in/tomb.v1"
)
// InotifyFileWatcher uses inotify to monitor file changes.
type InotifyFileWatcher struct {
Filename string
Size int64
}
func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
return fw
}
func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
err := WatchCreate(fw.Filename)
if err != nil {
return err
}
defer RemoveWatchCreate(fw.Filename)
// Do a real check now as the file might have been created before
// calling `WatchCreate` above.
if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
// file exists, or stat returned an error.
return err
}
events := Events(fw.Filename)
for {
select {
case evt, ok := <-events:
if !ok {
return fmt.Errorf("inotify watcher has been closed")
}
evtName, err := filepath.Abs(evt.Name)
if err != nil {
return err
}
fwFilename, err := filepath.Abs(fw.Filename)
if err != nil {
return err
}
if evtName == fwFilename {
return nil
}
case <-t.Dying():
return tomb.ErrDying
}
}
panic("unreachable")
}
func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
err := Watch(fw.Filename)
if err != nil {
return nil, err
}
changes := NewFileChanges()
fw.Size = pos
go func() {
events := Events(fw.Filename)
for {
prevSize := fw.Size
var evt fsnotify.Event
var ok bool
select {
case evt, ok = <-events:
if !ok {
RemoveWatch(fw.Filename)
return
}
case <-t.Dying():
RemoveWatch(fw.Filename)
return
}
switch {
case evt.Op&fsnotify.Remove == fsnotify.Remove:
fallthrough
case evt.Op&fsnotify.Rename == fsnotify.Rename:
RemoveWatch(fw.Filename)
changes.NotifyDeleted()
return
//With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod)
case evt.Op&fsnotify.Chmod == fsnotify.Chmod:
fallthrough
case evt.Op&fsnotify.Write == fsnotify.Write:
fi, err := os.Stat(fw.Filename)
if err != nil {
if os.IsNotExist(err) {
RemoveWatch(fw.Filename)
changes.NotifyDeleted()
return
}
// XXX: report this error back to the user
util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
}
fw.Size = fi.Size()
if prevSize > 0 && prevSize > fw.Size {
changes.NotifyTruncated()
} else {
changes.NotifyModified()
}
prevSize = fw.Size
}
}
}()
return changes, nil
}

248
vendor/github.com/nxadm/tail/watch/inotify_tracker.go generated vendored Normal file
View File

@ -0,0 +1,248 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package watch
import (
"log"
"os"
"path/filepath"
"sync"
"syscall"
"github.com/nxadm/tail/util"
"github.com/fsnotify/fsnotify"
)
type InotifyTracker struct {
mux sync.Mutex
watcher *fsnotify.Watcher
chans map[string]chan fsnotify.Event
done map[string]chan bool
watchNums map[string]int
watch chan *watchInfo
remove chan *watchInfo
error chan error
}
type watchInfo struct {
op fsnotify.Op
fname string
}
func (this *watchInfo) isCreate() bool {
return this.op == fsnotify.Create
}
var (
// globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
shared *InotifyTracker
// these are used to ensure the shared InotifyTracker is run exactly once
once = sync.Once{}
goRun = func() {
shared = &InotifyTracker{
mux: sync.Mutex{},
chans: make(map[string]chan fsnotify.Event),
done: make(map[string]chan bool),
watchNums: make(map[string]int),
watch: make(chan *watchInfo),
remove: make(chan *watchInfo),
error: make(chan error),
}
go shared.run()
}
logger = log.New(os.Stderr, "", log.LstdFlags)
)
// Watch signals the run goroutine to begin watching the input filename
func Watch(fname string) error {
return watch(&watchInfo{
fname: fname,
})
}
// WatchCreate signals the run goroutine to begin watching for the creation
// of the input filename. If you use WatchCreate, do not call Cleanup;
// call RemoveWatchCreate instead.
func WatchCreate(fname string) error {
return watch(&watchInfo{
op: fsnotify.Create,
fname: fname,
})
}
func watch(winfo *watchInfo) error {
// start running the shared InotifyTracker if not already running
once.Do(goRun)
winfo.fname = filepath.Clean(winfo.fname)
shared.watch <- winfo
return <-shared.error
}
// RemoveWatch signals the run goroutine to remove the watch for the input filename
func RemoveWatch(fname string) error {
return remove(&watchInfo{
fname: fname,
})
}
// RemoveWatchCreate signals the run goroutine to remove the creation watch for the input filename
func RemoveWatchCreate(fname string) error {
return remove(&watchInfo{
op: fsnotify.Create,
fname: fname,
})
}
func remove(winfo *watchInfo) error {
// start running the shared InotifyTracker if not already running
once.Do(goRun)
winfo.fname = filepath.Clean(winfo.fname)
shared.mux.Lock()
done := shared.done[winfo.fname]
if done != nil {
delete(shared.done, winfo.fname)
close(done)
}
shared.mux.Unlock()
shared.remove <- winfo
return <-shared.error
}
// Events returns a channel to which FileEvents corresponding to the input filename
// will be sent. This channel will be closed when removeWatch is called on this
// filename.
func Events(fname string) <-chan fsnotify.Event {
shared.mux.Lock()
defer shared.mux.Unlock()
return shared.chans[fname]
}
// Cleanup removes the watch for the input filename if necessary.
func Cleanup(fname string) error {
return RemoveWatch(fname)
}
// addWatch adds an fsnotify watch for the input filename (or its parent
// directory when watching for file creation) and tracks how many watches
// reference that path.
func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
shared.mux.Lock()
defer shared.mux.Unlock()
if shared.chans[winfo.fname] == nil {
shared.chans[winfo.fname] = make(chan fsnotify.Event)
}
if shared.done[winfo.fname] == nil {
shared.done[winfo.fname] = make(chan bool)
}
fname := winfo.fname
if winfo.isCreate() {
// Watch for new files to be created in the parent directory.
fname = filepath.Dir(fname)
}
var err error
// only add a new fsnotify watch if this path is not already being watched
if shared.watchNums[fname] == 0 {
err = shared.watcher.Add(fname)
}
if err == nil {
shared.watchNums[fname]++
}
return err
}
// removeWatch removes the fsnotify watch for the input filename and closes the
// corresponding events channel.
func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error {
shared.mux.Lock()
ch := shared.chans[winfo.fname]
if ch != nil {
delete(shared.chans, winfo.fname)
close(ch)
}
fname := winfo.fname
if winfo.isCreate() {
// Watch for new files to be created in the parent directory.
fname = filepath.Dir(fname)
}
shared.watchNums[fname]--
watchNum := shared.watchNums[fname]
if watchNum == 0 {
delete(shared.watchNums, fname)
}
shared.mux.Unlock()
var err error
// If we were the last ones to watch this file, unsubscribe from inotify.
// This needs to happen after releasing the lock because fsnotify waits
// synchronously for the kernel to acknowledge the removal of the watch
// for this file, which causes us to deadlock if we still held the lock.
if watchNum == 0 {
err = shared.watcher.Remove(fname)
}
return err
}
// sendEvent sends the input event to the appropriate Tail.
func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
name := filepath.Clean(event.Name)
shared.mux.Lock()
ch := shared.chans[name]
done := shared.done[name]
shared.mux.Unlock()
if ch != nil && done != nil {
select {
case ch <- event:
case <-done:
}
}
}
// run starts the goroutine in which the shared struct reads events from its
// Watcher's Event channel and sends the events to the appropriate Tail.
func (shared *InotifyTracker) run() {
watcher, err := fsnotify.NewWatcher()
if err != nil {
util.Fatal("failed to create Watcher")
}
shared.watcher = watcher
for {
select {
case winfo := <-shared.watch:
shared.error <- shared.addWatch(winfo)
case winfo := <-shared.remove:
shared.error <- shared.removeWatch(winfo)
case event, open := <-shared.watcher.Events:
if !open {
return
}
shared.sendEvent(event)
case err, open := <-shared.watcher.Errors:
if !open {
return
} else if err != nil {
sysErr, ok := err.(*os.SyscallError)
if !ok || sysErr.Err != syscall.EINTR {
logger.Printf("Error in Watcher Error channel: %s", err)
}
}
}
}
}

118
vendor/github.com/nxadm/tail/watch/polling.go generated vendored Normal file
View File

@ -0,0 +1,118 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package watch
import (
"os"
"runtime"
"time"
"github.com/nxadm/tail/util"
"gopkg.in/tomb.v1"
)
// PollingFileWatcher polls the file for changes.
type PollingFileWatcher struct {
Filename string
Size int64
}
func NewPollingFileWatcher(filename string) *PollingFileWatcher {
fw := &PollingFileWatcher{filename, 0}
return fw
}
var POLL_DURATION time.Duration
func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
for {
if _, err := os.Stat(fw.Filename); err == nil {
return nil
} else if !os.IsNotExist(err) {
return err
}
select {
case <-time.After(POLL_DURATION):
continue
case <-t.Dying():
return tomb.ErrDying
}
}
panic("unreachable")
}
func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
origFi, err := os.Stat(fw.Filename)
if err != nil {
return nil, err
}
changes := NewFileChanges()
var prevModTime time.Time
// XXX: use tomb.Tomb to cleanly manage these goroutines. replace
// the fatal (below) with tomb's Kill.
fw.Size = pos
go func() {
prevSize := fw.Size
for {
select {
case <-t.Dying():
return
default:
}
time.Sleep(POLL_DURATION)
fi, err := os.Stat(fw.Filename)
if err != nil {
// Windows cannot delete a file if a handle is still open (tail keeps one open)
// so it gives access denied to anything trying to read it until all handles are released.
if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
// File does not exist (has been deleted).
changes.NotifyDeleted()
return
}
// XXX: report this error back to the user
util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
}
// File got moved/renamed?
if !os.SameFile(origFi, fi) {
changes.NotifyDeleted()
return
}
// File got truncated?
fw.Size = fi.Size()
if prevSize > 0 && prevSize > fw.Size {
changes.NotifyTruncated()
prevSize = fw.Size
continue
}
// File got bigger?
if prevSize > 0 && prevSize < fw.Size {
changes.NotifyModified()
prevSize = fw.Size
continue
}
prevSize = fw.Size
// File was appended to (changed)?
modTime := fi.ModTime()
if modTime != prevModTime {
prevModTime = modTime
changes.NotifyModified()
}
}
}()
return changes, nil
}
func init() {
POLL_DURATION = 250 * time.Millisecond
}

20
vendor/github.com/nxadm/tail/watch/watch.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
package watch
import "gopkg.in/tomb.v1"
// FileWatcher monitors file-level events.
type FileWatcher interface {
// BlockUntilExists blocks until the file comes into existence.
BlockUntilExists(*tomb.Tomb) error
// ChangeEvents reports on changes to a file, be it modification,
// deletion, renames or truncations. Returned FileChanges group of
// channels will be closed, thus become unusable, after a deletion
// or truncation event.
// In order to properly report truncations, ChangeEvents requires
// the caller to pass their current offset in the file.
ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
}

92
vendor/github.com/nxadm/tail/winfile/winfile.go generated vendored Normal file
View File

@ -0,0 +1,92 @@
// +build windows
package winfile
import (
"os"
"syscall"
"unsafe"
)
// issue also described here
//https://codereview.appspot.com/8203043/
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
if len(path) == 0 {
return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
}
pathp, err := syscall.UTF16PtrFromString(path)
if err != nil {
return syscall.InvalidHandle, err
}
var access uint32
switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
case syscall.O_RDONLY:
access = syscall.GENERIC_READ
case syscall.O_WRONLY:
access = syscall.GENERIC_WRITE
case syscall.O_RDWR:
access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
}
if mode&syscall.O_CREAT != 0 {
access |= syscall.GENERIC_WRITE
}
if mode&syscall.O_APPEND != 0 {
access &^= syscall.GENERIC_WRITE
access |= syscall.FILE_APPEND_DATA
}
sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
var sa *syscall.SecurityAttributes
if mode&syscall.O_CLOEXEC == 0 {
sa = makeInheritSa()
}
var createmode uint32
switch {
case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
createmode = syscall.CREATE_NEW
case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
createmode = syscall.CREATE_ALWAYS
case mode&syscall.O_CREAT == syscall.O_CREAT:
createmode = syscall.OPEN_ALWAYS
case mode&syscall.O_TRUNC == syscall.O_TRUNC:
createmode = syscall.TRUNCATE_EXISTING
default:
createmode = syscall.OPEN_EXISTING
}
h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
return h, e
}
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
func makeInheritSa() *syscall.SecurityAttributes {
var sa syscall.SecurityAttributes
sa.Length = uint32(unsafe.Sizeof(sa))
sa.InheritHandle = 1
return &sa
}
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
if e != nil {
return nil, e
}
return os.NewFile(uintptr(r), name), nil
}
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
func syscallMode(i os.FileMode) (o uint32) {
o |= uint32(i.Perm())
if i&os.ModeSetuid != 0 {
o |= syscall.S_ISUID
}
if i&os.ModeSetgid != 0 {
o |= syscall.S_ISGID
}
if i&os.ModeSticky != 0 {
o |= syscall.S_ISVTX
}
// No mapping for Go's ModeTemporary (plan9 only).
return
}
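A hedged usage sketch (not vendored code) showing `winfile.OpenFile` opening a log file for append, which exercises the `O_APPEND` -> `FILE_APPEND_DATA` mapping above; the path is illustrative.

```go
// +build windows

// Sketch only: appends a line through winfile.OpenFile. Because O_APPEND is
// set, the handle is granted FILE_APPEND_DATA rather than GENERIC_WRITE,
// as in the flag mapping shown above.
package main

import (
	"os"

	"github.com/nxadm/tail/winfile"
)

func main() {
	f, err := winfile.OpenFile(`C:\temp\app.log`, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	f.WriteString("hello from winfile\r\n") // handle shares read/write/delete with other processes
}
```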

View File

@ -1,4 +1,7 @@
.DS_Store
TODO
tmp/**/*
*.coverprofile
*.coverprofile
.vscode
.idea/
*.log

View File

@ -1,15 +1,25 @@
language: go
go:
- 1.3
- 1.4
- 1.5
- 1.13.x
- 1.14.x
- tip
cache:
directories:
- $GOPATH/pkg/mod
# allow internal package imports, necessary for forked repositories
go_import_path: github.com/onsi/ginkgo
install:
- go get -v -t ./...
- go get golang.org/x/tools/cmd/cover
- go get github.com/onsi/gomega
- go install github.com/onsi/ginkgo/ginkgo
- GO111MODULE="off" go get -v -t ./...
- GO111MODULE="off" go get golang.org/x/tools/cmd/cover
- GO111MODULE="off" go get github.com/onsi/gomega
- GO111MODULE="off" go install github.com/onsi/ginkgo/ginkgo
- export PATH=$PATH:$HOME/gopath/bin
script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
script:
- GO111MODULE="on" go mod tidy
- diff -u <(echo -n) <(git diff go.mod)
- diff -u <(echo -n) <(git diff go.sum)
- $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet

View File

@ -1,9 +1,163 @@
## HEAD
## 1.12.1
### Fixes
- Make unfocus ("blur") much faster (#674) [8b18061]
- Fix typo (#673) [7fdcbe8]
- Test against 1.14 and remove 1.12 [d5c2ad6]
- Test if a coverprofile content is empty before checking its latest character (#670) [14d9fa2]
- replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
- improve ginkgo performance - makes progress on #644 [a14f98e]
- fix convert integration tests [1f8ba69]
- fix typo succesful -> successful (#663) [1ea49cf]
- Fix invalid link (#658) [b886136]
- convert utility : Include comments from source (#657) [1077c6d]
- Explain what BDD means [d79e7fb]
- skip race detector test on unsupported platform (#642) [f8ab89d]
- Use Dup2 from golang.org/x/sys/unix instead of syscallDup (#638) [5d53c55]
- Fix missing newline in combined coverage file (#641) [6a07ea2]
- check if a spec is run before returning SpecSummary (#645) [8850000]
## 1.12.0
### Features
- Add module definition (#630) [78916ab]
## 1.11.0
### Features
- Add syscall for riscv64 architecture [f66e896]
- teamcity reporter: output location of test failure as well as test definition (#626) [9869142]
- teamcity reporter: output newline after every service message (#625) [3cfa02d]
- Add support for go module when running `generate` command (#578) [9c89e3f]
## 1.10.3
### Fixes
- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db]
- Add integration test [d90e0dc]
- Fix coverage files combining [e5dde8c]
- A new CLI option: -ginkgo.reportFile <file path> (#601) [034fd25]
## 1.10.2
### Fixes
- speed up table entry generateIt() (#609) [5049dc5]
- Fix. Write errors to stderr instead of stdout (#610) [7bb3091]
## 1.10.1
### Fixes
- stack backtrace: fix skipping (#600) [2a4c0bd]
## 1.10.0
### Fixes
- stack backtrace: fix alignment and skipping [66915d6]
- fix typo in documentation [8f97b93]
## 1.9.0
### Features
- Option to print output into report, when tests have passed [0545415]
### Fixes
- Fixed typos in comments [0ecbc58]
- gofmt code [a7f8bfb]
- Simplify code [7454d00]
- Simplify concatenation, incrementation and function assignment [4825557]
- Avoid unnecessary conversions [9d9403c]
- JUnit: include more detailed information about panic [19cca4b]
- Print help to stdout when the user asks for help [4cb7441]
## 1.8.0
### New Features
- allow config of the vet flag for `go test` (#562) [3cd45fa]
- Support projects using go modules [d56ee76]
### Fixes and Minor Improvements
- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
- Optimize focus to avoid allocations [f493786]
- Ensure generated test file names are underscored [505cc35]
## 1.7.0
### New Features
- Add JustAfterEach (#484) [0d4f080]
### Fixes
- Correctly round suite time in junit reporter [2445fc1]
- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]
## 1.6.0
### New Features
- add --debug flag to emit node output to files (#499) [39febac]
### Fixes
- fix: for `go vet` to pass [69338ec]
- docs: fix for contributing instructions [7004cb1]
- consolidate and streamline contribution docs (#494) [d848015]
- Make generated Junit file compatable with "Maven Surefire" (#488) [e51bee6]
- all: gofmt [000d317]
- Increase eventually timeout to 30s [c73579c]
- Clarify asynchronous test behaviour [294d8f4]
- Travis badge should only show master [26d2143]
## 1.5.0 5/10/2018
### New Features
- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
- Re-add noisySkippings flag [652e15c]
- Allow coverage to be displayed for focused specs (#367) [11459a8]
- Handle -outputdir flag (#364) [228e3a8]
- Handle -coverprofile flag (#355) [43392d5]
### Fixes
- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their customer reporters. (#365) [8382b23]
- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
- Increase the threshold when checking time measuments (#455) [2f714bf, 68f622c]
- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
- Add an extra new line after reporting spec run completion for test2json [874520d]
- added name name field to junit reported testsuite [ae61c63]
- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
- Replace GOPATH in Environment [4b883f0]
## 1.4.0 7/16/2017
- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
- thanks to improvements in `go test -c` `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]
## 1.3.0 3/28/2017
Improvements:
- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
- `Skip(message)` can be used to skip the current test.
- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
- Support for retrying flaky tests with `--flakeAttempts`
- `ginkgo ./...` now recurses as you'd expect
- Added `Specify` a synonym for `It`
- Support colorise on Windows
- Broader support for various go compilation flags in the `ginkgo` CLI
Bug Fixes:
@ -68,12 +222,12 @@ No changes, just dropping the beta.
New Features:
- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, evne when they pass. This allows CI systems to detect accidental commits of focused test suites.
- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
- `ginkgo --failFast` aborts the test suite after the first failure.
- `ginkgo generate file_1 file_2` can take multiple file arguments.
- Ginkgo now summarizes any spec failures that occured at the end of the test run.
- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
Improvements:

33
vendor/github.com/onsi/ginkgo/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,33 @@
# Contributing to Ginkgo
Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
- Ensure adequate test coverage:
- When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
- When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
- Update the documentation. Ginkgo uses `godoc` comments and documentation on the `gh-pages` branch.
If relevant, please submit a docs PR to that branch alongside your code PR.
Thanks for supporting Ginkgo!
## Setup
Fork the repo, then:
```
go get github.com/onsi/ginkgo
go get github.com/onsi/gomega/...
cd $GOPATH/src/github.com/onsi/ginkgo
git remote add fork git@github.com:<NAME>/ginkgo.git
ginkgo -r -p # ensure tests are green
go vet ./... # ensure linter is happy
```
## Making the PR
- go to a new branch `git checkout -b my-feature`
- make your changes
- run tests and linter again (see above)
- `git push fork`
- open PR 🎉

View File

@ -1,31 +1,31 @@
![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png)
[![Build Status](https://travis-ci.org/onsi/ginkgo.png)](https://travis-ci.org/onsi/ginkgo)
[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo)
Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
If you have a question, comment, bug report, feature request, etc. please open a GitHub issue.
## Feature List
- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
- Structure your BDD-style tests expressively:
- Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
- [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
- [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
- [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
- [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
- Ginkgo allows you to write tests in Go using expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style:
- Nestable [`Describe`, `Context` and `When` container blocks](https://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
- [`BeforeEach` and `AfterEach` blocks](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
- [`It` and `Specify` blocks](https://onsi.github.io/ginkgo/#individual-specs-it) that hold your assertions
- [`JustBeforeEach` blocks](https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
- [`BeforeSuite` and `AfterSuite` blocks](https://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
- A comprehensive test runner that lets you:
- Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
- [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
- Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
- Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
- Mark specs as [pending](https://onsi.github.io/ginkgo/#pending-specs)
- [Focus](https://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
- Run your tests in [random order](https://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
- Break up your test suite into parallel processes for straightforward [test parallelization](https://onsi.github.io/ginkgo/#parallel-specs)
- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](https://onsi.github.io/ginkgo/#running-tests) and [generating](https://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
- `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime
- `ginkgo -cover` runs your tests using Golang's code coverage tool
- `ginkgo -cover` runs your tests using Go's code coverage tool
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
- `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
- `ginkgo -r` runs all tests suites under the current directory
@ -37,34 +37,36 @@ To discuss Ginkgo and get updates, join the [google group](https://groups.google
- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
- Built-in support for testing [asynchronicity](https://onsi.github.io/ginkgo/#asynchronous-tests)
- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
- Built-in support for [benchmarking](https://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`.
- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](https://onsi.github.io/ginkgo/#third-party-integrations) for details.
- A modular architecture that lets you easily:
- Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
- [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
- Write [custom reporters](https://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](https://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
- [Adapt an existing matcher library (or write your own!)](https://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
## [Gomega](https://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
Ginkgo is best paired with Gomega. Learn more about Gomega [here](https://onsi.github.io/gomega/)
## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
## [Agouti](https://github.com/sclevine/agouti): A Go Acceptance Testing Framework
Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](https://agouti.org)
## Set Me Up!
You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
You'll need the Go command-line tools. Ginkgo is tested with Go 1.6+, but preferably you should get the latest. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed.
```bash
go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
go get github.com/onsi/gomega # fetches the matcher library
go get -u github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
go get -u github.com/onsi/gomega/... # fetches the matcher library
cd path/to/package/you/want/to/test
@ -83,22 +85,22 @@ Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Go
With that said, it's great to know what your options are :)
### What Golang gives you out of the box
### What Go gives you out of the box
Testing is a first class citizen in Golang, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](https://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
### Matcher libraries for Golang's XUnit style tests
### Matcher libraries for Go's XUnit style tests
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
- [testify](https://github.com/stretchr/testify)
- [gocheck](http://labix.org/gocheck)
- [gocheck](https://labix.org/gocheck)
You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](https://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
### BDD style testing frameworks
There are a handful of BDD-style testing frameworks written for Golang. Here are a few:
There are a handful of BDD-style testing frameworks written for Go. Here are a few:
- [Ginkgo](https://github.com/onsi/ginkgo) ;)
- [GoConvey](https://github.com/smartystreets/goconvey)
@ -106,10 +108,14 @@ There are a handful of BDD-style testing frameworks written for Golang. Here ar
- [Mao](https://github.com/azer/mao)
- [Zen](https://github.com/pranavraja/zen)
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries.
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries.
Go explore!
## License
Ginkgo is MIT-Licensed
## Contributing
See [CONTRIBUTING.md](CONTRIBUTING.md)
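A minimal sketch of the bootstrap-plus-spec layout the README describes, assuming illustrative package and test names:

```go
// books_suite_test.go -- a minimal sketch; file and names are illustrative.
package books_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// The suite entry point that `ginkgo bootstrap` would generate.
func TestBooks(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Books Suite")
}

var _ = Describe("Shelf", func() {
	var shelf []string

	BeforeEach(func() {
		shelf = []string{"Les Miserables"} // fresh fixture for every spec
	})

	Context("when a book is added", func() {
		It("grows by one", func() {
			shelf = append(shelf, "The Hobbit")
			Expect(shelf).To(HaveLen(2))
		})
	})
})
```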

14
vendor/github.com/onsi/ginkgo/RELEASING.md generated vendored Normal file
View File

@ -0,0 +1,14 @@
A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:
1. Ensure CHANGELOG.md is up to date.
- Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release
- Categorize the changes into
- Breaking Changes (requires a major version)
- New Features (minor version)
- Fixes (fix version)
- Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
1. Update `VERSION` in `config/config.go`
1. Create a commit with the version number as the commit message (e.g. `v1.3.0`)
1. Tag the commit with the version number as the tag name (e.g. `v1.3.0`)
1. Push the commit and tag to GitHub
1. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes.

View File

@ -20,18 +20,21 @@ import (
"fmt"
)
const VERSION = "1.2.0"
const VERSION = "1.12.1"
type GinkgoConfigType struct {
RandomSeed int64
RandomizeAllSpecs bool
FocusString string
SkipString string
SkipMeasurements bool
FailOnPending bool
FailFast bool
EmitSpecProgress bool
DryRun bool
RandomSeed int64
RandomizeAllSpecs bool
RegexScansFilePath bool
FocusString string
SkipString string
SkipMeasurements bool
FailOnPending bool
FailFast bool
FlakeAttempts int
EmitSpecProgress bool
DryRun bool
DebugParallel bool
ParallelNode int
ParallelTotal int
@ -45,16 +48,19 @@ type DefaultReporterConfigType struct {
NoColor bool
SlowSpecThreshold float64
NoisyPendings bool
NoisySkippings bool
Succinct bool
Verbose bool
FullTrace bool
ReportPassed bool
ReportFile string
}
var DefaultReporterConfig = DefaultReporterConfigType{}
func processPrefix(prefix string) string {
if prefix != "" {
prefix = prefix + "."
prefix += "."
}
return prefix
}
@ -62,15 +68,24 @@ func processPrefix(prefix string) string {
func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
prefix = processPrefix(prefix)
flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe/Context groups.")
flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.")
flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")
flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).")
flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.")
flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.")
if includeParallelFlags {
flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
@ -79,11 +94,15 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
}
flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter (default: 5 seconds).")
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.")
flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.")
}
func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
@ -122,10 +141,18 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor
result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
}
if ginkgo.FlakeAttempts > 1 {
result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts))
}
if ginkgo.EmitSpecProgress {
result = append(result, fmt.Sprintf("--%sprogress", prefix))
}
if ginkgo.DebugParallel {
result = append(result, fmt.Sprintf("--%sdebug", prefix))
}
if ginkgo.ParallelNode != 0 {
result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
}
@ -142,6 +169,10 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor
result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
}
if ginkgo.RegexScansFilePath {
result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix))
}
if reporter.NoColor {
result = append(result, fmt.Sprintf("--%snoColor", prefix))
}
@ -154,6 +185,10 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor
result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
}
if !reporter.NoisySkippings {
result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
}
if reporter.Verbose {
result = append(result, fmt.Sprintf("--%sv", prefix))
}
@ -166,5 +201,13 @@ func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultRepor
result = append(result, fmt.Sprintf("--%strace", prefix))
}
if reporter.ReportPassed {
result = append(result, fmt.Sprintf("--%sreportPassed", prefix))
}
if reporter.ReportFile != "" {
result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile))
}
return result
}
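A hedged sketch of how a harness might consume this API: register the flags on a fresh `flag.FlagSet`, parse, and rebuild the argument list with `BuildFlagArgs`. The flag values are illustrative.

```go
// Sketch only: round-trips Ginkgo configuration through the flag helpers above.
package main

import (
	"flag"
	"fmt"

	"github.com/onsi/ginkgo/config"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ExitOnError)
	config.Flags(fs, "ginkgo", true) // prefix "ginkgo", include parallel flags
	fs.Parse([]string{"--ginkgo.flakeAttempts=3", "--ginkgo.reportFile=junit.xml"})

	// BuildFlagArgs re-emits the non-default settings as CLI arguments.
	args := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	fmt.Println(args) // includes --ginkgo.flakeAttempts=3 and --ginkgo.reportFile=junit.xml, alongside defaults such as the seed
}
```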

View File

@ -22,9 +22,15 @@ func (t TableEntry) generateIt(itBody reflect.Value) {
return
}
values := []reflect.Value{}
for _, param := range t.Parameters {
values = append(values, reflect.ValueOf(param))
values := make([]reflect.Value, len(t.Parameters))
iBodyType := itBody.Type()
for i, param := range t.Parameters {
if param == nil {
inType := iBodyType.In(i)
values[i] = reflect.Zero(inType)
} else {
values[i] = reflect.ValueOf(param)
}
}
body := func() {

View File

@ -29,6 +29,7 @@ import (
"github.com/onsi/ginkgo/internal/writer"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/reporters/stenographer"
colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
"github.com/onsi/ginkgo/types"
)
@ -68,6 +69,14 @@ type GinkgoTestingT interface {
Fail()
}
//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is
//useful for seeding your own pseudorandom number generators (PRNGs) to ensure
//consistent executions from run to run, where your tests contain variability (for
//example, when selecting random test data).
func GinkgoRandomSeed() int64 {
return config.GinkgoConfig.RandomSeed
}
//GinkgoParallelNode returns the parallel node number for the current ginkgo process
//The node number is 1-indexed
func GinkgoParallelNode() int {
@ -141,7 +150,8 @@ type GinkgoTestDescription struct {
FileName string
LineNumber int
Failed bool
Failed bool
Duration time.Duration
}
//CurrentGinkgoTestDescripton returns information about the current running test.
@ -161,6 +171,7 @@ func CurrentGinkgoTestDescription() GinkgoTestDescription {
FileName: subjectCodeLocation.FileName,
LineNumber: subjectCodeLocation.LineNumber,
Failed: summary.HasFailureState(),
Duration: summary.RunTime,
}
}
@ -168,6 +179,8 @@ func CurrentGinkgoTestDescription() GinkgoTestDescription {
//
//You use the Time() function to time how long the passed in body function takes to run
//You use the RecordValue() function to track arbitrary numerical measurements.
//The RecordValueWithPrecision() function can be used alternatively to provide the unit
//and resolution of the numeric measurement.
//The optional info argument is passed to the test reporter and can be used to
// provide the measurement data to a custom reporter with context.
//
@ -175,6 +188,7 @@ func CurrentGinkgoTestDescription() GinkgoTestDescription {
type Benchmarker interface {
Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
RecordValue(name string, value float64, info ...interface{})
RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
}
//RunSpecs is the entry point for the Ginkgo test runner.
@ -185,13 +199,18 @@ type Benchmarker interface {
// ginkgo bootstrap
func RunSpecs(t GinkgoTestingT, description string) bool {
specReporters := []Reporter{buildDefaultReporter()}
if config.DefaultReporterConfig.ReportFile != "" {
reportFile := config.DefaultReporterConfig.ReportFile
specReporters[0] = reporters.NewJUnitReporter(reportFile)
return RunSpecsWithDefaultAndCustomReporters(t, description, specReporters)
}
return RunSpecsWithCustomReporters(t, description, specReporters)
}
//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
//RunSpecs() with this method.
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...)
specReporters = append(specReporters, buildDefaultReporter())
return RunSpecsWithCustomReporters(t, description, specReporters)
}
@ -205,7 +224,7 @@ func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specRepor
reporters[i] = reporter
}
passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
if passed && hasFocusedTests {
if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
fmt.Println("PASS | FOCUSED")
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
}
@ -215,14 +234,18 @@ func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specRepor
func buildDefaultReporter() Reporter {
remoteReportingServer := config.GinkgoConfig.StreamHost
if remoteReportingServer == "" {
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
} else {
return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor())
debugFile := ""
if config.GinkgoConfig.DebugParallel {
debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode)
}
return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile)
}
}
//Skip notifies Ginkgo that the current spec should be skipped.
//Skip notifies Ginkgo that the current spec was skipped.
func Skip(message string, callerSkip ...int) {
skip := 0
if len(callerSkip) > 0 {
@ -264,9 +287,9 @@ func GinkgoRecover() {
//Describe blocks allow you to organize your specs. A Describe block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts.
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts and Whens.
func Describe(text string, body func()) bool {
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
return true
@ -293,9 +316,9 @@ func XDescribe(text string, body func()) bool {
//Context blocks allow you to organize your specs. A Context block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts.
//or method and, within that Describe, outline a number of Contexts and Whens.
func Context(text string, body func()) bool {
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
return true
@ -319,6 +342,35 @@ func XContext(text string, body func()) bool {
return true
}
//When blocks allow you to organize your specs. A When block can contain any number of
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
//
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
//or method and, within that Describe, outline a number of Contexts and Whens.
func When(text string, body func()) bool {
globalSuite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1))
return true
}
//You can focus the tests within a describe block using FWhen
func FWhen(text string, body func()) bool {
globalSuite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1))
return true
}
//You can mark the tests within a describe block as pending using PWhen
func PWhen(text string, body func()) bool {
globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
return true
}
//You can mark the tests within a describe block as pending using XWhen
func XWhen(text string, body func()) bool {
globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
return true
}
//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
//within an It block.
//
@ -347,6 +399,32 @@ func XIt(text string, _ ...interface{}) bool {
return true
}
//Specify blocks are aliases for It blocks and allow for more natural wording in situations
//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks
//which apply to It blocks.
func Specify(text string, body interface{}, timeout ...float64) bool {
globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
return true
}
//You can focus individual Specifys using FSpecify
func FSpecify(text string, body interface{}, timeout ...float64) bool {
globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
return true
}
//You can mark Specifys as pending using PSpecify
func PSpecify(text string, is ...interface{}) bool {
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
//You can mark Specifys as pending using XSpecify
func XSpecify(text string, is ...interface{}) bool {
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
//By allows you to better document large Its.
//
//Generally you should try to keep your Its short and to the point. This is not always possible, however,
@ -384,13 +462,13 @@ func FMeasure(text string, body interface{}, samples int) bool {
return true
}
//You can mark Maeasurements as pending using PMeasure
//You can mark Measurements as pending using PMeasure
func PMeasure(text string, _ ...interface{}) bool {
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
return true
}
//You can mark Maeasurements as pending using XMeasure
//You can mark Measurements as pending using XMeasure
func XMeasure(text string, _ ...interface{}) bool {
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
return true
@ -426,7 +504,7 @@ func AfterSuite(body interface{}, timeout ...float64) bool {
//until that node is done before running.
//
//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
//run on all nodes, but *only* after the first function completes succesfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
//to the second function (on all the other nodes).
//
//The functions have the following signatures. The first function (which only runs on node 1) has the signature:
@ -517,6 +595,16 @@ func JustBeforeEach(body interface{}, timeout ...float64) bool {
return true
}
//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. For more details,
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
//
//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts
//a Done channel
func JustAfterEach(body interface{}, timeout ...float64) bool {
globalSuite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
return true
}
//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
//Describe and Context blocks the innermost AfterEach blocks are run first.
//
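A hedged sketch exercising the DSL additions in this hunk -- `When`, `Specify`, `JustAfterEach`, and `GinkgoRandomSeed`; the package name and spec body are illustrative.

```go
package dsl_demo_test // illustrative only; not part of the vendored file

import (
	"math/rand"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("a queue", func() {
	var rng *rand.Rand
	var q []int

	BeforeEach(func() {
		// GinkgoRandomSeed keeps generated test data reproducible across
		// runs that reuse the same --seed, as the doc comment above describes.
		rng = rand.New(rand.NewSource(GinkgoRandomSeed()))
		q = nil
	})

	JustAfterEach(func() {
		// Runs after the It/Specify body but before any AfterEach blocks.
	})

	When("an element is pushed", func() {
		Specify("the queue is no longer empty", func() {
			q = append(q, rng.Int())
			Expect(q).NotTo(BeEmpty())
		})
	})
})
```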

9
vendor/github.com/onsi/ginkgo/go.mod generated vendored Normal file
View File

@ -0,0 +1,9 @@
module github.com/onsi/ginkgo
require (
github.com/nxadm/tail v1.4.4
github.com/onsi/gomega v1.7.1
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e
)
go 1.12

29
vendor/github.com/onsi/ginkgo/go.sum generated vendored Normal file
View File

@ -0,0 +1,29 @@
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@ -11,19 +11,35 @@ import (
func New(skip int) types.CodeLocation {
_, file, line, _ := runtime.Caller(skip + 1)
stackTrace := PruneStack(string(debug.Stack()), skip)
stackTrace := PruneStack(string(debug.Stack()), skip+1)
return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
}
// PruneStack removes references to functions that are internal to Ginkgo
// and the Go runtime from a stack string and a certain number of stack entries
// at the beginning of the stack. The stack string has the format
// as returned by runtime/debug.Stack. The leading goroutine information is
// optional and always removed if present. Beware that runtime/debug.Stack
// adds itself as first entry, so typically skip must be >= 1 to remove that
// entry.
func PruneStack(fullStackTrace string, skip int) string {
stack := strings.Split(fullStackTrace, "\n")
// Ensure that the even entries are the method names and the
// the odd entries the source code information.
if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
// Ignore "goroutine 29 [running]:" line.
stack = stack[1:]
}
// The "+1" is for skipping over the initial entry, which is
// runtime/debug.Stack() itself.
if len(stack) > 2*(skip+1) {
stack = stack[2*(skip+1):]
}
prunedStack := []string{}
re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
for i := 0; i < len(stack)/2; i++ {
if !re.Match([]byte(stack[i*2])) {
// We filter out based on the source code file name.
if !re.Match([]byte(stack[i*2+1])) {
prunedStack = append(prunedStack, stack[i*2])
prunedStack = append(prunedStack, stack[i*2+1])
}
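A standalone sketch (not vendored code) of the layout assumption `PruneStack` relies on: after the goroutine header, `debug.Stack()` alternates a function line with a file:line line, so removing k frames means dropping 2*k lines.

```go
// Sketch only: prunes a stack string the same way PruneStack does, without
// the Ginkgo/runtime regexp filter.
package main

import (
	"fmt"
	"runtime/debug"
	"strings"
)

func main() {
	stack := strings.Split(string(debug.Stack()), "\n")
	if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
		stack = stack[1:] // drop the "goroutine N [running]:" header, as PruneStack does
	}
	skip := 0 // the +1 below removes the runtime/debug.Stack frame itself
	if len(stack) > 2*(skip+1) {
		stack = stack[2*(skip+1):]
	}
	fmt.Println(strings.Join(stack, "\n")) // what remains starts at main.main
}
```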

View File

@ -17,7 +17,7 @@ type benchmarker struct {
func newBenchmarker() *benchmarker {
return &benchmarker{
measurements: make(map[string]*types.SpecMeasurement, 0),
measurements: make(map[string]*types.SpecMeasurement),
}
}
@ -28,20 +28,27 @@ func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elaps
b.mu.Lock()
defer b.mu.Unlock()
measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", info...)
measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...)
measurement.Results = append(measurement.Results, elapsedTime.Seconds())
return
}
func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", info...)
b.mu.Lock()
measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
defer b.mu.Unlock()
measurement.Results = append(measurement.Results, value)
}
func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, info ...interface{}) *types.SpecMeasurement {
func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) {
b.mu.Lock()
measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
defer b.mu.Unlock()
measurement.Results = append(measurement.Results, value)
}
func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement {
measurement, ok := b.measurements[name]
if !ok {
var computedInfo interface{}
@ -57,6 +64,7 @@ func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestL
LargestLabel: largestLabel,
AverageLabel: averageLabel,
Units: units,
Precision: precision,
Results: make([]float64, 0),
}
b.measurements[name] = measurement
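A hedged sketch of a `Measure` spec using the new `RecordValueWithPrecision`; `serialize` is a hypothetical stand-in for the code being measured.

```go
package bench_demo_test // illustrative only; not part of the vendored file

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// serialize is a hypothetical stand-in for the operation being benchmarked.
func serialize() []byte { return make([]byte, 1024) }

var _ = Measure("payload serialization", func(b Benchmarker) {
	var payload []byte
	runtime := b.Time("serialize", func() {
		payload = serialize()
	})
	Expect(runtime.Seconds()).To(BeNumerically("<", 0.2), "serialization should be fast")

	// Reported with unit "B" and 0 decimal places instead of the default
	// precision of 3 used by RecordValue.
	b.RecordValueWithPrecision("payload size", float64(len(payload)), "B", 0)
}, 10) // take 10 samples
```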

View File

@ -1,9 +1,10 @@
package leafnodes
import (
"time"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"time"
)
type ItNode struct {

View File

@ -1,9 +1,10 @@
package leafnodes
import (
"reflect"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"reflect"
)
type MeasureNode struct {

View File

@ -2,11 +2,12 @@ package leafnodes
import (
"fmt"
"reflect"
"time"
"github.com/onsi/ginkgo/internal/codelocation"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"reflect"
"time"
)
type runner struct {
@ -68,7 +69,7 @@ func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure)
done := make(chan interface{}, 1)
go func() {
finished := false
finished := false
defer func() {
if e := recover(); e != nil || !finished {
@ -83,9 +84,12 @@ func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure)
}()
r.asyncFunc(done)
finished = true
finished = true
}()
// If this goroutine gets no CPU time before the select block,
// the <-done case may complete even if the test took longer than the timeoutThreshold.
// This can cause flaky behaviour, but we haven't seen it in the wild.
select {
case <-done:
case <-time.After(r.timeoutThreshold):
@ -96,7 +100,7 @@ func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure)
return
}
func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
finished := false
finished := false
defer func() {
if e := recover(); e != nil || !finished {
@ -107,7 +111,7 @@ func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure)
}()
r.syncFunc()
finished = true
finished = true
return
}
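A hedged sketch of the asynchronous spec shape this runner supports: the body takes a `Done` channel, which `runAsync` above races against the timeout; `fetchValue` is a hypothetical stand-in.

```go
package async_demo_test // illustrative only; not part of the vendored file

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// fetchValue is a hypothetical stand-in for a slow operation under test.
func fetchValue() int { return 42 }

var _ = It("eventually produces a value", func(done Done) {
	go func() {
		defer GinkgoRecover() // report panics from this goroutine to Ginkgo
		Expect(fetchValue()).To(Equal(42))
		close(done) // must be closed before the 0.5-second timeout below elapses
	}()
}, 0.5)
```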

View File

@ -1,9 +1,10 @@
package leafnodes
import (
"time"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"time"
)
type SetupNode struct {
@ -39,3 +40,9 @@ func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, ti
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
}
}
func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
return &SetupNode{
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex),
}
}

View File

@ -1,9 +1,10 @@
package leafnodes
import (
"time"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"time"
)
type SuiteNode interface {

View File

@ -2,11 +2,12 @@ package leafnodes
import (
"encoding/json"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"io/ioutil"
"net/http"
"time"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
)
type synchronizedAfterSuiteNode struct {

View File

@ -3,12 +3,13 @@ package leafnodes
import (
"bytes"
"encoding/json"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
"io/ioutil"
"net/http"
"reflect"
"time"
"github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
)
type synchronizedBeforeSuiteNode struct {
@ -109,8 +110,6 @@ func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecSt
time.Sleep(50 * time.Millisecond)
}
return types.SpecStateFailed, failure("Shouldn't get here!")
}
func (node *synchronizedBeforeSuiteNode) Passed() bool {

View File

@ -54,11 +54,11 @@ func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporte
config: config,
stenographer: stenographer,
suiteBeginnings: make(chan configAndSuite, 0),
beforeSuites: make(chan *types.SetupSummary, 0),
afterSuites: make(chan *types.SetupSummary, 0),
specCompletions: make(chan *types.SpecSummary, 0),
suiteEndings: make(chan *types.SuiteSummary, 0),
suiteBeginnings: make(chan configAndSuite),
beforeSuites: make(chan *types.SetupSummary),
afterSuites: make(chan *types.SetupSummary),
specCompletions: make(chan *types.SpecSummary),
suiteEndings: make(chan *types.SuiteSummary),
}
go aggregator.mux()
@ -125,14 +125,12 @@ func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSui
aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
numberOfSpecsToRun := 0
totalNumberOfSpecs := 0
for _, configAndSuite := range aggregator.aggregatedSuiteBeginnings {
numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun
totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs
if len(aggregator.aggregatedSuiteBeginnings) > 0 {
totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
}
aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct)
aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
aggregator.flushCompletedSpecs()
}
@ -199,17 +197,17 @@ func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
switch specSummary.State {
case types.SpecStatePassed:
if specSummary.IsMeasurement {
aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
aggregator.stenographer.AnnounceSuccessfulMeasurement(specSummary, aggregator.config.Succinct)
} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
aggregator.stenographer.AnnounceSuccessfulSlowSpec(specSummary, aggregator.config.Succinct)
} else {
aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
aggregator.stenographer.AnnounceSuccessfulSpec(specSummary)
}
case types.SpecStatePending:
aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
case types.SpecStateSkipped:
aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
case types.SpecStateTimedOut:
aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
case types.SpecStatePanicked:
@ -229,7 +227,7 @@ func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (fi
aggregatedSuiteSummary.SuiteSucceeded = true
for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
if suiteSummary.SuiteSucceeded == false {
if !suiteSummary.SuiteSucceeded {
aggregatedSuiteSummary.SuiteSucceeded = false
}
@ -239,6 +237,7 @@ func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (fi
aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs
}
aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)

View File

@ -3,8 +3,14 @@ package remote
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"github.com/onsi/ginkgo/internal/writer"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/reporters/stenographer"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/types"
@ -30,14 +36,41 @@ type ForwardingReporter struct {
serverHost string
poster Poster
outputInterceptor OutputInterceptor
debugMode bool
debugFile *os.File
nestedReporter *reporters.DefaultReporter
}
func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter {
return &ForwardingReporter{
func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter {
reporter := &ForwardingReporter{
serverHost: serverHost,
poster: poster,
outputInterceptor: outputInterceptor,
}
if debugFile != "" {
var err error
reporter.debugMode = true
reporter.debugFile, err = os.Create(debugFile)
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
if !config.Verbose {
//if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication.
ginkgoWriter.AndRedirectTo(reporter.debugFile)
}
outputInterceptor.StreamTo(reporter.debugFile) //This is not working
stenographer := stenographer.New(false, true, reporter.debugFile)
config.Succinct = false
config.Verbose = true
config.FullTrace = true
reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer)
}
return reporter
}
func (reporter *ForwardingReporter) post(path string, data interface{}) {
@ -56,6 +89,10 @@ func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigT
}
reporter.outputInterceptor.StartInterceptingOutput()
if reporter.debugMode {
reporter.nestedReporter.SpecSuiteWillBegin(conf, summary)
reporter.debugFile.Sync()
}
reporter.post("/SpecSuiteWillBegin", data)
}
@ -63,10 +100,18 @@ func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupS
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
reporter.outputInterceptor.StartInterceptingOutput()
setupSummary.CapturedOutput = output
if reporter.debugMode {
reporter.nestedReporter.BeforeSuiteDidRun(setupSummary)
reporter.debugFile.Sync()
}
reporter.post("/BeforeSuiteDidRun", setupSummary)
}
func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
if reporter.debugMode {
reporter.nestedReporter.SpecWillRun(specSummary)
reporter.debugFile.Sync()
}
reporter.post("/SpecWillRun", specSummary)
}
@ -74,6 +119,10 @@ func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSumma
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
reporter.outputInterceptor.StartInterceptingOutput()
specSummary.CapturedOutput = output
if reporter.debugMode {
reporter.nestedReporter.SpecDidComplete(specSummary)
reporter.debugFile.Sync()
}
reporter.post("/SpecDidComplete", specSummary)
}
@ -81,10 +130,18 @@ func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSu
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
reporter.outputInterceptor.StartInterceptingOutput()
setupSummary.CapturedOutput = output
if reporter.debugMode {
reporter.nestedReporter.AfterSuiteDidRun(setupSummary)
reporter.debugFile.Sync()
}
reporter.post("/AfterSuiteDidRun", setupSummary)
}
func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
reporter.outputInterceptor.StopInterceptingAndReturnOutput()
if reporter.debugMode {
reporter.nestedReporter.SpecSuiteDidEnd(summary)
reporter.debugFile.Sync()
}
reporter.post("/SpecSuiteDidEnd", summary)
}

View File

@ -1,5 +1,7 @@
package remote
import "os"
/*
The OutputInterceptor is used by the ForwardingReporter to
intercept and capture all stdout and stderr output during a test run.
@ -7,4 +9,5 @@ intercept and capture all stdin and stderr output during a test run.
type OutputInterceptor interface {
StartInterceptingOutput() error
StopInterceptingAndReturnOutput() (string, error)
StreamTo(*os.File)
}
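For orientation, here is a minimal sketch of a type that satisfies this interface; it is essentially what the stubbed non-Unix interceptor further down does, and the names are illustrative only:

```go
package remote

import "os"

// Sketch only: a no-op OutputInterceptor. The real Unix interceptor below
// performs the actual fd redirection and tailing.
type noopInterceptor struct{}

func (n *noopInterceptor) StartInterceptingOutput() error                   { return nil }
func (n *noopInterceptor) StopInterceptingAndReturnOutput() (string, error) { return "", nil }
func (n *noopInterceptor) StreamTo(*os.File)                                {}
```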

View File

@ -1,4 +1,4 @@
// +build freebsd openbsd netbsd dragonfly darwin linux
// +build freebsd openbsd netbsd dragonfly darwin linux solaris
package remote
@ -6,7 +6,9 @@ import (
"errors"
"io/ioutil"
"os"
"syscall"
"github.com/nxadm/tail"
"golang.org/x/sys/unix"
)
func NewOutputInterceptor() OutputInterceptor {
@ -15,7 +17,10 @@ func NewOutputInterceptor() OutputInterceptor {
type outputInterceptor struct {
redirectFile *os.File
streamTarget *os.File
intercepting bool
tailer *tail.Tail
doneTailing chan bool
}
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
@ -31,8 +36,22 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error {
return err
}
syscall.Dup2(int(interceptor.redirectFile.Fd()), 1)
syscall.Dup2(int(interceptor.redirectFile.Fd()), 2)
// This might call Dup3 if the dup2 syscall is not available, e.g. on
// linux/arm64 or linux/riscv64
unix.Dup2(int(interceptor.redirectFile.Fd()), 1)
unix.Dup2(int(interceptor.redirectFile.Fd()), 2)
if interceptor.streamTarget != nil {
interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
interceptor.doneTailing = make(chan bool)
go func() {
for line := range interceptor.tailer.Lines {
interceptor.streamTarget.Write([]byte(line.Text + "\n"))
}
close(interceptor.doneTailing)
}()
}
return nil
}
@ -48,5 +67,16 @@ func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string,
interceptor.intercepting = false
if interceptor.streamTarget != nil {
interceptor.tailer.Stop()
interceptor.tailer.Cleanup()
<-interceptor.doneTailing
interceptor.streamTarget.Sync()
}
return string(output), err
}
func (interceptor *outputInterceptor) StreamTo(out *os.File) {
interceptor.streamTarget = out
}

View File

@ -4,6 +4,7 @@ package remote
import (
"errors"
"os"
)
func NewOutputInterceptor() OutputInterceptor {
@ -31,3 +32,5 @@ func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string,
return "", nil
}
func (interceptor *outputInterceptor) StreamTo(*os.File) {}

View File

@ -9,13 +9,16 @@ package remote
import (
"encoding/json"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
"io/ioutil"
"net"
"net/http"
"sync"
"github.com/onsi/ginkgo/internal/spec_iterator"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
)
/*
@ -29,6 +32,7 @@ type Server struct {
lock *sync.Mutex
beforeSuiteData types.RemoteBeforeSuiteData
parallelTotal int
counter int
}
//Create a new server, automatically selecting a port
@ -41,7 +45,7 @@ func NewServer(parallelTotal int) (*Server, error) {
listener: listener,
lock: &sync.Mutex{},
alives: make([]func() bool, parallelTotal),
beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending},
beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
parallelTotal: parallelTotal,
}, nil
}
@ -63,6 +67,8 @@ func (server *Server) Start() {
//synchronization endpoints
mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
mux.HandleFunc("/counter", server.handleCounter)
mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility
go httpServer.Serve(server.listener)
}
@ -202,3 +208,17 @@ func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, req
enc := json.NewEncoder(writer)
enc.Encode(afterSuiteData)
}
func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
c := spec_iterator.Counter{}
server.lock.Lock()
c.Index = server.counter
server.counter++
server.lock.Unlock()
json.NewEncoder(writer).Encode(c)
}
func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
writer.Write([]byte(""))
}
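Each GET to /counter hands out the next global spec index as a small JSON payload (e.g. `{"index":3}`). As a hedged sketch of the client side — the ParallelIterator further down is the real consumer, and `syncHost`/`nextIndex` are illustrative names:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// nextIndex asks the ginkgo server for the next global spec index.
func nextIndex(syncHost string) (int, error) {
	resp, err := http.Get(syncHost + "/counter")
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	var c struct {
		Index int `json:"index"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&c); err != nil {
		return 0, err
	}
	return c.Index, nil
}

func main() {
	// The server address is assumed here; ginkgo passes it via config.SyncHost.
	idx, err := nextIndex("http://127.0.0.1:8080")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("next spec index:", idx)
}
```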

View File

@ -5,6 +5,8 @@ import (
"io"
"time"
"sync"
"github.com/onsi/ginkgo/internal/containernode"
"github.com/onsi/ginkgo/internal/leafnodes"
"github.com/onsi/ginkgo/types"
@ -17,9 +19,13 @@ type Spec struct {
containers []*containernode.ContainerNode
state types.SpecState
runTime time.Duration
failure types.SpecFailure
state types.SpecState
runTime time.Duration
startTime time.Time
failure types.SpecFailure
previousFailures bool
stateMutex *sync.Mutex
}
func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
@ -28,6 +34,7 @@ func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNod
containers: containers,
focused: subject.Flag() == types.FlagTypeFocused,
announceProgress: announceProgress,
stateMutex: &sync.Mutex{},
}
spec.processFlag(subject.Flag())
@ -42,28 +49,32 @@ func (spec *Spec) processFlag(flag types.FlagType) {
if flag == types.FlagTypeFocused {
spec.focused = true
} else if flag == types.FlagTypePending {
spec.state = types.SpecStatePending
spec.setState(types.SpecStatePending)
}
}
func (spec *Spec) Skip() {
spec.state = types.SpecStateSkipped
spec.setState(types.SpecStateSkipped)
}
func (spec *Spec) Failed() bool {
return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut
return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut
}
func (spec *Spec) Passed() bool {
return spec.state == types.SpecStatePassed
return spec.getState() == types.SpecStatePassed
}
func (spec *Spec) Flaked() bool {
return spec.getState() == types.SpecStatePassed && spec.previousFailures
}
func (spec *Spec) Pending() bool {
return spec.state == types.SpecStatePending
return spec.getState() == types.SpecStatePending
}
func (spec *Spec) Skipped() bool {
return spec.state == types.SpecStateSkipped
return spec.getState() == types.SpecStateSkipped
}
func (spec *Spec) Focused() bool {
@ -86,16 +97,21 @@ func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
componentTexts[len(spec.containers)] = spec.subject.Text()
componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
runTime := spec.runTime
if runTime == 0 && !spec.startTime.IsZero() {
runTime = time.Since(spec.startTime)
}
return &types.SpecSummary{
IsMeasurement: spec.IsMeasurement(),
NumberOfSamples: spec.subject.Samples(),
ComponentTexts: componentTexts,
ComponentCodeLocations: componentCodeLocations,
State: spec.state,
RunTime: spec.runTime,
Failure: spec.failure,
Measurements: spec.measurementsReport(),
SuiteID: suiteID,
State: spec.getState(),
RunTime: runTime,
Failure: spec.failure,
Measurements: spec.measurementsReport(),
SuiteID: suiteID,
}
}
@ -109,33 +125,61 @@ func (spec *Spec) ConcatenatedString() string {
}
func (spec *Spec) Run(writer io.Writer) {
startTime := time.Now()
if spec.getState() == types.SpecStateFailed {
spec.previousFailures = true
}
spec.startTime = time.Now()
defer func() {
spec.runTime = time.Since(startTime)
spec.runTime = time.Since(spec.startTime)
}()
for sample := 0; sample < spec.subject.Samples(); sample++ {
spec.runSample(sample, writer)
if spec.state != types.SpecStatePassed {
if spec.getState() != types.SpecStatePassed {
return
}
}
}
func (spec *Spec) getState() types.SpecState {
spec.stateMutex.Lock()
defer spec.stateMutex.Unlock()
return spec.state
}
func (spec *Spec) setState(state types.SpecState) {
spec.stateMutex.Lock()
defer spec.stateMutex.Unlock()
spec.state = state
}
func (spec *Spec) runSample(sample int, writer io.Writer) {
spec.state = types.SpecStatePassed
spec.setState(types.SpecStatePassed)
spec.failure = types.SpecFailure{}
innerMostContainerIndexToUnwind := -1
defer func() {
for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
container := spec.containers[i]
for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) {
spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach)
justAfterEachState, justAfterEachFailure := justAfterEach.Run()
if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
spec.state = justAfterEachState
spec.failure = justAfterEachFailure
}
}
}
for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
container := spec.containers[i]
for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
spec.announceSetupNode(writer, "AfterEach", container, afterEach)
afterEachState, afterEachFailure := afterEach.Run()
if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
spec.state = afterEachState
if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed {
spec.setState(afterEachState)
spec.failure = afterEachFailure
}
}
@ -146,8 +190,10 @@ func (spec *Spec) runSample(sample int, writer io.Writer) {
innerMostContainerIndexToUnwind = i
for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
spec.state, spec.failure = beforeEach.Run()
if spec.state != types.SpecStatePassed {
s, f := beforeEach.Run()
spec.failure = f
spec.setState(s)
if spec.getState() != types.SpecStatePassed {
return
}
}
@ -156,15 +202,19 @@ func (spec *Spec) runSample(sample int, writer io.Writer) {
for _, container := range spec.containers {
for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
spec.state, spec.failure = justBeforeEach.Run()
if spec.state != types.SpecStatePassed {
s, f := justBeforeEach.Run()
spec.failure = f
spec.setState(s)
if spec.getState() != types.SpecStatePassed {
return
}
}
}
spec.announceSubject(writer, spec.subject)
spec.state, spec.failure = spec.subject.Run()
s, f := spec.subject.Run()
spec.failure = f
spec.setState(s)
}
func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {

View File

@ -7,15 +7,21 @@ import (
)
type Specs struct {
specs []*Spec
numberOfOriginalSpecs int
hasProgrammaticFocus bool
specs []*Spec
names []string
hasProgrammaticFocus bool
RegexScansFilePath bool
}
func NewSpecs(specs []*Spec) *Specs {
names := make([]string, len(specs))
for i, spec := range specs {
names[i] = spec.ConcatenatedString()
}
return &Specs{
specs: specs,
numberOfOriginalSpecs: len(specs),
names: names,
}
}
@ -23,10 +29,6 @@ func (e *Specs) Specs() []*Spec {
return e.specs
}
func (e *Specs) NumberOfOriginalSpecs() int {
return e.numberOfOriginalSpecs
}
func (e *Specs) HasProgrammaticFocus() bool {
return e.hasProgrammaticFocus
}
@ -35,17 +37,20 @@ func (e *Specs) Shuffle(r *rand.Rand) {
sort.Sort(e)
permutation := r.Perm(len(e.specs))
shuffledSpecs := make([]*Spec, len(e.specs))
names := make([]string, len(e.specs))
for i, j := range permutation {
shuffledSpecs[i] = e.specs[j]
names[i] = e.names[j]
}
e.specs = shuffledSpecs
e.names = names
}
func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
if focusString == "" && skipString == "" {
e.applyProgrammaticFocus()
} else {
e.applyRegExpFocus(description, focusString, skipString)
e.applyRegExpFocusAndSkip(description, focusString, skipString)
}
}
@ -67,21 +72,46 @@ func (e *Specs) applyProgrammaticFocus() {
}
}
func (e *Specs) applyRegExpFocus(description string, focusString string, skipString string) {
for _, spec := range e.specs {
// toMatch returns a []byte to be used by the regex matchers. When adding new behaviours to the matching function,
// this is the place to which we append.
func (e *Specs) toMatch(description string, i int) []byte {
if i > len(e.names) {
return nil
}
if e.RegexScansFilePath {
return []byte(
description + " " +
e.names[i] + " " +
e.specs[i].subject.CodeLocation().FileName)
} else {
return []byte(
description + " " +
e.names[i])
}
}
func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) {
var focusFilter *regexp.Regexp
if focusString != "" {
focusFilter = regexp.MustCompile(focusString)
}
var skipFilter *regexp.Regexp
if skipString != "" {
skipFilter = regexp.MustCompile(skipString)
}
for i, spec := range e.specs {
matchesFocus := true
matchesSkip := false
toMatch := []byte(description + " " + spec.ConcatenatedString())
toMatch := e.toMatch(description, i)
if focusString != "" {
focusFilter := regexp.MustCompile(focusString)
matchesFocus = focusFilter.Match([]byte(toMatch))
if focusFilter != nil {
matchesFocus = focusFilter.Match(toMatch)
}
if skipString != "" {
skipFilter := regexp.MustCompile(skipString)
matchesSkip = skipFilter.Match([]byte(toMatch))
if skipFilter != nil {
matchesSkip = skipFilter.Match(toMatch)
}
if !matchesFocus || matchesSkip {
@ -98,15 +128,6 @@ func (e *Specs) SkipMeasurements() {
}
}
func (e *Specs) TrimForParallelization(total int, node int) {
startIndex, count := ParallelizedIndexRange(len(e.specs), total, node)
if count == 0 {
e.specs = make([]*Spec, 0)
} else {
e.specs = e.specs[startIndex : startIndex+count]
}
}
//sort.Interface
func (e *Specs) Len() int {
@ -114,9 +135,10 @@ func (e *Specs) Len() int {
}
func (e *Specs) Less(i, j int) bool {
return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString()
return e.names[i] < e.names[j]
}
func (e *Specs) Swap(i, j int) {
e.names[i], e.names[j] = e.names[j], e.names[i]
e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
}

View File

@ -1,4 +1,4 @@
package spec
package spec_iterator
func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
if length == 0 {

View File

@ -0,0 +1,59 @@
package spec_iterator
import (
"encoding/json"
"fmt"
"net/http"
"github.com/onsi/ginkgo/internal/spec"
)
type ParallelIterator struct {
specs []*spec.Spec
host string
client *http.Client
}
func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
return &ParallelIterator{
specs: specs,
host: host,
client: &http.Client{},
}
}
func (s *ParallelIterator) Next() (*spec.Spec, error) {
resp, err := s.client.Get(s.host + "/counter")
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
}
var counter Counter
err = json.NewDecoder(resp.Body).Decode(&counter)
if err != nil {
return nil, err
}
if counter.Index >= len(s.specs) {
return nil, ErrClosed
}
return s.specs[counter.Index], nil
}
func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
return len(s.specs)
}
func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
return -1, false
}
func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
return -1, false
}

View File

@ -0,0 +1,45 @@
package spec_iterator
import (
"github.com/onsi/ginkgo/internal/spec"
)
type SerialIterator struct {
specs []*spec.Spec
index int
}
func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
return &SerialIterator{
specs: specs,
index: 0,
}
}
func (s *SerialIterator) Next() (*spec.Spec, error) {
if s.index >= len(s.specs) {
return nil, ErrClosed
}
spec := s.specs[s.index]
s.index += 1
return spec, nil
}
func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
return len(s.specs)
}
func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
return len(s.specs), true
}
func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
count := 0
for _, s := range s.specs {
if !s.Skipped() && !s.Pending() {
count += 1
}
}
return count, true
}

View File

@ -0,0 +1,47 @@
package spec_iterator
import "github.com/onsi/ginkgo/internal/spec"
type ShardedParallelIterator struct {
specs []*spec.Spec
index int
maxIndex int
}
func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
startIndex, count := ParallelizedIndexRange(len(specs), total, node)
return &ShardedParallelIterator{
specs: specs,
index: startIndex,
maxIndex: startIndex + count,
}
}
func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
if s.index >= s.maxIndex {
return nil, ErrClosed
}
spec := s.specs[s.index]
s.index += 1
return spec, nil
}
func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
return len(s.specs)
}
func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
return s.maxIndex - s.index, true
}
func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
count := 0
for i := s.index; i < s.maxIndex; i += 1 {
if !s.specs[i].Skipped() && !s.specs[i].Pending() {
count += 1
}
}
return count, true
}

View File

@ -0,0 +1,20 @@
package spec_iterator
import (
"errors"
"github.com/onsi/ginkgo/internal/spec"
)
var ErrClosed = errors.New("no more specs to run")
type SpecIterator interface {
Next() (*spec.Spec, error)
NumberOfSpecsPriorToIteration() int
NumberOfSpecsToProcessIfKnown() (int, bool)
NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
}
type Counter struct {
Index int `json:"index"`
}
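A SpecIterator is consumed by calling Next until it returns ErrClosed; the spec runner further down uses exactly this pattern. A minimal sketch (the `drain` helper is illustrative, not part of the package):

```go
package example

import (
	"github.com/onsi/ginkgo/internal/spec"
	"github.com/onsi/ginkgo/internal/spec_iterator"
)

// drain pulls specs from the iterator until it is exhausted, handing each
// one to the supplied run callback.
func drain(it spec_iterator.SpecIterator, run func(*spec.Spec)) error {
	for {
		s, err := it.Next()
		if err == spec_iterator.ErrClosed {
			return nil // no more specs to run
		}
		if err != nil {
			return err
		}
		run(s)
	}
}
```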

View File

@ -7,6 +7,8 @@ import (
"sync"
"syscall"
"github.com/onsi/ginkgo/internal/spec_iterator"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/internal/leafnodes"
"github.com/onsi/ginkgo/internal/spec"
@ -20,7 +22,7 @@ import (
type SpecRunner struct {
description string
beforeSuiteNode leafnodes.SuiteNode
specs *spec.Specs
iterator spec_iterator.SpecIterator
afterSuiteNode leafnodes.SuiteNode
reporters []reporters.Reporter
startTime time.Time
@ -29,14 +31,15 @@ type SpecRunner struct {
writer Writer.WriterInterface
config config.GinkgoConfigType
interrupted bool
processedSpecs []*spec.Spec
lock *sync.Mutex
}
func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
return &SpecRunner{
description: description,
beforeSuiteNode: beforeSuiteNode,
specs: specs,
iterator: iterator,
afterSuiteNode: afterSuiteNode,
reporters: reporters,
writer: writer,
@ -53,7 +56,9 @@ func (runner *SpecRunner) Run() bool {
}
runner.reportSuiteWillBegin()
go runner.registerForInterrupts()
signalRegistered := make(chan struct{})
go runner.registerForInterrupts(signalRegistered)
<-signalRegistered
suitePassed := runner.runBeforeSuite()
@ -79,7 +84,18 @@ func (runner *SpecRunner) performDryRun() {
runner.reportBeforeSuite(summary)
}
for _, spec := range runner.specs.Specs() {
for {
spec, err := runner.iterator.Next()
if err == spec_iterator.ErrClosed {
break
}
if err != nil {
fmt.Println("failed to iterate over tests:\n" + err.Error())
break
}
runner.processedSpecs = append(runner.processedSpecs, spec)
summary := spec.Summary(runner.suiteID)
runner.reportSpecWillRun(summary)
if summary.State == types.SpecStateInvalid {
@ -130,28 +146,39 @@ func (runner *SpecRunner) runAfterSuite() bool {
func (runner *SpecRunner) runSpecs() bool {
suiteFailed := false
skipRemainingSpecs := false
for _, spec := range runner.specs.Specs() {
for {
spec, err := runner.iterator.Next()
if err == spec_iterator.ErrClosed {
break
}
if err != nil {
fmt.Println("failed to iterate over tests:\n" + err.Error())
suiteFailed = true
break
}
runner.processedSpecs = append(runner.processedSpecs, spec)
if runner.wasInterrupted() {
return suiteFailed
break
}
if skipRemainingSpecs {
spec.Skip()
}
runner.reportSpecWillRun(spec.Summary(runner.suiteID))
if !spec.Skipped() && !spec.Pending() {
runner.runningSpec = spec
spec.Run(runner.writer)
runner.runningSpec = nil
if spec.Failed() {
if passed := runner.runSpec(spec); !passed {
suiteFailed = true
}
} else if spec.Pending() && runner.config.FailOnPending {
runner.reportSpecWillRun(spec.Summary(runner.suiteID))
suiteFailed = true
runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
} else {
runner.reportSpecWillRun(spec.Summary(runner.suiteID))
runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
}
runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
if spec.Failed() && runner.config.FailFast {
skipRemainingSpecs = true
}
@ -160,6 +187,26 @@ func (runner *SpecRunner) runSpecs() bool {
return !suiteFailed
}
func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) {
maxAttempts := 1
if runner.config.FlakeAttempts > 0 {
// uninitialized configs count as 1
maxAttempts = runner.config.FlakeAttempts
}
for i := 0; i < maxAttempts; i++ {
runner.reportSpecWillRun(spec.Summary(runner.suiteID))
runner.runningSpec = spec
spec.Run(runner.writer)
runner.runningSpec = nil
runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
if !spec.Failed() {
return true
}
}
return false
}
func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
if runner.runningSpec == nil {
return nil, false
@ -168,9 +215,10 @@ func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
return runner.runningSpec.Summary(runner.suiteID), true
}
func (runner *SpecRunner) registerForInterrupts() {
func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
close(signalRegistered)
<-c
signal.Stop(c)
@ -225,7 +273,7 @@ func (runner *SpecRunner) wasInterrupted() bool {
func (runner *SpecRunner) reportSuiteWillBegin() {
runner.startTime = time.Now()
summary := runner.summary(true)
summary := runner.suiteWillBeginSummary()
for _, reporter := range runner.reporters {
reporter.SpecSuiteWillBegin(runner.config, summary)
}
@ -252,6 +300,9 @@ func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
}
func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
if len(summary.CapturedOutput) == 0 {
summary.CapturedOutput = string(runner.writer.Bytes())
}
for i := len(runner.reporters) - 1; i >= 1; i-- {
runner.reporters[i].SpecDidComplete(summary)
}
@ -264,17 +315,17 @@ func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, fail
}
func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
summary := runner.summary(success)
summary := runner.suiteDidEndSummary(success)
summary.RunTime = time.Since(runner.startTime)
for _, reporter := range runner.reporters {
reporter.SpecSuiteDidEnd(summary)
}
}
func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) (count int) {
func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) {
count = 0
for _, spec := range runner.specs.Specs() {
for _, spec := range runner.processedSpecs {
if filter(spec) {
count++
}
@ -283,28 +334,37 @@ func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool)
return count
}
func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary {
numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
return !ex.Skipped() && !ex.Pending()
})
numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
return ex.Pending()
})
numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
return ex.Skipped()
})
numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
return ex.Passed()
})
numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
return ex.Flaked()
})
numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
return ex.Failed()
})
if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
var known bool
numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
if !known {
numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration()
}
numberOfFailedSpecs = numberOfSpecsThatWillBeRun
}
@ -313,12 +373,39 @@ func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
SuiteSucceeded: success,
SuiteID: runner.suiteID,
NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(),
NumberOfTotalSpecs: len(runner.specs.Specs()),
NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
NumberOfTotalSpecs: len(runner.processedSpecs),
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun,
NumberOfPendingSpecs: numberOfPendingSpecs,
NumberOfSkippedSpecs: numberOfSkippedSpecs,
NumberOfPassedSpecs: numberOfPassedSpecs,
NumberOfFailedSpecs: numberOfFailedSpecs,
NumberOfFlakedSpecs: numberOfFlakedSpecs,
}
}
func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary {
numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown()
if !known {
numTotal = -1
}
numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
if !known {
numToRun = -1
}
return &types.SuiteSummary{
SuiteDescription: runner.description,
SuiteID: runner.suiteID,
NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
NumberOfTotalSpecs: numTotal,
NumberOfSpecsThatWillBeRun: numToRun,
NumberOfPendingSpecs: -1,
NumberOfSkippedSpecs: -1,
NumberOfPassedSpecs: -1,
NumberOfFailedSpecs: -1,
NumberOfFlakedSpecs: -1,
}
}

View File

@ -2,8 +2,11 @@ package suite
import (
"math/rand"
"net/http"
"time"
"github.com/onsi/ginkgo/internal/spec_iterator"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/internal/containernode"
"github.com/onsi/ginkgo/internal/failer"
@ -52,18 +55,18 @@ func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []report
r := rand.New(rand.NewSource(config.RandomSeed))
suite.topLevelContainer.Shuffle(r)
specs := suite.generateSpecs(description, config)
suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config)
iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config)
suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config)
suite.running = true
success := suite.runner.Run()
if !success {
t.Fail()
}
return success, specs.HasProgrammaticFocus()
return success, hasProgrammaticFocus
}
func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs {
func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) {
specsSlice := []*spec.Spec{}
suite.topLevelContainer.BackPropagateProgrammaticFocus()
for _, collatedNodes := range suite.topLevelContainer.Collate() {
@ -71,6 +74,7 @@ func (suite *Suite) generateSpecs(description string, config config.GinkgoConfig
}
specs := spec.NewSpecs(specsSlice)
specs.RegexScansFilePath = config.RegexScansFilePath
if config.RandomizeAllSpecs {
specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
@ -82,14 +86,25 @@ func (suite *Suite) generateSpecs(description string, config config.GinkgoConfig
specs.SkipMeasurements()
}
var iterator spec_iterator.SpecIterator
if config.ParallelTotal > 1 {
specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode)
iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost)
resp, err := http.Get(config.SyncHost + "/has-counter")
if err != nil || resp.StatusCode != http.StatusOK {
iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode)
}
} else {
iterator = spec_iterator.NewSerialIterator(specs.Specs())
}
return specs
return iterator, specs.HasProgrammaticFocus()
}
func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
if !suite.running {
return nil, false
}
return suite.runner.CurrentSpecSummary()
}
@ -137,35 +152,42 @@ func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagT
func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.running {
suite.failer.Fail("You may only call It from within a Describe or Context", codeLocation)
suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation)
}
suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
}
func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
if suite.running {
suite.failer.Fail("You may only call Measure from within a Describe or Context", codeLocation)
suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation)
}
suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
}
func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.running {
suite.failer.Fail("You may only call BeforeEach from within a Describe or Context", codeLocation)
suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation)
}
suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}
func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.running {
suite.failer.Fail("You may only call JustBeforeEach from within a Describe or Context", codeLocation)
suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation)
}
suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}
func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.running {
suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation)
}
suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}
func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
if suite.running {
suite.failer.Fail("You may only call AfterEach from within a Describe or Context", codeLocation)
suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation)
}
suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

View File

@ -50,7 +50,7 @@ func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
}
func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
fmt.Fprintf(t.writer, format, args...)
t.Log(fmt.Sprintf(format, args...))
}
func (t *ginkgoTestingTProxy) Failed() bool {
@ -65,7 +65,7 @@ func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
}
func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
fmt.Printf(format, args...)
t.Skip(fmt.Sprintf(format, args...))
}
func (t *ginkgoTestingTProxy) SkipNow() {

View File

@ -26,6 +26,11 @@ func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
}
func (writer *FakeGinkgoWriter) Bytes() []byte {
writer.EventStream = append(writer.EventStream, "BYTES")
return nil
}
func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
return 0, nil
}

View File

@ -12,13 +12,15 @@ type WriterInterface interface {
Truncate()
DumpOut()
DumpOutWithHeader(header string)
Bytes() []byte
}
type Writer struct {
buffer *bytes.Buffer
outWriter io.Writer
lock *sync.Mutex
stream bool
buffer *bytes.Buffer
outWriter io.Writer
lock *sync.Mutex
stream bool
redirector io.Writer
}
func New(outWriter io.Writer) *Writer {
@ -30,6 +32,10 @@ func New(outWriter io.Writer) *Writer {
}
}
func (w *Writer) AndRedirectTo(writer io.Writer) {
w.redirector = writer
}
func (w *Writer) SetStream(stream bool) {
w.lock.Lock()
defer w.lock.Unlock()
@ -40,11 +46,14 @@ func (w *Writer) Write(b []byte) (n int, err error) {
w.lock.Lock()
defer w.lock.Unlock()
n, err = w.buffer.Write(b)
if w.redirector != nil {
w.redirector.Write(b)
}
if w.stream {
return w.outWriter.Write(b)
} else {
return w.buffer.Write(b)
}
return n, err
}
func (w *Writer) Truncate() {
@ -61,6 +70,15 @@ func (w *Writer) DumpOut() {
}
}
func (w *Writer) Bytes() []byte {
w.lock.Lock()
defer w.lock.Unlock()
b := w.buffer.Bytes()
copied := make([]byte, len(b))
copy(copied, b)
return copied
}
func (w *Writer) DumpOutWithHeader(header string) {
w.lock.Lock()
defer w.lock.Unlock()

View File

@ -29,9 +29,10 @@ func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer st
func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
if config.ParallelTotal > 1 {
reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct)
reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct)
} else {
reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
}
reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
}
func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
@ -56,16 +57,19 @@ func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary)
switch specSummary.State {
case types.SpecStatePassed:
if specSummary.IsMeasurement {
reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
reporter.stenographer.AnnounceSuccessfulMeasurement(specSummary, reporter.config.Succinct)
} else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
reporter.stenographer.AnnounceSuccessfulSlowSpec(specSummary, reporter.config.Succinct)
} else {
reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
reporter.stenographer.AnnounceSuccessfulSpec(specSummary)
if reporter.config.ReportPassed {
reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
}
}
case types.SpecStatePending:
reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
case types.SpecStateSkipped:
reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace)
case types.SpecStateTimedOut:
reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
case types.SpecStatePanicked:

View File

@ -11,26 +11,37 @@ package reporters
import (
"encoding/xml"
"fmt"
"math"
"os"
"path/filepath"
"strings"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/types"
"os"
"strings"
)
type JUnitTestSuite struct {
XMLName xml.Name `xml:"testsuite"`
TestCases []JUnitTestCase `xml:"testcase"`
Name string `xml:"name,attr"`
Tests int `xml:"tests,attr"`
Failures int `xml:"failures,attr"`
Errors int `xml:"errors,attr"`
Time float64 `xml:"time,attr"`
}
type JUnitTestCase struct {
Name string `xml:"name,attr"`
ClassName string `xml:"classname,attr"`
PassedMessage *JUnitPassedMessage `xml:"passed,omitempty"`
FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
Skipped *JUnitSkipped `xml:"skipped,omitempty"`
Time float64 `xml:"time,attr"`
SystemOut string `xml:"system-out,omitempty"`
}
type JUnitPassedMessage struct {
Message string `xml:",chardata"`
}
type JUnitFailureMessage struct {
@ -43,9 +54,10 @@ type JUnitSkipped struct {
}
type JUnitReporter struct {
suite JUnitTestSuite
filename string
testSuiteName string
suite JUnitTestSuite
filename string
testSuiteName string
ReporterConfig config.DefaultReporterConfigType
}
//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename.
@ -55,12 +67,13 @@ func NewJUnitReporter(filename string) *JUnitReporter {
}
}
func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
func (reporter *JUnitReporter) SpecSuiteWillBegin(ginkgoConfig config.GinkgoConfigType, summary *types.SuiteSummary) {
reporter.suite = JUnitTestSuite{
Tests: summary.NumberOfSpecsThatWillBeRun,
Name: summary.SuiteDescription,
TestCases: []JUnitTestCase{},
}
reporter.testSuiteName = summary.SuiteDescription
reporter.ReporterConfig = config.DefaultReporterConfig
}
func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
@ -74,6 +87,10 @@ func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary
reporter.handleSetupSummary("AfterSuite", setupSummary)
}
func failureMessage(failure types.SpecFailure) string {
return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String())
}
func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
if setupSummary.State != types.SpecStatePassed {
testCase := JUnitTestCase{
@ -83,8 +100,9 @@ func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *typ
testCase.FailureMessage = &JUnitFailureMessage{
Type: reporter.failureTypeForState(setupSummary.State),
Message: fmt.Sprintf("%s\n%s", setupSummary.Failure.ComponentCodeLocation.String(), setupSummary.Failure.Message),
Message: failureMessage(setupSummary.Failure),
}
testCase.SystemOut = setupSummary.CapturedOutput
testCase.Time = setupSummary.RunTime.Seconds()
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
}
@ -95,11 +113,22 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
Name: strings.Join(specSummary.ComponentTexts[1:], " "),
ClassName: reporter.testSuiteName,
}
if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
testCase.PassedMessage = &JUnitPassedMessage{
Message: specSummary.CapturedOutput,
}
}
if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
testCase.FailureMessage = &JUnitFailureMessage{
Type: reporter.failureTypeForState(specSummary.State),
Message: fmt.Sprintf("%s\n%s", specSummary.Failure.ComponentCodeLocation.String(), specSummary.Failure.Message),
Message: failureMessage(specSummary.Failure),
}
if specSummary.State == types.SpecStatePanicked {
testCase.FailureMessage.Message += fmt.Sprintf("\n\nPanic: %s\n\nFull stack:\n%s",
specSummary.Failure.ForwardedPanic,
specSummary.Failure.Location.FullStackTrace)
}
testCase.SystemOut = specSummary.CapturedOutput
}
if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
testCase.Skipped = &JUnitSkipped{}
@ -109,19 +138,33 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
}
func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
reporter.suite.Time = summary.RunTime.Seconds()
reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun
reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000
reporter.suite.Failures = summary.NumberOfFailedSpecs
file, err := os.Create(reporter.filename)
reporter.suite.Errors = 0
if reporter.ReporterConfig.ReportFile != "" {
reporter.filename = reporter.ReporterConfig.ReportFile
fmt.Printf("\nJUnit path was configured: %s\n", reporter.filename)
}
filePath, _ := filepath.Abs(reporter.filename)
dirPath := filepath.Dir(filePath)
err := os.MkdirAll(dirPath, os.ModePerm)
if err != nil {
fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error())
fmt.Printf("\nFailed to create JUnit directory: %s\n\t%s", filePath, err.Error())
}
file, err := os.Create(filePath)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to create JUnit report file: %s\n\t%s", filePath, err.Error())
}
defer file.Close()
file.WriteString(xml.Header)
encoder := xml.NewEncoder(file)
encoder.Indent(" ", " ")
err = encoder.Encode(reporter.suite)
if err != nil {
fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error())
if err == nil {
fmt.Fprintf(os.Stdout, "\nJUnit report was created: %s\n", filePath)
} else {
fmt.Fprintf(os.Stderr, "\nFailed to generate JUnit report data:\n\t%s", err.Error())
}
}

View File

@ -22,24 +22,24 @@ func (s *consoleStenographer) colorize(colorCode string, format string, args ...
}
func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
fmt.Println(text)
fmt.Println(strings.Repeat(bannerCharacter, len(text)))
fmt.Fprintln(s.w, text)
fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text)))
}
func (s *consoleStenographer) printNewLine() {
fmt.Println("")
fmt.Fprintln(s.w, "")
}
func (s *consoleStenographer) printDelimiter() {
fmt.Println(s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
}
func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
fmt.Print(s.indent(indentation, format, args...))
fmt.Fprint(s.w, s.indent(indentation, format, args...))
}
func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
fmt.Println(s.indent(indentation, format, args...))
fmt.Fprintln(s.w, s.indent(indentation, format, args...))
}
func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {

View File

@ -74,14 +74,18 @@ func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, s
stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
}
func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
stenographer.registerCall("AnnounceParallelRun", node, nodes, specsToRun, totalSpecs, succinct)
func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct)
}
func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
}
func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct)
}
func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
}
@ -101,16 +105,16 @@ func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
stenographer.registerCall("AnnounceCapturedOutput", output)
}
func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
stenographer.registerCall("AnnounceSuccesfulSpec", spec)
func (stenographer *FakeStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
stenographer.registerCall("AnnounceSuccessfulSpec", spec)
}
func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct)
func (stenographer *FakeStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
stenographer.registerCall("AnnounceSuccessfulSlowSpec", spec, succinct)
}
func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct)
func (stenographer *FakeStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
stenographer.registerCall("AnnounceSuccessfulMeasurement", spec, succinct)
}
func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {

View File

@ -8,6 +8,7 @@ package stenographer
import (
"fmt"
"io"
"runtime"
"strings"
@ -35,7 +36,8 @@ const (
type Stenographer interface {
AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
AnnounceAggregatedParallelRun(nodes int, succinct bool)
AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool)
AnnounceParallelRun(node int, nodes int, succinct bool)
AnnounceTotalNumberOfSpecs(total int, succinct bool)
AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)
@ -45,9 +47,9 @@ type Stenographer interface {
AnnounceCapturedOutput(output string)
AnnounceSuccesfulSpec(spec *types.SpecSummary)
AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool)
AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)
AnnounceSuccessfulSpec(spec *types.SpecSummary)
AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool)
AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool)
AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
@ -59,22 +61,26 @@ type Stenographer interface {
SummarizeFailures(summaries []*types.SpecSummary)
}
func New(color bool) Stenographer {
func New(color bool, enableFlakes bool, writer io.Writer) Stenographer {
denoter := "•"
if runtime.GOOS == "windows" {
denoter = "+"
}
return &consoleStenographer{
color: color,
denoter: denoter,
cursorState: cursorStateTop,
color: color,
denoter: denoter,
cursorState: cursorStateTop,
enableFlakes: enableFlakes,
w: writer,
}
}
type consoleStenographer struct {
color bool
denoter string
cursorState cursorStateType
color bool
denoter string
cursorState cursorStateType
enableFlakes bool
w io.Writer
}
var alternatingColors = []string{defaultStyle, grayColor}
@ -92,17 +98,15 @@ func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64
s.printNewLine()
}
func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
if succinct {
s.print(0, "- node #%d ", node)
return
}
s.println(0,
"Parallel test node %s/%s. Assigned %s of %s specs.",
"Parallel test node %s/%s.",
s.colorize(boldStyle, "%d", node),
s.colorize(boldStyle, "%d", nodes),
s.colorize(boldStyle, "%d", specsToRun),
s.colorize(boldStyle, "%d", totalSpecs),
)
s.printNewLine()
}
@ -134,6 +138,20 @@ func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, s
s.printNewLine()
}
func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
if succinct {
s.print(0, "- %d specs ", total)
s.stream()
return
}
s.println(0,
"Will run %s specs",
s.colorize(boldStyle, "%d", total),
)
s.printNewLine()
}
func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
if succinct && summary.SuiteSucceeded {
s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
@ -153,11 +171,16 @@ func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSumm
status = s.colorize(boldStyle+redColor, "FAIL!")
}
flakes := ""
if s.enableFlakes {
flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs)
}
s.print(0,
"%s -- %s | %s | %s | %s ",
"%s -- %s | %s | %s | %s\n",
status,
s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs),
s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes,
s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
)
@ -222,12 +245,12 @@ func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
s.midBlock()
}
func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
func (s *consoleStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) {
s.print(0, s.colorize(greenColor, s.denoter))
s.stream()
}
func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
func (s *consoleStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) {
s.printBlockWithMessage(
s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
"",
@ -236,7 +259,7 @@ func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary,
)
}
func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
func (s *consoleStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) {
s.printBlockWithMessage(
s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
s.measurementReport(spec, succinct),
@ -506,15 +529,15 @@ func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinc
message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
s.colorize(boldStyle, "%s", measurement.Name),
measurement.SmallestLabel,
s.colorize(greenColor, "%.3f", measurement.Smallest),
s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
measurement.Units,
measurement.AverageLabel,
s.colorize(cyanColor, "%.3f", measurement.Average),
s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
measurement.Units,
s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
measurement.Units,
measurement.LargestLabel,
s.colorize(redColor, "%.3f", measurement.Largest),
s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
measurement.Units,
))
}
@ -531,15 +554,15 @@ func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinc
s.colorize(boldStyle, "%s", measurement.Name),
info,
measurement.SmallestLabel,
s.colorize(greenColor, "%.3f", measurement.Smallest),
s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
measurement.Units,
measurement.LargestLabel,
s.colorize(redColor, "%.3f", measurement.Largest),
s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
measurement.Units,
measurement.AverageLabel,
s.colorize(cyanColor, "%.3f", measurement.Average),
s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
measurement.Units,
s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
measurement.Units,
))
}

View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Yasuhiro Matsumoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,43 @@
# go-colorable
Colorable writer for Windows.
For example, most logger packages don't show colors on Windows. (I know we can do it with ansicon, but I don't want to.)
This package makes it possible to handle ANSI color escape sequences on Windows.
## Too Bad!
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
## So Good!
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
## Usage
```go
logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
logrus.SetOutput(colorable.NewColorableStdout())
logrus.Info("succeeded")
logrus.Warn("not correct")
logrus.Error("something error")
logrus.Fatal("panic")
```
You can compile the above code on non-Windows OSes as well.
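A more direct sketch, assuming nothing beyond this package and the standard library, that pushes a raw ANSI escape sequence through the colorable writer:

```go
package main

import (
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	out := colorable.NewColorableStdout()
	// "\x1b[31m" switches to red and "\x1b[0m" resets; on Windows the writer
	// translates these escapes into console attribute calls instead.
	fmt.Fprintf(out, "\x1b[31m%s\x1b[0m\n", "this prints in red")
}
```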
## Installation
```
$ go get github.com/mattn/go-colorable
```
# License
MIT
# Author
Yasuhiro Matsumoto (a.k.a mattn)

View File

@ -0,0 +1,24 @@
// +build !windows
package colorable
import (
"io"
"os"
)
func NewColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
}
return file
}
func NewColorableStdout() io.Writer {
return os.Stdout
}
func NewColorableStderr() io.Writer {
return os.Stderr
}

View File

@ -0,0 +1,783 @@
package colorable
import (
"bytes"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
"syscall"
"unsafe"
"github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty"
)
const (
foregroundBlue = 0x1
foregroundGreen = 0x2
foregroundRed = 0x4
foregroundIntensity = 0x8
foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
backgroundBlue = 0x10
backgroundGreen = 0x20
backgroundRed = 0x40
backgroundIntensity = 0x80
backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
)
type wchar uint16
type short int16
type dword uint32
type word uint16
type coord struct {
x short
y short
}
type smallRect struct {
left short
top short
right short
bottom short
}
type consoleScreenBufferInfo struct {
size coord
cursorPosition coord
attributes word
window smallRect
maximumWindowSize coord
}
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
)
type Writer struct {
out io.Writer
handle syscall.Handle
lastbuf bytes.Buffer
oldattr word
}
func NewColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
}
if isatty.IsTerminal(file.Fd()) {
var csbi consoleScreenBufferInfo
handle := syscall.Handle(file.Fd())
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
return &Writer{out: file, handle: handle, oldattr: csbi.attributes}
} else {
return file
}
}
func NewColorableStdout() io.Writer {
return NewColorable(os.Stdout)
}
func NewColorableStderr() io.Writer {
return NewColorable(os.Stderr)
}
var color256 = map[int]int{
0: 0x000000,
1: 0x800000,
2: 0x008000,
3: 0x808000,
4: 0x000080,
5: 0x800080,
6: 0x008080,
7: 0xc0c0c0,
8: 0x808080,
9: 0xff0000,
10: 0x00ff00,
11: 0xffff00,
12: 0x0000ff,
13: 0xff00ff,
14: 0x00ffff,
15: 0xffffff,
16: 0x000000,
17: 0x00005f,
18: 0x000087,
19: 0x0000af,
20: 0x0000d7,
21: 0x0000ff,
22: 0x005f00,
23: 0x005f5f,
24: 0x005f87,
25: 0x005faf,
26: 0x005fd7,
27: 0x005fff,
28: 0x008700,
29: 0x00875f,
30: 0x008787,
31: 0x0087af,
32: 0x0087d7,
33: 0x0087ff,
34: 0x00af00,
35: 0x00af5f,
36: 0x00af87,
37: 0x00afaf,
38: 0x00afd7,
39: 0x00afff,
40: 0x00d700,
41: 0x00d75f,
42: 0x00d787,
43: 0x00d7af,
44: 0x00d7d7,
45: 0x00d7ff,
46: 0x00ff00,
47: 0x00ff5f,
48: 0x00ff87,
49: 0x00ffaf,
50: 0x00ffd7,
51: 0x00ffff,
52: 0x5f0000,
53: 0x5f005f,
54: 0x5f0087,
55: 0x5f00af,
56: 0x5f00d7,
57: 0x5f00ff,
58: 0x5f5f00,
59: 0x5f5f5f,
60: 0x5f5f87,
61: 0x5f5faf,
62: 0x5f5fd7,
63: 0x5f5fff,
64: 0x5f8700,
65: 0x5f875f,
66: 0x5f8787,
67: 0x5f87af,
68: 0x5f87d7,
69: 0x5f87ff,
70: 0x5faf00,
71: 0x5faf5f,
72: 0x5faf87,
73: 0x5fafaf,
74: 0x5fafd7,
75: 0x5fafff,
76: 0x5fd700,
77: 0x5fd75f,
78: 0x5fd787,
79: 0x5fd7af,
80: 0x5fd7d7,
81: 0x5fd7ff,
82: 0x5fff00,
83: 0x5fff5f,
84: 0x5fff87,
85: 0x5fffaf,
86: 0x5fffd7,
87: 0x5fffff,
88: 0x870000,
89: 0x87005f,
90: 0x870087,
91: 0x8700af,
92: 0x8700d7,
93: 0x8700ff,
94: 0x875f00,
95: 0x875f5f,
96: 0x875f87,
97: 0x875faf,
98: 0x875fd7,
99: 0x875fff,
100: 0x878700,
101: 0x87875f,
102: 0x878787,
103: 0x8787af,
104: 0x8787d7,
105: 0x8787ff,
106: 0x87af00,
107: 0x87af5f,
108: 0x87af87,
109: 0x87afaf,
110: 0x87afd7,
111: 0x87afff,
112: 0x87d700,
113: 0x87d75f,
114: 0x87d787,
115: 0x87d7af,
116: 0x87d7d7,
117: 0x87d7ff,
118: 0x87ff00,
119: 0x87ff5f,
120: 0x87ff87,
121: 0x87ffaf,
122: 0x87ffd7,
123: 0x87ffff,
124: 0xaf0000,
125: 0xaf005f,
126: 0xaf0087,
127: 0xaf00af,
128: 0xaf00d7,
129: 0xaf00ff,
130: 0xaf5f00,
131: 0xaf5f5f,
132: 0xaf5f87,
133: 0xaf5faf,
134: 0xaf5fd7,
135: 0xaf5fff,
136: 0xaf8700,
137: 0xaf875f,
138: 0xaf8787,
139: 0xaf87af,
140: 0xaf87d7,
141: 0xaf87ff,
142: 0xafaf00,
143: 0xafaf5f,
144: 0xafaf87,
145: 0xafafaf,
146: 0xafafd7,
147: 0xafafff,
148: 0xafd700,
149: 0xafd75f,
150: 0xafd787,
151: 0xafd7af,
152: 0xafd7d7,
153: 0xafd7ff,
154: 0xafff00,
155: 0xafff5f,
156: 0xafff87,
157: 0xafffaf,
158: 0xafffd7,
159: 0xafffff,
160: 0xd70000,
161: 0xd7005f,
162: 0xd70087,
163: 0xd700af,
164: 0xd700d7,
165: 0xd700ff,
166: 0xd75f00,
167: 0xd75f5f,
168: 0xd75f87,
169: 0xd75faf,
170: 0xd75fd7,
171: 0xd75fff,
172: 0xd78700,
173: 0xd7875f,
174: 0xd78787,
175: 0xd787af,
176: 0xd787d7,
177: 0xd787ff,
178: 0xd7af00,
179: 0xd7af5f,
180: 0xd7af87,
181: 0xd7afaf,
182: 0xd7afd7,
183: 0xd7afff,
184: 0xd7d700,
185: 0xd7d75f,
186: 0xd7d787,
187: 0xd7d7af,
188: 0xd7d7d7,
189: 0xd7d7ff,
190: 0xd7ff00,
191: 0xd7ff5f,
192: 0xd7ff87,
193: 0xd7ffaf,
194: 0xd7ffd7,
195: 0xd7ffff,
196: 0xff0000,
197: 0xff005f,
198: 0xff0087,
199: 0xff00af,
200: 0xff00d7,
201: 0xff00ff,
202: 0xff5f00,
203: 0xff5f5f,
204: 0xff5f87,
205: 0xff5faf,
206: 0xff5fd7,
207: 0xff5fff,
208: 0xff8700,
209: 0xff875f,
210: 0xff8787,
211: 0xff87af,
212: 0xff87d7,
213: 0xff87ff,
214: 0xffaf00,
215: 0xffaf5f,
216: 0xffaf87,
217: 0xffafaf,
218: 0xffafd7,
219: 0xffafff,
220: 0xffd700,
221: 0xffd75f,
222: 0xffd787,
223: 0xffd7af,
224: 0xffd7d7,
225: 0xffd7ff,
226: 0xffff00,
227: 0xffff5f,
228: 0xffff87,
229: 0xffffaf,
230: 0xffffd7,
231: 0xffffff,
232: 0x080808,
233: 0x121212,
234: 0x1c1c1c,
235: 0x262626,
236: 0x303030,
237: 0x3a3a3a,
238: 0x444444,
239: 0x4e4e4e,
240: 0x585858,
241: 0x626262,
242: 0x6c6c6c,
243: 0x767676,
244: 0x808080,
245: 0x8a8a8a,
246: 0x949494,
247: 0x9e9e9e,
248: 0xa8a8a8,
249: 0xb2b2b2,
250: 0xbcbcbc,
251: 0xc6c6c6,
252: 0xd0d0d0,
253: 0xdadada,
254: 0xe4e4e4,
255: 0xeeeeee,
}
func (w *Writer) Write(data []byte) (n int, err error) {
var csbi consoleScreenBufferInfo
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
er := bytes.NewBuffer(data)
loop:
for {
r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
if r1 == 0 {
break loop
}
c1, _, err := er.ReadRune()
if err != nil {
break loop
}
if c1 != 0x1b {
fmt.Fprint(w.out, string(c1))
continue
}
c2, _, err := er.ReadRune()
if err != nil {
w.lastbuf.WriteRune(c1)
break loop
}
if c2 != 0x5b {
w.lastbuf.WriteRune(c1)
w.lastbuf.WriteRune(c2)
continue
}
var buf bytes.Buffer
var m rune
for {
c, _, err := er.ReadRune()
if err != nil {
w.lastbuf.WriteRune(c1)
w.lastbuf.WriteRune(c2)
w.lastbuf.Write(buf.Bytes())
break loop
}
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
m = c
break
}
buf.Write([]byte(string(c)))
}
var csbi consoleScreenBufferInfo
switch m {
case 'A':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.y -= short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'B':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.y += short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'C':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.x -= short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'D':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.x += short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'E':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.x = 0
csbi.cursorPosition.y += short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'F':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.x = 0
csbi.cursorPosition.y -= short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'G':
n, err = strconv.Atoi(buf.String())
if err != nil {
continue
}
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
csbi.cursorPosition.x = short(n)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'H':
token := strings.Split(buf.String(), ";")
if len(token) != 2 {
continue
}
n1, err := strconv.Atoi(token[0])
if err != nil {
continue
}
n2, err := strconv.Atoi(token[1])
if err != nil {
continue
}
csbi.cursorPosition.x = short(n2)
csbi.cursorPosition.y = short(n1)
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
case 'J':
n, err := strconv.Atoi(buf.String())
if err != nil {
continue
}
var cursor coord
switch n {
case 0:
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
case 1:
cursor = coord{x: csbi.window.left, y: csbi.window.top}
case 2:
cursor = coord{x: csbi.window.left, y: csbi.window.top}
}
var count, written dword
count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
case 'K':
n, err := strconv.Atoi(buf.String())
if err != nil {
continue
}
var cursor coord
switch n {
case 0:
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
case 1:
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
case 2:
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
}
var count, written dword
count = dword(csbi.size.x - csbi.cursorPosition.x)
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
case 'm':
attr := csbi.attributes
cs := buf.String()
if cs == "" {
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
continue
}
token := strings.Split(cs, ";")
for i := 0; i < len(token); i += 1 {
ns := token[i]
if n, err = strconv.Atoi(ns); err == nil {
switch {
case n == 0 || n == 100:
attr = w.oldattr
case 1 <= n && n <= 5:
attr |= foregroundIntensity
case n == 7:
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
case n == 22 || n == 25:
attr |= foregroundIntensity
case n == 27:
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
case 30 <= n && n <= 37:
attr = (attr & backgroundMask)
if (n-30)&1 != 0 {
attr |= foregroundRed
}
if (n-30)&2 != 0 {
attr |= foregroundGreen
}
if (n-30)&4 != 0 {
attr |= foregroundBlue
}
case n == 38: // set foreground color.
if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
if n256, err := strconv.Atoi(token[i+2]); err == nil {
if n256foreAttr == nil {
n256setup()
}
attr &= backgroundMask
attr |= n256foreAttr[n256]
i += 2
}
} else {
attr = attr & (w.oldattr & backgroundMask)
}
case n == 39: // reset foreground color.
attr &= backgroundMask
attr |= w.oldattr & foregroundMask
case 40 <= n && n <= 47:
attr = (attr & foregroundMask)
if (n-40)&1 != 0 {
attr |= backgroundRed
}
if (n-40)&2 != 0 {
attr |= backgroundGreen
}
if (n-40)&4 != 0 {
attr |= backgroundBlue
}
case n == 48: // set background color.
if i < len(token)-2 && token[i+1] == "5" {
if n256, err := strconv.Atoi(token[i+2]); err == nil {
if n256backAttr == nil {
n256setup()
}
attr &= foregroundMask
attr |= n256backAttr[n256]
i += 2
}
} else {
attr = attr & (w.oldattr & foregroundMask)
}
case n == 49: // reset background color.
attr &= foregroundMask
attr |= w.oldattr & backgroundMask
case 90 <= n && n <= 97:
attr = (attr & backgroundMask)
attr |= foregroundIntensity
if (n-90)&1 != 0 {
attr |= foregroundRed
}
if (n-90)&2 != 0 {
attr |= foregroundGreen
}
if (n-90)&4 != 0 {
attr |= foregroundBlue
}
case 100 <= n && n <= 107:
attr = (attr & foregroundMask)
attr |= backgroundIntensity
if (n-100)&1 != 0 {
attr |= backgroundRed
}
if (n-100)&2 != 0 {
attr |= backgroundGreen
}
if (n-100)&4 != 0 {
attr |= backgroundBlue
}
}
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
}
}
}
}
return len(data) - w.lastbuf.Len(), nil
}
type consoleColor struct {
rgb int
red bool
green bool
blue bool
intensity bool
}
func (c consoleColor) foregroundAttr() (attr word) {
if c.red {
attr |= foregroundRed
}
if c.green {
attr |= foregroundGreen
}
if c.blue {
attr |= foregroundBlue
}
if c.intensity {
attr |= foregroundIntensity
}
return
}
func (c consoleColor) backgroundAttr() (attr word) {
if c.red {
attr |= backgroundRed
}
if c.green {
attr |= backgroundGreen
}
if c.blue {
attr |= backgroundBlue
}
if c.intensity {
attr |= backgroundIntensity
}
return
}
var color16 = []consoleColor{
consoleColor{0x000000, false, false, false, false},
consoleColor{0x000080, false, false, true, false},
consoleColor{0x008000, false, true, false, false},
consoleColor{0x008080, false, true, true, false},
consoleColor{0x800000, true, false, false, false},
consoleColor{0x800080, true, false, true, false},
consoleColor{0x808000, true, true, false, false},
consoleColor{0xc0c0c0, true, true, true, false},
consoleColor{0x808080, false, false, false, true},
consoleColor{0x0000ff, false, false, true, true},
consoleColor{0x00ff00, false, true, false, true},
consoleColor{0x00ffff, false, true, true, true},
consoleColor{0xff0000, true, false, false, true},
consoleColor{0xff00ff, true, false, true, true},
consoleColor{0xffff00, true, true, false, true},
consoleColor{0xffffff, true, true, true, true},
}
type hsv struct {
h, s, v float32
}
func (a hsv) dist(b hsv) float32 {
dh := a.h - b.h
switch {
case dh > 0.5:
dh = 1 - dh
case dh < -0.5:
dh = -1 - dh
}
ds := a.s - b.s
dv := a.v - b.v
return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
}
func toHSV(rgb int) hsv {
r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
float32((rgb&0x00FF00)>>8)/256.0,
float32(rgb&0x0000FF)/256.0
min, max := minmax3f(r, g, b)
h := max - min
if h > 0 {
if max == r {
h = (g - b) / h
if h < 0 {
h += 6
}
} else if max == g {
h = 2 + (b-r)/h
} else {
h = 4 + (r-g)/h
}
}
h /= 6.0
s := max - min
if max != 0 {
s /= max
}
v := max
return hsv{h: h, s: s, v: v}
}
type hsvTable []hsv
func toHSVTable(rgbTable []consoleColor) hsvTable {
t := make(hsvTable, len(rgbTable))
for i, c := range rgbTable {
t[i] = toHSV(c.rgb)
}
return t
}
func (t hsvTable) find(rgb int) consoleColor {
hsv := toHSV(rgb)
n := 7
l := float32(5.0)
for i, p := range t {
d := hsv.dist(p)
if d < l {
l, n = d, i
}
}
return color16[n]
}
func minmax3f(a, b, c float32) (min, max float32) {
if a < b {
if b < c {
return a, c
} else if a < c {
return a, b
} else {
return c, b
}
} else {
if a < c {
return b, c
} else if b < c {
return b, a
} else {
return c, a
}
}
}
var n256foreAttr []word
var n256backAttr []word
func n256setup() {
n256foreAttr = make([]word, 256)
n256backAttr = make([]word, 256)
t := toHSVTable(color16)
for i, rgb := range color256 {
c := t.find(rgb)
n256foreAttr[i] = c.foregroundAttr()
n256backAttr[i] = c.backgroundAttr()
}
}
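// Illustrative note (not part of the vendored file): n256setup reduces the xterm
// 256-colour palette above to the 16 Windows console attributes by picking, for
// each palette entry, the console colour whose HSV value is closest (hsvTable.find).
//
//	n256setup()
//	attr := n256foreAttr[196] // palette 0xff0000 maps to foregroundRed|foregroundIntensity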

View File

@ -0,0 +1,57 @@
package colorable
import (
"bytes"
"fmt"
"io"
)
type NonColorable struct {
out io.Writer
lastbuf bytes.Buffer
}
func NewNonColorable(w io.Writer) io.Writer {
return &NonColorable{out: w}
}
func (w *NonColorable) Write(data []byte) (n int, err error) {
er := bytes.NewBuffer(data)
loop:
for {
c1, _, err := er.ReadRune()
if err != nil {
break loop
}
if c1 != 0x1b {
fmt.Fprint(w.out, string(c1))
continue
}
c2, _, err := er.ReadRune()
if err != nil {
w.lastbuf.WriteRune(c1)
break loop
}
if c2 != 0x5b {
w.lastbuf.WriteRune(c1)
w.lastbuf.WriteRune(c2)
continue
}
var buf bytes.Buffer
for {
c, _, err := er.ReadRune()
if err != nil {
w.lastbuf.WriteRune(c1)
w.lastbuf.WriteRune(c2)
w.lastbuf.Write(buf.Bytes())
break loop
}
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
break
}
buf.Write([]byte(string(c)))
}
}
return len(data) - w.lastbuf.Len(), nil
}
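// Illustrative sketch (not part of the vendored file): NewNonColorable wraps a
// writer so that ANSI escape sequences are consumed rather than forwarded.
// Assuming the Write behaviour above:
//
//	var buf bytes.Buffer
//	w := NewNonColorable(&buf)
//	fmt.Fprint(w, "\x1b[31mred\x1b[0m text")
//	// buf.String() == "red text" -- the colour escapes were stripped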

View File

@ -0,0 +1,9 @@
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
MIT License (Expat)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -0,0 +1,37 @@
# go-isatty
isatty for golang
## Usage
```go
package main
import (
"fmt"
"github.com/mattn/go-isatty"
"os"
)
func main() {
if isatty.IsTerminal(os.Stdout.Fd()) {
fmt.Println("Is Terminal")
} else {
fmt.Println("Is Not Terminal")
}
}
```
## Installation
```
$ go get github.com/mattn/go-isatty
```
# License
MIT
# Author
Yasuhiro Matsumoto (a.k.a mattn)

View File

@ -0,0 +1,2 @@
// Package isatty implements an interface to isatty.
package isatty

View File

@ -0,0 +1,9 @@
// +build appengine
package isatty
// IsTerminal returns true if the file descriptor is a terminal, which
// is always false on App Engine classic, a sandboxed PaaS.
func IsTerminal(fd uintptr) bool {
return false
}

View File

@ -0,0 +1,18 @@
// +build darwin freebsd openbsd netbsd
// +build !appengine
package isatty
import (
"syscall"
"unsafe"
)
const ioctlReadTermios = syscall.TIOCGETA
// IsTerminal returns true if the file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
var termios syscall.Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}

View File

@ -0,0 +1,18 @@
// +build linux
// +build !appengine
package isatty
import (
"syscall"
"unsafe"
)
const ioctlReadTermios = syscall.TCGETS
// IsTerminal returns true if the file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
var termios syscall.Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}

View File

@ -0,0 +1,16 @@
// +build solaris
// +build !appengine
package isatty
import (
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
func IsTerminal(fd uintptr) bool {
var termio unix.Termio
err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
return err == nil
}

View File

@ -0,0 +1,19 @@
// +build windows
// +build !appengine
package isatty
import (
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
// IsTerminal returns true if the file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}

View File

@ -10,10 +10,11 @@ package reporters
import (
"fmt"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/types"
"io"
"strings"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/types"
)
const (
@ -21,8 +22,9 @@ const (
)
type TeamCityReporter struct {
writer io.Writer
testSuiteName string
writer io.Writer
testSuiteName string
ReporterConfig config.DefaultReporterConfigType
}
func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
@ -33,7 +35,7 @@ func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
reporter.testSuiteName = escape(summary.SuiteDescription)
fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName)
fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']\n", messageId, reporter.testSuiteName)
}
func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
@ -47,38 +49,50 @@ func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSumm
func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
if setupSummary.State != types.SpecStatePassed {
testName := escape(name)
fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
message := escape(setupSummary.Failure.ComponentCodeLocation.String())
details := escape(setupSummary.Failure.Message)
fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName)
message := reporter.failureMessage(setupSummary.Failure)
details := reporter.failureDetails(setupSummary.Failure)
fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details)
durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000
fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds)
}
}
func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) {
testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName)
}
func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) {
testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
details := escape(specSummary.CapturedOutput)
fmt.Fprintf(reporter.writer, "%s[testPassed name='%s' details='%s']\n", messageId, testName, details)
}
if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
message := escape(specSummary.Failure.ComponentCodeLocation.String())
details := escape(specSummary.Failure.Message)
fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
message := reporter.failureMessage(specSummary.Failure)
details := reporter.failureDetails(specSummary.Failure)
fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details)
}
if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName)
fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']\n", messageId, testName)
}
durationInMilliseconds := specSummary.RunTime.Seconds() * 1000
fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds)
}
func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName)
fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']\n", messageId, reporter.testSuiteName)
}
func (reporter *TeamCityReporter) failureMessage(failure types.SpecFailure) string {
return escape(failure.ComponentCodeLocation.String())
}
func (reporter *TeamCityReporter) failureDetails(failure types.SpecFailure) string {
return escape(fmt.Sprintf("%s\n%s", failure.Message, failure.Location.String()))
}
func escape(output string) string {

View File

@ -1,9 +1,25 @@
package types
import "time"
import (
"strconv"
"time"
)
const GINKGO_FOCUS_EXIT_CODE = 197
/*
SuiteSummary represents a summary of the test suite and is passed to both
Reporter.SpecSuiteWillBegin
Reporter.SpecSuiteDidEnd
This is unfortunate, as these two methods should receive different objects. When running in parallel
each node does not deterministically know how many specs it will end up running.
Unfortunately making such a change would break backward compatibility.
Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unknown fields
with -1.
*/
type SuiteSummary struct {
SuiteDescription string
SuiteSucceeded bool
@ -16,7 +32,10 @@ type SuiteSummary struct {
NumberOfSkippedSpecs int
NumberOfPassedSpecs int
NumberOfFailedSpecs int
RunTime time.Duration
// Flaked specs are those that failed initially, but then passed on a
// subsequent try.
NumberOfFlakedSpecs int
RunTime time.Duration
}
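// Illustrative sketch (not part of the vendored file): per the comment above, a
// custom reporter should treat -1 as "unknown" when specs run in parallel. The
// myReporter type and its total field are hypothetical; NumberOfSpecsThatWillBeRun
// is a SuiteSummary counter field not shown in this hunk, and the callback
// signature matches the reporter interface used elsewhere in this diff.
//
//	func (r *myReporter) SpecSuiteWillBegin(cfg config.GinkgoConfigType, summary *SuiteSummary) {
//		if summary.NumberOfSpecsThatWillBeRun == -1 {
//			return // parallel run: the per-node total is not known up front
//		}
//		r.total = summary.NumberOfSpecsThatWillBeRun
//	}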
type SpecSummary struct {
@ -100,6 +119,17 @@ type SpecMeasurement struct {
LargestLabel string
AverageLabel string
Units string
Precision int
}
func (s SpecMeasurement) PrecisionFmt() string {
if s.Precision == 0 {
return "%f"
}
str := strconv.Itoa(s.Precision)
return "%." + str + "f"
}
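// Illustrative sketch (not part of the vendored file): how the stenographer's
// switch from the hard-coded "%.3f" to PrecisionFmt plays out. The values are
// made up for illustration.
//
//	m := SpecMeasurement{Smallest: 0.00042, Precision: 5}
//	fmt.Printf(m.PrecisionFmt()+"\n", m.Smallest) // prints "0.00042"
//	m.Precision = 0
//	fmt.Printf(m.PrecisionFmt()+"\n", m.Smallest) // falls back to "%f": "0.000420"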
type SpecState uint
@ -129,6 +159,7 @@ const (
SpecComponentTypeAfterSuite
SpecComponentTypeBeforeEach
SpecComponentTypeJustBeforeEach
SpecComponentTypeJustAfterEach
SpecComponentTypeAfterEach
SpecComponentTypeIt
SpecComponentTypeMeasure

View File

@ -1,3 +1,5 @@
.DS_Store
*.test
.
.idea
gomega.iml

Some files were not shown because too many files have changed in this diff.