Refactor to new framework (#98)
- Adjust directory structure

  ```text
  ├── internal
  │   ├── app
  │   │   ├── artifactcache
  │   │   ├── cmd
  │   │   ├── poll
  │   │   └── run
  │   └── pkg
  │       ├── client
  │       ├── config
  │       ├── envcheck
  │       ├── labels
  │       ├── report
  │       └── ver
  └── main.go
  ```

- New pkg `labels` to parse labels (format sketched below)
- New pkg `report` to report logs to Gitea
- Remove pkg `engine`; use `envcheck` to check whether Docker is running
- Rewrite `runtime` to `run`
- Rewrite `poller` to `poll`
- Simplify some code and remove what's useless

Reviewed-on: https://gitea.com/gitea/act_runner/pulls/98
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Jason Song <i@wolfogre.com>
Co-committed-by: Jason Song <i@wolfogre.com>
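The new `labels` package itself lives under `internal/pkg/labels` and is not shown in this diff. As a rough, hypothetical sketch of the label format the rest of the commit relies on (see `defaultLabels` and the interactive help text in `register.go`, e.g. `ubuntu-latest:docker://node:16-bullseye` or `linux_arm:host`), a parser might look like the following; the names `Label` and `Parse` and the field layout are assumptions, not the committed API:

```go
// Hypothetical sketch only: the real parser lives in internal/pkg/labels and
// is not part of this diff. Assumed format: "<name>[:<schema>[://<arg>]]".
package labels

import (
	"fmt"
	"strings"
)

type Label struct {
	Name   string // e.g. "ubuntu-latest"
	Schema string // "docker" or "host"
	Arg    string // e.g. "node:16-bullseye" for docker labels
}

func Parse(s string) (*Label, error) {
	name, rest, found := strings.Cut(s, ":")
	l := &Label{Name: name, Schema: "host"}
	if found {
		// "docker://node:16-bullseye" splits into schema and image reference;
		// a bare "host" has no "://" part and keeps an empty Arg.
		schema, arg, _ := strings.Cut(rest, "://")
		l.Schema, l.Arg = schema, arg
	}
	if l.Schema != "docker" && l.Schema != "host" {
		return nil, fmt.Errorf("unknown schema: %q", l.Schema)
	}
	return l, nil
}
```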
internal/app/artifactcache/doc.go (new file, 12 lines)
@@ -0,0 +1,12 @@

```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

// Package artifactcache provides a cache handler for the runner.
//
// Inspired by https://github.com/sp-ricard-valverde/github-act-cache-server
//
// TODO: Authorization
// TODO: Restrictions for accessing a cache, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
// TODO: Force deleting cache entries, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries

package artifactcache
```
internal/app/artifactcache/handler.go (new file, 416 lines)
@@ -0,0 +1,416 @@

```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package artifactcache

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync/atomic"
	"time"

	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"
	"github.com/go-chi/render"
	log "github.com/sirupsen/logrus"
	_ "modernc.org/sqlite"
	"xorm.io/builder"
	"xorm.io/xorm"
)

const (
	urlBase = "/_apis/artifactcache"
)

var logger = log.StandardLogger().WithField("module", "cache_request")

type Handler struct {
	engine   engine
	storage  *Storage
	router   *chi.Mux
	listener net.Listener

	gc   atomic.Bool
	gcAt time.Time

	outboundIP string
}

func StartHandler(dir, outboundIP string, port uint16) (*Handler, error) {
	h := &Handler{}

	if dir == "" {
		if home, err := os.UserHomeDir(); err != nil {
			return nil, err
		} else {
			dir = filepath.Join(home, ".cache", "actcache")
		}
	}
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return nil, err
	}

	e, err := xorm.NewEngine("sqlite", filepath.Join(dir, "sqlite.db"))
	if err != nil {
		return nil, err
	}
	if err := e.Sync(&Cache{}); err != nil {
		return nil, err
	}
	h.engine = engine{e: e}

	storage, err := NewStorage(filepath.Join(dir, "cache"))
	if err != nil {
		return nil, err
	}
	h.storage = storage

	if outboundIP != "" {
		h.outboundIP = outboundIP
	} else if ip, err := getOutboundIP(); err != nil {
		return nil, err
	} else {
		h.outboundIP = ip.String()
	}

	router := chi.NewRouter()
	router.Use(middleware.RequestLogger(&middleware.DefaultLogFormatter{Logger: logger}))
	router.Use(func(handler http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			handler.ServeHTTP(w, r)
			go h.gcCache()
		})
	})
	router.Use(middleware.Logger)
	router.Route(urlBase, func(r chi.Router) {
		r.Get("/cache", h.find)
		r.Route("/caches", func(r chi.Router) {
			r.Post("/", h.reserve)
			r.Route("/{id}", func(r chi.Router) {
				r.Patch("/", h.upload)
				r.Post("/", h.commit)
			})
		})
		r.Get("/artifacts/{id}", h.get)
		r.Post("/clean", h.clean)
	})

	h.router = router

	h.gcCache()

	listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) // listen on all interfaces
	if err != nil {
		return nil, err
	}
	go func() {
		if err := http.Serve(listener, h.router); err != nil {
			logger.Errorf("http serve: %v", err)
		}
	}()
	h.listener = listener

	return h, nil
}

func (h *Handler) ExternalURL() string {
	// TODO: make the external url configurable if necessary
	return fmt.Sprintf("http://%s:%d",
		h.outboundIP,
		h.listener.Addr().(*net.TCPAddr).Port)
}

// GET /_apis/artifactcache/cache
func (h *Handler) find(w http.ResponseWriter, r *http.Request) {
	keys := strings.Split(r.URL.Query().Get("keys"), ",")
	version := r.URL.Query().Get("version")

	cache, err := h.findCache(r.Context(), keys, version)
	if err != nil {
		responseJson(w, r, 500, err)
		return
	}
	if cache == nil {
		responseJson(w, r, 204)
		return
	}

	if ok, err := h.storage.Exist(cache.ID); err != nil {
		responseJson(w, r, 500, err)
		return
	} else if !ok {
		_ = h.engine.Exec(func(sess *xorm.Session) error {
			_, err := sess.Delete(cache)
			return err
		})
		responseJson(w, r, 204)
		return
	}
	responseJson(w, r, 200, map[string]any{
		"result":          "hit",
		"archiveLocation": fmt.Sprintf("%s%s/artifacts/%d", h.ExternalURL(), urlBase, cache.ID),
		"cacheKey":        cache.Key,
	})
}

// POST /_apis/artifactcache/caches
func (h *Handler) reserve(w http.ResponseWriter, r *http.Request) {
	cache := &Cache{}
	if err := render.Bind(r, cache); err != nil {
		responseJson(w, r, 400, err)
		return
	}

	if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
		return sess.Where(builder.Eq{"key": cache.Key, "version": cache.Version}).Get(&Cache{})
	}); err != nil {
		responseJson(w, r, 500, err)
		return
	} else if ok {
		responseJson(w, r, 400, fmt.Errorf("already exists"))
		return
	}

	if err := h.engine.Exec(func(sess *xorm.Session) error {
		_, err := sess.Insert(cache)
		return err
	}); err != nil {
		responseJson(w, r, 500, err)
		return
	}
	responseJson(w, r, 200, map[string]any{
		"cacheId": cache.ID,
	})
	return
}

// PATCH /_apis/artifactcache/caches/:id
func (h *Handler) upload(w http.ResponseWriter, r *http.Request) {
	id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
	if err != nil {
		responseJson(w, r, 400, err)
		return
	}

	cache := &Cache{
		ID: id,
	}

	if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
		return sess.Get(cache)
	}); err != nil {
		responseJson(w, r, 500, err)
		return
	} else if !ok {
		responseJson(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
		return
	}

	if cache.Complete {
		responseJson(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
		return
	}
	start, _, err := parseContentRange(r.Header.Get("Content-Range"))
	if err != nil {
		responseJson(w, r, 400, err)
		return
	}
	if err := h.storage.Write(cache.ID, start, r.Body); err != nil {
		responseJson(w, r, 500, err)
		return // don't fall through and also send the 200 below
	}
	h.useCache(r.Context(), id)
	responseJson(w, r, 200)
}

// POST /_apis/artifactcache/caches/:id
func (h *Handler) commit(w http.ResponseWriter, r *http.Request) {
	id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
	if err != nil {
		responseJson(w, r, 400, err)
		return
	}

	cache := &Cache{
		ID: id,
	}
	if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
		return sess.Get(cache)
	}); err != nil {
		responseJson(w, r, 500, err)
		return
	} else if !ok {
		responseJson(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
		return
	}

	if cache.Complete {
		responseJson(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
		return
	}

	if err := h.storage.Commit(cache.ID, cache.Size); err != nil {
		responseJson(w, r, 500, err)
		return
	}

	cache.Complete = true
	if err := h.engine.Exec(func(sess *xorm.Session) error {
		_, err := sess.ID(cache.ID).Cols("complete").Update(cache)
		return err
	}); err != nil {
		responseJson(w, r, 500, err)
		return
	}

	responseJson(w, r, 200)
}

// GET /_apis/artifactcache/artifacts/:id
func (h *Handler) get(w http.ResponseWriter, r *http.Request) {
	id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
	if err != nil {
		responseJson(w, r, 400, err)
		return
	}
	h.useCache(r.Context(), id)
	h.storage.Serve(w, r, id)
}

// POST /_apis/artifactcache/clean
func (h *Handler) clean(w http.ResponseWriter, r *http.Request) {
	// TODO: don't support force deleting cache entries
	// see: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries

	responseJson(w, r, 200)
}

// if not found, return (nil, nil) instead of an error.
func (h *Handler) findCache(ctx context.Context, keys []string, version string) (*Cache, error) {
	if len(keys) == 0 {
		return nil, nil
	}
	key := keys[0] // the first key is for exact match.

	cache := &Cache{}
	if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
		return sess.Where(builder.Eq{"key": key, "version": version, "complete": true}).Get(cache)
	}); err != nil {
		return nil, err
	} else if ok {
		return cache, nil
	}

	for _, prefix := range keys[1:] {
		if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
			return sess.Where(builder.And(
				builder.Like{"key", prefix + "%"},
				builder.Eq{"version": version, "complete": true},
			)).OrderBy("id DESC").Get(cache)
		}); err != nil {
			return nil, err
		} else if ok {
			return cache, nil
		}
	}
	return nil, nil
}

func (h *Handler) useCache(ctx context.Context, id int64) {
	// keep quiet
	_ = h.engine.Exec(func(sess *xorm.Session) error {
		_, err := sess.Context(ctx).Cols("used_at").Update(&Cache{
			ID:     id,
			UsedAt: time.Now().Unix(),
		})
		return err
	})
}

func (h *Handler) gcCache() {
	if h.gc.Load() {
		return
	}
	if !h.gc.CompareAndSwap(false, true) {
		return
	}
	defer h.gc.Store(false)

	if time.Since(h.gcAt) < time.Hour {
		logger.Infof("skip gc: %v", h.gcAt.String())
		return
	}
	h.gcAt = time.Now()
	logger.Infof("gc: %v", h.gcAt.String())

	const (
		keepUsed   = 30 * 24 * time.Hour
		keepUnused = 7 * 24 * time.Hour
		keepTemp   = 5 * time.Minute
	)

	var caches []*Cache
	if err := h.engine.Exec(func(sess *xorm.Session) error {
		return sess.Where(builder.And(builder.Lt{"used_at": time.Now().Add(-keepTemp).Unix()}, builder.Eq{"complete": false})).
			Find(&caches)
	}); err != nil {
		logger.Warnf("find caches: %v", err)
	} else {
		for _, cache := range caches {
			h.storage.Remove(cache.ID)
			if err := h.engine.Exec(func(sess *xorm.Session) error {
				_, err := sess.Delete(cache)
				return err
			}); err != nil {
				logger.Warnf("delete cache: %v", err)
				continue
			}
			logger.Infof("deleted cache: %+v", cache)
		}
	}

	caches = caches[:0]
	if err := h.engine.Exec(func(sess *xorm.Session) error {
		return sess.Where(builder.Lt{"used_at": time.Now().Add(-keepUnused).Unix()}).
			Find(&caches)
	}); err != nil {
		logger.Warnf("find caches: %v", err)
	} else {
		for _, cache := range caches {
			h.storage.Remove(cache.ID)
			if err := h.engine.Exec(func(sess *xorm.Session) error {
				_, err := sess.Delete(cache)
				return err
			}); err != nil {
				logger.Warnf("delete cache: %v", err)
				continue
			}
			logger.Infof("deleted cache: %+v", cache)
		}
	}

	caches = caches[:0]
	if err := h.engine.Exec(func(sess *xorm.Session) error {
		return sess.Where(builder.Lt{"created_at": time.Now().Add(-keepUsed).Unix()}).
			Find(&caches)
	}); err != nil {
		logger.Warnf("find caches: %v", err)
	} else {
		for _, cache := range caches {
			h.storage.Remove(cache.ID)
			if err := h.engine.Exec(func(sess *xorm.Session) error {
				_, err := sess.Delete(cache)
				return err
			}); err != nil {
				logger.Warnf("delete cache: %v", err)
				continue
			}
			logger.Infof("deleted cache: %+v", cache)
		}
	}
}
```
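To make the request flow concrete, here is a minimal client-side sketch (not part of the commit) that drives the routes `handler.go` registers: reserve, chunked upload via `Content-Range`, commit, then lookup. The port and the cache id `1` are assumptions for illustration; a real client would read `cacheId` out of the reserve response.

```go
// Illustration only: exercises the artifactcache HTTP API defined above.
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	base := "http://127.0.0.1:8088/_apis/artifactcache" // assumed Handler.ExternalURL()

	// 1. Reserve a cache entry; the response body carries {"cacheId": N}.
	resp, err := http.Post(base+"/caches", "application/json",
		bytes.NewReader([]byte(`{"key":"npm-cache","version":"v1"}`)))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	// 2. Upload one chunk; Content-Range tells Storage.Write the offset.
	req, _ := http.NewRequest(http.MethodPatch, base+"/caches/1", bytes.NewReader([]byte("data")))
	req.Header.Set("Content-Range", "bytes 0-3/*")
	if resp, err = http.DefaultClient.Do(req); err == nil {
		resp.Body.Close()
	}

	// 3. Commit: the handler concatenates chunks and marks the entry complete.
	if resp, err = http.Post(base+"/caches/1", "application/json", nil); err == nil {
		resp.Body.Close()
	}

	// 4. Look up by key; a hit returns an archiveLocation to download from.
	if resp, err = http.Get(base + "/cache?keys=npm-cache&version=v1"); err == nil {
		fmt.Println(resp.Status)
		resp.Body.Close()
	}
}
```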
internal/app/artifactcache/model.go (new file, 30 lines)
@@ -0,0 +1,30 @@

```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package artifactcache

import (
	"fmt"
	"net/http"
)

type Cache struct {
	ID        int64  `xorm:"id pk autoincr" json:"-"`
	Key       string `xorm:"TEXT index unique(key_version)" json:"key"`
	Version   string `xorm:"TEXT unique(key_version)" json:"version"`
	Size      int64  `json:"cacheSize"`
	Complete  bool   `xorm:"index(complete_used_at)" json:"-"`
	UsedAt    int64  `xorm:"index(complete_used_at) updated" json:"-"`
	CreatedAt int64  `xorm:"index created" json:"-"`
}

// Bind implements render.Binder
func (c *Cache) Bind(_ *http.Request) error {
	if c.Key == "" {
		return fmt.Errorf("missing key")
	}
	if c.Version == "" {
		return fmt.Errorf("missing version")
	}
	return nil
}
```
internal/app/artifactcache/storage.go (new file, 129 lines)
@@ -0,0 +1,129 @@

```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package artifactcache

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
)

type Storage struct {
	rootDir string
}

func NewStorage(rootDir string) (*Storage, error) {
	if err := os.MkdirAll(rootDir, 0o755); err != nil {
		return nil, err
	}
	return &Storage{
		rootDir: rootDir,
	}, nil
}

func (s *Storage) Exist(id int64) (bool, error) {
	name := s.filename(id)
	if _, err := os.Stat(name); os.IsNotExist(err) {
		return false, nil
	} else if err != nil {
		return false, err
	}
	return true, nil
}

func (s *Storage) Write(id int64, offset int64, reader io.Reader) error {
	name := s.tempName(id, offset)
	if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
		return err
	}
	file, err := os.Create(name)
	if err != nil {
		return err
	}
	defer file.Close()

	_, err = io.Copy(file, reader)
	return err
}

func (s *Storage) Commit(id int64, size int64) error {
	defer func() {
		_ = os.RemoveAll(s.tempDir(id))
	}()

	name := s.filename(id)
	tempNames, err := s.tempNames(id)
	if err != nil {
		return err
	}

	if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
		return err
	}
	file, err := os.Create(name)
	if err != nil {
		return err
	}
	defer file.Close()

	var written int64
	for _, v := range tempNames {
		f, err := os.Open(v)
		if err != nil {
			return err
		}
		n, err := io.Copy(file, f)
		_ = f.Close()
		if err != nil {
			return err
		}
		written += n
	}

	if written != size {
		_ = file.Close()
		_ = os.Remove(name)
		return fmt.Errorf("broken file: %v != %v", written, size)
	}
	return nil
}

func (s *Storage) Serve(w http.ResponseWriter, r *http.Request, id int64) {
	name := s.filename(id)
	http.ServeFile(w, r, name)
}

func (s *Storage) Remove(id int64) {
	_ = os.Remove(s.filename(id))
	_ = os.RemoveAll(s.tempDir(id))
}

func (s *Storage) filename(id int64) string {
	return filepath.Join(s.rootDir, fmt.Sprintf("%02x", id%0xff), fmt.Sprint(id))
}

func (s *Storage) tempDir(id int64) string {
	return filepath.Join(s.rootDir, "tmp", fmt.Sprint(id))
}

func (s *Storage) tempName(id, offset int64) string {
	return filepath.Join(s.tempDir(id), fmt.Sprintf("%016x", offset))
}

func (s *Storage) tempNames(id int64) ([]string, error) {
	dir := s.tempDir(id)
	files, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, v := range files {
		if !v.IsDir() {
			names = append(names, filepath.Join(dir, v.Name()))
		}
	}
	return names, nil
}
```
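One detail worth calling out in `Storage`: `Write` lands each chunk at `tmp/<id>/<offset>` with the offset rendered as `%016x`, and `Commit` concatenates whatever `tempNames` returns. Since `os.ReadDir` returns entries sorted by filename, the zero-padded hex names guarantee that lexicographic order equals numeric offset order, so the chunks reassemble correctly. A standalone demonstration of that property:

```go
// Demonstrates why Storage.tempName uses %016x: sorting the names (as
// os.ReadDir does) yields ascending numeric offsets.
package main

import (
	"fmt"
	"sort"
)

func main() {
	offsets := []int64{0, 4096, 256, 1 << 20}
	names := make([]string, len(offsets))
	for i, off := range offsets {
		names[i] = fmt.Sprintf("%016x", off) // same format as Storage.tempName
	}
	sort.Strings(names) // what os.ReadDir effectively does
	fmt.Println(names)  // ascending by offset: 0, 256, 4096, 1048576
}
```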
internal/app/artifactcache/util.go (new file, 100 lines)
@@ -0,0 +1,100 @@

```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package artifactcache

import (
	"fmt"
	"net"
	"net/http"
	"strconv"
	"strings"
	"sync"

	"github.com/go-chi/render"
	"xorm.io/xorm"
)

func responseJson(w http.ResponseWriter, r *http.Request, code int, v ...any) {
	render.Status(r, code)
	if len(v) == 0 || v[0] == nil {
		render.JSON(w, r, struct{}{})
	} else if err, ok := v[0].(error); ok {
		logger.Errorf("%v %v: %v", r.Method, r.RequestURI, err)
		render.JSON(w, r, map[string]any{
			"error": err.Error(),
		})
	} else {
		render.JSON(w, r, v[0])
	}
}

func parseContentRange(s string) (int64, int64, error) {
	// support the format like "bytes 11-22/*" only
	s, _, _ = strings.Cut(strings.TrimPrefix(s, "bytes "), "/")
	s1, s2, _ := strings.Cut(s, "-")

	start, err := strconv.ParseInt(s1, 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("parse %q: %w", s, err)
	}
	stop, err := strconv.ParseInt(s2, 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("parse %q: %w", s, err)
	}
	return start, stop, nil
}

func getOutboundIP() (net.IP, error) {
	// FIXME: It makes more sense to use the gateway IP address of container network
	if conn, err := net.Dial("udp", "8.8.8.8:80"); err == nil {
		defer conn.Close()
		return conn.LocalAddr().(*net.UDPAddr).IP, nil
	}
	if ifaces, err := net.Interfaces(); err == nil {
		for _, i := range ifaces {
			if addrs, err := i.Addrs(); err == nil {
				for _, addr := range addrs {
					var ip net.IP
					switch v := addr.(type) {
					case *net.IPNet:
						ip = v.IP
					case *net.IPAddr:
						ip = v.IP
					}
					if ip.IsGlobalUnicast() {
						return ip, nil
					}
				}
			}
		}
	}
	return nil, fmt.Errorf("no outbound IP address found")
}

// engine is a wrapper of *xorm.Engine, with a lock.
// To avoid racing of sqlite, we don't care performance here.
type engine struct {
	e *xorm.Engine
	m sync.Mutex
}

func (e *engine) Exec(f func(*xorm.Session) error) error {
	e.m.Lock()
	defer e.m.Unlock()

	sess := e.e.NewSession()
	defer sess.Close()

	return f(sess)
}

func (e *engine) ExecBool(f func(*xorm.Session) (bool, error)) (bool, error) {
	e.m.Lock()
	defer e.m.Unlock()

	sess := e.e.NewSession()
	defer sess.Close()

	return f(sess)
}
```
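`parseContentRange` deliberately supports only the `bytes <start>-<stop>/*` shape that the cache client sends. A quick standalone check of its behavior; the function body is copied verbatim from above so the snippet runs alone:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseContentRange is copied from util.go for a self-contained snippet.
func parseContentRange(s string) (int64, int64, error) {
	s, _, _ = strings.Cut(strings.TrimPrefix(s, "bytes "), "/")
	s1, s2, _ := strings.Cut(s, "-")

	start, err := strconv.ParseInt(s1, 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("parse %q: %w", s, err)
	}
	stop, err := strconv.ParseInt(s2, 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("parse %q: %w", s, err)
	}
	return start, stop, nil
}

func main() {
	fmt.Println(parseContentRange("bytes 11-22/*")) // 11 22 <nil>
	fmt.Println(parseContentRange("bytes 11/*"))    // 0 0 and an error: the stop bound is missing
}
```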
internal/app/cmd/cmd.go (new file, 72 lines)
@@ -0,0 +1,72 @@

```go
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package cmd

import (
	"context"
	"fmt"
	"os"

	"github.com/spf13/cobra"

	"gitea.com/gitea/act_runner/internal/pkg/config"
	"gitea.com/gitea/act_runner/internal/pkg/ver"
)

func Execute(ctx context.Context) {
	// ./act_runner
	rootCmd := &cobra.Command{
		Use:          "act_runner [event name to run]\nIf no event name passed, will default to \"on: push\"",
		Short:        "Run GitHub actions locally by specifying the event name (e.g. `push`) or an action name directly.",
		Args:         cobra.MaximumNArgs(1),
		Version:      ver.Version(),
		SilenceUsage: true,
	}
	configFile := ""
	rootCmd.PersistentFlags().StringVarP(&configFile, "config", "c", "", "Config file path")

	// ./act_runner register
	var regArgs registerArgs
	registerCmd := &cobra.Command{
		Use:   "register",
		Short: "Register a runner to the server",
		Args:  cobra.MaximumNArgs(0),
		RunE:  runRegister(ctx, &regArgs, &configFile), // must use a pointer to regArgs
	}
	registerCmd.Flags().BoolVar(&regArgs.NoInteractive, "no-interactive", false, "Disable interactive mode")
	registerCmd.Flags().StringVar(&regArgs.InstanceAddr, "instance", "", "Gitea instance address")
	registerCmd.Flags().StringVar(&regArgs.Token, "token", "", "Runner token")
	registerCmd.Flags().StringVar(&regArgs.RunnerName, "name", "", "Runner name")
	registerCmd.Flags().StringVar(&regArgs.Labels, "labels", "", "Runner tags, comma separated")
	rootCmd.AddCommand(registerCmd)

	// ./act_runner daemon
	daemonCmd := &cobra.Command{
		Use:   "daemon",
		Short: "Run as a runner daemon",
		Args:  cobra.MaximumNArgs(1),
		RunE:  runDaemon(ctx, &configFile),
	}
	rootCmd.AddCommand(daemonCmd)

	// ./act_runner exec
	rootCmd.AddCommand(loadExecCmd(ctx))

	// ./act_runner config
	rootCmd.AddCommand(&cobra.Command{
		Use:   "generate-config",
		Short: "Generate an example config file",
		Args:  cobra.MaximumNArgs(0),
		Run: func(_ *cobra.Command, _ []string) {
			fmt.Printf("%s", config.Example)
		},
	})

	// hide completion command
	rootCmd.CompletionOptions.HiddenDefaultCmd = true

	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```
internal/app/cmd/daemon.go (new file, 98 lines)
@@ -0,0 +1,98 @@

```go
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package cmd

import (
	"context"
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"

	"gitea.com/gitea/act_runner/internal/app/poll"
	"gitea.com/gitea/act_runner/internal/app/run"
	"gitea.com/gitea/act_runner/internal/pkg/client"
	"gitea.com/gitea/act_runner/internal/pkg/config"
	"gitea.com/gitea/act_runner/internal/pkg/envcheck"
	"gitea.com/gitea/act_runner/internal/pkg/labels"
	"gitea.com/gitea/act_runner/internal/pkg/ver"
)

func runDaemon(ctx context.Context, configFile *string) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		log.Infoln("Starting runner daemon")

		cfg, err := config.LoadDefault(*configFile)
		if err != nil {
			return fmt.Errorf("invalid configuration: %w", err)
		}

		initLogging(cfg)

		reg, err := config.LoadRegistration(cfg.Runner.File)
		if os.IsNotExist(err) {
			log.Error("registration file not found, please register the runner first")
			return err
		} else if err != nil {
			return fmt.Errorf("failed to load registration file: %w", err)
		}

		ls := labels.Labels{}
		for _, l := range reg.Labels {
			label, err := labels.Parse(l)
			if err != nil {
				log.WithError(err).Warnf("ignored invalid label %q", l)
				continue
			}
			ls = append(ls, label)
		}
		if len(ls) == 0 {
			log.Warn("no labels configured, runner may not be able to pick up jobs")
		}

		if ls.RequireDocker() {
			if err := envcheck.CheckIfDockerRunning(ctx); err != nil {
				return err
			}
		}

		cli := client.New(
			reg.Address,
			cfg.Runner.Insecure,
			reg.UUID,
			reg.Token,
			ver.Version(),
		)

		runner := run.NewRunner(cfg, reg, cli)
		poller := poll.New(cfg, cli, runner)

		poller.Poll(ctx)

		return nil
	}
}

// initLogging sets up the global logrus logger.
func initLogging(cfg *config.Config) {
	isTerm := isatty.IsTerminal(os.Stdout.Fd())
	log.SetFormatter(&log.TextFormatter{
		DisableColors: !isTerm,
		FullTimestamp: true,
	})

	if l := cfg.Log.Level; l != "" {
		level, err := log.ParseLevel(l)
		if err != nil {
			log.WithError(err).
				Errorf("invalid log level: %q", l)
		}
		if log.GetLevel() != level {
			log.Infof("log level changed to %v", level)
			log.SetLevel(level)
		}
	}
}
```
internal/app/cmd/exec.go (new file, 468 lines)
@@ -0,0 +1,468 @@

```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// Copyright 2019 nektos
// SPDX-License-Identifier: MIT

package cmd

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/joho/godotenv"
	"github.com/nektos/act/pkg/artifacts"
	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/model"
	"github.com/nektos/act/pkg/runner"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"golang.org/x/term"

	"gitea.com/gitea/act_runner/internal/app/artifactcache"
)

type executeArgs struct {
	runList               bool
	job                   string
	event                 string
	workdir               string
	workflowsPath         string
	noWorkflowRecurse     bool
	autodetectEvent       bool
	forcePull             bool
	forceRebuild          bool
	jsonLogger            bool
	envs                  []string
	envfile               string
	secrets               []string
	defaultActionsUrl     string
	insecureSecrets       bool
	privileged            bool
	usernsMode            string
	containerArchitecture string
	containerDaemonSocket string
	useGitIgnore          bool
	containerCapAdd       []string
	containerCapDrop      []string
	artifactServerPath    string
	artifactServerAddr    string
	artifactServerPort    string
	noSkipCheckout        bool
	debug                 bool
	dryrun                bool
	image                 string
	cacheHandler          *artifactcache.Handler
}

// WorkflowsPath returns path to workflow file(s)
func (i *executeArgs) WorkflowsPath() string {
	return i.resolve(i.workflowsPath)
}

// Envfile returns path to .env
func (i *executeArgs) Envfile() string {
	return i.resolve(i.envfile)
}

func (i *executeArgs) LoadSecrets() map[string]string {
	s := make(map[string]string)
	for _, secretPair := range i.secrets {
		secretPairParts := strings.SplitN(secretPair, "=", 2)
		secretPairParts[0] = strings.ToUpper(secretPairParts[0])
		if strings.ToUpper(s[secretPairParts[0]]) == secretPairParts[0] {
			log.Errorf("Secret %s is already defined (secrets are case insensitive)", secretPairParts[0])
		}
		if len(secretPairParts) == 2 {
			s[secretPairParts[0]] = secretPairParts[1]
		} else if env, ok := os.LookupEnv(secretPairParts[0]); ok && env != "" {
			s[secretPairParts[0]] = env
		} else {
			fmt.Printf("Provide value for '%s': ", secretPairParts[0])
			val, err := term.ReadPassword(int(os.Stdin.Fd()))
			fmt.Println()
			if err != nil {
				log.Errorf("failed to read input: %v", err)
				os.Exit(1)
			}
			s[secretPairParts[0]] = string(val)
		}
	}
	return s
}

func readEnvs(path string, envs map[string]string) bool {
	if _, err := os.Stat(path); err == nil {
		env, err := godotenv.Read(path)
		if err != nil {
			log.Fatalf("Error loading from %s: %v", path, err)
		}
		for k, v := range env {
			envs[k] = v
		}
		return true
	}
	return false
}

func (i *executeArgs) LoadEnvs() map[string]string {
	envs := make(map[string]string)
	if i.envs != nil {
		for _, envVar := range i.envs {
			e := strings.SplitN(envVar, `=`, 2)
			if len(e) == 2 {
				envs[e[0]] = e[1]
			} else {
				envs[e[0]] = ""
			}
		}
	}
	_ = readEnvs(i.Envfile(), envs)

	envs["ACTIONS_CACHE_URL"] = i.cacheHandler.ExternalURL() + "/"

	return envs
}

// Workdir returns path to workdir
func (i *executeArgs) Workdir() string {
	return i.resolve(".")
}

func (i *executeArgs) resolve(path string) string {
	basedir, err := filepath.Abs(i.workdir)
	if err != nil {
		log.Fatal(err)
	}
	if path == "" {
		return path
	}
	if !filepath.IsAbs(path) {
		path = filepath.Join(basedir, path)
	}
	return path
}

func printList(plan *model.Plan) error {
	type lineInfoDef struct {
		jobID   string
		jobName string
		stage   string
		wfName  string
		wfFile  string
		events  string
	}
	lineInfos := []lineInfoDef{}

	header := lineInfoDef{
		jobID:   "Job ID",
		jobName: "Job name",
		stage:   "Stage",
		wfName:  "Workflow name",
		wfFile:  "Workflow file",
		events:  "Events",
	}

	jobs := map[string]bool{}
	duplicateJobIDs := false

	jobIDMaxWidth := len(header.jobID)
	jobNameMaxWidth := len(header.jobName)
	stageMaxWidth := len(header.stage)
	wfNameMaxWidth := len(header.wfName)
	wfFileMaxWidth := len(header.wfFile)
	eventsMaxWidth := len(header.events)

	for i, stage := range plan.Stages {
		for _, r := range stage.Runs {
			jobID := r.JobID
			line := lineInfoDef{
				jobID:   jobID,
				jobName: r.String(),
				stage:   strconv.Itoa(i),
				wfName:  r.Workflow.Name,
				wfFile:  r.Workflow.File,
				events:  strings.Join(r.Workflow.On(), `,`),
			}
			if _, ok := jobs[jobID]; ok {
				duplicateJobIDs = true
			} else {
				jobs[jobID] = true
			}
			lineInfos = append(lineInfos, line)
			if jobIDMaxWidth < len(line.jobID) {
				jobIDMaxWidth = len(line.jobID)
			}
			if jobNameMaxWidth < len(line.jobName) {
				jobNameMaxWidth = len(line.jobName)
			}
			if stageMaxWidth < len(line.stage) {
				stageMaxWidth = len(line.stage)
			}
			if wfNameMaxWidth < len(line.wfName) {
				wfNameMaxWidth = len(line.wfName)
			}
			if wfFileMaxWidth < len(line.wfFile) {
				wfFileMaxWidth = len(line.wfFile)
			}
			if eventsMaxWidth < len(line.events) {
				eventsMaxWidth = len(line.events)
			}
		}
	}

	jobIDMaxWidth += 2
	jobNameMaxWidth += 2
	stageMaxWidth += 2
	wfNameMaxWidth += 2
	wfFileMaxWidth += 2

	fmt.Printf("%*s%*s%*s%*s%*s%*s\n",
		-stageMaxWidth, header.stage,
		-jobIDMaxWidth, header.jobID,
		-jobNameMaxWidth, header.jobName,
		-wfNameMaxWidth, header.wfName,
		-wfFileMaxWidth, header.wfFile,
		-eventsMaxWidth, header.events,
	)
	for _, line := range lineInfos {
		fmt.Printf("%*s%*s%*s%*s%*s%*s\n",
			-stageMaxWidth, line.stage,
			-jobIDMaxWidth, line.jobID,
			-jobNameMaxWidth, line.jobName,
			-wfNameMaxWidth, line.wfName,
			-wfFileMaxWidth, line.wfFile,
			-eventsMaxWidth, line.events,
		)
	}
	if duplicateJobIDs {
		fmt.Print("\nDetected multiple jobs with the same job name, use `-W` to specify the path to the specific workflow.\n")
	}
	return nil
}

func runExecList(ctx context.Context, planner model.WorkflowPlanner, execArgs *executeArgs) error {
	// plan with filtered jobs - to be used for filtering only
	var filterPlan *model.Plan

	// Determine the event name to be filtered
	var filterEventName string = ""

	if len(execArgs.event) > 0 {
		log.Infof("Using chosen event for filtering: %s", execArgs.event)
		filterEventName = execArgs.event
	} else if execArgs.autodetectEvent {
		// collect all events from loaded workflows
		events := planner.GetEvents()

		// set default event type to first event from many available
		// this way the user doesn't have to specify the event.
		log.Infof("Using first detected workflow event for filtering: %s", events[0])

		filterEventName = events[0]
	}

	var err error
	if execArgs.job != "" {
		log.Infof("Preparing plan with a job: %s", execArgs.job)
		filterPlan, err = planner.PlanJob(execArgs.job)
		if err != nil {
			return err
		}
	} else if filterEventName != "" {
		log.Infof("Preparing plan for an event: %s", filterEventName)
		filterPlan, err = planner.PlanEvent(filterEventName)
		if err != nil {
			return err
		}
	} else {
		log.Infof("Preparing plan with all jobs")
		filterPlan, err = planner.PlanAll()
		if err != nil {
			return err
		}
	}

	printList(filterPlan)

	return nil
}

func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command, args []string) error {
	return func(cmd *cobra.Command, args []string) error {
		planner, err := model.NewWorkflowPlanner(execArgs.WorkflowsPath(), execArgs.noWorkflowRecurse)
		if err != nil {
			return err
		}

		if execArgs.runList {
			return runExecList(ctx, planner, execArgs)
		}

		// plan with triggered jobs
		var plan *model.Plan

		// Determine the event name to be triggered
		var eventName string

		// collect all events from loaded workflows
		events := planner.GetEvents()

		if len(execArgs.event) > 0 {
			log.Infof("Using chosen event for filtering: %s", execArgs.event)
			eventName = args[0]
		} else if len(events) == 1 && len(events[0]) > 0 {
			log.Infof("Using the only detected workflow event: %s", events[0])
			eventName = events[0]
		} else if execArgs.autodetectEvent && len(events) > 0 && len(events[0]) > 0 {
			// set default event type to first event from many available
			// this way the user doesn't have to specify the event.
			log.Infof("Using first detected workflow event: %s", events[0])
			eventName = events[0]
		} else {
			log.Infof("Using default workflow event: push")
			eventName = "push"
		}

		// build the plan for this run
		if execArgs.job != "" {
			log.Infof("Planning job: %s", execArgs.job)
			plan, err = planner.PlanJob(execArgs.job)
			if err != nil {
				return err
			}
		} else {
			log.Infof("Planning jobs for event: %s", eventName)
			plan, err = planner.PlanEvent(eventName)
			if err != nil {
				return err
			}
		}

		maxLifetime := 3 * time.Hour
		if deadline, ok := ctx.Deadline(); ok {
			maxLifetime = time.Until(deadline)
		}

		// init a cache server
		handler, err := artifactcache.StartHandler("", "", 0)
		if err != nil {
			return err
		}
		log.Infof("cache handler listens on: %v", handler.ExternalURL())
		execArgs.cacheHandler = handler

		// run the plan
		config := &runner.Config{
			Workdir:               execArgs.Workdir(),
			BindWorkdir:           false,
			ReuseContainers:       false,
			ForcePull:             execArgs.forcePull,
			ForceRebuild:          execArgs.forceRebuild,
			LogOutput:             true,
			JSONLogger:            execArgs.jsonLogger,
			Env:                   execArgs.LoadEnvs(),
			Secrets:               execArgs.LoadSecrets(),
			InsecureSecrets:       execArgs.insecureSecrets,
			Privileged:            execArgs.privileged,
			UsernsMode:            execArgs.usernsMode,
			ContainerArchitecture: execArgs.containerArchitecture,
			ContainerDaemonSocket: execArgs.containerDaemonSocket,
			UseGitIgnore:          execArgs.useGitIgnore,
			// GitHubInstance:     t.client.Address(),
			ContainerCapAdd:    execArgs.containerCapAdd,
			ContainerCapDrop:   execArgs.containerCapDrop,
			AutoRemove:         true,
			ArtifactServerPath: execArgs.artifactServerPath,
			ArtifactServerPort: execArgs.artifactServerPort,
			NoSkipCheckout:     execArgs.noSkipCheckout,
			// PresetGitHubContext: preset,
			// EventJSON:           string(eventJSON),
			ContainerNamePrefix:   fmt.Sprintf("GITEA-ACTIONS-TASK-%s", eventName),
			ContainerMaxLifetime:  maxLifetime,
			ContainerNetworkMode:  "bridge",
			DefaultActionInstance: execArgs.defaultActionsUrl,
			PlatformPicker: func(_ []string) string {
				return execArgs.image
			},
		}

		// TODO: handle log level config
		// waiting https://gitea.com/gitea/act/pulls/19
		// if !execArgs.debug {
		//   logLevel := log.Level(log.InfoLevel)
		//   config.JobLoggerLevel = &logLevel
		// }

		r, err := runner.New(config)
		if err != nil {
			return err
		}

		if len(execArgs.artifactServerPath) == 0 {
			tempDir, err := os.MkdirTemp("", "gitea-act-")
			if err != nil {
				fmt.Println(err)
			}
			defer os.RemoveAll(tempDir)

			execArgs.artifactServerPath = tempDir
		}

		artifactCancel := artifacts.Serve(ctx, execArgs.artifactServerPath, execArgs.artifactServerAddr, execArgs.artifactServerPort)
		log.Debugf("artifacts server started at %s:%s", execArgs.artifactServerPath, execArgs.artifactServerPort)

		ctx = common.WithDryrun(ctx, execArgs.dryrun)
		executor := r.NewPlanExecutor(plan).Finally(func(ctx context.Context) error {
			artifactCancel()
			return nil
		})

		return executor(ctx)
	}
}

func loadExecCmd(ctx context.Context) *cobra.Command {
	execArg := executeArgs{}

	execCmd := &cobra.Command{
		Use:   "exec",
		Short: "Run workflow locally.",
		Args:  cobra.MaximumNArgs(20),
		RunE:  runExec(ctx, &execArg),
	}

	execCmd.Flags().BoolVarP(&execArg.runList, "list", "l", false, "list workflows")
	execCmd.Flags().StringVarP(&execArg.job, "job", "j", "", "run a specific job ID")
	execCmd.Flags().StringVarP(&execArg.event, "event", "E", "", "run an event name")
	execCmd.PersistentFlags().StringVarP(&execArg.workflowsPath, "workflows", "W", "./.gitea/workflows/", "path to workflow file(s)")
	execCmd.PersistentFlags().StringVarP(&execArg.workdir, "directory", "C", ".", "working directory")
	execCmd.PersistentFlags().BoolVarP(&execArg.noWorkflowRecurse, "no-recurse", "", false, "Flag to disable running workflows from subdirectories of specified path in '--workflows'/'-W' flag")
	execCmd.Flags().BoolVarP(&execArg.autodetectEvent, "detect-event", "", false, "Use first event type from workflow as event that triggered the workflow")
	execCmd.Flags().BoolVarP(&execArg.forcePull, "pull", "p", false, "pull docker image(s) even if already present")
	execCmd.Flags().BoolVarP(&execArg.forceRebuild, "rebuild", "", false, "rebuild local action docker image(s) even if already present")
	execCmd.PersistentFlags().BoolVar(&execArg.jsonLogger, "json", false, "Output logs in json format")
	execCmd.Flags().StringArrayVarP(&execArg.envs, "env", "", []string{}, "env to make available to actions with optional value (e.g. --env myenv=foo or --env myenv)")
	execCmd.PersistentFlags().StringVarP(&execArg.envfile, "env-file", "", ".env", "environment file to read and use as env in the containers")
	execCmd.Flags().StringArrayVarP(&execArg.secrets, "secret", "s", []string{}, "secret to make available to actions with optional value (e.g. -s mysecret=foo or -s mysecret)")
	execCmd.PersistentFlags().BoolVarP(&execArg.insecureSecrets, "insecure-secrets", "", false, "NOT RECOMMENDED! Doesn't hide secrets while printing logs.")
	execCmd.Flags().BoolVar(&execArg.privileged, "privileged", false, "use privileged mode")
	execCmd.Flags().StringVar(&execArg.usernsMode, "userns", "", "user namespace to use")
	execCmd.PersistentFlags().StringVarP(&execArg.containerArchitecture, "container-architecture", "", "", "Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms.")
	execCmd.PersistentFlags().StringVarP(&execArg.containerDaemonSocket, "container-daemon-socket", "", "/var/run/docker.sock", "Path to Docker daemon socket which will be mounted to containers")
	execCmd.Flags().BoolVar(&execArg.useGitIgnore, "use-gitignore", true, "Controls whether paths specified in .gitignore should be copied into container")
	execCmd.Flags().StringArrayVarP(&execArg.containerCapAdd, "container-cap-add", "", []string{}, "kernel capabilities to add to the workflow containers (e.g. --container-cap-add SYS_PTRACE)")
	execCmd.Flags().StringArrayVarP(&execArg.containerCapDrop, "container-cap-drop", "", []string{}, "kernel capabilities to remove from the workflow containers (e.g. --container-cap-drop SYS_PTRACE)")
	execCmd.PersistentFlags().StringVarP(&execArg.artifactServerPath, "artifact-server-path", "", ".", "Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start.")
	execCmd.PersistentFlags().StringVarP(&execArg.artifactServerPort, "artifact-server-port", "", "34567", "Defines the port where the artifact server listens (will only bind to localhost).")
	execCmd.PersistentFlags().StringVarP(&execArg.defaultActionsUrl, "default-actions-url", "", "https://gitea.com", "Defines the default url of action instance.")
	execCmd.PersistentFlags().BoolVarP(&execArg.noSkipCheckout, "no-skip-checkout", "", false, "Do not skip actions/checkout")
	execCmd.PersistentFlags().BoolVarP(&execArg.debug, "debug", "d", false, "enable debug log")
	execCmd.PersistentFlags().BoolVarP(&execArg.dryrun, "dryrun", "n", false, "dryrun mode")
	execCmd.PersistentFlags().StringVarP(&execArg.image, "image", "i", "node:16-bullseye", "docker image to use")

	return execCmd
}
```
internal/app/cmd/register.go (new file, 334 lines)
@@ -0,0 +1,334 @@

```go
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package cmd

import (
	"bufio"
	"context"
	"fmt"
	"os"
	"os/signal"
	goruntime "runtime"
	"strings"
	"time"

	pingv1 "code.gitea.io/actions-proto-go/ping/v1"
	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
	"github.com/bufbuild/connect-go"
	"github.com/mattn/go-isatty"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"

	"gitea.com/gitea/act_runner/internal/pkg/client"
	"gitea.com/gitea/act_runner/internal/pkg/config"
	"gitea.com/gitea/act_runner/internal/pkg/labels"
	"gitea.com/gitea/act_runner/internal/pkg/ver"
)

// runRegister registers a runner to the server
func runRegister(ctx context.Context, regArgs *registerArgs, configFile *string) func(*cobra.Command, []string) error {
	return func(cmd *cobra.Command, args []string) error {
		log.SetReportCaller(false)
		isTerm := isatty.IsTerminal(os.Stdout.Fd())
		log.SetFormatter(&log.TextFormatter{
			DisableColors:    !isTerm,
			DisableTimestamp: true,
		})
		log.SetLevel(log.DebugLevel)

		log.Infof("Registering runner, arch=%s, os=%s, version=%s.",
			goruntime.GOARCH, goruntime.GOOS, ver.Version())

		// runner always needs root permission
		if os.Getuid() != 0 {
			// TODO: use a better way to check root permission
			log.Warnf("Runner in user-mode.")
		}

		if regArgs.NoInteractive {
			if err := registerNoInteractive(*configFile, regArgs); err != nil {
				return err
			}
		} else {
			go func() {
				if err := registerInteractive(*configFile); err != nil {
					log.Fatal(err)
					return
				}
				os.Exit(0)
			}()

			c := make(chan os.Signal, 1)
			signal.Notify(c, os.Interrupt)
			<-c
		}

		return nil
	}
}

// registerArgs represents the arguments for register command
type registerArgs struct {
	NoInteractive bool
	InstanceAddr  string
	Token         string
	RunnerName    string
	Labels        string
}

type registerStage int8

const (
	StageUnknown              registerStage = -1
	StageOverwriteLocalConfig registerStage = iota + 1
	StageInputInstance
	StageInputToken
	StageInputRunnerName
	StageInputCustomLabels
	StageWaitingForRegistration
	StageExit
)

var defaultLabels = []string{
	"ubuntu-latest:docker://node:16-bullseye",
	"ubuntu-22.04:docker://node:16-bullseye", // There's no node:16-bookworm yet
	"ubuntu-20.04:docker://node:16-bullseye",
	"ubuntu-18.04:docker://node:16-buster",
}

type registerInputs struct {
	InstanceAddr string
	Token        string
	RunnerName   string
	CustomLabels []string
}

func (r *registerInputs) validate() error {
	if r.InstanceAddr == "" {
		return fmt.Errorf("instance address is empty")
	}
	if r.Token == "" {
		return fmt.Errorf("token is empty")
	}
	if len(r.CustomLabels) > 0 {
		return validateLabels(r.CustomLabels)
	}
	return nil
}

func validateLabels(ls []string) error {
	for _, label := range ls {
		if _, err := labels.Parse(label); err != nil {
			return err
		}
	}
	return nil
}

func (r *registerInputs) assignToNext(stage registerStage, value string) registerStage {
	// must set instance address and token.
	// if empty, keep current stage.
	if stage == StageInputInstance || stage == StageInputToken {
		if value == "" {
			return stage
		}
	}

	// set hostname for runner name if empty
	if stage == StageInputRunnerName && value == "" {
		value, _ = os.Hostname()
	}

	switch stage {
	case StageOverwriteLocalConfig:
		if value == "Y" || value == "y" {
			return StageInputInstance
		}
		return StageExit
	case StageInputInstance:
		r.InstanceAddr = value
		return StageInputToken
	case StageInputToken:
		r.Token = value
		return StageInputRunnerName
	case StageInputRunnerName:
		r.RunnerName = value
		return StageInputCustomLabels
	case StageInputCustomLabels:
		r.CustomLabels = defaultLabels
		if value != "" {
			r.CustomLabels = strings.Split(value, ",")
		}

		if validateLabels(r.CustomLabels) != nil {
			log.Infoln("Invalid labels, please input again, leave blank to use the default labels (for example, ubuntu-20.04:docker://node:16-bullseye,ubuntu-18.04:docker://node:16-buster,linux_arm:host)")
			return StageInputCustomLabels
		}
		return StageWaitingForRegistration
	}
	return StageUnknown
}

func registerInteractive(configFile string) error {
	var (
		reader = bufio.NewReader(os.Stdin)
		stage  = StageInputInstance
		inputs = new(registerInputs)
	)

	cfg, err := config.LoadDefault(configFile)
	if err != nil {
		return fmt.Errorf("failed to load config: %v", err)
	}
	if f, err := os.Stat(cfg.Runner.File); err == nil && !f.IsDir() {
		stage = StageOverwriteLocalConfig
	}

	for {
		printStageHelp(stage)

		cmdString, err := reader.ReadString('\n')
		if err != nil {
			return err
		}
		stage = inputs.assignToNext(stage, strings.TrimSpace(cmdString))

		if stage == StageWaitingForRegistration {
			log.Infof("Registering runner, name=%s, instance=%s, labels=%v.", inputs.RunnerName, inputs.InstanceAddr, inputs.CustomLabels)
			if err := doRegister(cfg, inputs); err != nil {
				log.Errorf("Failed to register runner: %v", err)
			} else {
				log.Infof("Runner registered successfully.")
			}
			return nil
		}

		if stage == StageExit {
			return nil
		}

		if stage <= StageUnknown {
			log.Errorf("Invalid input, please re-run act command.")
			return nil
		}
	}
}

func printStageHelp(stage registerStage) {
	switch stage {
	case StageOverwriteLocalConfig:
		log.Infoln("Runner is already registered, overwrite local config? [y/N]")
	case StageInputInstance:
		log.Infoln("Enter the Gitea instance URL (for example, https://gitea.com/):")
	case StageInputToken:
		log.Infoln("Enter the runner token:")
	case StageInputRunnerName:
		hostname, _ := os.Hostname()
		log.Infof("Enter the runner name (if set empty, use hostname: %s):\n", hostname)
	case StageInputCustomLabels:
		log.Infoln("Enter the runner labels, leave blank to use the default labels (comma-separated, for example, ubuntu-20.04:docker://node:16-bullseye,ubuntu-18.04:docker://node:16-buster,linux_arm:host):")
	case StageWaitingForRegistration:
		log.Infoln("Waiting for registration...")
	}
}

func registerNoInteractive(configFile string, regArgs *registerArgs) error {
	cfg, err := config.LoadDefault(configFile)
	if err != nil {
		return err
	}
	inputs := &registerInputs{
		InstanceAddr: regArgs.InstanceAddr,
		Token:        regArgs.Token,
		RunnerName:   regArgs.RunnerName,
		CustomLabels: defaultLabels,
	}
	regArgs.Labels = strings.TrimSpace(regArgs.Labels)
	if regArgs.Labels != "" {
		inputs.CustomLabels = strings.Split(regArgs.Labels, ",")
	}
	if inputs.RunnerName == "" {
		inputs.RunnerName, _ = os.Hostname()
		log.Infof("Runner name is empty, use hostname '%s'.", inputs.RunnerName)
	}
	if err := inputs.validate(); err != nil {
		log.WithError(err).Errorf("Invalid input, please re-run act command.")
		return nil
	}
	if err := doRegister(cfg, inputs); err != nil {
		log.Errorf("Failed to register runner: %v", err)
		return nil
	}
	log.Infof("Runner registered successfully.")
	return nil
}

func doRegister(cfg *config.Config, inputs *registerInputs) error {
	ctx := context.Background()

	// initial http client
	cli := client.New(
		inputs.InstanceAddr,
		cfg.Runner.Insecure,
		"",
		"",
		ver.Version(),
	)

	for {
		_, err := cli.Ping(ctx, connect.NewRequest(&pingv1.PingRequest{
			Data: inputs.RunnerName,
		}))
		select {
		case <-ctx.Done():
			return nil
		default:
		}
		if ctx.Err() != nil {
			break
		}
		if err != nil {
			log.WithError(err).
				Errorln("Cannot ping the Gitea instance server")
			// TODO: if ping failed, retry or exit
			time.Sleep(time.Second)
		} else {
			log.Debugln("Successfully pinged the Gitea instance server")
			break
		}
	}

	reg := &config.Registration{
		Name:    inputs.RunnerName,
		Token:   inputs.Token,
		Address: inputs.InstanceAddr,
		Labels:  inputs.CustomLabels,
	}

	ls := make([]string, len(reg.Labels))
	for i, v := range reg.Labels {
		l, _ := labels.Parse(v)
		ls[i] = l.Name
	}
	// register new runner.
	resp, err := cli.Register(ctx, connect.NewRequest(&runnerv1.RegisterRequest{
		Name:        reg.Name,
		Token:       reg.Token,
		AgentLabels: ls,
	}))
	if err != nil {
		log.WithError(err).Error("poller: cannot register new runner")
		return err
	}

	reg.ID = resp.Msg.Runner.Id
	reg.UUID = resp.Msg.Runner.Uuid
	reg.Name = resp.Msg.Runner.Name
	reg.Token = resp.Msg.Runner.Token

	if err := config.SaveRegistration(cfg.Runner.File, reg); err != nil {
		return fmt.Errorf("failed to save runner config: %w", err)
	}
	return nil
}
```
82
internal/app/poll/poller.go
Normal file
82
internal/app/poll/poller.go
Normal file
@ -0,0 +1,82 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package poll

import (
	"context"
	"errors"
	"sync"
	"time"

	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
	"github.com/bufbuild/connect-go"
	log "github.com/sirupsen/logrus"
	"golang.org/x/time/rate"

	"gitea.com/gitea/act_runner/internal/app/run"
	"gitea.com/gitea/act_runner/internal/pkg/client"
	"gitea.com/gitea/act_runner/internal/pkg/config"
)

type Poller struct {
	client   client.Client
	runner   *run.Runner
	capacity int
}

func New(cfg *config.Config, client client.Client, runner *run.Runner) *Poller {
	return &Poller{
		client:   client,
		runner:   runner,
		capacity: cfg.Runner.Capacity,
	}
}

func (p *Poller) Poll(ctx context.Context) {
	limiter := rate.NewLimiter(rate.Every(2*time.Second), 1)
	wg := &sync.WaitGroup{}
	for i := 0; i < p.capacity; i++ {
		wg.Add(1)
		go p.poll(ctx, wg, limiter)
	}
	wg.Wait()
}

func (p *Poller) poll(ctx context.Context, wg *sync.WaitGroup, limiter *rate.Limiter) {
	defer wg.Done()
	for {
		if err := limiter.Wait(ctx); err != nil {
			if ctx.Err() != nil {
				log.WithError(err).Debug("limiter wait failed")
			}
			return
		}
		task, ok := p.fetchTask(ctx)
		if !ok {
			continue
		}
		if err := p.runner.Run(ctx, task); err != nil {
			log.WithError(err).Error("failed to run task")
		}
	}
}

func (p *Poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) {
	reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	resp, err := p.client.FetchTask(reqCtx, connect.NewRequest(&runnerv1.FetchTaskRequest{}))
	if errors.Is(err, context.DeadlineExceeded) {
		err = nil
	}
	if err != nil {
		log.WithError(err).Error("failed to fetch task")
		return nil, false
	}

	// resp can be nil when the deadline error was swallowed above.
	if resp == nil || resp.Msg == nil || resp.Msg.Task == nil {
		return nil, false
	}
	return resp.Msg.Task, true
}
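For orientation, a minimal sketch of how a daemon command might wire the poller to the client and runner, written as if inside the module (the `internal` packages are not importable from outside). The wiring actually lives in `internal/app/cmd`, which is not part of this hunk; the config path is illustrative:

```go
package main

import (
	"context"

	"gitea.com/gitea/act_runner/internal/app/poll"
	"gitea.com/gitea/act_runner/internal/app/run"
	"gitea.com/gitea/act_runner/internal/pkg/client"
	"gitea.com/gitea/act_runner/internal/pkg/config"
	"gitea.com/gitea/act_runner/internal/pkg/ver"
)

func main() {
	cfg, err := config.LoadDefault("config.yaml") // config path is an assumption
	if err != nil {
		panic(err)
	}
	reg, err := config.LoadRegistration(cfg.Runner.File)
	if err != nil {
		panic(err)
	}
	cli := client.New(reg.Address, cfg.Runner.Insecure, reg.UUID, reg.Token, ver.Version())
	runner := run.NewRunner(cfg, reg, cli)

	// Poll blocks: it starts cfg.Runner.Capacity goroutines that share one
	// rate limiter (at most one fetch every two seconds across all workers).
	poll.New(cfg, cli, runner).Poll(context.Background())
}
```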
199
internal/app/run/runner.go
Normal file
@@ -0,0 +1,199 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package run

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"path/filepath"
	"sync"
	"time"

	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
	"github.com/nektos/act/pkg/common"
	"github.com/nektos/act/pkg/model"
	"github.com/nektos/act/pkg/runner"
	log "github.com/sirupsen/logrus"

	"gitea.com/gitea/act_runner/internal/app/artifactcache"
	"gitea.com/gitea/act_runner/internal/pkg/client"
	"gitea.com/gitea/act_runner/internal/pkg/config"
	"gitea.com/gitea/act_runner/internal/pkg/labels"
	"gitea.com/gitea/act_runner/internal/pkg/report"
	"gitea.com/gitea/act_runner/internal/pkg/ver"
)

// Runner runs the pipeline.
type Runner struct {
	name string

	cfg *config.Config

	client client.Client
	labels labels.Labels
	envs   map[string]string

	runningTasks sync.Map
}

func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client) *Runner {
	ls := labels.Labels{}
	for _, v := range reg.Labels {
		if l, err := labels.Parse(v); err == nil {
			ls = append(ls, l)
		}
	}
	envs := make(map[string]string, len(cfg.Runner.Envs))
	for k, v := range cfg.Runner.Envs {
		envs[k] = v
	}
	if cfg.Cache.Enabled == nil || *cfg.Cache.Enabled {
		cacheHandler, err := artifactcache.StartHandler(cfg.Cache.Dir, cfg.Cache.Host, cfg.Cache.Port)
		if err != nil {
			log.Errorf("cannot init cache server, it will be disabled: %v", err)
			// go on
		} else {
			envs["ACTIONS_CACHE_URL"] = cacheHandler.ExternalURL() + "/"
		}
	}

	return &Runner{
		name:   reg.Name,
		cfg:    cfg,
		client: cli,
		labels: ls,
		envs:   envs,
	}
}

func (r *Runner) Run(ctx context.Context, task *runnerv1.Task) error {
	if _, ok := r.runningTasks.Load(task.Id); ok {
		return fmt.Errorf("task %d is already running", task.Id)
	}
	r.runningTasks.Store(task.Id, struct{}{})
	defer r.runningTasks.Delete(task.Id)

	ctx, cancel := context.WithTimeout(ctx, r.cfg.Runner.Timeout)
	defer cancel()
	reporter := report.NewReporter(ctx, cancel, r.client, task)
	var runErr error
	defer func() {
		lastWords := ""
		if runErr != nil {
			lastWords = runErr.Error()
		}
		_ = reporter.Close(lastWords)
	}()
	reporter.RunDaemon()
	runErr = r.run(ctx, task, reporter)

	return nil
}

func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.Reporter) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("panic: %v", r)
		}
	}()

	reporter.Logf("%s(version:%s) received task %v of job %v, triggered by event: %s", r.name, ver.Version(), task.Id, task.Context.Fields["job"].GetStringValue(), task.Context.Fields["event_name"].GetStringValue())

	workflow, err := model.ReadWorkflow(bytes.NewReader(task.WorkflowPayload))
	if err != nil {
		return err
	}

	jobIDs := workflow.GetJobIDs()
	if len(jobIDs) != 1 {
		return fmt.Errorf("multiple jobs found: %v", jobIDs)
	}
	jobID := jobIDs[0]
	plan, err := model.CombineWorkflowPlanner(workflow).PlanJob(jobID)
	if err != nil {
		return err
	}
	job := workflow.GetJob(jobID)
	reporter.ResetSteps(len(job.Steps))

	taskContext := task.Context.Fields

	log.Infof("task %v repo is %v %v %v", task.Id, taskContext["repository"].GetStringValue(),
		taskContext["gitea_default_actions_url"].GetStringValue(),
		r.client.Address())

	preset := &model.GithubContext{
		Event:           taskContext["event"].GetStructValue().AsMap(),
		RunID:           taskContext["run_id"].GetStringValue(),
		RunNumber:       taskContext["run_number"].GetStringValue(),
		Actor:           taskContext["actor"].GetStringValue(),
		Repository:      taskContext["repository"].GetStringValue(),
		EventName:       taskContext["event_name"].GetStringValue(),
		Sha:             taskContext["sha"].GetStringValue(),
		Ref:             taskContext["ref"].GetStringValue(),
		RefName:         taskContext["ref_name"].GetStringValue(),
		RefType:         taskContext["ref_type"].GetStringValue(),
		HeadRef:         taskContext["head_ref"].GetStringValue(),
		BaseRef:         taskContext["base_ref"].GetStringValue(),
		Token:           taskContext["token"].GetStringValue(),
		RepositoryOwner: taskContext["repository_owner"].GetStringValue(),
		RetentionDays:   taskContext["retention_days"].GetStringValue(),
	}
	if t := task.Secrets["GITEA_TOKEN"]; t != "" {
		preset.Token = t
	} else if t := task.Secrets["GITHUB_TOKEN"]; t != "" {
		preset.Token = t
	}

	eventJSON, err := json.Marshal(preset.Event)
	if err != nil {
		return err
	}

	maxLifetime := 3 * time.Hour
	if deadline, ok := ctx.Deadline(); ok {
		maxLifetime = time.Until(deadline)
	}

	runnerConfig := &runner.Config{
		// On Linux, Workdir will be like "/<owner>/<repo>"
		// On Windows, Workdir will be like "\<owner>\<repo>"
		Workdir:     filepath.FromSlash(string(filepath.Separator) + preset.Repository),
		BindWorkdir: false,

		ReuseContainers:       false,
		ForcePull:             false,
		ForceRebuild:          false,
		LogOutput:             true,
		JSONLogger:            false,
		Env:                   r.envs,
		Secrets:               task.Secrets,
		GitHubInstance:        r.client.Address(),
		AutoRemove:            true,
		NoSkipCheckout:        true,
		PresetGitHubContext:   preset,
		EventJSON:             string(eventJSON),
		ContainerNamePrefix:   fmt.Sprintf("GITEA-ACTIONS-TASK-%d", task.Id),
		ContainerMaxLifetime:  maxLifetime,
		ContainerNetworkMode:  r.cfg.Container.NetworkMode,
		DefaultActionInstance: taskContext["gitea_default_actions_url"].GetStringValue(),
		PlatformPicker:        r.labels.PickPlatform,
	}

	rr, err := runner.New(runnerConfig)
	if err != nil {
		return err
	}
	executor := rr.NewPlanExecutor(plan)

	reporter.Logf("workflow prepared")

	// add logger recorders
	ctx = common.WithLoggerHook(ctx, reporter)

	return executor(ctx)
}
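The `runningTasks` map gives `Run` a cheap idempotency guard: a second delivery of a task that is still in flight fails fast instead of starting a duplicate job. A timing-dependent sketch, illustrative only (`r`, `ctx`, and `task` are assumed from the surrounding wiring):

```go
go func() { _ = r.Run(ctx, task) }() // first delivery executes the job

time.Sleep(100 * time.Millisecond) // crude: let the first call store the task ID
if err := r.Run(ctx, task); err != nil {
	fmt.Println(err) // "task <id> is already running"
}
```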
17
internal/pkg/client/client.go
Normal file
@@ -0,0 +1,17 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package client

import (
	"code.gitea.io/actions-proto-go/ping/v1/pingv1connect"
	"code.gitea.io/actions-proto-go/runner/v1/runnerv1connect"
)

// A Client manages the runner's communication with the Gitea instance.
type Client interface {
	pingv1connect.PingServiceClient
	runnerv1connect.RunnerServiceClient
	Address() string
	Insecure() bool
}
10
internal/pkg/client/header.go
Normal file
@@ -0,0 +1,10 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package client

const (
	UUIDHeader    = "x-runner-uuid"
	TokenHeader   = "x-runner-token"
	VersionHeader = "x-runner-version"
)
81
internal/pkg/client/http.go
Normal file
@@ -0,0 +1,81 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package client

import (
	"context"
	"crypto/tls"
	"net/http"
	"strings"

	"code.gitea.io/actions-proto-go/ping/v1/pingv1connect"
	"code.gitea.io/actions-proto-go/runner/v1/runnerv1connect"
	"github.com/bufbuild/connect-go"
)

func getHttpClient(endpoint string, insecure bool) *http.Client {
	if strings.HasPrefix(endpoint, "https://") && insecure {
		return &http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{
					InsecureSkipVerify: true,
				},
			},
		}
	}
	return http.DefaultClient
}

// New returns a new runner client.
func New(endpoint string, insecure bool, uuid, token, version string, opts ...connect.ClientOption) *HTTPClient {
	baseURL := strings.TrimRight(endpoint, "/") + "/api/actions"

	opts = append(opts, connect.WithInterceptors(connect.UnaryInterceptorFunc(func(next connect.UnaryFunc) connect.UnaryFunc {
		return func(ctx context.Context, req connect.AnyRequest) (connect.AnyResponse, error) {
			if uuid != "" {
				req.Header().Set(UUIDHeader, uuid)
			}
			if token != "" {
				req.Header().Set(TokenHeader, token)
			}
			if version != "" {
				req.Header().Set(VersionHeader, version)
			}
			return next(ctx, req)
		}
	})))

	return &HTTPClient{
		PingServiceClient: pingv1connect.NewPingServiceClient(
			getHttpClient(endpoint, insecure),
			baseURL,
			opts...,
		),
		RunnerServiceClient: runnerv1connect.NewRunnerServiceClient(
			getHttpClient(endpoint, insecure),
			baseURL,
			opts...,
		),
		endpoint: endpoint,
		insecure: insecure,
	}
}

func (c *HTTPClient) Address() string {
	return c.endpoint
}

func (c *HTTPClient) Insecure() bool {
	return c.insecure
}

var _ Client = (*HTTPClient)(nil)

// An HTTPClient manages communication with the runner API.
type HTTPClient struct {
	pingv1connect.PingServiceClient
	runnerv1connect.RunnerServiceClient
	endpoint string
	insecure bool
}
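A minimal sketch of using the client directly, mirroring the ping done during registration. The instance address is an assumption, and `pingv1` is the same proto package used in `doRegister` above:

```go
package main

import (
	"context"
	"fmt"

	pingv1 "code.gitea.io/actions-proto-go/ping/v1"
	"github.com/bufbuild/connect-go"

	"gitea.com/gitea/act_runner/internal/pkg/client"
)

func main() {
	// uuid and token are empty before registration; the interceptor then
	// only attaches the version header.
	cli := client.New("https://gitea.example.com", false, "", "", "dev")
	if _, err := cli.Ping(context.Background(), connect.NewRequest(&pingv1.PingRequest{
		Data: "ping",
	})); err != nil {
		panic(err)
	}
	fmt.Println("instance is reachable")
}
```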
42
internal/pkg/config/config.example.yaml
Normal file
@@ -0,0 +1,42 @@
# Example configuration file. It is safe to copy this as your config file without any modification.

log:
  # The level of logging: can be trace, debug, info, warn, error, or fatal.
  level: info

runner:
  # Where to store the registration result.
  file: .runner
  # How many tasks to execute concurrently at the same time.
  capacity: 1
  # Extra environment variables to run jobs.
  envs:
    A_TEST_ENV_NAME_1: a_test_env_value_1
    A_TEST_ENV_NAME_2: a_test_env_value_2
  # Extra environment variables to run jobs, loaded from a file.
  # It will be ignored if it's empty or the file doesn't exist.
  env_file: .env
  # The timeout for a job to be finished.
  # Please note that the Gitea instance also has a timeout (3h by default) for the job.
  # So the job could be stopped by the Gitea instance if its timeout is shorter than this.
  timeout: 3h
  # Whether to skip verifying the TLS certificate of the Gitea instance.
  insecure: false

cache:
  # Enable a cache server to use actions/cache.
  enabled: true
  # The directory to store the cache data.
  # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
  dir: ""
  # The host of the cache server.
  # It's not the address to listen on, but the address for job containers to connect to.
  # So 0.0.0.0 is a bad choice; leave it empty to detect automatically.
  host: ""
  # The port of the cache server.
  # 0 means to use a random available port.
  port: 0

container:
  # Which network to use for the job containers. Could be bridge, host, none, or the name of a custom network.
  network_mode: bridge
95
internal/pkg/config/config.go
Normal file
@@ -0,0 +1,95 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package config

import (
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/joho/godotenv"
	"gopkg.in/yaml.v3"
)

type Config struct {
	Log struct {
		Level string `yaml:"level"`
	} `yaml:"log"`
	Runner struct {
		File     string            `yaml:"file"`
		Capacity int               `yaml:"capacity"`
		Envs     map[string]string `yaml:"envs"`
		EnvFile  string            `yaml:"env_file"`
		Timeout  time.Duration     `yaml:"timeout"`
		Insecure bool              `yaml:"insecure"`
	} `yaml:"runner"`
	Cache struct {
		Enabled *bool  `yaml:"enabled"` // pointer to distinguish between false and not set; it defaults to true if not set
		Dir     string `yaml:"dir"`
		Host    string `yaml:"host"`
		Port    uint16 `yaml:"port"`
	} `yaml:"cache"`
	Container struct {
		NetworkMode string `yaml:"network_mode"`
	}
}

// LoadDefault returns the default configuration.
// If file is not empty, it will be used to load the configuration.
func LoadDefault(file string) (*Config, error) {
	cfg := &Config{}
	if file != "" {
		f, err := os.Open(file)
		if err != nil {
			return nil, err
		}
		defer f.Close()
		decoder := yaml.NewDecoder(f)
		if err := decoder.Decode(&cfg); err != nil {
			return nil, err
		}
	}
	compatibleWithOldEnvs(file != "", cfg)

	if cfg.Runner.EnvFile != "" {
		if stat, err := os.Stat(cfg.Runner.EnvFile); err == nil && !stat.IsDir() {
			envs, err := godotenv.Read(cfg.Runner.EnvFile)
			if err != nil {
				return nil, fmt.Errorf("read env file %q: %w", cfg.Runner.EnvFile, err)
			}
			if cfg.Runner.Envs == nil {
				// Guard against a config file that sets env_file but no envs.
				cfg.Runner.Envs = map[string]string{}
			}
			for k, v := range envs {
				cfg.Runner.Envs[k] = v
			}
		}
	}

	if cfg.Log.Level == "" {
		cfg.Log.Level = "info"
	}
	if cfg.Runner.File == "" {
		cfg.Runner.File = ".runner"
	}
	if cfg.Runner.Capacity <= 0 {
		cfg.Runner.Capacity = 1
	}
	if cfg.Runner.Timeout <= 0 {
		cfg.Runner.Timeout = 3 * time.Hour
	}
	if cfg.Cache.Enabled == nil {
		b := true
		cfg.Cache.Enabled = &b
	}
	if *cfg.Cache.Enabled {
		if cfg.Cache.Dir == "" {
			home, _ := os.UserHomeDir()
			cfg.Cache.Dir = filepath.Join(home, ".cache", "actcache")
		}
	}
	if cfg.Container.NetworkMode == "" {
		cfg.Container.NetworkMode = "bridge"
	}

	return cfg, nil
}
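A quick sketch of the fallback behavior: an empty path is valid, and every field then gets its default (assuming none of the deprecated environment variables below are set):

```go
package main

import (
	"fmt"

	"gitea.com/gitea/act_runner/internal/pkg/config"
)

func main() {
	cfg, err := config.LoadDefault("")
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Log.Level)             // info
	fmt.Println(cfg.Runner.Capacity)       // 1
	fmt.Println(cfg.Runner.Timeout)        // 3h0m0s
	fmt.Println(*cfg.Cache.Enabled)        // true
	fmt.Println(cfg.Container.NetworkMode) // bridge
}
```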
62
internal/pkg/config/deprecated.go
Normal file
@@ -0,0 +1,62 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package config

import (
	"os"
	"strconv"
	"strings"

	log "github.com/sirupsen/logrus"
)

// Deprecated: could be removed in the future. TODO: remove it when Gitea 1.20.0 is released.
// Be compatible with old envs.
func compatibleWithOldEnvs(fileUsed bool, cfg *Config) {
	handleEnv := func(key string) (string, bool) {
		if v, ok := os.LookupEnv(key); ok {
			if fileUsed {
				log.Warnf("env %s has been ignored because config file is used", key)
				return "", false
			}
			log.Warnf("env %s will be deprecated, please use config file instead", key)
			return v, true
		}
		return "", false
	}

	if v, ok := handleEnv("GITEA_DEBUG"); ok {
		if b, _ := strconv.ParseBool(v); b {
			cfg.Log.Level = "debug"
		}
	}
	if v, ok := handleEnv("GITEA_TRACE"); ok {
		if b, _ := strconv.ParseBool(v); b {
			cfg.Log.Level = "trace"
		}
	}
	if v, ok := handleEnv("GITEA_RUNNER_CAPACITY"); ok {
		if i, _ := strconv.Atoi(v); i > 0 {
			cfg.Runner.Capacity = i
		}
	}
	if v, ok := handleEnv("GITEA_RUNNER_FILE"); ok {
		cfg.Runner.File = v
	}
	if v, ok := handleEnv("GITEA_RUNNER_ENVIRON"); ok {
		splits := strings.Split(v, ",")
		if cfg.Runner.Envs == nil {
			cfg.Runner.Envs = map[string]string{}
		}
		for _, split := range splits {
			kv := strings.SplitN(split, ":", 2)
			if len(kv) == 2 && kv[0] != "" {
				cfg.Runner.Envs[kv[0]] = kv[1]
			}
		}
	}
	if v, ok := handleEnv("GITEA_RUNNER_ENV_FILE"); ok {
		cfg.Runner.EnvFile = v
	}
}
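A sketch of the compatibility behavior: without a config file, the old environment variables still take effect (they are deprecated; with a config file they are ignored with a warning):

```go
package main

import (
	"fmt"
	"os"

	"gitea.com/gitea/act_runner/internal/pkg/config"
)

func main() {
	os.Setenv("GITEA_RUNNER_CAPACITY", "4")
	os.Setenv("GITEA_RUNNER_ENVIRON", "KEY1:value1,KEY2:value2")

	cfg, err := config.LoadDefault("") // fileUsed == false, so the envs are honored
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Runner.Capacity)     // 4
	fmt.Println(cfg.Runner.Envs["KEY1"]) // value1
}
```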
9
internal/pkg/config/embed.go
Normal file
@@ -0,0 +1,9 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package config

import _ "embed"

//go:embed config.example.yaml
var Example []byte
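A sketch of what the embedded example enables: a "generate config" style command can write it out verbatim as a starting point (the target path here is an assumption):

```go
package main

import (
	"os"

	"gitea.com/gitea/act_runner/internal/pkg/config"
)

func main() {
	// Write the embedded config.example.yaml as an editable starting config.
	if err := os.WriteFile("config.yaml", config.Example, 0o644); err != nil {
		panic(err)
	}
}
```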
54
internal/pkg/config/registration.go
Normal file
@@ -0,0 +1,54 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package config

import (
	"encoding/json"
	"os"
)

const registrationWarning = "This file is automatically generated by act-runner. Do not edit it manually unless you know what you are doing. Removing this file will cause act runner to re-register as a new runner."

// Registration is the registration information for a runner
type Registration struct {
	Warning string `json:"WARNING"` // Warning message to display, it's always the registrationWarning constant

	ID      int64    `json:"id"`
	UUID    string   `json:"uuid"`
	Name    string   `json:"name"`
	Token   string   `json:"token"`
	Address string   `json:"address"`
	Labels  []string `json:"labels"`
}

func LoadRegistration(file string) (*Registration, error) {
	f, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var reg Registration
	if err := json.NewDecoder(f).Decode(&reg); err != nil {
		return nil, err
	}

	reg.Warning = ""

	return &reg, nil
}

func SaveRegistration(file string, reg *Registration) error {
	f, err := os.Create(file)
	if err != nil {
		return err
	}
	defer f.Close()

	reg.Warning = registrationWarning

	enc := json.NewEncoder(f)
	enc.SetIndent("", "  ")
	return enc.Encode(reg)
}
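A sketch of the round trip: SaveRegistration stamps the WARNING header into the file, and LoadRegistration strips it again so the rest of the code never sees it. Values are illustrative:

```go
reg := &config.Registration{
	Name:    "runner-1",
	Address: "https://gitea.example.com", // assumed address
}
if err := config.SaveRegistration(".runner", reg); err != nil {
	panic(err)
}
loaded, err := config.LoadRegistration(".runner")
if err != nil {
	panic(err)
}
fmt.Println(loaded.Warning == "") // true: the warning is cleared on load
```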
5
internal/pkg/envcheck/doc.go
Normal file
@@ -0,0 +1,5 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

// Package envcheck provides a simple way to check if the environment is ready to run jobs.
package envcheck
27
internal/pkg/envcheck/docker.go
Normal file
@@ -0,0 +1,27 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package envcheck

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func CheckIfDockerRunning(ctx context.Context) error {
	// TODO: if the runner supports configuring which Docker daemon to use, config.Config needs to be passed in.
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return err
	}
	defer cli.Close()

	_, err = cli.Ping(ctx)
	if err != nil {
		return fmt.Errorf("cannot ping the docker daemon, is it running? %w", err)
	}

	return nil
}
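A sketch of the intended call site: the check is only needed when some label uses the docker scheme (`ls` is assumed to be the runner's parsed `labels.Labels`, `ctx` a surrounding context):

```go
if ls.RequireDocker() {
	if err := envcheck.CheckIfDockerRunning(ctx); err != nil {
		log.Fatal(err) // without a reachable daemon, docker-scheme jobs cannot run
	}
}
```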
84
internal/pkg/labels/labels.go
Normal file
@@ -0,0 +1,84 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package labels

import (
	"fmt"
	"strings"
)

const (
	SchemeHost   = "host"
	SchemeDocker = "docker"
)

type Label struct {
	Name   string
	Schema string
	Arg    string
}

func Parse(str string) (*Label, error) {
	splits := strings.SplitN(str, ":", 3)
	label := &Label{
		Name:   splits[0],
		Schema: "host",
		Arg:    "",
	}
	if len(splits) >= 2 {
		label.Schema = splits[1]
	}
	if len(splits) >= 3 {
		label.Arg = splits[2]
	}
	if label.Schema != SchemeHost && label.Schema != SchemeDocker {
		return nil, fmt.Errorf("unsupported schema: %s", label.Schema)
	}
	return label, nil
}

type Labels []*Label

func (l Labels) RequireDocker() bool {
	for _, label := range l {
		if label.Schema == SchemeDocker {
			return true
		}
	}
	return false
}

func (l Labels) PickPlatform(runsOn []string) string {
	platforms := make(map[string]string, len(l))
	for _, label := range l {
		switch label.Schema {
		case SchemeDocker:
			// The "//" prefix of the argument will be trimmed.
			// TODO: maybe we should use 'ubuntu-18.04:docker:node:16-buster' instead
			platforms[label.Name] = strings.TrimPrefix(label.Arg, "//")
		case SchemeHost:
			platforms[label.Name] = "-self-hosted"
		default:
			// It cannot happen, because Parse has checked it.
			continue
		}
	}
	for _, v := range runsOn {
		if v, ok := platforms[v]; ok {
			return v
		}
	}

	// TODO: support multiple labels
	// like:
	//   ["ubuntu-22.04"] => "ubuntu:22.04"
	//   ["with-gpu"] => "linux:with-gpu"
	//   ["ubuntu-22.04", "with-gpu"] => "ubuntu:22.04_with-gpu"

	// Fall back to a default platform. This happens when the runner receives
	// a task with a label it doesn't have, e.g. because the user has edited
	// the runner's labels in the web UI.
	// TODO: it may be not correct, what if the runner is used as host mode only?
	return "node:16-bullseye"
}
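A sketch of how labels map `runs-on` values to platforms, following the parsing and trimming rules above:

```go
package main

import (
	"fmt"

	"gitea.com/gitea/act_runner/internal/pkg/labels"
)

func main() {
	ls := labels.Labels{}
	for _, s := range []string{"ubuntu-22.04:docker://node:16-bullseye", "macos:host"} {
		l, err := labels.Parse(s)
		if err != nil {
			panic(err)
		}
		ls = append(ls, l)
	}
	fmt.Println(ls.PickPlatform([]string{"ubuntu-22.04"})) // node:16-bullseye ("//" trimmed)
	fmt.Println(ls.PickPlatform([]string{"macos"}))        // -self-hosted
	fmt.Println(ls.PickPlatform([]string{"windows"}))      // node:16-bullseye (fallback)
}
```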
64
internal/pkg/labels/labels_test.go
Normal file
@@ -0,0 +1,64 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package labels

import (
	"testing"

	"github.com/stretchr/testify/require"
	"gotest.tools/v3/assert"
)

func TestParse(t *testing.T) {
	tests := []struct {
		args    string
		want    *Label
		wantErr bool
	}{
		{
			args: "ubuntu:docker://node:18",
			want: &Label{
				Name:   "ubuntu",
				Schema: "docker",
				Arg:    "//node:18",
			},
			wantErr: false,
		},
		{
			args: "ubuntu:host",
			want: &Label{
				Name:   "ubuntu",
				Schema: "host",
				Arg:    "",
			},
			wantErr: false,
		},
		{
			args: "ubuntu",
			want: &Label{
				Name:   "ubuntu",
				Schema: "host",
				Arg:    "",
			},
			wantErr: false,
		},
		{
			args:    "ubuntu:vm:ubuntu-18.04",
			want:    nil,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.args, func(t *testing.T) {
			got, err := Parse(tt.args)
			if tt.wantErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			assert.DeepEqual(t, got, tt.want)
		})
	}
}
298
internal/pkg/report/reporter.go
Normal file
@@ -0,0 +1,298 @@
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package report

import (
	"context"
	"fmt"
	"strings"
	"sync"
	"time"

	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
	retry "github.com/avast/retry-go/v4"
	"github.com/bufbuild/connect-go"
	log "github.com/sirupsen/logrus"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"

	"gitea.com/gitea/act_runner/internal/pkg/client"
)

type Reporter struct {
	ctx    context.Context
	cancel context.CancelFunc

	closed  bool
	client  client.Client
	clientM sync.Mutex

	logOffset   int
	logRows     []*runnerv1.LogRow
	logReplacer *strings.Replacer
	state       *runnerv1.TaskState
	stateM      sync.RWMutex
}

func NewReporter(ctx context.Context, cancel context.CancelFunc, client client.Client, task *runnerv1.Task) *Reporter {
	var oldnew []string
	if v := task.Context.Fields["token"].GetStringValue(); v != "" {
		oldnew = append(oldnew, v, "***")
	}
	for _, v := range task.Secrets {
		oldnew = append(oldnew, v, "***")
	}

	return &Reporter{
		ctx:         ctx,
		cancel:      cancel,
		client:      client,
		logReplacer: strings.NewReplacer(oldnew...),
		state: &runnerv1.TaskState{
			Id: task.Id,
		},
	}
}

func (r *Reporter) ResetSteps(l int) {
	r.stateM.Lock()
	defer r.stateM.Unlock()
	for i := 0; i < l; i++ {
		r.state.Steps = append(r.state.Steps, &runnerv1.StepState{
			Id: int64(i),
		})
	}
}

func (r *Reporter) Levels() []log.Level {
	return log.AllLevels
}

func (r *Reporter) Fire(entry *log.Entry) error {
	r.stateM.Lock()
	defer r.stateM.Unlock()

	log.WithFields(entry.Data).Trace(entry.Message)

	timestamp := entry.Time
	if r.state.StartedAt == nil {
		r.state.StartedAt = timestamppb.New(timestamp)
	}

	stage := entry.Data["stage"]

	if stage != "Main" {
		if v, ok := entry.Data["jobResult"]; ok {
			if jobResult, ok := r.parseResult(v); ok {
				r.state.Result = jobResult
				r.state.StoppedAt = timestamppb.New(timestamp)
				for _, s := range r.state.Steps {
					if s.Result == runnerv1.Result_RESULT_UNSPECIFIED {
						s.Result = runnerv1.Result_RESULT_CANCELLED
					}
				}
			}
		}
		if !r.duringSteps() {
			r.logRows = append(r.logRows, r.parseLogRow(entry))
		}
		return nil
	}

	var step *runnerv1.StepState
	if v, ok := entry.Data["stepNumber"]; ok {
		if v, ok := v.(int); ok && len(r.state.Steps) > v {
			step = r.state.Steps[v]
		}
	}
	if step == nil {
		if !r.duringSteps() {
			r.logRows = append(r.logRows, r.parseLogRow(entry))
		}
		return nil
	}

	if step.StartedAt == nil {
		step.StartedAt = timestamppb.New(timestamp)
	}
	if v, ok := entry.Data["raw_output"]; ok {
		if rawOutput, ok := v.(bool); ok && rawOutput {
			if step.LogLength == 0 {
				step.LogIndex = int64(r.logOffset + len(r.logRows))
			}
			step.LogLength++
			r.logRows = append(r.logRows, r.parseLogRow(entry))
		}
	} else if !r.duringSteps() {
		r.logRows = append(r.logRows, r.parseLogRow(entry))
	}
	if v, ok := entry.Data["stepResult"]; ok {
		if stepResult, ok := r.parseResult(v); ok {
			if step.LogLength == 0 {
				step.LogIndex = int64(r.logOffset + len(r.logRows))
			}
			step.Result = stepResult
			step.StoppedAt = timestamppb.New(timestamp)
		}
	}

	return nil
}

func (r *Reporter) RunDaemon() {
	if r.closed {
		return
	}
	if r.ctx.Err() != nil {
		return
	}

	_ = r.ReportLog(false)
	_ = r.ReportState()

	time.AfterFunc(time.Second, r.RunDaemon)
}

func (r *Reporter) Logf(format string, a ...interface{}) {
	r.stateM.Lock()
	defer r.stateM.Unlock()

	if !r.duringSteps() {
		r.logRows = append(r.logRows, &runnerv1.LogRow{
			Time:    timestamppb.Now(),
			Content: fmt.Sprintf(format, a...),
		})
	}
}

func (r *Reporter) Close(lastWords string) error {
	r.closed = true

	r.stateM.Lock()
	if r.state.Result == runnerv1.Result_RESULT_UNSPECIFIED {
		if lastWords == "" {
			lastWords = "Early termination"
		}
		for _, v := range r.state.Steps {
			if v.Result == runnerv1.Result_RESULT_UNSPECIFIED {
				v.Result = runnerv1.Result_RESULT_CANCELLED
			}
		}
		r.state.Result = runnerv1.Result_RESULT_FAILURE
		r.logRows = append(r.logRows, &runnerv1.LogRow{
			Time:    timestamppb.Now(),
			Content: lastWords,
		})
		r.stateM.Unlock() // must not return while holding the lock
		return nil
	} else if lastWords != "" {
		r.logRows = append(r.logRows, &runnerv1.LogRow{
			Time:    timestamppb.Now(),
			Content: lastWords,
		})
	}
	r.stateM.Unlock()

	return retry.Do(func() error {
		if err := r.ReportLog(true); err != nil {
			return err
		}
		return r.ReportState()
	}, retry.Context(r.ctx))
}

func (r *Reporter) ReportLog(noMore bool) error {
	r.clientM.Lock()
	defer r.clientM.Unlock()

	r.stateM.RLock()
	rows := r.logRows
	r.stateM.RUnlock()

	resp, err := r.client.UpdateLog(r.ctx, connect.NewRequest(&runnerv1.UpdateLogRequest{
		TaskId: r.state.Id,
		Index:  int64(r.logOffset),
		Rows:   rows,
		NoMore: noMore,
	}))
	if err != nil {
		return err
	}

	ack := int(resp.Msg.AckIndex)
	if ack < r.logOffset {
		return fmt.Errorf("submitted logs are lost")
	}

	r.stateM.Lock()
	r.logRows = r.logRows[ack-r.logOffset:]
	r.logOffset = ack
	r.stateM.Unlock()

	if noMore && ack < r.logOffset+len(rows) {
		return fmt.Errorf("not all logs are submitted")
	}

	return nil
}

func (r *Reporter) ReportState() error {
	r.clientM.Lock()
	defer r.clientM.Unlock()

	r.stateM.RLock()
	state := proto.Clone(r.state).(*runnerv1.TaskState)
	r.stateM.RUnlock()

	resp, err := r.client.UpdateTask(r.ctx, connect.NewRequest(&runnerv1.UpdateTaskRequest{
		State: state,
	}))
	if err != nil {
		return err
	}

	if resp.Msg.State != nil && resp.Msg.State.Result == runnerv1.Result_RESULT_CANCELLED {
		r.cancel()
	}

	return nil
}

func (r *Reporter) duringSteps() bool {
	if steps := r.state.Steps; len(steps) == 0 {
		return false
	} else if first := steps[0]; first.Result == runnerv1.Result_RESULT_UNSPECIFIED && first.LogLength == 0 {
		return false
	} else if last := steps[len(steps)-1]; last.Result != runnerv1.Result_RESULT_UNSPECIFIED {
		return false
	}
	return true
}

var stringToResult = map[string]runnerv1.Result{
	"success":   runnerv1.Result_RESULT_SUCCESS,
	"failure":   runnerv1.Result_RESULT_FAILURE,
	"skipped":   runnerv1.Result_RESULT_SKIPPED,
	"cancelled": runnerv1.Result_RESULT_CANCELLED,
}

func (r *Reporter) parseResult(result interface{}) (runnerv1.Result, bool) {
	str := ""
	if v, ok := result.(string); ok { // for jobResult
		str = v
	} else if v, ok := result.(fmt.Stringer); ok { // for stepResult
		str = v.String()
	}

	ret, ok := stringToResult[str]
	return ret, ok
}

func (r *Reporter) parseLogRow(entry *log.Entry) *runnerv1.LogRow {
	content := strings.TrimRightFunc(entry.Message, func(r rune) bool { return r == '\r' || r == '\n' })
	content = r.logReplacer.Replace(content)
	return &runnerv1.LogRow{
		Time:    timestamppb.New(entry.Time),
		Content: content,
	}
}
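A sketch of the reporter lifecycle as used by `run.Runner` above (`ctx`, `cancel`, `cli`, `cfg`, and `task` are assumed to come from the surrounding runner):

```go
ctx, cancel := context.WithTimeout(ctx, cfg.Runner.Timeout)
defer cancel()

reporter := report.NewReporter(ctx, cancel, cli, task)
reporter.RunDaemon()                       // re-arms itself every second to flush logs and state
ctx = common.WithLoggerHook(ctx, reporter) // the Reporter implements logrus' Hook interface

// ... execute the job with ctx ...

_ = reporter.Close("") // final flush with retry; marks unfinished steps cancelled
```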
11
internal/pkg/ver/version.go
Normal file
@@ -0,0 +1,11 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package ver

// go build -ldflags "-X gitea.com/gitea/act_runner/internal/pkg/ver.version=1.2.3"
var version = "dev"

func Version() string {
	return version
}