Refactor to new framework (#98)

- Adjust directory structure
```text
├── internal
│   ├── app
│   │   ├── artifactcache
│   │   ├── cmd
│   │   ├── poll
│   │   └── run
│   └── pkg
│       ├── client
│       ├── config
│       ├── envcheck
│       ├── labels
│       ├── report
│       └── ver
└── main.go
```
- New pkg `labels` to parse labels (see the sketch after this list)
- New pkg `report` to report logs to Gitea
- Remove pkg `engine`; use `envcheck` to check whether Docker is running
- Rewrite pkg `runtime` as `run`
- Rewrite pkg `poller` as `poll`
- Simplify some code and remove unused pieces
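
For reference, labels use the `name[:schema[:arg]]` shape seen in the defaults in `register.go` below (e.g. `ubuntu-latest:docker://node:16-bullseye`, `linux_arm:host`). A minimal sketch of parsing that shape, hypothetical since the real parser lives in `internal/pkg/labels` and may differ:

```go
// Hypothetical sketch of the label format; the actual parser is in
// internal/pkg/labels and may differ in detail.
package labels

import (
	"fmt"
	"strings"
)

type Label struct {
	Name   string
	Schema string // "host" or "docker"
	Arg    string // e.g. "//node:16-bullseye" for the docker schema
}

func Parse(str string) (*Label, error) {
	splits := strings.SplitN(str, ":", 3)
	label := &Label{Name: splits[0], Schema: "host"}
	if len(splits) >= 2 {
		label.Schema = splits[1]
	}
	if len(splits) == 3 {
		label.Arg = splits[2]
	}
	if label.Schema != "host" && label.Schema != "docker" {
		return nil, fmt.Errorf("unsupported schema: %s", label.Schema)
	}
	return label, nil
}
```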

Reviewed-on: https://gitea.com/gitea/act_runner/pulls/98
Reviewed-by: Lunny Xiao <xiaolunwen@gmail.com>
Co-authored-by: Jason Song <i@wolfogre.com>
Co-committed-by: Jason Song <i@wolfogre.com>
Commit 220efa69c0 (parent df3cb60978), authored by Jason Song on 2023-04-04 21:32:04 +08:00, committed by Lunny Xiao.
42 changed files with 630 additions and 974 deletions.

@@ -0,0 +1,12 @@
```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
// Package artifactcache provides a cache handler for the runner.
//
// Inspired by https://github.com/sp-ricard-valverde/github-act-cache-server
//
// TODO: Authorization
// TODO: Restrictions for accessing a cache, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
// TODO: Force deleting cache entries, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries
package artifactcache
```

@@ -0,0 +1,416 @@
```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package artifactcache
import (
"context"
"fmt"
"net"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/go-chi/render"
log "github.com/sirupsen/logrus"
_ "modernc.org/sqlite"
"xorm.io/builder"
"xorm.io/xorm"
)
const (
urlBase = "/_apis/artifactcache"
)
var logger = log.StandardLogger().WithField("module", "cache_request")
type Handler struct {
engine engine
storage *Storage
router *chi.Mux
listener net.Listener
gc atomic.Bool
gcAt time.Time
outboundIP string
}
func StartHandler(dir, outboundIP string, port uint16) (*Handler, error) {
h := &Handler{}
if dir == "" {
if home, err := os.UserHomeDir(); err != nil {
return nil, err
} else {
dir = filepath.Join(home, ".cache", "actcache")
}
}
if err := os.MkdirAll(dir, 0o755); err != nil {
return nil, err
}
e, err := xorm.NewEngine("sqlite", filepath.Join(dir, "sqlite.db"))
if err != nil {
return nil, err
}
if err := e.Sync(&Cache{}); err != nil {
return nil, err
}
h.engine = engine{e: e}
storage, err := NewStorage(filepath.Join(dir, "cache"))
if err != nil {
return nil, err
}
h.storage = storage
if outboundIP != "" {
h.outboundIP = outboundIP
} else if ip, err := getOutboundIP(); err != nil {
return nil, err
} else {
h.outboundIP = ip.String()
}
router := chi.NewRouter()
router.Use(middleware.RequestLogger(&middleware.DefaultLogFormatter{Logger: logger}))
router.Use(func(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
handler.ServeHTTP(w, r)
go h.gcCache()
})
})
router.Use(middleware.Logger)
router.Route(urlBase, func(r chi.Router) {
r.Get("/cache", h.find)
r.Route("/caches", func(r chi.Router) {
r.Post("/", h.reserve)
r.Route("/{id}", func(r chi.Router) {
r.Patch("/", h.upload)
r.Post("/", h.commit)
})
})
r.Get("/artifacts/{id}", h.get)
r.Post("/clean", h.clean)
})
h.router = router
h.gcCache()
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) // listen on all interfaces
if err != nil {
return nil, err
}
go func() {
if err := http.Serve(listener, h.router); err != nil {
logger.Errorf("http serve: %v", err)
}
}()
h.listener = listener
return h, nil
}
func (h *Handler) ExternalURL() string {
// TODO: make the external url configurable if necessary
return fmt.Sprintf("http://%s:%d",
h.outboundIP,
h.listener.Addr().(*net.TCPAddr).Port)
}
// GET /_apis/artifactcache/cache
func (h *Handler) find(w http.ResponseWriter, r *http.Request) {
keys := strings.Split(r.URL.Query().Get("keys"), ",")
version := r.URL.Query().Get("version")
cache, err := h.findCache(r.Context(), keys, version)
if err != nil {
responseJson(w, r, 500, err)
return
}
if cache == nil {
responseJson(w, r, 204)
return
}
if ok, err := h.storage.Exist(cache.ID); err != nil {
responseJson(w, r, 500, err)
return
} else if !ok {
_ = h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.Delete(cache)
return err
})
responseJson(w, r, 204)
return
}
responseJson(w, r, 200, map[string]any{
"result": "hit",
"archiveLocation": fmt.Sprintf("%s%s/artifacts/%d", h.ExternalURL(), urlBase, cache.ID),
"cacheKey": cache.Key,
})
}
// POST /_apis/artifactcache/caches
func (h *Handler) reserve(w http.ResponseWriter, r *http.Request) {
cache := &Cache{}
if err := render.Bind(r, cache); err != nil {
responseJson(w, r, 400, err)
return
}
if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
return sess.Where(builder.Eq{"key": cache.Key, "version": cache.Version}).Get(&Cache{})
}); err != nil {
responseJson(w, r, 500, err)
return
} else if ok {
responseJson(w, r, 400, fmt.Errorf("already exist"))
return
}
if err := h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.Insert(cache)
return err
}); err != nil {
responseJson(w, r, 500, err)
return
}
responseJson(w, r, 200, map[string]any{
"cacheId": cache.ID,
})
}
// PATCH /_apis/artifactcache/caches/:id
func (h *Handler) upload(w http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
if err != nil {
responseJson(w, r, 400, err)
return
}
cache := &Cache{
ID: id,
}
if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
return sess.Get(cache)
}); err != nil {
responseJson(w, r, 500, err)
return
} else if !ok {
responseJson(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
return
}
if cache.Complete {
responseJson(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
return
}
start, _, err := parseContentRange(r.Header.Get("Content-Range"))
if err != nil {
responseJson(w, r, 400, err)
return
}
if err := h.storage.Write(cache.ID, start, r.Body); err != nil {
responseJson(w, r, 500, err)
return
}
h.useCache(r.Context(), id)
responseJson(w, r, 200)
}
// POST /_apis/artifactcache/caches/:id
func (h *Handler) commit(w http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
if err != nil {
responseJson(w, r, 400, err)
return
}
cache := &Cache{
ID: id,
}
if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
return sess.Get(cache)
}); err != nil {
responseJson(w, r, 500, err)
return
} else if !ok {
responseJson(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
return
}
if cache.Complete {
responseJson(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
return
}
if err := h.storage.Commit(cache.ID, cache.Size); err != nil {
responseJson(w, r, 500, err)
return
}
cache.Complete = true
if err := h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.ID(cache.ID).Cols("complete").Update(cache)
return err
}); err != nil {
responseJson(w, r, 500, err)
return
}
responseJson(w, r, 200)
}
// GET /_apis/artifactcache/artifacts/:id
func (h *Handler) get(w http.ResponseWriter, r *http.Request) {
id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
if err != nil {
responseJson(w, r, 400, err)
return
}
h.useCache(r.Context(), id)
h.storage.Serve(w, r, id)
}
// POST /_apis/artifactcache/clean
func (h *Handler) clean(w http.ResponseWriter, r *http.Request) {
// TODO: force deleting cache entries is not supported yet
// see: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries
responseJson(w, r, 200)
}
// if not found, return (nil, nil) instead of an error.
func (h *Handler) findCache(ctx context.Context, keys []string, version string) (*Cache, error) {
if len(keys) == 0 {
return nil, nil
}
key := keys[0] // the first key is for exact match.
cache := &Cache{}
if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
return sess.Where(builder.Eq{"key": key, "version": version, "complete": true}).Get(cache)
}); err != nil {
return nil, err
} else if ok {
return cache, nil
}
for _, prefix := range keys[1:] {
if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
return sess.Where(builder.And(
builder.Like{"key", prefix + "%"},
builder.Eq{"version": version, "complete": true},
)).OrderBy("id DESC").Get(cache)
}); err != nil {
return nil, err
} else if ok {
return cache, nil
}
}
return nil, nil
}
func (h *Handler) useCache(ctx context.Context, id int64) {
// keep quiet
_ = h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.Context(ctx).Cols("used_at").Update(&Cache{
ID: id,
UsedAt: time.Now().Unix(),
})
return err
})
}
func (h *Handler) gcCache() {
if h.gc.Load() {
return
}
if !h.gc.CompareAndSwap(false, true) {
return
}
defer h.gc.Store(false)
if time.Since(h.gcAt) < time.Hour {
logger.Infof("skip gc: %v", h.gcAt.String())
return
}
h.gcAt = time.Now()
logger.Infof("gc: %v", h.gcAt.String())
const (
keepUsed = 30 * 24 * time.Hour
keepUnused = 7 * 24 * time.Hour
keepTemp = 5 * time.Minute
)
var caches []*Cache
if err := h.engine.Exec(func(sess *xorm.Session) error {
return sess.Where(builder.And(builder.Lt{"used_at": time.Now().Add(-keepTemp).Unix()}, builder.Eq{"complete": false})).
Find(&caches)
}); err != nil {
logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
h.storage.Remove(cache.ID)
if err := h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.Delete(cache)
return err
}); err != nil {
logger.Warnf("delete cache: %v", err)
continue
}
logger.Infof("deleted cache: %+v", cache)
}
}
caches = caches[:0]
if err := h.engine.Exec(func(sess *xorm.Session) error {
return sess.Where(builder.Lt{"used_at": time.Now().Add(-keepUnused).Unix()}).
Find(&caches)
}); err != nil {
logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
h.storage.Remove(cache.ID)
if err := h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.Delete(cache)
return err
}); err != nil {
logger.Warnf("delete cache: %v", err)
continue
}
logger.Infof("deleted cache: %+v", cache)
}
}
caches = caches[:0]
if err := h.engine.Exec(func(sess *xorm.Session) error {
return sess.Where(builder.Lt{"created_at": time.Now().Add(-keepUsed).Unix()}).
Find(&caches)
}); err != nil {
logger.Warnf("find caches: %v", err)
} else {
for _, cache := range caches {
h.storage.Remove(cache.ID)
if err := h.engine.Exec(func(sess *xorm.Session) error {
_, err := sess.Delete(cache)
return err
}); err != nil {
logger.Warnf("delete cache: %v", err)
continue
}
logger.Infof("deleted cache: %+v", cache)
}
}
}
```
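
Together, the routes above implement the four-step protocol of the actions cache client: reserve, upload chunks, commit, find. A hedged sketch of a client driving this handler; the address, key, and payload are illustrative, and error handling is elided:

```go
// Illustrative client for the cache handler above; not part of the commit.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	base := "http://127.0.0.1:41919/_apis/artifactcache" // illustrative address
	blob := []byte("cached bytes")

	// 1. Reserve: POST /caches with key, version and total size ("cacheSize").
	body, _ := json.Marshal(map[string]any{
		"key": "go-mod", "version": "v1", "cacheSize": len(blob),
	})
	resp, _ := http.Post(base+"/caches", "application/json", bytes.NewReader(body))
	var reserved struct {
		CacheID int64 `json:"cacheId"`
	}
	_ = json.NewDecoder(resp.Body).Decode(&reserved)
	resp.Body.Close()

	// 2. Upload: PATCH /caches/{id} with a "bytes start-stop/*" Content-Range.
	req, _ := http.NewRequest(http.MethodPatch,
		fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), bytes.NewReader(blob))
	req.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/*", len(blob)-1))
	resp, _ = http.DefaultClient.Do(req)
	resp.Body.Close()

	// 3. Commit: POST /caches/{id} marks the cache complete.
	resp, _ = http.Post(fmt.Sprintf("%s/caches/%d", base, reserved.CacheID), "", nil)
	resp.Body.Close()

	// 4. Find: GET /cache?keys=...&version=...; the first key is an exact
	// match, the rest are prefix matches (see findCache above).
	resp, _ = http.Get(base + "/cache?keys=go-mod&version=v1")
	fmt.Println("find status:", resp.StatusCode) // 200 on hit, 204 on miss
	resp.Body.Close()
}
```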

@@ -0,0 +1,30 @@
```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package artifactcache
import (
"fmt"
"net/http"
)
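// Cache is one cache entry. The xorm `created`/`updated` tags let xorm maintain
// CreatedAt/UsedAt automatically, and key+version form a unique pair.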
type Cache struct {
ID int64 `xorm:"id pk autoincr" json:"-"`
Key string `xorm:"TEXT index unique(key_version)" json:"key"`
Version string `xorm:"TEXT unique(key_version)" json:"version"`
Size int64 `json:"cacheSize"`
Complete bool `xorm:"index(complete_used_at)" json:"-"`
UsedAt int64 `xorm:"index(complete_used_at) updated" json:"-"`
CreatedAt int64 `xorm:"index created" json:"-"`
}
// Bind implements render.Binder
func (c *Cache) Bind(_ *http.Request) error {
if c.Key == "" {
return fmt.Errorf("missing key")
}
if c.Version == "" {
return fmt.Errorf("missing version")
}
return nil
}
```

@@ -0,0 +1,129 @@
```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package artifactcache
import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
)
type Storage struct {
rootDir string
}
func NewStorage(rootDir string) (*Storage, error) {
if err := os.MkdirAll(rootDir, 0o755); err != nil {
return nil, err
}
return &Storage{
rootDir: rootDir,
}, nil
}
func (s *Storage) Exist(id int64) (bool, error) {
name := s.filename(id)
if _, err := os.Stat(name); os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, err
}
return true, nil
}
func (s *Storage) Write(id int64, offset int64, reader io.Reader) error {
name := s.tempName(id, offset)
if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
return err
}
file, err := os.Create(name)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(file, reader)
return err
}
func (s *Storage) Commit(id int64, size int64) error {
defer func() {
_ = os.RemoveAll(s.tempDir(id))
}()
name := s.filename(id)
tempNames, err := s.tempNames(id)
if err != nil {
return err
}
if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
return err
}
file, err := os.Create(name)
if err != nil {
return err
}
defer file.Close()
var written int64
for _, v := range tempNames {
f, err := os.Open(v)
if err != nil {
return err
}
n, err := io.Copy(file, f)
_ = f.Close()
if err != nil {
return err
}
written += n
}
if written != size {
_ = file.Close()
_ = os.Remove(name)
return fmt.Errorf("broken file: %v != %v", written, size)
}
return nil
}
func (s *Storage) Serve(w http.ResponseWriter, r *http.Request, id int64) {
name := s.filename(id)
http.ServeFile(w, r, name)
}
func (s *Storage) Remove(id int64) {
_ = os.Remove(s.filename(id))
_ = os.RemoveAll(s.tempDir(id))
}
func (s *Storage) filename(id int64) string {
return filepath.Join(s.rootDir, fmt.Sprintf("%02x", id%0xff), fmt.Sprint(id))
}
func (s *Storage) tempDir(id int64) string {
return filepath.Join(s.rootDir, "tmp", fmt.Sprint(id))
}
func (s *Storage) tempName(id, offset int64) string {
return filepath.Join(s.tempDir(id), fmt.Sprintf("%016x", offset))
}
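// Note: os.ReadDir returns entries sorted by name, and offsets are zero-padded
// hex, so lexical order equals numeric offset order.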
func (s *Storage) tempNames(id int64) ([]string, error) {
dir := s.tempDir(id)
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var names []string
for _, v := range files {
if !v.IsDir() {
names = append(names, filepath.Join(dir, v.Name()))
}
}
return names, nil
}
```
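
The offset-named temp files are what make out-of-order chunk uploads work: `Commit` concatenates them in offset order and verifies the declared total size. A hypothetical same-package demo, not part of the commit:

```go
package artifactcache

import (
	"fmt"
	"os"
	"strings"
)

// ExampleStorage is a hypothetical demo (not part of the commit) of the
// chunked-write flow: chunks land in offset-named temp files, and Commit
// stitches them in order and verifies the declared size.
func ExampleStorage() {
	dir, _ := os.MkdirTemp("", "actcache-demo-")
	defer os.RemoveAll(dir)

	s, _ := NewStorage(dir)
	const id int64 = 42
	_ = s.Write(id, 0, strings.NewReader("hello ")) // chunk at offset 0
	_ = s.Write(id, 6, strings.NewReader("world"))  // chunk at offset 6
	err := s.Commit(id, int64(len("hello world")))  // size must match
	ok, _ := s.Exist(id)
	fmt.Println(err, ok)
	// Output: <nil> true
}
```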

@@ -0,0 +1,100 @@
```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package artifactcache
import (
"fmt"
"net"
"net/http"
"strconv"
"strings"
"sync"
"github.com/go-chi/render"
"xorm.io/xorm"
)
func responseJson(w http.ResponseWriter, r *http.Request, code int, v ...any) {
render.Status(r, code)
if len(v) == 0 || v[0] == nil {
render.JSON(w, r, struct{}{})
} else if err, ok := v[0].(error); ok {
logger.Errorf("%v %v: %v", r.Method, r.RequestURI, err)
render.JSON(w, r, map[string]any{
"error": err.Error(),
})
} else {
render.JSON(w, r, v[0])
}
}
func parseContentRange(s string) (int64, int64, error) {
// only supports the format "bytes 11-22/*"
s, _, _ = strings.Cut(strings.TrimPrefix(s, "bytes "), "/")
s1, s2, _ := strings.Cut(s, "-")
start, err := strconv.ParseInt(s1, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("parse %q: %w", s, err)
}
stop, err := strconv.ParseInt(s2, 10, 64)
if err != nil {
return 0, 0, fmt.Errorf("parse %q: %w", s, err)
}
return start, stop, nil
}
func getOutboundIP() (net.IP, error) {
// FIXME: It makes more sense to use the gateway IP address of container network
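// Note: a UDP "dial" sends no packets; it only asks the kernel which local
// address it would use to reach the target.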
if conn, err := net.Dial("udp", "8.8.8.8:80"); err == nil {
defer conn.Close()
return conn.LocalAddr().(*net.UDPAddr).IP, nil
}
if ifaces, err := net.Interfaces(); err == nil {
for _, i := range ifaces {
if addrs, err := i.Addrs(); err == nil {
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip.IsGlobalUnicast() {
return ip, nil
}
}
}
}
}
return nil, fmt.Errorf("no outbound IP address found")
}
// engine wraps *xorm.Engine with a mutex.
// Serializing access avoids races in SQLite; performance is not a concern here.
type engine struct {
e *xorm.Engine
m sync.Mutex
}
func (e *engine) Exec(f func(*xorm.Session) error) error {
e.m.Lock()
defer e.m.Unlock()
sess := e.e.NewSession()
defer sess.Close()
return f(sess)
}
func (e *engine) ExecBool(f func(*xorm.Session) (bool, error)) (bool, error) {
e.m.Lock()
defer e.m.Unlock()
sess := e.e.NewSession()
defer sess.Close()
return f(sess)
}
```
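
For clarity, a hypothetical same-package example test of the one Content-Range shape `parseContentRange` accepts:

```go
package artifactcache

import "fmt"

// Hypothetical example test; not part of the commit.
func ExampleParseContentRange() {
	start, stop, err := parseContentRange("bytes 11-22/*")
	fmt.Println(start, stop, err)
	// Output: 11 22 <nil>
}
```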

internal/app/cmd/cmd.go (new file, 72 lines)

@@ -0,0 +1,72 @@
```go
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package cmd
import (
"context"
"fmt"
"os"
"github.com/spf13/cobra"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/ver"
)
func Execute(ctx context.Context) {
// ./act_runner
rootCmd := &cobra.Command{
Use: "act_runner [event name to run]\nIf no event name passed, will default to \"on: push\"",
Short: "Run GitHub actions locally by specifying the event name (e.g. `push`) or an action name directly.",
Args: cobra.MaximumNArgs(1),
Version: ver.Version(),
SilenceUsage: true,
}
configFile := ""
rootCmd.PersistentFlags().StringVarP(&configFile, "config", "c", "", "Config file path")
// ./act_runner register
var regArgs registerArgs
registerCmd := &cobra.Command{
Use: "register",
Short: "Register a runner to the server",
Args: cobra.MaximumNArgs(0),
RunE: runRegister(ctx, &regArgs, &configFile), // must use a pointer to regArgs
}
registerCmd.Flags().BoolVar(&regArgs.NoInteractive, "no-interactive", false, "Disable interactive mode")
registerCmd.Flags().StringVar(&regArgs.InstanceAddr, "instance", "", "Gitea instance address")
registerCmd.Flags().StringVar(&regArgs.Token, "token", "", "Runner token")
registerCmd.Flags().StringVar(&regArgs.RunnerName, "name", "", "Runner name")
registerCmd.Flags().StringVar(&regArgs.Labels, "labels", "", "Runner tags, comma separated")
rootCmd.AddCommand(registerCmd)
// ./act_runner daemon
daemonCmd := &cobra.Command{
Use: "daemon",
Short: "Run as a runner daemon",
Args: cobra.MaximumNArgs(1),
RunE: runDaemon(ctx, &configFile),
}
rootCmd.AddCommand(daemonCmd)
// ./act_runner exec
rootCmd.AddCommand(loadExecCmd(ctx))
// ./act_runner config
rootCmd.AddCommand(&cobra.Command{
Use: "generate-config",
Short: "Generate an example config file",
Args: cobra.MaximumNArgs(0),
Run: func(_ *cobra.Command, _ []string) {
fmt.Printf("%s", config.Example)
},
})
// hide completion command
rootCmd.CompletionOptions.HiddenDefaultCmd = true
if err := rootCmd.Execute(); err != nil {
os.Exit(1)
}
}
```

@@ -0,0 +1,98 @@
```go
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package cmd
import (
"context"
"fmt"
"os"
"github.com/mattn/go-isatty"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gitea.com/gitea/act_runner/internal/app/poll"
"gitea.com/gitea/act_runner/internal/app/run"
"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/envcheck"
"gitea.com/gitea/act_runner/internal/pkg/labels"
"gitea.com/gitea/act_runner/internal/pkg/ver"
)
func runDaemon(ctx context.Context, configFile *string) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
log.Infoln("Starting runner daemon")
cfg, err := config.LoadDefault(*configFile)
if err != nil {
return fmt.Errorf("invalid configuration: %w", err)
}
initLogging(cfg)
reg, err := config.LoadRegistration(cfg.Runner.File)
if os.IsNotExist(err) {
log.Error("registration file not found, please register the runner first")
return err
} else if err != nil {
return fmt.Errorf("failed to load registration file: %w", err)
}
ls := labels.Labels{}
for _, l := range reg.Labels {
label, err := labels.Parse(l)
if err != nil {
log.WithError(err).Warnf("ignored invalid label %q", l)
continue
}
ls = append(ls, label)
}
if len(ls) == 0 {
log.Warn("no labels configured, runner may not be able to pick up jobs")
}
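// only labels with the "docker" schema run jobs in containers, so a reachable
// Docker daemon is only required then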
if ls.RequireDocker() {
if err := envcheck.CheckIfDockerRunning(ctx); err != nil {
return err
}
}
cli := client.New(
reg.Address,
cfg.Runner.Insecure,
reg.UUID,
reg.Token,
ver.Version(),
)
runner := run.NewRunner(cfg, reg, cli)
poller := poll.New(cfg, cli, runner)
poller.Poll(ctx)
return nil
}
}
// initLogging sets up the global logrus logger.
func initLogging(cfg *config.Config) {
isTerm := isatty.IsTerminal(os.Stdout.Fd())
log.SetFormatter(&log.TextFormatter{
DisableColors: !isTerm,
FullTimestamp: true,
})
if l := cfg.Log.Level; l != "" {
level, err := log.ParseLevel(l)
if err != nil {
log.WithError(err).
Errorf("invalid log level: %q", l)
} else if log.GetLevel() != level {
log.Infof("log level changed to %v", level)
log.SetLevel(level)
}
}
}
```

internal/app/cmd/exec.go (new file, 468 lines)

@@ -0,0 +1,468 @@
```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// Copyright 2019 nektos
// SPDX-License-Identifier: MIT
package cmd
import (
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/joho/godotenv"
"github.com/nektos/act/pkg/artifacts"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/model"
"github.com/nektos/act/pkg/runner"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/term"
"gitea.com/gitea/act_runner/internal/app/artifactcache"
)
type executeArgs struct {
runList bool
job string
event string
workdir string
workflowsPath string
noWorkflowRecurse bool
autodetectEvent bool
forcePull bool
forceRebuild bool
jsonLogger bool
envs []string
envfile string
secrets []string
defaultActionsUrl string
insecureSecrets bool
privileged bool
usernsMode string
containerArchitecture string
containerDaemonSocket string
useGitIgnore bool
containerCapAdd []string
containerCapDrop []string
artifactServerPath string
artifactServerAddr string
artifactServerPort string
noSkipCheckout bool
debug bool
dryrun bool
image string
cacheHandler *artifactcache.Handler
}
// WorkflowsPath returns path to workflow file(s)
func (i *executeArgs) WorkflowsPath() string {
return i.resolve(i.workflowsPath)
}
// Envfile returns path to .env
func (i *executeArgs) Envfile() string {
return i.resolve(i.envfile)
}
func (i *executeArgs) LoadSecrets() map[string]string {
s := make(map[string]string)
for _, secretPair := range i.secrets {
secretPairParts := strings.SplitN(secretPair, "=", 2)
secretPairParts[0] = strings.ToUpper(secretPairParts[0])
if strings.ToUpper(s[secretPairParts[0]]) == secretPairParts[0] {
log.Errorf("Secret %s is already defined (secrets are case insensitive)", secretPairParts[0])
}
if len(secretPairParts) == 2 {
s[secretPairParts[0]] = secretPairParts[1]
} else if env, ok := os.LookupEnv(secretPairParts[0]); ok && env != "" {
s[secretPairParts[0]] = env
} else {
fmt.Printf("Provide value for '%s': ", secretPairParts[0])
val, err := term.ReadPassword(int(os.Stdin.Fd()))
fmt.Println()
if err != nil {
log.Errorf("failed to read input: %v", err)
os.Exit(1)
}
s[secretPairParts[0]] = string(val)
}
}
return s
}
func readEnvs(path string, envs map[string]string) bool {
if _, err := os.Stat(path); err == nil {
env, err := godotenv.Read(path)
if err != nil {
log.Fatalf("Error loading from %s: %v", path, err)
}
for k, v := range env {
envs[k] = v
}
return true
}
return false
}
func (i *executeArgs) LoadEnvs() map[string]string {
envs := make(map[string]string)
if i.envs != nil {
for _, envVar := range i.envs {
e := strings.SplitN(envVar, `=`, 2)
if len(e) == 2 {
envs[e[0]] = e[1]
} else {
envs[e[0]] = ""
}
}
}
_ = readEnvs(i.Envfile(), envs)
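// point actions/cache at the local cache server started in runExec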
envs["ACTIONS_CACHE_URL"] = i.cacheHandler.ExternalURL() + "/"
return envs
}
// Workdir returns path to workdir
func (i *executeArgs) Workdir() string {
return i.resolve(".")
}
func (i *executeArgs) resolve(path string) string {
basedir, err := filepath.Abs(i.workdir)
if err != nil {
log.Fatal(err)
}
if path == "" {
return path
}
if !filepath.IsAbs(path) {
path = filepath.Join(basedir, path)
}
return path
}
func printList(plan *model.Plan) error {
type lineInfoDef struct {
jobID string
jobName string
stage string
wfName string
wfFile string
events string
}
lineInfos := []lineInfoDef{}
header := lineInfoDef{
jobID: "Job ID",
jobName: "Job name",
stage: "Stage",
wfName: "Workflow name",
wfFile: "Workflow file",
events: "Events",
}
jobs := map[string]bool{}
duplicateJobIDs := false
jobIDMaxWidth := len(header.jobID)
jobNameMaxWidth := len(header.jobName)
stageMaxWidth := len(header.stage)
wfNameMaxWidth := len(header.wfName)
wfFileMaxWidth := len(header.wfFile)
eventsMaxWidth := len(header.events)
for i, stage := range plan.Stages {
for _, r := range stage.Runs {
jobID := r.JobID
line := lineInfoDef{
jobID: jobID,
jobName: r.String(),
stage: strconv.Itoa(i),
wfName: r.Workflow.Name,
wfFile: r.Workflow.File,
events: strings.Join(r.Workflow.On(), `,`),
}
if _, ok := jobs[jobID]; ok {
duplicateJobIDs = true
} else {
jobs[jobID] = true
}
lineInfos = append(lineInfos, line)
if jobIDMaxWidth < len(line.jobID) {
jobIDMaxWidth = len(line.jobID)
}
if jobNameMaxWidth < len(line.jobName) {
jobNameMaxWidth = len(line.jobName)
}
if stageMaxWidth < len(line.stage) {
stageMaxWidth = len(line.stage)
}
if wfNameMaxWidth < len(line.wfName) {
wfNameMaxWidth = len(line.wfName)
}
if wfFileMaxWidth < len(line.wfFile) {
wfFileMaxWidth = len(line.wfFile)
}
if eventsMaxWidth < len(line.events) {
eventsMaxWidth = len(line.events)
}
}
}
jobIDMaxWidth += 2
jobNameMaxWidth += 2
stageMaxWidth += 2
wfNameMaxWidth += 2
wfFileMaxWidth += 2
fmt.Printf("%*s%*s%*s%*s%*s%*s\n",
-stageMaxWidth, header.stage,
-jobIDMaxWidth, header.jobID,
-jobNameMaxWidth, header.jobName,
-wfNameMaxWidth, header.wfName,
-wfFileMaxWidth, header.wfFile,
-eventsMaxWidth, header.events,
)
for _, line := range lineInfos {
fmt.Printf("%*s%*s%*s%*s%*s%*s\n",
-stageMaxWidth, line.stage,
-jobIDMaxWidth, line.jobID,
-jobNameMaxWidth, line.jobName,
-wfNameMaxWidth, line.wfName,
-wfFileMaxWidth, line.wfFile,
-eventsMaxWidth, line.events,
)
}
if duplicateJobIDs {
fmt.Print("\nDetected multiple jobs with the same job ID, use `-W` to specify the path to the specific workflow.\n")
}
return nil
}
func runExecList(ctx context.Context, planner model.WorkflowPlanner, execArgs *executeArgs) error {
// plan with filtered jobs - to be used for filtering only
var filterPlan *model.Plan
// Determine the event name to be filtered
var filterEventName string = ""
if len(execArgs.event) > 0 {
log.Infof("Using chosen event for filtering: %s", execArgs.event)
filterEventName = execArgs.event
} else if execArgs.autodetectEvent {
// collect all events from loaded workflows
events := planner.GetEvents()
if len(events) > 0 {
// default to the first detected event so the user doesn't have to specify one explicitly
log.Infof("Using first detected workflow event for filtering: %s", events[0])
filterEventName = events[0]
}
}
var err error
if execArgs.job != "" {
log.Infof("Preparing plan with a job: %s", execArgs.job)
filterPlan, err = planner.PlanJob(execArgs.job)
if err != nil {
return err
}
} else if filterEventName != "" {
log.Infof("Preparing plan for an event: %s", filterEventName)
filterPlan, err = planner.PlanEvent(filterEventName)
if err != nil {
return err
}
} else {
log.Infof("Preparing plan with all jobs")
filterPlan, err = planner.PlanAll()
if err != nil {
return err
}
}
printList(filterPlan)
return nil
}
func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command, args []string) error {
return func(cmd *cobra.Command, args []string) error {
planner, err := model.NewWorkflowPlanner(execArgs.WorkflowsPath(), execArgs.noWorkflowRecurse)
if err != nil {
return err
}
if execArgs.runList {
return runExecList(ctx, planner, execArgs)
}
// plan with triggered jobs
var plan *model.Plan
// Determine the event name to be triggered
var eventName string
// collect all events from loaded workflows
events := planner.GetEvents()
if len(execArgs.event) > 0 {
log.Infof("Using chosen event: %s", execArgs.event)
eventName = execArgs.event
} else if len(events) == 1 && len(events[0]) > 0 {
log.Infof("Using the only detected workflow event: %s", events[0])
eventName = events[0]
} else if execArgs.autodetectEvent && len(events) > 0 && len(events[0]) > 0 {
// set default event type to the first of the available events,
// so the user doesn't have to specify one explicitly.
log.Infof("Using first detected workflow event: %s", events[0])
eventName = events[0]
} else {
log.Infof("Using default workflow event: push")
eventName = "push"
}
// build the plan for this run
if execArgs.job != "" {
log.Infof("Planning job: %s", execArgs.job)
plan, err = planner.PlanJob(execArgs.job)
if err != nil {
return err
}
} else {
log.Infof("Planning jobs for event: %s", eventName)
plan, err = planner.PlanEvent(eventName)
if err != nil {
return err
}
}
maxLifetime := 3 * time.Hour
if deadline, ok := ctx.Deadline(); ok {
maxLifetime = time.Until(deadline)
}
// init a cache server
handler, err := artifactcache.StartHandler("", "", 0)
if err != nil {
return err
}
log.Infof("cache handler listens on: %v", handler.ExternalURL())
execArgs.cacheHandler = handler
// run the plan
config := &runner.Config{
Workdir: execArgs.Workdir(),
BindWorkdir: false,
ReuseContainers: false,
ForcePull: execArgs.forcePull,
ForceRebuild: execArgs.forceRebuild,
LogOutput: true,
JSONLogger: execArgs.jsonLogger,
Env: execArgs.LoadEnvs(),
Secrets: execArgs.LoadSecrets(),
InsecureSecrets: execArgs.insecureSecrets,
Privileged: execArgs.privileged,
UsernsMode: execArgs.usernsMode,
ContainerArchitecture: execArgs.containerArchitecture,
ContainerDaemonSocket: execArgs.containerDaemonSocket,
UseGitIgnore: execArgs.useGitIgnore,
// GitHubInstance: t.client.Address(),
ContainerCapAdd: execArgs.containerCapAdd,
ContainerCapDrop: execArgs.containerCapDrop,
AutoRemove: true,
ArtifactServerPath: execArgs.artifactServerPath,
ArtifactServerPort: execArgs.artifactServerPort,
NoSkipCheckout: execArgs.noSkipCheckout,
// PresetGitHubContext: preset,
// EventJSON: string(eventJSON),
ContainerNamePrefix: fmt.Sprintf("GITEA-ACTIONS-TASK-%s", eventName),
ContainerMaxLifetime: maxLifetime,
ContainerNetworkMode: "bridge",
DefaultActionInstance: execArgs.defaultActionsUrl,
PlatformPicker: func(_ []string) string {
return execArgs.image
},
}
// TODO: handle log level config
// waiting https://gitea.com/gitea/act/pulls/19
// if !execArgs.debug {
// logLevel := log.Level(log.InfoLevel)
// config.JobLoggerLevel = &logLevel
// }
r, err := runner.New(config)
if err != nil {
return err
}
if len(execArgs.artifactServerPath) == 0 {
tempDir, err := os.MkdirTemp("", "gitea-act-")
if err != nil {
fmt.Println(err)
}
defer os.RemoveAll(tempDir)
execArgs.artifactServerPath = tempDir
}
artifactCancel := artifacts.Serve(ctx, execArgs.artifactServerPath, execArgs.artifactServerAddr, execArgs.artifactServerPort)
log.Debugf("artifacts server started at %s:%s", execArgs.artifactServerPath, execArgs.artifactServerPort)
ctx = common.WithDryrun(ctx, execArgs.dryrun)
executor := r.NewPlanExecutor(plan).Finally(func(ctx context.Context) error {
artifactCancel()
return nil
})
return executor(ctx)
}
}
func loadExecCmd(ctx context.Context) *cobra.Command {
execArg := executeArgs{}
execCmd := &cobra.Command{
Use: "exec",
Short: "Run workflow locally.",
Args: cobra.MaximumNArgs(20),
RunE: runExec(ctx, &execArg),
}
execCmd.Flags().BoolVarP(&execArg.runList, "list", "l", false, "list workflows")
execCmd.Flags().StringVarP(&execArg.job, "job", "j", "", "run a specific job ID")
execCmd.Flags().StringVarP(&execArg.event, "event", "E", "", "run an event name")
execCmd.PersistentFlags().StringVarP(&execArg.workflowsPath, "workflows", "W", "./.gitea/workflows/", "path to workflow file(s)")
execCmd.PersistentFlags().StringVarP(&execArg.workdir, "directory", "C", ".", "working directory")
execCmd.PersistentFlags().BoolVarP(&execArg.noWorkflowRecurse, "no-recurse", "", false, "Flag to disable running workflows from subdirectories of specified path in '--workflows'/'-W' flag")
execCmd.Flags().BoolVarP(&execArg.autodetectEvent, "detect-event", "", false, "Use first event type from workflow as event that triggered the workflow")
execCmd.Flags().BoolVarP(&execArg.forcePull, "pull", "p", false, "pull docker image(s) even if already present")
execCmd.Flags().BoolVarP(&execArg.forceRebuild, "rebuild", "", false, "rebuild local action docker image(s) even if already present")
execCmd.PersistentFlags().BoolVar(&execArg.jsonLogger, "json", false, "Output logs in json format")
execCmd.Flags().StringArrayVarP(&execArg.envs, "env", "", []string{}, "env to make available to actions with optional value (e.g. --env myenv=foo or --env myenv)")
execCmd.PersistentFlags().StringVarP(&execArg.envfile, "env-file", "", ".env", "environment file to read and use as env in the containers")
execCmd.Flags().StringArrayVarP(&execArg.secrets, "secret", "s", []string{}, "secret to make available to actions with optional value (e.g. -s mysecret=foo or -s mysecret)")
execCmd.PersistentFlags().BoolVarP(&execArg.insecureSecrets, "insecure-secrets", "", false, "NOT RECOMMENDED! Doesn't hide secrets while printing logs.")
execCmd.Flags().BoolVar(&execArg.privileged, "privileged", false, "use privileged mode")
execCmd.Flags().StringVar(&execArg.usernsMode, "userns", "", "user namespace to use")
execCmd.PersistentFlags().StringVarP(&execArg.containerArchitecture, "container-architecture", "", "", "Architecture which should be used to run containers, e.g.: linux/amd64. If not specified, will use host default architecture. Requires Docker server API Version 1.41+. Ignored on earlier Docker server platforms.")
execCmd.PersistentFlags().StringVarP(&execArg.containerDaemonSocket, "container-daemon-socket", "", "/var/run/docker.sock", "Path to Docker daemon socket which will be mounted to containers")
execCmd.Flags().BoolVar(&execArg.useGitIgnore, "use-gitignore", true, "Controls whether paths specified in .gitignore should be copied into container")
execCmd.Flags().StringArrayVarP(&execArg.containerCapAdd, "container-cap-add", "", []string{}, "kernel capabilities to add to the workflow containers (e.g. --container-cap-add SYS_PTRACE)")
execCmd.Flags().StringArrayVarP(&execArg.containerCapDrop, "container-cap-drop", "", []string{}, "kernel capabilities to remove from the workflow containers (e.g. --container-cap-drop SYS_PTRACE)")
execCmd.PersistentFlags().StringVarP(&execArg.artifactServerPath, "artifact-server-path", "", ".", "Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start.")
execCmd.PersistentFlags().StringVarP(&execArg.artifactServerPort, "artifact-server-port", "", "34567", "Defines the port where the artifact server listens (will only bind to localhost).")
execCmd.PersistentFlags().StringVarP(&execArg.defaultActionsUrl, "default-actions-url", "", "https://gitea.com", "Defines the default url of action instance.")
execCmd.PersistentFlags().BoolVarP(&execArg.noSkipCheckout, "no-skip-checkout", "", false, "Do not skip actions/checkout")
execCmd.PersistentFlags().BoolVarP(&execArg.debug, "debug", "d", false, "enable debug log")
execCmd.PersistentFlags().BoolVarP(&execArg.dryrun, "dryrun", "n", false, "dryrun mode")
execCmd.PersistentFlags().StringVarP(&execArg.image, "image", "i", "node:16-bullseye", "docker image to use")
return execCmd
}
```

@@ -0,0 +1,334 @@
```go
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package cmd
import (
"bufio"
"context"
"fmt"
"os"
"os/signal"
goruntime "runtime"
"strings"
"time"
pingv1 "code.gitea.io/actions-proto-go/ping/v1"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"github.com/bufbuild/connect-go"
"github.com/mattn/go-isatty"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/labels"
"gitea.com/gitea/act_runner/internal/pkg/ver"
)
// runRegister registers a runner to the server
func runRegister(ctx context.Context, regArgs *registerArgs, configFile *string) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
log.SetReportCaller(false)
isTerm := isatty.IsTerminal(os.Stdout.Fd())
log.SetFormatter(&log.TextFormatter{
DisableColors: !isTerm,
DisableTimestamp: true,
})
log.SetLevel(log.DebugLevel)
log.Infof("Registering runner, arch=%s, os=%s, version=%s.",
goruntime.GOARCH, goruntime.GOOS, ver.Version())
// runner always needs root permission
if os.Getuid() != 0 {
// TODO: use a better way to check root permission
log.Warnf("Runner in user-mode.")
}
if regArgs.NoInteractive {
if err := registerNoInteractive(*configFile, regArgs); err != nil {
return err
}
} else {
go func() {
if err := registerInteractive(*configFile); err != nil {
log.Fatal(err)
return
}
os.Exit(0)
}()
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
<-c
}
return nil
}
}
// registerArgs represents the arguments for register command
type registerArgs struct {
NoInteractive bool
InstanceAddr string
Token string
RunnerName string
Labels string
}
type registerStage int8
const (
StageUnknown registerStage = -1
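// iota is already 1 on the next line, so the stages below start from 2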
StageOverwriteLocalConfig registerStage = iota + 1
StageInputInstance
StageInputToken
StageInputRunnerName
StageInputCustomLabels
StageWaitingForRegistration
StageExit
)
var defaultLabels = []string{
"ubuntu-latest:docker://node:16-bullseye",
"ubuntu-22.04:docker://node:16-bullseye", // There's no node:16-bookworm yet
"ubuntu-20.04:docker://node:16-bullseye",
"ubuntu-18.04:docker://node:16-buster",
}
type registerInputs struct {
InstanceAddr string
Token string
RunnerName string
CustomLabels []string
}
func (r *registerInputs) validate() error {
if r.InstanceAddr == "" {
return fmt.Errorf("instance address is empty")
}
if r.Token == "" {
return fmt.Errorf("token is empty")
}
if len(r.CustomLabels) > 0 {
return validateLabels(r.CustomLabels)
}
return nil
}
func validateLabels(ls []string) error {
for _, label := range ls {
if _, err := labels.Parse(label); err != nil {
return err
}
}
return nil
}
func (r *registerInputs) assignToNext(stage registerStage, value string) registerStage {
// must set instance address and token.
// if empty, keep current stage.
if stage == StageInputInstance || stage == StageInputToken {
if value == "" {
return stage
}
}
// set hostname for runner name if empty
if stage == StageInputRunnerName && value == "" {
value, _ = os.Hostname()
}
switch stage {
case StageOverwriteLocalConfig:
if value == "Y" || value == "y" {
return StageInputInstance
}
return StageExit
case StageInputInstance:
r.InstanceAddr = value
return StageInputToken
case StageInputToken:
r.Token = value
return StageInputRunnerName
case StageInputRunnerName:
r.RunnerName = value
return StageInputCustomLabels
case StageInputCustomLabels:
r.CustomLabels = defaultLabels
if value != "" {
r.CustomLabels = strings.Split(value, ",")
}
if validateLabels(r.CustomLabels) != nil {
log.Infoln("Invalid labels, please input again, leave blank to use the default labels (for example, ubuntu-20.04:docker://node:16-bullseye,ubuntu-18.04:docker://node:16-buster,linux_arm:host)")
return StageInputCustomLabels
}
return StageWaitingForRegistration
}
return StageUnknown
}
func registerInteractive(configFile string) error {
var (
reader = bufio.NewReader(os.Stdin)
stage = StageInputInstance
inputs = new(registerInputs)
)
cfg, err := config.LoadDefault(configFile)
if err != nil {
return fmt.Errorf("failed to load config: %v", err)
}
if f, err := os.Stat(cfg.Runner.File); err == nil && !f.IsDir() {
stage = StageOverwriteLocalConfig
}
for {
printStageHelp(stage)
cmdString, err := reader.ReadString('\n')
if err != nil {
return err
}
stage = inputs.assignToNext(stage, strings.TrimSpace(cmdString))
if stage == StageWaitingForRegistration {
log.Infof("Registering runner, name=%s, instance=%s, labels=%v.", inputs.RunnerName, inputs.InstanceAddr, inputs.CustomLabels)
if err := doRegister(cfg, inputs); err != nil {
log.Errorf("Failed to register runner: %v", err)
} else {
log.Infof("Runner registered successfully.")
}
return nil
}
if stage == StageExit {
return nil
}
if stage <= StageUnknown {
log.Errorf("Invalid input, please re-run the register command.")
return nil
}
}
}
func printStageHelp(stage registerStage) {
switch stage {
case StageOverwriteLocalConfig:
log.Infoln("Runner is already registered, overwrite local config? [y/N]")
case StageInputInstance:
log.Infoln("Enter the Gitea instance URL (for example, https://gitea.com/):")
case StageInputToken:
log.Infoln("Enter the runner token:")
case StageInputRunnerName:
hostname, _ := os.Hostname()
log.Infof("Enter the runner name (if set empty, use hostname: %s):\n", hostname)
case StageInputCustomLabels:
log.Infoln("Enter the runner labels, leave blank to use the default labels (comma-separated, for example, ubuntu-20.04:docker://node:16-bullseye,ubuntu-18.04:docker://node:16-buster,linux_arm:host):")
case StageWaitingForRegistration:
log.Infoln("Waiting for registration...")
}
}
func registerNoInteractive(configFile string, regArgs *registerArgs) error {
cfg, err := config.LoadDefault(configFile)
if err != nil {
return err
}
inputs := &registerInputs{
InstanceAddr: regArgs.InstanceAddr,
Token: regArgs.Token,
RunnerName: regArgs.RunnerName,
CustomLabels: defaultLabels,
}
regArgs.Labels = strings.TrimSpace(regArgs.Labels)
if regArgs.Labels != "" {
inputs.CustomLabels = strings.Split(regArgs.Labels, ",")
}
if inputs.RunnerName == "" {
inputs.RunnerName, _ = os.Hostname()
log.Infof("Runner name is empty, use hostname '%s'.", inputs.RunnerName)
}
if err := inputs.validate(); err != nil {
log.WithError(err).Errorf("Invalid input, please re-run the register command.")
return nil
}
if err := doRegister(cfg, inputs); err != nil {
log.Errorf("Failed to register runner: %v", err)
return nil
}
log.Infof("Runner registered successfully.")
return nil
}
func doRegister(cfg *config.Config, inputs *registerInputs) error {
ctx := context.Background()
// initialize the HTTP client
cli := client.New(
inputs.InstanceAddr,
cfg.Runner.Insecure,
"",
"",
ver.Version(),
)
for {
_, err := cli.Ping(ctx, connect.NewRequest(&pingv1.PingRequest{
Data: inputs.RunnerName,
}))
select {
case <-ctx.Done():
return nil
default:
}
if ctx.Err() != nil {
break
}
if err != nil {
log.WithError(err).
Errorln("Cannot ping the Gitea instance server")
// TODO: if ping failed, retry or exit
time.Sleep(time.Second)
} else {
log.Debugln("Successfully pinged the Gitea instance server")
break
}
}
reg := &config.Registration{
Name: inputs.RunnerName,
Token: inputs.Token,
Address: inputs.InstanceAddr,
Labels: inputs.CustomLabels,
}
ls := make([]string, len(reg.Labels))
for i, v := range reg.Labels {
l, _ := labels.Parse(v)
ls[i] = l.Name
}
// register new runner.
resp, err := cli.Register(ctx, connect.NewRequest(&runnerv1.RegisterRequest{
Name: reg.Name,
Token: reg.Token,
AgentLabels: ls,
}))
if err != nil {
log.WithError(err).Error("poller: cannot register new runner")
return err
}
reg.ID = resp.Msg.Runner.Id
reg.UUID = resp.Msg.Runner.Uuid
reg.Name = resp.Msg.Runner.Name
reg.Token = resp.Msg.Runner.Token
if err := config.SaveRegistration(cfg.Runner.File, reg); err != nil {
return fmt.Errorf("failed to save runner config: %w", err)
}
return nil
}
```

@@ -0,0 +1,82 @@
```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package poll
import (
"context"
"errors"
"sync"
"time"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"github.com/bufbuild/connect-go"
log "github.com/sirupsen/logrus"
"golang.org/x/time/rate"
"gitea.com/gitea/act_runner/internal/app/run"
"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
)
type Poller struct {
client client.Client
runner *run.Runner
capacity int
}
func New(cfg *config.Config, client client.Client, runner *run.Runner) *Poller {
return &Poller{
client: client,
runner: runner,
capacity: cfg.Runner.Capacity,
}
}
func (p *Poller) Poll(ctx context.Context) {
limiter := rate.NewLimiter(rate.Every(2*time.Second), 1)
wg := &sync.WaitGroup{}
for i := 0; i < p.capacity; i++ {
wg.Add(1)
go p.poll(ctx, wg, limiter)
}
wg.Wait()
}
func (p *Poller) poll(ctx context.Context, wg *sync.WaitGroup, limiter *rate.Limiter) {
defer wg.Done()
for {
if err := limiter.Wait(ctx); err != nil {
if ctx.Err() != nil {
log.WithError(err).Debug("limiter wait failed")
}
return
}
task, ok := p.fetchTask(ctx)
if !ok {
continue
}
if err := p.runner.Run(ctx, task); err != nil {
log.WithError(err).Error("failed to run task")
}
}
}
func (p *Poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) {
reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
resp, err := p.client.FetchTask(reqCtx, connect.NewRequest(&runnerv1.FetchTaskRequest{}))
if errors.Is(err, context.DeadlineExceeded) {
err = nil
}
if err != nil {
log.WithError(err).Error("failed to fetch task")
return nil, false
}
if resp.Msg.Task == nil {
return nil, false
}
return resp.Msg.Task, true
}
```
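
`Poll` is a small worker pool: `capacity` goroutines block on one shared rate limiter, so task fetching is throttled runner-wide (at most one fetch every two seconds) rather than per goroutine. A standalone sketch of the same pattern, with illustrative names and not part of the commit:

```go
// Standalone illustration of the Poller pattern: N workers, one rate.Limiter.
package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	limiter := rate.NewLimiter(rate.Every(2*time.Second), 1) // one token per 2s, burst 1
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ { // capacity = 3
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for {
				if err := limiter.Wait(ctx); err != nil {
					return // context cancelled or deadline exceeded
				}
				fmt.Printf("worker %d: fetch task at %s\n", id, time.Now().Format("15:04:05"))
			}
		}(i)
	}
	wg.Wait()
}
```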

internal/app/run/runner.go (new file, 199 lines)

@@ -0,0 +1,199 @@
```go
// Copyright 2022 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package run
import (
"bytes"
"context"
"encoding/json"
"fmt"
"path/filepath"
"sync"
"time"
runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/model"
"github.com/nektos/act/pkg/runner"
log "github.com/sirupsen/logrus"
"gitea.com/gitea/act_runner/internal/app/artifactcache"
"gitea.com/gitea/act_runner/internal/pkg/client"
"gitea.com/gitea/act_runner/internal/pkg/config"
"gitea.com/gitea/act_runner/internal/pkg/labels"
"gitea.com/gitea/act_runner/internal/pkg/report"
"gitea.com/gitea/act_runner/internal/pkg/ver"
)
// Runner runs the pipeline.
type Runner struct {
name string
cfg *config.Config
client client.Client
labels labels.Labels
envs map[string]string
runningTasks sync.Map
}
func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client) *Runner {
ls := labels.Labels{}
for _, v := range reg.Labels {
if l, err := labels.Parse(v); err == nil {
ls = append(ls, l)
}
}
envs := make(map[string]string, len(cfg.Runner.Envs))
for k, v := range cfg.Runner.Envs {
envs[k] = v
}
if cfg.Cache.Enabled == nil || *cfg.Cache.Enabled {
cacheHandler, err := artifactcache.StartHandler(cfg.Cache.Dir, cfg.Cache.Host, cfg.Cache.Port)
if err != nil {
log.Errorf("cannot init cache server, it will be disabled: %v", err)
// go on
} else {
envs["ACTIONS_CACHE_URL"] = cacheHandler.ExternalURL() + "/"
}
}
return &Runner{
name: reg.Name,
cfg: cfg,
client: cli,
labels: ls,
envs: envs,
}
}
func (r *Runner) Run(ctx context.Context, task *runnerv1.Task) error {
if _, loaded := r.runningTasks.LoadOrStore(task.Id, struct{}{}); loaded {
return fmt.Errorf("task %d is already running", task.Id)
}
defer r.runningTasks.Delete(task.Id)
ctx, cancel := context.WithTimeout(ctx, r.cfg.Runner.Timeout)
defer cancel()
reporter := report.NewReporter(ctx, cancel, r.client, task)
var runErr error
defer func() {
lastWords := ""
if runErr != nil {
lastWords = runErr.Error()
}
_ = reporter.Close(lastWords)
}()
reporter.RunDaemon()
runErr = r.run(ctx, task, reporter)
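// the task outcome reaches Gitea via the deferred reporter.Close above, so Run
// deliberately returns nil to the poller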
return nil
}
func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.Reporter) (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("panic: %v", r)
}
}()
reporter.Logf("%s(version:%s) received task %v of job %v, triggered by event: %s", r.name, ver.Version(), task.Id, task.Context.Fields["job"].GetStringValue(), task.Context.Fields["event_name"].GetStringValue())
workflow, err := model.ReadWorkflow(bytes.NewReader(task.WorkflowPayload))
if err != nil {
return err
}
jobIDs := workflow.GetJobIDs()
if len(jobIDs) != 1 {
return fmt.Errorf("multiple jobs found: %v", jobIDs)
}
jobID := jobIDs[0]
plan, err := model.CombineWorkflowPlanner(workflow).PlanJob(jobID)
if err != nil {
return err
}
job := workflow.GetJob(jobID)
reporter.ResetSteps(len(job.Steps))
taskContext := task.Context.Fields
log.Infof("task %v repo is %v %v %v", task.Id, taskContext["repository"].GetStringValue(),
taskContext["gitea_default_actions_url"].GetStringValue(),
r.client.Address())
preset := &model.GithubContext{
Event: taskContext["event"].GetStructValue().AsMap(),
RunID: taskContext["run_id"].GetStringValue(),
RunNumber: taskContext["run_number"].GetStringValue(),
Actor: taskContext["actor"].GetStringValue(),
Repository: taskContext["repository"].GetStringValue(),
EventName: taskContext["event_name"].GetStringValue(),
Sha: taskContext["sha"].GetStringValue(),
Ref: taskContext["ref"].GetStringValue(),
RefName: taskContext["ref_name"].GetStringValue(),
RefType: taskContext["ref_type"].GetStringValue(),
HeadRef: taskContext["head_ref"].GetStringValue(),
BaseRef: taskContext["base_ref"].GetStringValue(),
Token: taskContext["token"].GetStringValue(),
RepositoryOwner: taskContext["repository_owner"].GetStringValue(),
RetentionDays: taskContext["retention_days"].GetStringValue(),
}
if t := task.Secrets["GITEA_TOKEN"]; t != "" {
preset.Token = t
} else if t := task.Secrets["GITHUB_TOKEN"]; t != "" {
preset.Token = t
}
eventJSON, err := json.Marshal(preset.Event)
if err != nil {
return err
}
maxLifetime := 3 * time.Hour
if deadline, ok := ctx.Deadline(); ok {
maxLifetime = time.Until(deadline)
}
runnerConfig := &runner.Config{
// On Linux, Workdir will be like "/<owner>/<repo>"
// On Windows, Workdir will be like "\<owner>\<repo>"
Workdir: filepath.FromSlash(string(filepath.Separator) + preset.Repository),
BindWorkdir: false,
ReuseContainers: false,
ForcePull: false,
ForceRebuild: false,
LogOutput: true,
JSONLogger: false,
Env: r.envs,
Secrets: task.Secrets,
GitHubInstance: r.client.Address(),
AutoRemove: true,
NoSkipCheckout: true,
PresetGitHubContext: preset,
EventJSON: string(eventJSON),
ContainerNamePrefix: fmt.Sprintf("GITEA-ACTIONS-TASK-%d", task.Id),
ContainerMaxLifetime: maxLifetime,
ContainerNetworkMode: r.cfg.Container.NetworkMode,
DefaultActionInstance: taskContext["gitea_default_actions_url"].GetStringValue(),
PlatformPicker: r.labels.PickPlatform,
}
rr, err := runner.New(runnerConfig)
if err != nil {
return err
}
executor := rr.NewPlanExecutor(plan)
reporter.Logf("workflow prepared")
// add logger recorders
ctx = common.WithLoggerHook(ctx, reporter)
return executor(ctx)
}
```