Mirror of https://gitea.com/gitea/act_runner.git
Compare commits (36 commits):
de4160b023, 609c0a0773, 0c029f7e79, eef3c32eb2, c40b651873, b498341857,
0d727eb262, 7c71c94366, 49d2cb0cb5, 85626b6bbd, 35400f76fa, 0cf31b2d22,
c8cc7b2448, 3be962cdb3, a5edbc9ac4, 66bab3d805, 293926f5d5, 43c5ba923f,
acc5afc428, 27a1a90d25, 83ec0ba909, ed86e2f15a, d4bebccc12, c75b67e892,
bc6031eff7, c69c353d93, fcc016e9b3, d5caee38f2, 9e26208e13, a05c5ba3ad,
c248520a66, 10d639cc6b, 5a8134410d, b79c3aa1a3, 9c6499ec08, d139faa40c
@@ -1,20 +1,37 @@
-name: goreleaser
+name: release-nightly

 on:
   push:
     branches: [ main ]

+env:
+  GOPATH: /go_path
+  GOCACHE: /go_cache
+
 jobs:
   goreleaser:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
         with:
-          fetch-depth: 0
-      - run: git fetch --force --tags
+          fetch-depth: 0 # all history for all branches and tags
       - uses: actions/setup-go@v3
         with:
           go-version: '>=1.20.1'
+      - uses: https://gitea.com/actions/go-hashfiles@v0.0.1
+        id: hash-go
+        with:
+          patterns: |
+            go.mod
+            go.sum
+      - name: cache go
+        id: cache-go
+        uses: https://github.com/actions/cache@v3
+        with:
+          path: |
+            /go_path
+            /go_cache
+          key: go_path-${{ steps.hash-go.outputs.hash }}
       - name: goreleaser
         uses: https://github.com/goreleaser/goreleaser-action@v4
         with:
@@ -28,3 +45,53 @@ jobs:
           AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
           S3_REGION: ${{ secrets.AWS_REGION }}
           S3_BUCKET: ${{ secrets.AWS_BUCKET }}
+  release-image:
+    runs-on: ubuntu-latest
+    container:
+      image: catthehacker/ubuntu:act-latest
+    env:
+      DOCKER_ORG: gitea
+      DOCKER_LATEST: nightly
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0 # all history for all branches and tags
+
+      - name: dockerfile lint check
+        uses: https://github.com/hadolint/hadolint-action@v3.1.0
+        with:
+          dockerfile: Dockerfile
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker BuildX
+        uses: docker/setup-buildx-action@v2
+
+      - name: Login to DockerHub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+
+      - name: Get Meta
+        id: meta
+        run: |
+          echo REPO_NAME=$(echo ${GITHUB_REPOSITORY} | awk -F"/" '{print $2}') >> $GITHUB_OUTPUT
+          echo REPO_VERSION=$(git describe --tags --always | sed 's/^v//') >> $GITHUB_OUTPUT
+
+      - name: Build and push
+        uses: docker/build-push-action@v4
+        env:
+          ACTIONS_RUNTIME_TOKEN: '' # See https://gitea.com/gitea/act_runner/issues/119
+        with:
+          context: .
+          file: ./Dockerfile
+          platforms: |
+            linux/amd64
+            linux/arm64
+          push: true
+          tags: |
+            ${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ env.DOCKER_LATEST }}
+
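As a usage note: once this workflow has pushed the nightly image, it can be pulled under the tag the `release-image` job assembles from `DOCKER_ORG`, the repository name, and `DOCKER_LATEST`. A minimal check might look like the sketch below; publishing to Docker Hub is an assumption based on the `docker/login-action` step.

```bash
# Pull the image published by the release-image job above
docker pull gitea/act_runner:nightly
```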
.gitea/workflows/release-tag.yml (new file, 108 lines)
@@ -0,0 +1,108 @@
name: release-tag

on:
  push:
    tags:
      - '*'

env:
  GOPATH: /go_path
  GOCACHE: /go_cache

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0 # all history for all branches and tags
      - uses: actions/setup-go@v3
        with:
          go-version: '>=1.20.1'
      - uses: https://gitea.com/actions/go-hashfiles@v0.0.1
        id: hash-go
        with:
          patterns: |
            go.mod
            go.sum
      - name: cache go
        id: cache-go
        uses: https://github.com/actions/cache@v3
        with:
          path: |
            /go_path
            /go_cache
          key: go_path-${{ steps.hash-go.outputs.hash }}
      - name: Import GPG key
        id: import_gpg
        uses: https://github.com/crazy-max/ghaction-import-gpg@v5
        with:
          gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }}
          passphrase: ${{ secrets.PASSPHRASE }}
          fingerprint: CC64B1DB67ABBEECAB24B6455FC346329753F4B0
      - name: goreleaser
        uses: https://github.com/goreleaser/goreleaser-action@v4
        with:
          distribution: goreleaser-pro
          version: latest
          args: release
        env:
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
          AWS_REGION: ${{ secrets.AWS_REGION }}
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          S3_REGION: ${{ secrets.AWS_REGION }}
          S3_BUCKET: ${{ secrets.AWS_BUCKET }}
          GORELEASER_FORCE_TOKEN: 'gitea'
          GITEA_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GPG_FINGERPRINT: ${{ steps.import_gpg.outputs.fingerprint }}
  release-image:
    runs-on: ubuntu-latest
    container:
      image: catthehacker/ubuntu:act-latest
    env:
      DOCKER_ORG: gitea
      DOCKER_LATEST: latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # all history for all branches and tags

      - name: dockerfile lint check
        uses: https://github.com/hadolint/hadolint-action@v3.1.0
        with:
          dockerfile: Dockerfile

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker BuildX
        uses: docker/setup-buildx-action@v2

      - name: Login to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Get Meta
        id: meta
        run: |
          echo REPO_NAME=$(echo ${GITHUB_REPOSITORY} | awk -F"/" '{print $2}') >> $GITHUB_OUTPUT
          echo REPO_VERSION=$(git describe --tags --always | sed 's/^v//') >> $GITHUB_OUTPUT

      - name: Build and push
        uses: docker/build-push-action@v4
        env:
          ACTIONS_RUNTIME_TOKEN: '' # See https://gitea.com/gitea/act_runner/issues/119
        with:
          context: .
          file: ./Dockerfile
          platforms: |
            linux/amd64
            linux/arm64
          push: true
          tags: |
            ${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ steps.meta.outputs.REPO_VERSION }}
            ${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ env.DOCKER_LATEST }}
@@ -4,7 +4,6 @@ on:
   - pull_request

 env:
-  GOPROXY: https://goproxy.io,direct
   GOPATH: /go_path
   GOCACHE: /go_cache

@@ -13,31 +12,31 @@ jobs:
     name: check and test
     runs-on: ubuntu-latest
     steps:
-      - name: cache go path
-        id: cache-go-path
-        uses: https://github.com/actions/cache@v3
-        with:
-          path: /go_path
-          key: go_path-${{ github.repository }}-${{ github.ref_name }}
-          restore-keys: |
-            go_path-${{ github.repository }}-
-            go_path-
-      - name: cache go cache
-        id: cache-go-cache
-        uses: https://github.com/actions/cache@v3
-        with:
-          path: /go_cache
-          key: go_cache-${{ github.repository }}-${{ github.ref_name }}
-          restore-keys: |
-            go_cache-${{ github.repository }}-
-            go_cache-
+      - uses: actions/checkout@v3
       - uses: actions/setup-go@v3
         with:
-          go-version: 1.20
-      - uses: actions/checkout@v3
+          go-version: '>=1.20.1'
+      - uses: https://gitea.com/actions/go-hashfiles@v0.0.1
+        id: hash-go
+        with:
+          patterns: |
+            go.mod
+            go.sum
+      - name: cache go
+        id: cache-go
+        uses: https://github.com/actions/cache@v3
+        with:
+          path: |
+            /go_path
+            /go_cache
+          key: go_path-${{ steps.hash-go.outputs.hash }}
       - name: vet checks
         run: make vet
       - name: build
         run: make build
       - name: test
         run: make test
+      - name: dockerfile lint check
+        uses: https://github.com/hadolint/hadolint-action@v3.1.0
+        with:
+          dockerfile: Dockerfile
.gitignore (2 lines added)
@@ -8,3 +8,5 @@ coverage.txt
 # MS VSCode
 .vscode
 __debug_bin
+# gorelease binary folder
+dist
@@ -71,9 +71,8 @@ builds:
   no_unique_dist_dir: true
   hooks:
     post:
-      - cmd: tar -cJf {{ .Path }}.xz {{ .Path }}
-        env:
-          - XZ_OPT=-9
+      - cmd: xz -k -9 {{ .Path }}
+        dir: ./dist/
      - cmd: sh .goreleaser.checksum.sh {{ .Path }}
      - cmd: sh .goreleaser.checksum.sh {{ .Path }}.xz

@@ -101,7 +100,16 @@ snapshot:
   name_template: "{{ .Branch }}-devel"

 nightly:
-  name_template: "{{ .Branch }}"
+  name_template: "nightly"
+
+gitea_urls:
+  api: https://gitea.com/api/v1
+  download: https://gitea.com
+
+release:
+  extra_files:
+    - glob: ./**.xz
+    - glob: ./**.xz.sha256

 # yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json
 # vim: set ts=2 sw=2 tw=0 fo=cnqoj
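For orientation, the effect of the new post hooks on one built binary is roughly the sketch below; the binary name is a placeholder, and the `.sha256` suffix is inferred from the `./**.xz.sha256` glob added to `release.extra_files` above.

```bash
cd dist/
xz -k -9 act_runner-linux-amd64                        # keeps the original and writes act_runner-linux-amd64.xz
sh .goreleaser.checksum.sh act_runner-linux-amd64      # assumed to emit act_runner-linux-amd64.sha256
sh .goreleaser.checksum.sh act_runner-linux-amd64.xz   # assumed to emit act_runner-linux-amd64.xz.sha256
```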
Dockerfile (new file, 17 lines)
@@ -0,0 +1,17 @@
FROM golang:1.20-alpine3.17 as builder
RUN apk add --no-cache make=4.3-r1

COPY . /opt/src/act_runner
WORKDIR /opt/src/act_runner

RUN make clean && make build

FROM alpine:3.17
RUN apk add --no-cache \
    git=2.38.5-r0 bash=5.2.15-r0 \
    && rm -rf /var/cache/apk/*

COPY --from=builder /opt/src/act_runner/act_runner /usr/local/bin/act_runner
COPY run.sh /opt/act/run.sh

ENTRYPOINT ["/opt/act/run.sh"]
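A minimal local workflow for this image, assuming the `make docker` target added below in the Makefile and the registration variables described later in the README; the instance URL and token are placeholders.

```bash
# Build the image (the Makefile target defaults to gitea/act_runner:nightly)
make docker

# Run a runner from it against your Gitea instance
docker run -d --name my_runner \
  -e GITEA_INSTANCE_URL=https://gitea.example.com \
  -e GITEA_RUNNER_REGISTRATION_TOKEN=<registration token> \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v "$PWD/data:/data" \
  gitea/act_runner:nightly
```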
Makefile (16 lines changed)
@@ -16,6 +16,10 @@ WINDOWS_ARCHS ?= windows/amd64
 GO_FMT_FILES := $(shell find . -type f -name "*.go" ! -name "generated.*")
 GOFILES := $(shell find . -type f -name "*.go" -o -name "go.mod" ! -name "generated.*")

+DOCKER_IMAGE ?= gitea/act_runner
+DOCKER_TAG ?= nightly
+DOCKER_REF := $(DOCKER_IMAGE):$(DOCKER_TAG)
+
 ifneq ($(shell uname), Darwin)
 	EXTLDFLAGS = -extldflags "-static" $(null)
 else

@@ -61,6 +65,9 @@ else
 endif
 endif

+GO_PACKAGES_TO_VET ?= $(filter-out gitea.com/gitea/act_runner/internal/pkg/client/mocks,$(shell $(GO) list ./...))
+
 TAGS ?=
 LDFLAGS ?= -X "gitea.com/gitea/act_runner/internal/pkg/ver.version=$(RELASE_VERSION)"

@@ -101,7 +108,7 @@ test: fmt-check
 vet:
 	@echo "Running go vet..."
 	@$(GO) build code.gitea.io/gitea-vet
-	@$(GO) vet -vettool=gitea-vet ./...
+	@$(GO) vet -vettool=gitea-vet $(GO_PACKAGES_TO_VET)

 install: $(GOFILES)
 	$(GO) install -v -tags '$(TAGS)' -ldflags '$(EXTLDFLAGS)-s -w $(LDFLAGS)'

@@ -156,6 +163,13 @@ release-check: | $(DIST_DIRS)
 release-compress: | $(DIST_DIRS)
 	cd $(DIST)/release/; for file in `find . -type f -name "*"`; do echo "compressing $${file}" && $(GO) run $(GXZ_PAGAGE) -k -9 $${file}; done;

+.PHONY: docker
+docker:
+	if ! docker buildx version >/dev/null 2>&1; then \
+		ARG_DISABLE_CONTENT_TRUST=--disable-content-trust=false; \
+	fi; \
+	docker build $${ARG_DISABLE_CONTENT_TRUST} -t $(DOCKER_REF) .
+
 clean:
 	$(GO) clean -x -i ./...
 	rm -rf coverage.txt $(EXECUTABLE) $(DIST)
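Because `DOCKER_IMAGE` and `DOCKER_TAG` are defined with `?=`, the new `docker` target can be pointed at a different image name without editing the Makefile; for example (the names below are illustrative only):

```bash
make docker                                                               # builds gitea/act_runner:nightly
make docker DOCKER_IMAGE=registry.example.com/ci/act_runner DOCKER_TAG=dev
```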
README.md (41 lines changed)
@@ -6,11 +6,11 @@ Act runner is a runner for Gitea based on [Gitea fork](https://gitea.com/gitea/a

 ### Prerequisites

-Docker Engine Community version is required. To install Docker CE, follow the official [install instructions](https://docs.docker.com/engine/install/).
+Docker Engine Community version is required for docker mode. To install Docker CE, follow the official [install instructions](https://docs.docker.com/engine/install/).

 ### Download pre-built binary

-Visit https://dl.gitea.com/act_runner/ and download the right version for your platform.
+Visit [here](https://dl.gitea.com/act_runner/) and download the right version for your platform.

 ### Build from source

@@ -18,6 +18,12 @@ Visit https://dl.gitea.com/act_runner/ and download the right version for your p
 make build
 ```

+### Build a docker image
+
+```bash
+make docker
+```
+
 ## Quickstart

 ### Register
@@ -66,7 +72,6 @@ If the registry succeed, it will run immediately. Next time, you could run the r
 ./act_runner daemon
 ```

-
 ### Configuration

 You can also configure the runner with a configuration file.
@@ -82,3 +87,33 @@ You can specify the configuration file path with `-c`/`--config` argument.
 ./act_runner -c config.yaml register # register with config file
 ./act_runner -c config.yaml deamon # run with config file
 ```
+
+### Run a docker container
+
+```sh
+docker run -e GITEA_INSTANCE_URL=http://192.168.8.18:3000 -e GITEA_RUNNER_REGISTRATION_TOKEN=<runner_token> -v /var/run/docker.sock:/var/run/docker.sock -v $PWD/data:/data --name my_runner gitea/act_runner:nightly
+```
+
+The `/data` directory inside the docker container contains the runner API keys after registration.
+It must be persisted, otherwise the runner would try to register again, using the same, now defunct registration token.
+
+### Running in docker-compose
+
+```yml
+...
+  gitea:
+    image: gitea/gitea
+    ...
+
+  runner:
+    image: gitea/act_runner
+    restart: always
+    depends_on:
+      - gitea
+    volumes:
+      - ./data/act_runner:/data
+      - /var/run/docker.sock:/var/run/docker.sock
+    environment:
+      - GITEA_INSTANCE_URL=<instance url>
+      - GITEA_RUNNER_REGISTRATION_TOKEN=<registration token>
+```
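Assuming the compose file sketched above (service names `gitea` and `runner`, and a registration token already created on the Gitea side), bringing the pair up is the usual compose flow:

```bash
docker compose up -d gitea runner
docker compose logs -f runner   # watch it register and start polling for jobs
```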
go.mod (89 lines changed)
@@ -3,109 +3,88 @@ module gitea.com/gitea/act_runner
 go 1.20

 require (
-	code.gitea.io/actions-proto-go v0.2.0
+	code.gitea.io/actions-proto-go v0.2.1
 	code.gitea.io/gitea-vet v0.2.3-0.20230113022436-2b1561217fa5
 	github.com/avast/retry-go/v4 v4.3.1
 	github.com/bufbuild/connect-go v1.3.1
-	github.com/docker/docker v23.0.1+incompatible
-	github.com/go-chi/chi/v5 v5.0.8
-	github.com/go-chi/render v1.0.2
+	github.com/docker/docker v23.0.4+incompatible
 	github.com/joho/godotenv v1.5.1
-	github.com/mattn/go-isatty v0.0.17
+	github.com/mattn/go-isatty v0.0.18
 	github.com/nektos/act v0.0.0
 	github.com/sirupsen/logrus v1.9.0
-	github.com/spf13/cobra v1.6.1
-	github.com/stretchr/testify v1.8.1
-	golang.org/x/term v0.6.0
+	github.com/spf13/cobra v1.7.0
+	github.com/stretchr/testify v1.8.2
+	golang.org/x/term v0.7.0
 	golang.org/x/time v0.1.0
 	google.golang.org/protobuf v1.28.1
 	gopkg.in/yaml.v3 v3.0.1
 	gotest.tools/v3 v3.4.0
-	modernc.org/sqlite v1.14.2
-	xorm.io/builder v0.3.11-0.20220531020008-1bd24a7dc978
-	xorm.io/xorm v1.3.2
 )

 require (
 	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
 	github.com/Masterminds/semver v1.5.0 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
-	github.com/ProtonMail/go-crypto v0.0.0-20220404123522-616f957b79ad // indirect
-	github.com/acomagu/bufpipe v1.0.3 // indirect
-	github.com/ajg/form v1.5.1 // indirect
-	github.com/containerd/containerd v1.6.18 // indirect
+	github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect
+	github.com/acomagu/bufpipe v1.0.4 // indirect
+	github.com/cloudflare/circl v1.1.0 // indirect
+	github.com/containerd/containerd v1.6.20 // indirect
 	github.com/creack/pty v1.1.18 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/docker/cli v23.0.1+incompatible // indirect
+	github.com/docker/cli v23.0.4+incompatible // indirect
 	github.com/docker/distribution v2.8.1+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.7.0 // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
-	github.com/emirpasic/gods v1.12.0 // indirect
-	github.com/fatih/color v1.13.0 // indirect
+	github.com/emirpasic/gods v1.18.1 // indirect
+	github.com/fatih/color v1.15.0 // indirect
 	github.com/go-git/gcfg v1.5.0 // indirect
 	github.com/go-git/go-billy/v5 v5.4.1 // indirect
-	github.com/go-git/go-git/v5 v5.4.2 // indirect
-	github.com/goccy/go-json v0.8.1 // indirect
+	github.com/go-git/go-git/v5 v5.6.2-0.20230411180853-ce62f3e9ff86 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
-	github.com/google/uuid v1.3.0 // indirect
-	github.com/imdario/mergo v0.3.13 // indirect
-	github.com/inconshreveable/mousetrap v1.0.1 // indirect
+	github.com/imdario/mergo v0.3.15 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
-	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/julienschmidt/httprouter v1.3.0 // indirect
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
 	github.com/kevinburke/ssh_config v1.2.0 // indirect
 	github.com/klauspost/compress v1.15.12 // indirect
+	github.com/kr/pretty v0.3.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-runewidth v0.0.14 // indirect
-	github.com/mitchellh/go-homedir v1.1.0 // indirect
-	github.com/mitchellh/mapstructure v1.1.2 // indirect
-	github.com/moby/buildkit v0.11.4 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/moby/buildkit v0.11.6 // indirect
 	github.com/moby/patternmatcher v0.5.0 // indirect
 	github.com/moby/sys/sequential v0.5.0 // indirect
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.2 // indirect
-	github.com/onsi/ginkgo v1.12.1 // indirect
-	github.com/onsi/gomega v1.10.3 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
-	github.com/opencontainers/runc v1.1.3 // indirect
+	github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
+	github.com/opencontainers/runc v1.1.5 // indirect
 	github.com/opencontainers/selinux v1.11.0 // indirect
+	github.com/pjbgf/sha1cd v0.3.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
-	github.com/rhysd/actionlint v1.6.23 // indirect
-	github.com/rivo/uniseg v0.4.3 // indirect
+	github.com/rhysd/actionlint v1.6.24 // indirect
+	github.com/rivo/uniseg v0.4.4 // indirect
 	github.com/robfig/cron v1.2.0 // indirect
 	github.com/sergi/go-diff v1.2.0 // indirect
+	github.com/skeema/knownhosts v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/syndtr/goleveldb v1.0.0 // indirect
-	github.com/xanzy/ssh-agent v0.3.1 // indirect
+	github.com/stretchr/objx v0.5.0 // indirect
+	github.com/timshannon/bolthold v0.0.0-20210913165410-232392fc8a6a // indirect
+	github.com/xanzy/ssh-agent v0.3.3 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
-	golang.org/x/crypto v0.2.0 // indirect
-	golang.org/x/mod v0.4.2 // indirect
-	golang.org/x/net v0.7.0 // indirect
+	go.etcd.io/bbolt v1.3.7 // indirect
+	golang.org/x/crypto v0.6.0 // indirect
+	golang.org/x/net v0.9.0 // indirect
 	golang.org/x/sync v0.1.0 // indirect
-	golang.org/x/sys v0.6.0 // indirect
-	golang.org/x/tools v0.1.5 // indirect
-	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/tools v0.8.0 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	lukechampine.com/uint128 v1.1.1 // indirect
-	modernc.org/cc/v3 v3.35.18 // indirect
-	modernc.org/ccgo/v3 v3.12.82 // indirect
-	modernc.org/libc v1.11.87 // indirect
-	modernc.org/mathutil v1.4.1 // indirect
-	modernc.org/memory v1.0.5 // indirect
-	modernc.org/opt v0.1.1 // indirect
-	modernc.org/strutil v1.1.1 // indirect
-	modernc.org/token v1.0.0 // indirect
 )

-replace github.com/nektos/act => gitea.com/gitea/act v0.243.2-0.20230323041428-929ea6df751b
+replace github.com/nektos/act => gitea.com/gitea/act v0.245.1
@@ -1,12 +0,0 @@ (entire file removed; package artifactcache doc)
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

// Package artifactcache provides a cache handler for the runner.
//
// Inspired by https://github.com/sp-ricard-valverde/github-act-cache-server
//
// TODO: Authorization
// TODO: Restrictions for accessing a cache, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache
// TODO: Force deleting cache entries, see https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries

package artifactcache
@@ -1,416 +0,0 @@ (entire file removed; artifactcache handler)
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package artifactcache

import (
    "context"
    "fmt"
    "net"
    "net/http"
    "os"
    "path/filepath"
    "strconv"
    "strings"
    "sync/atomic"
    "time"

    "github.com/go-chi/chi/v5"
    "github.com/go-chi/chi/v5/middleware"
    "github.com/go-chi/render"
    log "github.com/sirupsen/logrus"
    _ "modernc.org/sqlite"
    "xorm.io/builder"
    "xorm.io/xorm"
)

const (
    urlBase = "/_apis/artifactcache"
)

var logger = log.StandardLogger().WithField("module", "cache_request")

type Handler struct {
    engine   engine
    storage  *Storage
    router   *chi.Mux
    listener net.Listener

    gc   atomic.Bool
    gcAt time.Time

    outboundIP string
}

func StartHandler(dir, outboundIP string, port uint16) (*Handler, error) {
    h := &Handler{}

    if dir == "" {
        if home, err := os.UserHomeDir(); err != nil {
            return nil, err
        } else {
            dir = filepath.Join(home, ".cache", "actcache")
        }
    }
    if err := os.MkdirAll(dir, 0o755); err != nil {
        return nil, err
    }

    e, err := xorm.NewEngine("sqlite", filepath.Join(dir, "sqlite.db"))
    if err != nil {
        return nil, err
    }
    if err := e.Sync(&Cache{}); err != nil {
        return nil, err
    }
    h.engine = engine{e: e}

    storage, err := NewStorage(filepath.Join(dir, "cache"))
    if err != nil {
        return nil, err
    }
    h.storage = storage

    if outboundIP != "" {
        h.outboundIP = outboundIP
    } else if ip, err := getOutboundIP(); err != nil {
        return nil, err
    } else {
        h.outboundIP = ip.String()
    }

    router := chi.NewRouter()
    router.Use(middleware.RequestLogger(&middleware.DefaultLogFormatter{Logger: logger}))
    router.Use(func(handler http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            handler.ServeHTTP(w, r)
            go h.gcCache()
        })
    })
    router.Use(middleware.Logger)
    router.Route(urlBase, func(r chi.Router) {
        r.Get("/cache", h.find)
        r.Route("/caches", func(r chi.Router) {
            r.Post("/", h.reserve)
            r.Route("/{id}", func(r chi.Router) {
                r.Patch("/", h.upload)
                r.Post("/", h.commit)
            })
        })
        r.Get("/artifacts/{id}", h.get)
        r.Post("/clean", h.clean)
    })

    h.router = router

    h.gcCache()

    listener, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) // listen on all interfaces
    if err != nil {
        return nil, err
    }
    go func() {
        if err := http.Serve(listener, h.router); err != nil {
            logger.Errorf("http serve: %v", err)
        }
    }()
    h.listener = listener

    return h, nil
}

func (h *Handler) ExternalURL() string {
    // TODO: make the external url configurable if necessary
    return fmt.Sprintf("http://%s:%d",
        h.outboundIP,
        h.listener.Addr().(*net.TCPAddr).Port)
}

// GET /_apis/artifactcache/cache
func (h *Handler) find(w http.ResponseWriter, r *http.Request) {
    keys := strings.Split(r.URL.Query().Get("keys"), ",")
    version := r.URL.Query().Get("version")

    cache, err := h.findCache(r.Context(), keys, version)
    if err != nil {
        responseJson(w, r, 500, err)
        return
    }
    if cache == nil {
        responseJson(w, r, 204)
        return
    }

    if ok, err := h.storage.Exist(cache.ID); err != nil {
        responseJson(w, r, 500, err)
        return
    } else if !ok {
        _ = h.engine.Exec(func(sess *xorm.Session) error {
            _, err := sess.Delete(cache)
            return err
        })
        responseJson(w, r, 204)
        return
    }
    responseJson(w, r, 200, map[string]any{
        "result":          "hit",
        "archiveLocation": fmt.Sprintf("%s%s/artifacts/%d", h.ExternalURL(), urlBase, cache.ID),
        "cacheKey":        cache.Key,
    })
}

// POST /_apis/artifactcache/caches
func (h *Handler) reserve(w http.ResponseWriter, r *http.Request) {
    cache := &Cache{}
    if err := render.Bind(r, cache); err != nil {
        responseJson(w, r, 400, err)
        return
    }

    if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
        return sess.Where(builder.Eq{"key": cache.Key, "version": cache.Version}).Get(&Cache{})
    }); err != nil {
        responseJson(w, r, 500, err)
        return
    } else if ok {
        responseJson(w, r, 400, fmt.Errorf("already exist"))
        return
    }

    if err := h.engine.Exec(func(sess *xorm.Session) error {
        _, err := sess.Insert(cache)
        return err
    }); err != nil {
        responseJson(w, r, 500, err)
        return
    }
    responseJson(w, r, 200, map[string]any{
        "cacheId": cache.ID,
    })
    return
}

// PATCH /_apis/artifactcache/caches/:id
func (h *Handler) upload(w http.ResponseWriter, r *http.Request) {
    id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
    if err != nil {
        responseJson(w, r, 400, err)
        return
    }

    cache := &Cache{
        ID: id,
    }

    if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
        return sess.Get(cache)
    }); err != nil {
        responseJson(w, r, 500, err)
        return
    } else if !ok {
        responseJson(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
        return
    }

    if cache.Complete {
        responseJson(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
        return
    }
    start, _, err := parseContentRange(r.Header.Get("Content-Range"))
    if err != nil {
        responseJson(w, r, 400, err)
        return
    }
    if err := h.storage.Write(cache.ID, start, r.Body); err != nil {
        responseJson(w, r, 500, err)
    }
    h.useCache(r.Context(), id)
    responseJson(w, r, 200)
}

// POST /_apis/artifactcache/caches/:id
func (h *Handler) commit(w http.ResponseWriter, r *http.Request) {
    id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
    if err != nil {
        responseJson(w, r, 400, err)
        return
    }

    cache := &Cache{
        ID: id,
    }
    if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
        return sess.Get(cache)
    }); err != nil {
        responseJson(w, r, 500, err)
        return
    } else if !ok {
        responseJson(w, r, 400, fmt.Errorf("cache %d: not reserved", id))
        return
    }

    if cache.Complete {
        responseJson(w, r, 400, fmt.Errorf("cache %v %q: already complete", cache.ID, cache.Key))
        return
    }

    if err := h.storage.Commit(cache.ID, cache.Size); err != nil {
        responseJson(w, r, 500, err)
        return
    }

    cache.Complete = true
    if err := h.engine.Exec(func(sess *xorm.Session) error {
        _, err := sess.ID(cache.ID).Cols("complete").Update(cache)
        return err
    }); err != nil {
        responseJson(w, r, 500, err)
        return
    }

    responseJson(w, r, 200)
}

// GET /_apis/artifactcache/artifacts/:id
func (h *Handler) get(w http.ResponseWriter, r *http.Request) {
    id, err := strconv.ParseInt(chi.URLParam(r, "id"), 10, 64)
    if err != nil {
        responseJson(w, r, 400, err)
        return
    }
    h.useCache(r.Context(), id)
    h.storage.Serve(w, r, id)
}

// POST /_apis/artifactcache/clean
func (h *Handler) clean(w http.ResponseWriter, r *http.Request) {
    // TODO: don't support force deleting cache entries
    // see: https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#force-deleting-cache-entries

    responseJson(w, r, 200)
}

// if not found, return (nil, nil) instead of an error.
func (h *Handler) findCache(ctx context.Context, keys []string, version string) (*Cache, error) {
    if len(keys) == 0 {
        return nil, nil
    }
    key := keys[0] // the first key is for exact match.

    cache := &Cache{}
    if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
        return sess.Where(builder.Eq{"key": key, "version": version, "complete": true}).Get(cache)
    }); err != nil {
        return nil, err
    } else if ok {
        return cache, nil
    }

    for _, prefix := range keys[1:] {
        if ok, err := h.engine.ExecBool(func(sess *xorm.Session) (bool, error) {
            return sess.Where(builder.And(
                builder.Like{"key", prefix + "%"},
                builder.Eq{"version": version, "complete": true},
            )).OrderBy("id DESC").Get(cache)
        }); err != nil {
            return nil, err
        } else if ok {
            return cache, nil
        }
    }
    return nil, nil
}

func (h *Handler) useCache(ctx context.Context, id int64) {
    // keep quiet
    _ = h.engine.Exec(func(sess *xorm.Session) error {
        _, err := sess.Context(ctx).Cols("used_at").Update(&Cache{
            ID:     id,
            UsedAt: time.Now().Unix(),
        })
        return err
    })
}

func (h *Handler) gcCache() {
    if h.gc.Load() {
        return
    }
    if !h.gc.CompareAndSwap(false, true) {
        return
    }
    defer h.gc.Store(false)

    if time.Since(h.gcAt) < time.Hour {
        logger.Infof("skip gc: %v", h.gcAt.String())
        return
    }
    h.gcAt = time.Now()
    logger.Infof("gc: %v", h.gcAt.String())

    const (
        keepUsed   = 30 * 24 * time.Hour
        keepUnused = 7 * 24 * time.Hour
        keepTemp   = 5 * time.Minute
    )

    var caches []*Cache
    if err := h.engine.Exec(func(sess *xorm.Session) error {
        return sess.Where(builder.And(builder.Lt{"used_at": time.Now().Add(-keepTemp).Unix()}, builder.Eq{"complete": false})).
            Find(&caches)
    }); err != nil {
        logger.Warnf("find caches: %v", err)
    } else {
        for _, cache := range caches {
            h.storage.Remove(cache.ID)
            if err := h.engine.Exec(func(sess *xorm.Session) error {
                _, err := sess.Delete(cache)
                return err
            }); err != nil {
                logger.Warnf("delete cache: %v", err)
                continue
            }
            logger.Infof("deleted cache: %+v", cache)
        }
    }

    caches = caches[:0]
    if err := h.engine.Exec(func(sess *xorm.Session) error {
        return sess.Where(builder.Lt{"used_at": time.Now().Add(-keepUnused).Unix()}).
            Find(&caches)
    }); err != nil {
        logger.Warnf("find caches: %v", err)
    } else {
        for _, cache := range caches {
            h.storage.Remove(cache.ID)
            if err := h.engine.Exec(func(sess *xorm.Session) error {
                _, err := sess.Delete(cache)
                return err
            }); err != nil {
                logger.Warnf("delete cache: %v", err)
                continue
            }
            logger.Infof("deleted cache: %+v", cache)
        }
    }

    caches = caches[:0]
    if err := h.engine.Exec(func(sess *xorm.Session) error {
        return sess.Where(builder.Lt{"created_at": time.Now().Add(-keepUsed).Unix()}).
            Find(&caches)
    }); err != nil {
        logger.Warnf("find caches: %v", err)
    } else {
        for _, cache := range caches {
            h.storage.Remove(cache.ID)
            if err := h.engine.Exec(func(sess *xorm.Session) error {
                _, err := sess.Delete(cache)
                return err
            }); err != nil {
                logger.Warnf("delete cache: %v", err)
                continue
            }
            logger.Infof("deleted cache: %+v", cache)
        }
    }
}
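For reference, the HTTP protocol this removed handler exposed (and which, per the import change further down, now lives in `github.com/nektos/act/pkg/artifactcache`) can be exercised by hand. The sketch below assumes the handler is listening on 127.0.0.1:45678 and that the reserved cache id comes back as 1; both values are placeholders.

```bash
BASE=http://127.0.0.1:45678/_apis/artifactcache

# Reserve an entry; the handler answers with {"cacheId": <id>}
curl -s -X POST "$BASE/caches" \
  -H 'Content-Type: application/json' \
  -d '{"key":"go-mod-demo","version":"deadbeef","cacheSize":4}'

# Upload the content as one or more ranges, then commit it
curl -s -X PATCH "$BASE/caches/1" -H 'Content-Range: bytes 0-3/*' --data-binary 'data'
curl -s -X POST  "$BASE/caches/1"

# Query by key and fetch the archive through the returned archiveLocation
curl -s "$BASE/cache?keys=go-mod-demo&version=deadbeef"
curl -s -o cache.bin "$BASE/artifacts/1"
```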
@@ -1,30 +0,0 @@ (entire file removed; artifactcache model)
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package artifactcache

import (
    "fmt"
    "net/http"
)

type Cache struct {
    ID        int64  `xorm:"id pk autoincr" json:"-"`
    Key       string `xorm:"TEXT index unique(key_version)" json:"key"`
    Version   string `xorm:"TEXT unique(key_version)" json:"version"`
    Size      int64  `json:"cacheSize"`
    Complete  bool   `xorm:"index(complete_used_at)" json:"-"`
    UsedAt    int64  `xorm:"index(complete_used_at) updated" json:"-"`
    CreatedAt int64  `xorm:"index created" json:"-"`
}

// Bind implements render.Binder
func (c *Cache) Bind(_ *http.Request) error {
    if c.Key == "" {
        return fmt.Errorf("missing key")
    }
    if c.Version == "" {
        return fmt.Errorf("missing version")
    }
    return nil
}
@@ -1,129 +0,0 @@ (entire file removed; artifactcache storage)
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package artifactcache

import (
    "fmt"
    "io"
    "net/http"
    "os"
    "path/filepath"
)

type Storage struct {
    rootDir string
}

func NewStorage(rootDir string) (*Storage, error) {
    if err := os.MkdirAll(rootDir, 0o755); err != nil {
        return nil, err
    }
    return &Storage{
        rootDir: rootDir,
    }, nil
}

func (s *Storage) Exist(id int64) (bool, error) {
    name := s.filename(id)
    if _, err := os.Stat(name); os.IsNotExist(err) {
        return false, nil
    } else if err != nil {
        return false, err
    }
    return true, nil
}

func (s *Storage) Write(id int64, offset int64, reader io.Reader) error {
    name := s.tempName(id, offset)
    if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
        return err
    }
    file, err := os.Create(name)
    if err != nil {
        return err
    }
    defer file.Close()

    _, err = io.Copy(file, reader)
    return err
}

func (s *Storage) Commit(id int64, size int64) error {
    defer func() {
        _ = os.RemoveAll(s.tempDir(id))
    }()

    name := s.filename(id)
    tempNames, err := s.tempNames(id)
    if err != nil {
        return err
    }

    if err := os.MkdirAll(filepath.Dir(name), 0o755); err != nil {
        return err
    }
    file, err := os.Create(name)
    if err != nil {
        return err
    }
    defer file.Close()

    var written int64
    for _, v := range tempNames {
        f, err := os.Open(v)
        if err != nil {
            return err
        }
        n, err := io.Copy(file, f)
        _ = f.Close()
        if err != nil {
            return err
        }
        written += n
    }

    if written != size {
        _ = file.Close()
        _ = os.Remove(name)
        return fmt.Errorf("broken file: %v != %v", written, size)
    }
    return nil
}

func (s *Storage) Serve(w http.ResponseWriter, r *http.Request, id int64) {
    name := s.filename(id)
    http.ServeFile(w, r, name)
}

func (s *Storage) Remove(id int64) {
    _ = os.Remove(s.filename(id))
    _ = os.RemoveAll(s.tempDir(id))
}

func (s *Storage) filename(id int64) string {
    return filepath.Join(s.rootDir, fmt.Sprintf("%02x", id%0xff), fmt.Sprint(id))
}

func (s *Storage) tempDir(id int64) string {
    return filepath.Join(s.rootDir, "tmp", fmt.Sprint(id))
}

func (s *Storage) tempName(id, offset int64) string {
    return filepath.Join(s.tempDir(id), fmt.Sprintf("%016x", offset))
}

func (s *Storage) tempNames(id int64) ([]string, error) {
    dir := s.tempDir(id)
    files, err := os.ReadDir(dir)
    if err != nil {
        return nil, err
    }
    var names []string
    for _, v := range files {
        if !v.IsDir() {
            names = append(names, filepath.Join(dir, v.Name()))
        }
    }
    return names, nil
}
@@ -1,100 +0,0 @@ (entire file removed; artifactcache helpers)
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package artifactcache

import (
    "fmt"
    "net"
    "net/http"
    "strconv"
    "strings"
    "sync"

    "github.com/go-chi/render"
    "xorm.io/xorm"
)

func responseJson(w http.ResponseWriter, r *http.Request, code int, v ...any) {
    render.Status(r, code)
    if len(v) == 0 || v[0] == nil {
        render.JSON(w, r, struct{}{})
    } else if err, ok := v[0].(error); ok {
        logger.Errorf("%v %v: %v", r.Method, r.RequestURI, err)
        render.JSON(w, r, map[string]any{
            "error": err.Error(),
        })
    } else {
        render.JSON(w, r, v[0])
    }
}

func parseContentRange(s string) (int64, int64, error) {
    // support the format like "bytes 11-22/*" only
    s, _, _ = strings.Cut(strings.TrimPrefix(s, "bytes "), "/")
    s1, s2, _ := strings.Cut(s, "-")

    start, err := strconv.ParseInt(s1, 10, 64)
    if err != nil {
        return 0, 0, fmt.Errorf("parse %q: %w", s, err)
    }
    stop, err := strconv.ParseInt(s2, 10, 64)
    if err != nil {
        return 0, 0, fmt.Errorf("parse %q: %w", s, err)
    }
    return start, stop, nil
}

func getOutboundIP() (net.IP, error) {
    // FIXME: It makes more sense to use the gateway IP address of container network
    if conn, err := net.Dial("udp", "8.8.8.8:80"); err == nil {
        defer conn.Close()
        return conn.LocalAddr().(*net.UDPAddr).IP, nil
    }
    if ifaces, err := net.Interfaces(); err == nil {
        for _, i := range ifaces {
            if addrs, err := i.Addrs(); err == nil {
                for _, addr := range addrs {
                    var ip net.IP
                    switch v := addr.(type) {
                    case *net.IPNet:
                        ip = v.IP
                    case *net.IPAddr:
                        ip = v.IP
                    }
                    if ip.IsGlobalUnicast() {
                        return ip, nil
                    }
                }
            }
        }
    }
    return nil, fmt.Errorf("no outbound IP address found")
}

// engine is a wrapper of *xorm.Engine, with a lock.
// To avoid racing of sqlite, we don't care performance here.
type engine struct {
    e *xorm.Engine
    m sync.Mutex
}

func (e *engine) Exec(f func(*xorm.Session) error) error {
    e.m.Lock()
    defer e.m.Unlock()

    sess := e.e.NewSession()
    defer sess.Close()

    return f(sess)
}

func (e *engine) ExecBool(f func(*xorm.Session) (bool, error)) (bool, error) {
    e.m.Lock()
    defer e.m.Unlock()

    sess := e.e.NewSession()
    defer sess.Close()

    return f(sess)
}
@@ -14,6 +14,7 @@ import (
 	"time"

 	"github.com/joho/godotenv"
+	"github.com/nektos/act/pkg/artifactcache"
 	"github.com/nektos/act/pkg/artifacts"
 	"github.com/nektos/act/pkg/common"
 	"github.com/nektos/act/pkg/model"
@@ -21,8 +22,6 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"golang.org/x/term"
-
-	"gitea.com/gitea/act_runner/internal/app/artifactcache"
 )

 type executeArgs struct {
@@ -48,6 +47,7 @@ type executeArgs struct {
 	useGitIgnore       bool
 	containerCapAdd    []string
 	containerCapDrop   []string
+	containerOptions   string
 	artifactServerPath string
 	artifactServerAddr string
 	artifactServerPort string
@@ -313,7 +313,7 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command

 		if len(execArgs.event) > 0 {
 			log.Infof("Using chosed event for filtering: %s", execArgs.event)
-			eventName = args[0]
+			eventName = execArgs.event
 		} else if len(events) == 1 && len(events[0]) > 0 {
 			log.Infof("Using the only detected workflow event: %s", events[0])
 			eventName = events[0]
@@ -348,7 +348,7 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command
 		}

 		// init a cache server
-		handler, err := artifactcache.StartHandler("", "", 0)
+		handler, err := artifactcache.StartHandler("", "", 0, log.StandardLogger().WithField("module", "cache_request"))
 		if err != nil {
 			return err
 		}
@@ -375,6 +375,7 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command
 			// GitHubInstance: t.client.Address(),
 			ContainerCapAdd:    execArgs.containerCapAdd,
 			ContainerCapDrop:   execArgs.containerCapDrop,
+			ContainerOptions:   execArgs.containerOptions,
 			AutoRemove:         true,
 			ArtifactServerPath: execArgs.artifactServerPath,
 			ArtifactServerPort: execArgs.artifactServerPort,
@@ -390,12 +391,10 @@ func runExec(ctx context.Context, execArgs *executeArgs) func(cmd *cobra.Command
 			},
 		}

-		// TODO: handle log level config
-		// waiting https://gitea.com/gitea/act/pulls/19
-		// if !execArgs.debug {
-		// 	logLevel := log.Level(log.InfoLevel)
-		// 	config.JobLoggerLevel = &logLevel
-		// }
+		if !execArgs.debug {
+			logLevel := log.Level(log.InfoLevel)
+			config.JobLoggerLevel = &logLevel
+		}

 		r, err := runner.New(config)
 		if err != nil {
@@ -456,6 +455,7 @@ func loadExecCmd(ctx context.Context) *cobra.Command {
 	execCmd.Flags().BoolVar(&execArg.useGitIgnore, "use-gitignore", true, "Controls whether paths specified in .gitignore should be copied into container")
 	execCmd.Flags().StringArrayVarP(&execArg.containerCapAdd, "container-cap-add", "", []string{}, "kernel capabilities to add to the workflow containers (e.g. --container-cap-add SYS_PTRACE)")
 	execCmd.Flags().StringArrayVarP(&execArg.containerCapDrop, "container-cap-drop", "", []string{}, "kernel capabilities to remove from the workflow containers (e.g. --container-cap-drop SYS_PTRACE)")
+	execCmd.Flags().StringVarP(&execArg.containerOptions, "container-opts", "", "", "container options")
 	execCmd.PersistentFlags().StringVarP(&execArg.artifactServerPath, "artifact-server-path", "", ".", "Defines the path where the artifact server stores uploads and retrieves downloads from. If not specified the artifact server will not start.")
 	execCmd.PersistentFlags().StringVarP(&execArg.artifactServerPort, "artifact-server-port", "", "34567", "Defines the port where the artifact server listens (will only bind to localhost).")
 	execCmd.PersistentFlags().StringVarP(&execArg.defaultActionsUrl, "default-actions-url", "", "https://gitea.com", "Defines the default url of action instance.")
@@ -7,7 +7,6 @@ import (
 	"context"
 	"errors"
 	"sync"
-	"time"

 	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
 	"github.com/bufbuild/connect-go"
@@ -20,23 +19,23 @@ import (
 )

 type Poller struct {
 	client client.Client
 	runner *run.Runner
-	capacity int
+	cfg    *config.Config
 }

 func New(cfg *config.Config, client client.Client, runner *run.Runner) *Poller {
 	return &Poller{
 		client: client,
 		runner: runner,
-		capacity: cfg.Runner.Capacity,
+		cfg:    cfg,
 	}
 }

 func (p *Poller) Poll(ctx context.Context) {
-	limiter := rate.NewLimiter(rate.Every(2*time.Second), 1)
+	limiter := rate.NewLimiter(rate.Every(p.cfg.Runner.FetchInterval), 1)
 	wg := &sync.WaitGroup{}
-	for i := 0; i < p.capacity; i++ {
+	for i := 0; i < p.cfg.Runner.Capacity; i++ {
 		wg.Add(1)
 		go p.poll(ctx, wg, limiter)
 	}
@@ -63,7 +62,7 @@ func (p *Poller) poll(ctx context.Context, wg *sync.WaitGroup, limiter *rate.Lim
 }

 func (p *Poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) {
-	reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	reqCtx, cancel := context.WithTimeout(ctx, p.cfg.Runner.FetchTimeout)
 	defer cancel()

 	resp, err := p.client.FetchTask(reqCtx, connect.NewRequest(&runnerv1.FetchTaskRequest{}))
@@ -75,7 +74,7 @@ func (p *Poller) fetchTask(ctx context.Context) (*runnerv1.Task, bool) {
 		return nil, false
 	}

-	if resp.Msg.Task == nil {
+	if resp == nil || resp.Msg == nil || resp.Msg.Task == nil {
 		return nil, false
 	}
 	return resp.Msg.Task, true
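
Aside: the poller change above boils down to a standard pattern — a token-bucket limiter paced by runner.fetch_interval, a per-request deadline taken from runner.fetch_timeout, and runner.capacity copies of the loop running concurrently. A minimal, self-contained Go sketch of that pattern (illustrative only, not code from this repository; the loop bound and printed output are invented for the example):

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	fetchInterval := 2 * time.Second // corresponds to runner.fetch_interval
	fetchTimeout := 5 * time.Second  // corresponds to runner.fetch_timeout

	// At most one fetch per fetchInterval, with a burst of 1.
	limiter := rate.NewLimiter(rate.Every(fetchInterval), 1)
	ctx := context.Background()

	for i := 0; i < 3; i++ {
		if err := limiter.Wait(ctx); err != nil {
			return // context cancelled
		}
		// Each individual fetch is bounded by fetchTimeout.
		reqCtx, cancel := context.WithTimeout(ctx, fetchTimeout)
		deadline, _ := reqCtx.Deadline()
		fmt.Println("fetch attempt", i, "must finish by", deadline)
		cancel()
	}
}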
@@ -4,21 +4,21 @@
 package run

 import (
-	"bytes"
 	"context"
 	"encoding/json"
 	"fmt"
 	"path/filepath"
+	"strings"
 	"sync"
 	"time"

 	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
+	"github.com/nektos/act/pkg/artifactcache"
 	"github.com/nektos/act/pkg/common"
 	"github.com/nektos/act/pkg/model"
 	"github.com/nektos/act/pkg/runner"
 	log "github.com/sirupsen/logrus"

-	"gitea.com/gitea/act_runner/internal/app/artifactcache"
 	"gitea.com/gitea/act_runner/internal/pkg/client"
 	"gitea.com/gitea/act_runner/internal/pkg/config"
 	"gitea.com/gitea/act_runner/internal/pkg/labels"
@@ -51,7 +51,12 @@ func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client)
 		envs[k] = v
 	}
 	if cfg.Cache.Enabled == nil || *cfg.Cache.Enabled {
-		cacheHandler, err := artifactcache.StartHandler(cfg.Cache.Dir, cfg.Cache.Host, cfg.Cache.Port)
+		cacheHandler, err := artifactcache.StartHandler(
+			cfg.Cache.Dir,
+			cfg.Cache.Host,
+			cfg.Cache.Port,
+			log.StandardLogger().WithField("module", "cache_request"),
+		)
 		if err != nil {
 			log.Errorf("cannot init cache server, it will be disabled: %v", err)
 			// go on
@@ -60,6 +65,14 @@ func NewRunner(cfg *config.Config, reg *config.Registration, cli client.Client)
 		}
 	}

+	// set artifact gitea api
+	artifactGiteaAPI := strings.TrimSuffix(cli.Address(), "/") + "/api/actions_pipeline/"
+	envs["ACTIONS_RUNTIME_URL"] = artifactGiteaAPI
+
+	// Set specific environments to distinguish between Gitea and GitHub
+	envs["GITEA_ACTIONS"] = "true"
+	envs["GITEA_ACTIONS_RUNNER_VERSION"] = ver.Version()
+
 	return &Runner{
 		name: reg.Name,
 		cfg:  cfg,
@@ -103,16 +116,11 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.

 	reporter.Logf("%s(version:%s) received task %v of job %v, be triggered by event: %s", r.name, ver.Version(), task.Id, task.Context.Fields["job"].GetStringValue(), task.Context.Fields["event_name"].GetStringValue())

-	workflow, err := model.ReadWorkflow(bytes.NewReader(task.WorkflowPayload))
+	workflow, jobID, err := generateWorkflow(task)
 	if err != nil {
 		return err
 	}

-	jobIDs := workflow.GetJobIDs()
-	if len(jobIDs) != 1 {
-		return fmt.Errorf("multiple jobs found: %v", jobIDs)
-	}
-	jobID := jobIDs[0]
 	plan, err := model.CombineWorkflowPlanner(workflow).PlanJob(jobID)
 	if err != nil {
 		return err
@@ -149,6 +157,9 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
 		preset.Token = t
 	}

+	// use task token to action api token
+	r.envs["ACTIONS_RUNTIME_TOKEN"] = preset.Token
+
 	eventJSON, err := json.Marshal(preset.Event)
 	if err != nil {
 		return err
@@ -160,9 +171,9 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
 	}

 	runnerConfig := &runner.Config{
-		// On Linux, Workdir will be like "/<owner>/<repo>"
-		// On Windows, Workdir will be like "\<owner>\<repo>"
-		Workdir:     filepath.FromSlash(string(filepath.Separator) + preset.Repository),
+		// On Linux, Workdir will be like "/<parent_directory>/<owner>/<repo>"
+		// On Windows, Workdir will be like "\<parent_directory>\<owner>\<repo>"
+		Workdir:     filepath.FromSlash(fmt.Sprintf("/%s/%s", r.cfg.Container.WorkdirParent, preset.Repository)),
 		BindWorkdir: false,

 		ReuseContainers: false,
@@ -180,8 +191,11 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
 		ContainerNamePrefix:   fmt.Sprintf("GITEA-ACTIONS-TASK-%d", task.Id),
 		ContainerMaxLifetime:  maxLifetime,
 		ContainerNetworkMode:  r.cfg.Container.NetworkMode,
+		ContainerOptions:      r.cfg.Container.Options,
+		Privileged:            r.cfg.Container.Privileged,
 		DefaultActionInstance: taskContext["gitea_default_actions_url"].GetStringValue(),
 		PlatformPicker:        r.labels.PickPlatform,
+		Vars:                  task.Vars,
 	}

 	rr, err := runner.New(runnerConfig)
@@ -195,5 +209,7 @@ func (r *Runner) run(ctx context.Context, task *runnerv1.Task, reporter *report.
 	// add logger recorders
 	ctx = common.WithLoggerHook(ctx, reporter)

-	return executor(ctx)
+	execErr := executor(ctx)
+	reporter.SetOutputs(job.Outputs)
+	return execErr
 }
internal/app/run/workflow.go (new file, 54 lines)
@@ -0,0 +1,54 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package run
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+
+	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
+	"github.com/nektos/act/pkg/model"
+	"gopkg.in/yaml.v3"
+)
+
+func generateWorkflow(task *runnerv1.Task) (*model.Workflow, string, error) {
+	workflow, err := model.ReadWorkflow(bytes.NewReader(task.WorkflowPayload))
+	if err != nil {
+		return nil, "", err
+	}
+
+	jobIDs := workflow.GetJobIDs()
+	if len(jobIDs) != 1 {
+		return nil, "", fmt.Errorf("multiple jobs found: %v", jobIDs)
+	}
+	jobID := jobIDs[0]
+
+	needJobIDs := make([]string, 0, len(task.Needs))
+	for id, need := range task.Needs {
+		needJobIDs = append(needJobIDs, id)
+		needJob := &model.Job{
+			Outputs: need.Outputs,
+			Result:  strings.ToLower(strings.TrimPrefix(need.Result.String(), "RESULT_")),
+		}
+		workflow.Jobs[id] = needJob
+	}
+	sort.Strings(needJobIDs)
+
+	rawNeeds := yaml.Node{
+		Kind:    yaml.SequenceNode,
+		Content: make([]*yaml.Node, 0, len(needJobIDs)),
+	}
+	for _, id := range needJobIDs {
+		rawNeeds.Content = append(rawNeeds.Content, &yaml.Node{
+			Kind:  yaml.ScalarNode,
+			Value: id,
+		})
+	}
+
+	workflow.Jobs[jobID].RawNeeds = rawNeeds
+
+	return workflow, jobID, nil
+}
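
Aside: the RawNeeds field built in generateWorkflow above is a yaml.Node sequence, so the single job ends up with an explicit needs list for every job carried in task.Needs. A small, self-contained sketch (illustrative only, not code from this repository; the job IDs are sample values) showing how such a node renders back to YAML:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	needIDs := []string{"job1", "job2"} // sorted need job IDs, as in generateWorkflow

	// Build a YAML sequence node: one scalar per needed job ID.
	rawNeeds := yaml.Node{
		Kind:    yaml.SequenceNode,
		Content: make([]*yaml.Node, 0, len(needIDs)),
	}
	for _, id := range needIDs {
		rawNeeds.Content = append(rawNeeds.Content, &yaml.Node{
			Kind:  yaml.ScalarNode,
			Value: id,
		})
	}

	out, err := yaml.Marshal(map[string]*yaml.Node{"needs": &rawNeeds})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // prints a "needs:" mapping with job1 and job2 as list items
}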
internal/app/run/workflow_test.go (new file, 74 lines)
@@ -0,0 +1,74 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package run
+
+import (
+	"testing"
+
+	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
+	"github.com/nektos/act/pkg/model"
+	"github.com/stretchr/testify/require"
+	"gotest.tools/v3/assert"
+)
+
+func Test_generateWorkflow(t *testing.T) {
+	type args struct {
+		task *runnerv1.Task
+	}
+	tests := []struct {
+		name    string
+		args    args
+		assert  func(t *testing.T, wf *model.Workflow)
+		want1   string
+		wantErr bool
+	}{
+		{
+			name: "has needs",
+			args: args{
+				task: &runnerv1.Task{
+					WorkflowPayload: []byte(`
+name: Build and deploy
+on: push
+
+jobs:
+  job9:
+    needs: build
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - run: ./deploy --build ${{ needs.job1.outputs.output1 }}
+      - run: ./deploy --build ${{ needs.job2.outputs.output2 }}
+`),
+					Needs: map[string]*runnerv1.TaskNeed{
+						"job1": {
+							Outputs: map[string]string{
+								"output1": "output1 value",
+							},
+							Result: runnerv1.Result_RESULT_SUCCESS,
+						},
+						"job2": {
+							Outputs: map[string]string{
+								"output2": "output2 value",
+							},
+							Result: runnerv1.Result_RESULT_SUCCESS,
+						},
+					},
+				},
+			},
+			assert: func(t *testing.T, wf *model.Workflow) {
+				assert.DeepEqual(t, wf.GetJob("job9").Needs(), []string{"job1", "job2"})
+			},
+			want1:   "job9",
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, got1, err := generateWorkflow(tt.args.task)
+			require.NoError(t, err)
+			tt.assert(t, got)
+			assert.Equal(t, got1, tt.want1)
+		})
+	}
+}
@@ -9,6 +9,8 @@ import (
 )

 // A Client manages communication with the runner.
+//
+//go:generate mockery --name Client
 type Client interface {
 	pingv1connect.PingServiceClient
 	runnerv1connect.RunnerServiceClient
internal/pkg/client/mocks/Client.go (new file, 193 lines)
@@ -0,0 +1,193 @@
+// Code generated by mockery v2.26.1. DO NOT EDIT.
+
+package mocks
+
+import (
+	context "context"
+
+	connect "github.com/bufbuild/connect-go"
+
+	mock "github.com/stretchr/testify/mock"
+
+	pingv1 "code.gitea.io/actions-proto-go/ping/v1"
+
+	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
+)
+
+// Client is an autogenerated mock type for the Client type
+type Client struct {
+	mock.Mock
+}
+
+// Address provides a mock function with given fields:
+func (_m *Client) Address() string {
+	ret := _m.Called()
+
+	var r0 string
+	if rf, ok := ret.Get(0).(func() string); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(string)
+	}
+
+	return r0
+}
+
+// FetchTask provides a mock function with given fields: _a0, _a1
+func (_m *Client) FetchTask(_a0 context.Context, _a1 *connect.Request[runnerv1.FetchTaskRequest]) (*connect.Response[runnerv1.FetchTaskResponse], error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *connect.Response[runnerv1.FetchTaskResponse]
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.FetchTaskRequest]) (*connect.Response[runnerv1.FetchTaskResponse], error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.FetchTaskRequest]) *connect.Response[runnerv1.FetchTaskResponse]); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*connect.Response[runnerv1.FetchTaskResponse])
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.FetchTaskRequest]) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// Insecure provides a mock function with given fields:
+func (_m *Client) Insecure() bool {
+	ret := _m.Called()
+
+	var r0 bool
+	if rf, ok := ret.Get(0).(func() bool); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Get(0).(bool)
+	}
+
+	return r0
+}
+
+// Ping provides a mock function with given fields: _a0, _a1
+func (_m *Client) Ping(_a0 context.Context, _a1 *connect.Request[pingv1.PingRequest]) (*connect.Response[pingv1.PingResponse], error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *connect.Response[pingv1.PingResponse]
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[pingv1.PingRequest]) (*connect.Response[pingv1.PingResponse], error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[pingv1.PingRequest]) *connect.Response[pingv1.PingResponse]); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*connect.Response[pingv1.PingResponse])
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[pingv1.PingRequest]) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// Register provides a mock function with given fields: _a0, _a1
+func (_m *Client) Register(_a0 context.Context, _a1 *connect.Request[runnerv1.RegisterRequest]) (*connect.Response[runnerv1.RegisterResponse], error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *connect.Response[runnerv1.RegisterResponse]
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.RegisterRequest]) (*connect.Response[runnerv1.RegisterResponse], error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.RegisterRequest]) *connect.Response[runnerv1.RegisterResponse]); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*connect.Response[runnerv1.RegisterResponse])
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.RegisterRequest]) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// UpdateLog provides a mock function with given fields: _a0, _a1
+func (_m *Client) UpdateLog(_a0 context.Context, _a1 *connect.Request[runnerv1.UpdateLogRequest]) (*connect.Response[runnerv1.UpdateLogResponse], error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *connect.Response[runnerv1.UpdateLogResponse]
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.UpdateLogRequest]) (*connect.Response[runnerv1.UpdateLogResponse], error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.UpdateLogRequest]) *connect.Response[runnerv1.UpdateLogResponse]); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*connect.Response[runnerv1.UpdateLogResponse])
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.UpdateLogRequest]) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// UpdateTask provides a mock function with given fields: _a0, _a1
+func (_m *Client) UpdateTask(_a0 context.Context, _a1 *connect.Request[runnerv1.UpdateTaskRequest]) (*connect.Response[runnerv1.UpdateTaskResponse], error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *connect.Response[runnerv1.UpdateTaskResponse]
+	var r1 error
+	if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.UpdateTaskRequest]) (*connect.Response[runnerv1.UpdateTaskResponse], error)); ok {
+		return rf(_a0, _a1)
+	}
+	if rf, ok := ret.Get(0).(func(context.Context, *connect.Request[runnerv1.UpdateTaskRequest]) *connect.Response[runnerv1.UpdateTaskResponse]); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*connect.Response[runnerv1.UpdateTaskResponse])
+		}
+	}
+
+	if rf, ok := ret.Get(1).(func(context.Context, *connect.Request[runnerv1.UpdateTaskRequest]) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+type mockConstructorTestingTNewClient interface {
+	mock.TestingT
+	Cleanup(func())
+}
+
+// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+func NewClient(t mockConstructorTestingTNewClient) *Client {
+	mock := &Client{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
@@ -22,6 +22,10 @@ runner:
   timeout: 3h
   # Whether skip verifying the TLS certificate of the Gitea instance.
   insecure: false
+  # The timeout for fetching the job from the Gitea instance.
+  fetch_timeout: 5s
+  # The interval for fetching the job from the Gitea instance.
+  fetch_interval: 2s

 cache:
   # Enable cache server to use actions/cache.
@@ -40,3 +44,10 @@ cache:
 container:
   # Which network to use for the job containers. Could be bridge, host, none, or the name of a custom network.
   network_mode: bridge
+  # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
+  privileged: false
+  # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
+  options:
+  # The parent directory of a job's working directory.
+  # If it's empty, /workspace will be used.
+  workdir_parent:
@@ -18,12 +18,14 @@ type Config struct {
 		Level string `yaml:"level"`
 	} `yaml:"log"`
 	Runner struct {
 		File     string            `yaml:"file"`
 		Capacity int               `yaml:"capacity"`
 		Envs     map[string]string `yaml:"envs"`
 		EnvFile  string            `yaml:"env_file"`
 		Timeout  time.Duration     `yaml:"timeout"`
 		Insecure bool              `yaml:"insecure"`
+		FetchTimeout  time.Duration `yaml:"fetch_timeout"`
+		FetchInterval time.Duration `yaml:"fetch_interval"`
 	} `yaml:"runner"`
 	Cache struct {
 		Enabled *bool `yaml:"enabled"` // pointer to distinguish between false and not set, and it will be true if not set
@@ -32,8 +34,11 @@ type Config struct {
 		Port uint16 `yaml:"port"`
 	} `yaml:"cache"`
 	Container struct {
 		NetworkMode string `yaml:"network_mode"`
-	}
+		Privileged    bool   `yaml:"privileged"`
+		Options       string `yaml:"options"`
+		WorkdirParent string `yaml:"workdir_parent"`
+	} `yaml:"container"`
 }

 // LoadDefault returns the default configuration.
@@ -90,6 +95,15 @@ func LoadDefault(file string) (*Config, error) {
 	if cfg.Container.NetworkMode == "" {
 		cfg.Container.NetworkMode = "bridge"
 	}
+	if cfg.Container.WorkdirParent == "" {
+		cfg.Container.WorkdirParent = "workspace"
+	}
+	if cfg.Runner.FetchTimeout <= 0 {
+		cfg.Runner.FetchTimeout = 5 * time.Second
+	}
+	if cfg.Runner.FetchInterval <= 0 {
+		cfg.Runner.FetchInterval = 2 * time.Second
+	}

 	return cfg, nil
 }
@@ -6,6 +6,7 @@ package report
 import (
 	"context"
 	"fmt"
+	"regexp"
 	"strings"
 	"sync"
 	"time"
@@ -31,8 +32,14 @@ type Reporter struct {
 	logOffset   int
 	logRows     []*runnerv1.LogRow
 	logReplacer *strings.Replacer
-	state       *runnerv1.TaskState
-	stateM      sync.RWMutex
+	oldnew      []string
+
+	state   *runnerv1.TaskState
+	stateMu sync.RWMutex
+	outputs sync.Map
+
+	debugOutputEnabled  bool
+	stopCommandEndToken string
 }

 func NewReporter(ctx context.Context, cancel context.CancelFunc, client client.Client, task *runnerv1.Task) *Reporter {
@@ -44,20 +51,27 @@ func NewReporter(ctx context.Context, cancel context.CancelFunc, client client.C
 		oldnew = append(oldnew, v, "***")
 	}

-	return &Reporter{
+	rv := &Reporter{
 		ctx:    ctx,
 		cancel: cancel,
 		client: client,
+		oldnew: oldnew,
 		logReplacer: strings.NewReplacer(oldnew...),
 		state: &runnerv1.TaskState{
 			Id: task.Id,
 		},
 	}
+
+	if task.Secrets["ACTIONS_STEP_DEBUG"] == "true" {
+		rv.debugOutputEnabled = true
+	}
+
+	return rv
 }

 func (r *Reporter) ResetSteps(l int) {
-	r.stateM.Lock()
-	defer r.stateM.Unlock()
+	r.stateMu.Lock()
+	defer r.stateMu.Unlock()
 	for i := 0; i < l; i++ {
 		r.state.Steps = append(r.state.Steps, &runnerv1.StepState{
 			Id: int64(i),
@@ -69,9 +83,16 @@ func (r *Reporter) Levels() []log.Level {
 	return log.AllLevels
 }

+func appendIfNotNil[T any](s []*T, v *T) []*T {
+	if v != nil {
+		return append(s, v)
+	}
+	return s
+}
+
 func (r *Reporter) Fire(entry *log.Entry) error {
-	r.stateM.Lock()
-	defer r.stateM.Unlock()
+	r.stateMu.Lock()
+	defer r.stateMu.Unlock()

 	log.WithFields(entry.Data).Trace(entry.Message)

@@ -95,7 +116,7 @@ func (r *Reporter) Fire(entry *log.Entry) error {
 			}
 		}
 		if !r.duringSteps() {
-			r.logRows = append(r.logRows, r.parseLogRow(entry))
+			r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry))
 		}
 		return nil
 	}
@@ -108,7 +129,7 @@ func (r *Reporter) Fire(entry *log.Entry) error {
 	}
 	if step == nil {
 		if !r.duringSteps() {
-			r.logRows = append(r.logRows, r.parseLogRow(entry))
+			r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry))
 		}
 		return nil
 	}
@@ -118,14 +139,16 @@ func (r *Reporter) Fire(entry *log.Entry) error {
 	}
 	if v, ok := entry.Data["raw_output"]; ok {
 		if rawOutput, ok := v.(bool); ok && rawOutput {
-			if step.LogLength == 0 {
-				step.LogIndex = int64(r.logOffset + len(r.logRows))
+			if row := r.parseLogRow(entry); row != nil {
+				if step.LogLength == 0 {
+					step.LogIndex = int64(r.logOffset + len(r.logRows))
+				}
+				step.LogLength++
+				r.logRows = append(r.logRows, row)
 			}
-			step.LogLength++
-			r.logRows = append(r.logRows, r.parseLogRow(entry))
 		}
 	} else if !r.duringSteps() {
-		r.logRows = append(r.logRows, r.parseLogRow(entry))
+		r.logRows = appendIfNotNil(r.logRows, r.parseLogRow(entry))
 	}
 	if v, ok := entry.Data["stepResult"]; ok {
 		if stepResult, ok := r.parseResult(v); ok {
@@ -155,9 +178,13 @@ func (r *Reporter) RunDaemon() {
 }

 func (r *Reporter) Logf(format string, a ...interface{}) {
-	r.stateM.Lock()
-	defer r.stateM.Unlock()
+	r.stateMu.Lock()
+	defer r.stateMu.Unlock()
+
+	r.logf(format, a...)
+}
+
+func (r *Reporter) logf(format string, a ...interface{}) {
 	if !r.duringSteps() {
 		r.logRows = append(r.logRows, &runnerv1.LogRow{
 			Time: timestamppb.Now(),
@@ -166,10 +193,30 @@ func (r *Reporter) Logf(format string, a ...interface{}) {
 	}
 }

+func (r *Reporter) SetOutputs(outputs map[string]string) {
+	r.stateMu.Lock()
+	defer r.stateMu.Unlock()
+
+	for k, v := range outputs {
+		if len(k) > 255 {
+			r.logf("ignore output because the key is too long: %q", k)
+			continue
+		}
+		if l := len(v); l > 1024*1024 {
+			log.Println("ignore output because the value is too long:", k, l)
+			r.logf("ignore output because the value %q is too long: %d", k, l)
+		}
+		if _, ok := r.outputs.Load(k); ok {
+			continue
+		}
+		r.outputs.Store(k, v)
+	}
+}
+
 func (r *Reporter) Close(lastWords string) error {
 	r.closed = true

-	r.stateM.Lock()
+	r.stateMu.Lock()
 	if r.state.Result == runnerv1.Result_RESULT_UNSPECIFIED {
 		if lastWords == "" {
 			lastWords = "Early termination"
@@ -184,14 +231,14 @@ func (r *Reporter) Close(lastWords string) error {
 			Time:    timestamppb.Now(),
 			Content: lastWords,
 		})
-		return nil
+		r.state.StoppedAt = timestamppb.Now()
 	} else if lastWords != "" {
 		r.logRows = append(r.logRows, &runnerv1.LogRow{
 			Time:    timestamppb.Now(),
 			Content: lastWords,
 		})
 	}
-	r.stateM.Unlock()
+	r.stateMu.Unlock()

 	return retry.Do(func() error {
 		if err := r.ReportLog(true); err != nil {
@@ -205,9 +252,9 @@ func (r *Reporter) ReportLog(noMore bool) error {
 	r.clientM.Lock()
 	defer r.clientM.Unlock()

-	r.stateM.RLock()
+	r.stateMu.RLock()
 	rows := r.logRows
-	r.stateM.RUnlock()
+	r.stateMu.RUnlock()

 	resp, err := r.client.UpdateLog(r.ctx, connect.NewRequest(&runnerv1.UpdateLogRequest{
 		TaskId: r.state.Id,
@@ -224,10 +271,10 @@ func (r *Reporter) ReportLog(noMore bool) error {
 		return fmt.Errorf("submitted logs are lost")
 	}

-	r.stateM.Lock()
+	r.stateMu.Lock()
 	r.logRows = r.logRows[ack-r.logOffset:]
 	r.logOffset = ack
-	r.stateM.Unlock()
+	r.stateMu.Unlock()

 	if noMore && ack < r.logOffset+len(rows) {
 		return fmt.Errorf("not all logs are submitted")
@@ -240,21 +287,45 @@ func (r *Reporter) ReportState() error {
 	r.clientM.Lock()
 	defer r.clientM.Unlock()

-	r.stateM.RLock()
+	r.stateMu.RLock()
 	state := proto.Clone(r.state).(*runnerv1.TaskState)
-	r.stateM.RUnlock()
+	r.stateMu.RUnlock()

+	outputs := make(map[string]string)
+	r.outputs.Range(func(k, v interface{}) bool {
+		if val, ok := v.(string); ok {
+			outputs[k.(string)] = val
+		}
+		return true
+	})
+
 	resp, err := r.client.UpdateTask(r.ctx, connect.NewRequest(&runnerv1.UpdateTaskRequest{
 		State:   state,
+		Outputs: outputs,
 	}))
 	if err != nil {
 		return err
 	}

+	for _, k := range resp.Msg.SentOutputs {
+		r.outputs.Store(k, struct{}{})
+	}
+
 	if resp.Msg.State != nil && resp.Msg.State.Result == runnerv1.Result_RESULT_CANCELLED {
 		r.cancel()
 	}

+	var noSent []string
+	r.outputs.Range(func(k, v interface{}) bool {
+		if _, ok := v.(string); ok {
+			noSent = append(noSent, k.(string))
+		}
+		return true
+	})
+	if len(noSent) > 0 {
+		return fmt.Errorf("there are still outputs that have not been sent: %v", noSent)
+	}
+
 	return nil
 }

@@ -288,11 +359,70 @@ func (r *Reporter) parseResult(result interface{}) (runnerv1.Result, bool) {
 	return ret, ok
 }

+var cmdRegex = regexp.MustCompile(`^::([^ :]+)( .*)?::(.*)$`)
+
+func (r *Reporter) handleCommand(originalContent, command, parameters, value string) *string {
+	if r.stopCommandEndToken != "" && command != r.stopCommandEndToken {
+		return &originalContent
+	}
+
+	switch command {
+	case "add-mask":
+		r.addMask(value)
+		return nil
+	case "debug":
+		if r.debugOutputEnabled {
+			return &value
+		}
+		return nil
+
+	case "notice":
+		// Not implemented yet, so just return the original content.
+		return &originalContent
+	case "warning":
+		// Not implemented yet, so just return the original content.
+		return &originalContent
+	case "error":
+		// Not implemented yet, so just return the original content.
+		return &originalContent
+	case "group":
+		// Returning the original content, because I think the frontend
+		// will use it when rendering the output.
+		return &originalContent
+	case "endgroup":
+		// Ditto
+		return &originalContent
+	case "stop-commands":
+		r.stopCommandEndToken = value
+		return nil
+	case r.stopCommandEndToken:
+		r.stopCommandEndToken = ""
+		return nil
+	}
+	return &originalContent
+}
+
 func (r *Reporter) parseLogRow(entry *log.Entry) *runnerv1.LogRow {
 	content := strings.TrimRightFunc(entry.Message, func(r rune) bool { return r == '\r' || r == '\n' })

+	matches := cmdRegex.FindStringSubmatch(content)
+	if matches != nil {
+		if output := r.handleCommand(content, matches[1], matches[2], matches[3]); output != nil {
+			content = *output
+		} else {
+			return nil
+		}
+	}
+
 	content = r.logReplacer.Replace(content)
+
 	return &runnerv1.LogRow{
 		Time:    timestamppb.New(entry.Time),
 		Content: content,
 	}
 }
+
+func (r *Reporter) addMask(msg string) {
+	r.oldnew = append(r.oldnew, msg, "***")
+	r.logReplacer = strings.NewReplacer(r.oldnew...)
+}
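
Aside: the cmdRegex added above, ^::([^ :]+)( .*)?::(.*)$, is what splits a workflow command line into command, optional parameters, and value before handleCommand decides whether to drop, rewrite, or pass the line through. A self-contained sketch of just that split (illustrative only, not code from this repository; the sample lines are borrowed from the test cases that follow):

package main

import (
	"fmt"
	"regexp"
)

var cmdRegex = regexp.MustCompile(`^::([^ :]+)( .*)?::(.*)$`)

func main() {
	lines := []string{
		"::add-mask::mysecret",
		"::debug::GitHub Actions runtime token access controls",
		"::notice file=file.name,line=42::Gosh, that's not going to work",
		"plain log line",
	}
	for _, line := range lines {
		m := cmdRegex.FindStringSubmatch(line)
		if m == nil {
			fmt.Printf("%q -> not a workflow command\n", line)
			continue
		}
		// m[1] = command, m[2] = optional " parameters", m[3] = value
		fmt.Printf("%q -> command=%q params=%q value=%q\n", line, m[1], m[2], m[3])
	}
}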
internal/pkg/report/reporter_test.go (new file, 197 lines)
@@ -0,0 +1,197 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package report
+
+import (
+	"context"
+	"strings"
+	"testing"
+
+	runnerv1 "code.gitea.io/actions-proto-go/runner/v1"
+	connect_go "github.com/bufbuild/connect-go"
+	log "github.com/sirupsen/logrus"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"google.golang.org/protobuf/types/known/structpb"
+
+	"gitea.com/gitea/act_runner/internal/pkg/client/mocks"
+)
+
+func TestReporter_parseLogRow(t *testing.T) {
+	tests := []struct {
+		name               string
+		debugOutputEnabled bool
+		args               []string
+		want               []string
+	}{
+		{
+			"No command", false,
+			[]string{"Hello, world!"},
+			[]string{"Hello, world!"},
+		},
+		{
+			"Add-mask", false,
+			[]string{
+				"foo mysecret bar",
+				"::add-mask::mysecret",
+				"foo mysecret bar",
+			},
+			[]string{
+				"foo mysecret bar",
+				"<nil>",
+				"foo *** bar",
+			},
+		},
+		{
+			"Debug enabled", true,
+			[]string{
+				"::debug::GitHub Actions runtime token access controls",
+			},
+			[]string{
+				"GitHub Actions runtime token access controls",
+			},
+		},
+		{
+			"Debug not enabled", false,
+			[]string{
+				"::debug::GitHub Actions runtime token access controls",
+			},
+			[]string{
+				"<nil>",
+			},
+		},
+		{
+			"notice", false,
+			[]string{
+				"::notice file=file.name,line=42,endLine=48,title=Cool Title::Gosh, that's not going to work",
+			},
+			[]string{
+				"::notice file=file.name,line=42,endLine=48,title=Cool Title::Gosh, that's not going to work",
+			},
+		},
+		{
+			"warning", false,
+			[]string{
+				"::warning file=file.name,line=42,endLine=48,title=Cool Title::Gosh, that's not going to work",
+			},
+			[]string{
+				"::warning file=file.name,line=42,endLine=48,title=Cool Title::Gosh, that's not going to work",
+			},
+		},
+		{
+			"error", false,
+			[]string{
+				"::error file=file.name,line=42,endLine=48,title=Cool Title::Gosh, that's not going to work",
+			},
+			[]string{
+				"::error file=file.name,line=42,endLine=48,title=Cool Title::Gosh, that's not going to work",
+			},
+		},
+		{
+			"group", false,
+			[]string{
+				"::group::",
+				"::endgroup::",
+			},
+			[]string{
+				"::group::",
+				"::endgroup::",
+			},
+		},
+		{
+			"stop-commands", false,
+			[]string{
+				"::add-mask::foo",
+				"::stop-commands::myverycoolstoptoken",
+				"::add-mask::bar",
+				"::debug::Stuff",
+				"myverycoolstoptoken",
+				"::add-mask::baz",
+				"::myverycoolstoptoken::",
+				"::add-mask::wibble",
+				"foo bar baz wibble",
+			},
+			[]string{
+				"<nil>",
+				"<nil>",
+				"::add-mask::bar",
+				"::debug::Stuff",
+				"myverycoolstoptoken",
+				"::add-mask::baz",
+				"<nil>",
+				"<nil>",
+				"*** bar baz ***",
+			},
+		},
+		{
+			"unknown command", false,
+			[]string{
+				"::set-mask::foo",
+			},
+			[]string{
+				"::set-mask::foo",
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			r := &Reporter{
+				logReplacer:        strings.NewReplacer(),
+				debugOutputEnabled: tt.debugOutputEnabled,
+			}
+			for idx, arg := range tt.args {
+				rv := r.parseLogRow(&log.Entry{Message: arg})
+				got := "<nil>"
+
+				if rv != nil {
+					got = rv.Content
+				}
+
+				assert.Equal(t, tt.want[idx], got)
+			}
+		})
+	}
+}
+
+func TestReporter_Fire(t *testing.T) {
+	t.Run("ignore command lines", func(t *testing.T) {
+		client := mocks.NewClient(t)
+		client.On("UpdateLog", mock.Anything, mock.Anything).Return(func(_ context.Context, req *connect_go.Request[runnerv1.UpdateLogRequest]) (*connect_go.Response[runnerv1.UpdateLogResponse], error) {
+			t.Logf("Received UpdateLog: %s", req.Msg.String())
+			return connect_go.NewResponse(&runnerv1.UpdateLogResponse{
+				AckIndex: req.Msg.Index + int64(len(req.Msg.Rows)),
+			}), nil
+		})
+		client.On("UpdateTask", mock.Anything, mock.Anything).Return(func(_ context.Context, req *connect_go.Request[runnerv1.UpdateTaskRequest]) (*connect_go.Response[runnerv1.UpdateTaskResponse], error) {
+			t.Logf("Received UpdateTask: %s", req.Msg.String())
+			return connect_go.NewResponse(&runnerv1.UpdateTaskResponse{}), nil
+		})
+		ctx, cancel := context.WithCancel(context.Background())
+		taskCtx, err := structpb.NewStruct(map[string]interface{}{})
+		require.NoError(t, err)
+		reporter := NewReporter(ctx, cancel, client, &runnerv1.Task{
+			Context: taskCtx,
+		})
+		defer func() {
+			assert.NoError(t, reporter.Close(""))
+		}()
+		reporter.ResetSteps(5)
+
+		dataStep0 := map[string]interface{}{
+			"stage":      "Main",
+			"stepNumber": 0,
+			"raw_output": true,
+		}
+
+		assert.NoError(t, reporter.Fire(&log.Entry{Message: "regular log line", Data: dataStep0}))
+		assert.NoError(t, reporter.Fire(&log.Entry{Message: "::debug::debug log line", Data: dataStep0}))
+		assert.NoError(t, reporter.Fire(&log.Entry{Message: "regular log line", Data: dataStep0}))
+		assert.NoError(t, reporter.Fire(&log.Entry{Message: "::debug::debug log line", Data: dataStep0}))
+		assert.NoError(t, reporter.Fire(&log.Entry{Message: "::debug::debug log line", Data: dataStep0}))
+		assert.NoError(t, reporter.Fire(&log.Entry{Message: "regular log line", Data: dataStep0}))
+
+		assert.Equal(t, int64(3), reporter.state.Steps[0].LogLength)
+	})
+}
run.sh (new executable file, 45 lines)
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+if [[ ! -d /data ]]; then
+  mkdir -p /data
+fi
+
+cd /data
+
+CONFIG_ARG=""
+if [[ ! -z "${CONFIG_FILE}" ]]; then
+  CONFIG_ARG="--config ${CONFIG_FILE}"
+fi
+
+# Use the same ENV variable names as https://github.com/vegardit/docker-gitea-act-runner
+
+if [[ ! -s .runner ]]; then
+  try=$((try + 1))
+  success=0
+
+  # The point of this loop is to make it simple, when running both act_runner and gitea in docker,
+  # for the act_runner to wait a moment for gitea to become available before erroring out. Within
+  # the context of a single docker-compose, something similar could be done via healthchecks, but
+  # this is more flexible.
+  while [[ $success -eq 0 ]] && [[ $try -lt ${GITEA_MAX_REG_ATTEMPTS:-10} ]]; do
+    act_runner register \
+      --instance "${GITEA_INSTANCE_URL}" \
+      --token "${GITEA_RUNNER_REGISTRATION_TOKEN}" \
+      --name "${GITEA_RUNNER_NAME:-`hostname`}" \
+      --labels "${GITEA_RUNNER_LABELS}" \
+      ${CONFIG_ARG} --no-interactive > /tmp/reg.log 2>&1
+
+    cat /tmp/reg.log
+
+    cat /tmp/reg.log | grep 'Runner registered successfully' > /dev/null
+    if [[ $? -eq 0 ]]; then
+      echo "SUCCESS"
+      success=1
+    else
+      echo "Waiting to retry ..."
+      sleep 5
+    fi
+  done
+fi
+
+act_runner daemon ${CONFIG_ARG}