Mirror of https://github.com/actions/cache.git, synced 2025-06-25 11:51:11 +02:00
Compare commits: 17 commits (tiwarishub, v3.0.8)

Commits: fd5de65bc8, d49b6bb21d, a7c34adf76, 83394c99b7, e839c25979, 33a923d660, a404368986, f4278025ab, 9916fe1701, 318935ef66, 85efbb58b9, 4387dbc81a, 71e3ee5cce, c316eb7911, 0865c47f36, 354a2ae15e, a0e530f115
.github/auto_assign.yml (vendored): 1 changed line
@@ -6,6 +6,7 @@ addAssignees: false
 # A list of reviewers to be added to pull requests (GitHub user name)
 reviewers:
+  - phantsure
   - kotewar
   - aparna-ravindra
   - tiwarishub
.github/workflows/auto-assign-issues.yml (vendored): 2 changed lines
@@ -11,5 +11,5 @@ jobs:
       - name: 'Auto-assign issue'
         uses: pozil/auto-assign-issue@v1.4.0
         with:
-          assignees: kotewar,tiwarishub,aparna-ravindra,vsvipul,bishal-pdmsft
+          assignees: phantsure,kotewar,tiwarishub,aparna-ravindra,vsvipul,bishal-pdmsft
           numOfAssignee: 1
.licenses/npm/@actions/cache.dep.yml (generated): 20 changed lines
@@ -1,20 +1,20 @@
 ---
 name: "@actions/cache"
-version: 2.0.6
+version: 3.0.4
 type: npm
 summary:
 homepage:
 license: mit
 licenses:
 - sources: LICENSE.md
   text: |-
     The MIT License (MIT)
 
     Copyright 2019 GitHub
 
     Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
 
     The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
 
     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 notices: []
README.md: 14 changed lines
@@ -15,6 +15,11 @@ See ["Caching dependencies to speed up workflows"](https://help.github.com/githu
 * Updated the minimum runner version support from node 12 -> node 16.
 * Fixed avoiding empty cache save when no files are available for caching.
 * Fixed tar creation error while trying to create tar with path as `~/` home folder on `ubuntu-latest`.
+* Fixed zstd failing on amazon linux 2.0 runners.
+* Fixed cache not working with github workspace directory or current directory.
+* Fixed the download stuck problem by introducing a timeout of 1 hour for cache downloads.
+* Fix zstd not working for windows on gnu tar in issues.
+* Allowing users to provide a custom timeout as input for aborting download of a cache segment using an environment variable `SEGMENT_DOWNLOAD_TIMEOUT_MIN`. Default is 60 minutes.
 
 Refer [here](https://github.com/actions/cache/blob/v2/README.md) for previous versions
 
@@ -32,6 +37,9 @@ If you are using this inside a container, a POSIX-compliant `tar` needs to be in
 * `restore-keys` - An ordered list of keys to use for restoring stale cache if no cache hit occurred for key. Note
 `cache-hit` returns false in this case.
 
+#### Environment Variables
+* `SEGMENT_DOWNLOAD_TIMEOUT_MIN` - Segment download timeout (in minutes, default `60`) to abort download of the segment if not completed in the defined number of minutes. [Read more](#cache-segment-restore-timeout)
+
 ### Outputs
 
 * `cache-hit` - A boolean value to indicate an exact match was found for the key
@@ -81,6 +89,7 @@ Every programming language and framework has its own way of caching.
 See [Examples](examples.md) for a list of `actions/cache` implementations for use with:
 
 - [C# - NuGet](./examples.md#c---nuget)
+- [Clojure - Lein Deps](./examples.md#clojure---lein-deps)
 - [D - DUB](./examples.md#d---dub)
 - [Deno](./examples.md#deno)
 - [Elixir - Mix](./examples.md#elixir---mix)
@@ -214,6 +223,11 @@ jobs:
       if: steps.cache-primes.outputs.cache-hit != 'true'
       run: ./generate-primes -d prime-numbers
 ```
+## Cache segment restore timeout
+
+A cache gets downloaded in multiple segments of fixed sizes (`1GB` for a `32-bit` runner and `2GB` for a `64-bit` runner). Sometimes, a segment download gets stuck which causes the workflow job to be stuck forever and fail. Version `v3.0.8` of `actions/cache` introduces a segment download timeout. The segment download timeout will allow the segment download to get aborted and hence allow the job to proceed with a cache miss.
+
+Default value of this timeout is 60 minutes and can be customized by specifying an [environment variable](https://docs.github.com/en/actions/learn-github-actions/environment-variables) named `SEGMENT_DOWNLOAD_TIMEOUT_MINS` with timeout value in minutes.
+
 ## Contributing
 We would love for you to contribute to `actions/cache`, pull requests are welcome! Please see the [CONTRIBUTING.md](CONTRIBUTING.md) for more information.
RELEASES.md: 16 changed lines
@@ -14,4 +14,18 @@
 - Fixed avoiding empty cache save when no files are available for caching. ([issue](https://github.com/actions/cache/issues/624))
 
 ### 3.0.4
 - Fixed tar creation error while trying to create tar with path as `~/` home folder on `ubuntu-latest`. ([issue](https://github.com/actions/cache/issues/689))
+
+### 3.0.5
+- Removed error handling by consuming actions/cache 3.0 toolkit, Now cache server error handling will be done by toolkit. ([PR](https://github.com/actions/cache/pull/834))
+
+### 3.0.6
+- Fixed [#809](https://github.com/actions/cache/issues/809) - zstd -d: no such file or directory error
+- Fixed [#833](https://github.com/actions/cache/issues/833) - cache doesn't work with github workspace directory
+
+### 3.0.7
+- Fixed [#810](https://github.com/actions/cache/issues/810) - download stuck issue. A new timeout is introduced in the download process to abort the download if it gets stuck and doesn't finish within an hour.
+
+### 3.0.8
+- Fix zstd not working for windows on gnu tar in issues [#888](https://github.com/actions/cache/issues/888) and [#891](https://github.com/actions/cache/issues/891).
+- Allowing users to provide a custom timeout as input for aborting download of a cache segment using an environment variable `SEGMENT_DOWNLOAD_TIMEOUT_MIN`. Default is 60 minutes.
__tests__/restore.test.ts

@@ -227,40 +227,6 @@ test("restore with no cache found", async () => {
     );
 });
 
-test("restore with server error should fail", async () => {
-    const path = "node_modules";
-    const key = "node-test";
-    testUtils.setInputs({
-        path: path,
-        key
-    });
-
-    const logWarningMock = jest.spyOn(actionUtils, "logWarning");
-    const failedMock = jest.spyOn(core, "setFailed");
-    const stateMock = jest.spyOn(core, "saveState");
-    const restoreCacheMock = jest
-        .spyOn(cache, "restoreCache")
-        .mockImplementationOnce(() => {
-            throw new Error("HTTP Error Occurred");
-        });
-    const setCacheHitOutputMock = jest.spyOn(actionUtils, "setCacheHitOutput");
-
-    await run();
-
-    expect(restoreCacheMock).toHaveBeenCalledTimes(1);
-    expect(restoreCacheMock).toHaveBeenCalledWith([path], key, []);
-
-    expect(stateMock).toHaveBeenCalledWith("CACHE_KEY", key);
-
-    expect(logWarningMock).toHaveBeenCalledTimes(1);
-    expect(logWarningMock).toHaveBeenCalledWith("HTTP Error Occurred");
-
-    expect(setCacheHitOutputMock).toHaveBeenCalledTimes(1);
-    expect(setCacheHitOutputMock).toHaveBeenCalledWith(false);
-
-    expect(failedMock).toHaveBeenCalledTimes(0);
-});
-
 test("restore with restore keys and no cache found", async () => {
     const path = "node_modules";
     const key = "node-test";

__tests__/save.test.ts

@@ -267,7 +267,6 @@ test("save with large cache outputs warning", async () => {
 });
 
 test("save with reserve cache failure outputs warning", async () => {
-    const infoMock = jest.spyOn(core, "info");
     const logWarningMock = jest.spyOn(actionUtils, "logWarning");
     const failedMock = jest.spyOn(core, "setFailed");
 
@@ -306,10 +305,10 @@ test("save with reserve cache failure outputs warning", async () => {
         expect.anything()
     );
 
-    expect(infoMock).toHaveBeenCalledWith(
+    expect(logWarningMock).toHaveBeenCalledWith(
         `Unable to reserve cache with key ${primaryKey}, another job may be creating this cache.`
     );
-    expect(logWarningMock).toHaveBeenCalledTimes(0);
+    expect(logWarningMock).toHaveBeenCalledTimes(1);
     expect(failedMock).toHaveBeenCalledTimes(0);
 });
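The removed "restore with server error should fail" test reflects error handling moving out of the action and into the @actions/cache toolkit. A minimal, self-contained jest sketch of the warning-assertion pattern the surviving tests rely on; the `restoreWithWarning` helper here is hypothetical, not code from the repository:

```typescript
import * as core from "@actions/core";

// Hypothetical unit under test: a failure is reported as a warning
// rather than failing the job.
async function restoreWithWarning(): Promise<void> {
    try {
        throw new Error("HTTP Error Occurred");
    } catch (error) {
        core.warning((error as Error).message);
    }
}

test("failures surface as warnings, not as job failures", async () => {
    const warningMock = jest.spyOn(core, "warning");
    const failedMock = jest.spyOn(core, "setFailed");

    await restoreWithWarning();

    expect(warningMock).toHaveBeenCalledWith("HTTP Error Occurred");
    expect(failedMock).toHaveBeenCalledTimes(0);
});
```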
dist/restore/index.js (vendored): 212 changed lines
@@ -1113,7 +1113,13 @@ function resolvePaths(patterns) {
                     .replace(new RegExp(`\\${path.sep}`, 'g'), '/');
                 core.debug(`Matched: ${relativeFile}`);
                 // Paths are made relative so the tar entries are all relative to the root of the workspace.
-                paths.push(`${relativeFile}`);
+                if (relativeFile === '') {
+                    // path.relative returns empty string if workspace and file are equal
+                    paths.push('.');
+                }
+                else {
+                    paths.push(`${relativeFile}`);
+                }
             }
         }
         catch (e_1_1) { e_1 = { error: e_1_1 }; }
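For context on this hunk: Node's `path.relative` returns an empty string when the two paths resolve to the same directory, so caching the workspace directory itself used to push `''` and produce a broken tar entry. A small standalone sketch of the behavior being patched (illustrative paths, not the repository's code):

```typescript
import * as path from "path";

// path.relative returns '' when 'from' and 'to' are the same directory.
const workspace = "/home/runner/work/repo/repo";
const relativeFile = path.relative(workspace, workspace); // ''

// The fix: tar needs a real entry name, so an empty result becomes '.',
// meaning "the workspace root itself".
const entry = relativeFile === "" ? "." : relativeFile;
console.log(entry); // prints '.'
```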
@@ -5467,6 +5473,7 @@ const util = __importStar(__webpack_require__(669));
 const utils = __importStar(__webpack_require__(15));
 const constants_1 = __webpack_require__(931);
 const requestUtils_1 = __webpack_require__(899);
+const abort_controller_1 = __webpack_require__(106);
 /**
  * Pipes the body of a HTTP response to a stream
  *
@@ -5650,15 +5657,24 @@ function downloadCacheStorageSDK(archiveLocation, archivePath, options) {
         const fd = fs.openSync(archivePath, 'w');
         try {
             downloadProgress.startDisplayTimer();
+            const controller = new abort_controller_1.AbortController();
+            const abortSignal = controller.signal;
             while (!downloadProgress.isDone()) {
                 const segmentStart = downloadProgress.segmentOffset + downloadProgress.segmentSize;
                 const segmentSize = Math.min(maxSegmentSize, contentLength - segmentStart);
                 downloadProgress.nextSegment(segmentSize);
-                const result = yield client.downloadToBuffer(segmentStart, segmentSize, {
+                const result = yield promiseWithTimeout(options.segmentTimeoutInMs || 3600000, client.downloadToBuffer(segmentStart, segmentSize, {
+                    abortSignal,
                     concurrency: options.downloadConcurrency,
                     onProgress: downloadProgress.onProgress()
-                });
-                fs.writeFileSync(fd, result);
+                }));
+                if (result === 'timeout') {
+                    controller.abort();
+                    throw new Error('Aborting cache download as the download time exceeded the timeout.');
+                }
+                else if (Buffer.isBuffer(result)) {
+                    fs.writeFileSync(fd, result);
+                }
             }
         }
         finally {
@@ -5669,6 +5685,16 @@ function downloadCacheStorageSDK(archiveLocation, archivePath, options) {
     });
 }
 exports.downloadCacheStorageSDK = downloadCacheStorageSDK;
+const promiseWithTimeout = (timeoutMs, promise) => __awaiter(void 0, void 0, void 0, function* () {
+    let timeoutHandle;
+    const timeoutPromise = new Promise(resolve => {
+        timeoutHandle = setTimeout(() => resolve('timeout'), timeoutMs);
+    });
+    return Promise.race([promise, timeoutPromise]).then(result => {
+        clearTimeout(timeoutHandle);
+        return result;
+    });
+});
 //# sourceMappingURL=downloadUtils.js.map
 
 /***/ }),
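One subtlety worth noting in `promiseWithTimeout`: `Promise.race` only decides which result is observed first; it does not cancel the losing promise. That is why the caller above also holds an `AbortController` and calls `controller.abort()` when the `'timeout'` sentinel wins. A reduced sketch of the same pattern (names are illustrative, not the repository's API):

```typescript
// Race a work promise against a timer that resolves to a sentinel value.
async function withTimeout<T>(timeoutMs: number, work: Promise<T>): Promise<T | "timeout"> {
    let handle: NodeJS.Timeout | undefined;
    const timer = new Promise<"timeout">(resolve => {
        handle = setTimeout(() => resolve("timeout"), timeoutMs);
    });
    const result = await Promise.race([work, timer]);
    if (handle) clearTimeout(handle); // stop the timer so it cannot keep the process alive
    return result;
}

// Usage: the losing promise keeps running, so abort it explicitly.
async function demo(): Promise<void> {
    const controller = new AbortController(); // built into Node 15+
    const slowWork = new Promise<Buffer>(() => { /* never settles */ });
    const result = await withTimeout(1000, slowWork);
    if (result === "timeout") {
        controller.abort(); // signal the underlying operation to stop
        throw new Error("Aborting cache download as the download time exceeded the timeout.");
    }
}

demo().catch(err => console.error(err.message));
```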
@@ -37214,6 +37240,7 @@ const fs_1 = __webpack_require__(747);
 const path = __importStar(__webpack_require__(622));
 const utils = __importStar(__webpack_require__(15));
 const constants_1 = __webpack_require__(931);
+const IS_WINDOWS = process.platform === 'win32';
 function getTarPath(args, compressionMethod) {
     return __awaiter(this, void 0, void 0, function* () {
         switch (process.platform) {
@@ -37261,26 +37288,43 @@ function getWorkingDirectory() {
     var _a;
     return (_a = process.env['GITHUB_WORKSPACE']) !== null && _a !== void 0 ? _a : process.cwd();
 }
+// Common function for extractTar and listTar to get the compression method
+function getCompressionProgram(compressionMethod) {
+    // -d: Decompress.
+    // unzstd is equivalent to 'zstd -d'
+    // --long=#: Enables long distance matching with # bits. Maximum is 30 (1GB) on 32-bit OS and 31 (2GB) on 64-bit.
+    // Using 30 here because we also support 32-bit self-hosted runners.
+    switch (compressionMethod) {
+        case constants_1.CompressionMethod.Zstd:
+            return [
+                '--use-compress-program',
+                IS_WINDOWS ? 'zstd -d --long=30' : 'unzstd --long=30'
+            ];
+        case constants_1.CompressionMethod.ZstdWithoutLong:
+            return ['--use-compress-program', IS_WINDOWS ? 'zstd -d' : 'unzstd'];
+        default:
+            return ['-z'];
+    }
+}
+function listTar(archivePath, compressionMethod) {
+    return __awaiter(this, void 0, void 0, function* () {
+        const args = [
+            ...getCompressionProgram(compressionMethod),
+            '-tf',
+            archivePath.replace(new RegExp(`\\${path.sep}`, 'g'), '/'),
+            '-P'
+        ];
+        yield execTar(args, compressionMethod);
+    });
+}
+exports.listTar = listTar;
 function extractTar(archivePath, compressionMethod) {
     return __awaiter(this, void 0, void 0, function* () {
         // Create directory to extract tar into
         const workingDirectory = getWorkingDirectory();
         yield io.mkdirP(workingDirectory);
-        // --d: Decompress.
-        // --long=#: Enables long distance matching with # bits. Maximum is 30 (1GB) on 32-bit OS and 31 (2GB) on 64-bit.
-        // Using 30 here because we also support 32-bit self-hosted runners.
-        function getCompressionProgram() {
-            switch (compressionMethod) {
-                case constants_1.CompressionMethod.Zstd:
-                    return ['--use-compress-program', 'zstd -d --long=30'];
-                case constants_1.CompressionMethod.ZstdWithoutLong:
-                    return ['--use-compress-program', 'zstd -d'];
-                default:
-                    return ['-z'];
-            }
-        }
         const args = [
-            ...getCompressionProgram(),
+            ...getCompressionProgram(compressionMethod),
             '-xf',
             archivePath.replace(new RegExp(`\\${path.sep}`, 'g'), '/'),
             '-P',
@@ -37299,15 +37343,19 @@ function createTar(archiveFolder, sourceDirectories, compressionMethod) {
         fs_1.writeFileSync(path.join(archiveFolder, manifestFilename), sourceDirectories.join('\n'));
         const workingDirectory = getWorkingDirectory();
         // -T#: Compress using # working thread. If # is 0, attempt to detect and use the number of physical CPU cores.
+        // zstdmt is equivalent to 'zstd -T0'
         // --long=#: Enables long distance matching with # bits. Maximum is 30 (1GB) on 32-bit OS and 31 (2GB) on 64-bit.
         // Using 30 here because we also support 32-bit self-hosted runners.
         // Long range mode is added to zstd in v1.3.2 release, so we will not use --long in older version of zstd.
         function getCompressionProgram() {
             switch (compressionMethod) {
                 case constants_1.CompressionMethod.Zstd:
-                    return ['--use-compress-program', 'zstd -T0 --long=30'];
+                    return [
+                        '--use-compress-program',
+                        IS_WINDOWS ? 'zstd -T0 --long=30' : 'zstdmt --long=30'
+                    ];
                 case constants_1.CompressionMethod.ZstdWithoutLong:
-                    return ['--use-compress-program', 'zstd -T0'];
+                    return ['--use-compress-program', IS_WINDOWS ? 'zstd -T0' : 'zstdmt'];
                 default:
                     return ['-z'];
             }
@@ -37329,32 +37377,6 @@ function createTar(archiveFolder, sourceDirectories, compressionMethod) {
     });
 }
 exports.createTar = createTar;
-function listTar(archivePath, compressionMethod) {
-    return __awaiter(this, void 0, void 0, function* () {
-        // --d: Decompress.
-        // --long=#: Enables long distance matching with # bits.
-        // Maximum is 30 (1GB) on 32-bit OS and 31 (2GB) on 64-bit.
-        // Using 30 here because we also support 32-bit self-hosted runners.
-        function getCompressionProgram() {
-            switch (compressionMethod) {
-                case constants_1.CompressionMethod.Zstd:
-                    return ['--use-compress-program', 'zstd -d --long=30'];
-                case constants_1.CompressionMethod.ZstdWithoutLong:
-                    return ['--use-compress-program', 'zstd -d'];
-                default:
-                    return ['-z'];
-            }
-        }
-        const args = [
-            ...getCompressionProgram(),
-            '-tf',
-            archivePath.replace(new RegExp(`\\${path.sep}`, 'g'), '/'),
-            '-P'
-        ];
-        yield execTar(args, compressionMethod);
-    });
-}
-exports.listTar = listTar;
 //# sourceMappingURL=tar.js.map
 
 /***/ }),
@@ -40789,7 +40811,8 @@ function getDownloadOptions(copy) {
     const result = {
         useAzureSdk: true,
         downloadConcurrency: 8,
-        timeoutInMs: 30000
+        timeoutInMs: 30000,
+        segmentTimeoutInMs: 3600000
     };
     if (copy) {
         if (typeof copy.useAzureSdk === 'boolean') {
@@ -40801,10 +40824,21 @@ function getDownloadOptions(copy) {
         if (typeof copy.timeoutInMs === 'number') {
             result.timeoutInMs = copy.timeoutInMs;
         }
+        if (typeof copy.segmentTimeoutInMs === 'number') {
+            result.segmentTimeoutInMs = copy.segmentTimeoutInMs;
+        }
+    }
+    const segmentDownloadTimeoutMins = process.env['SEGMENT_DOWNLOAD_TIMEOUT_MINS'];
+    if (segmentDownloadTimeoutMins &&
+        !isNaN(Number(segmentDownloadTimeoutMins)) &&
+        isFinite(Number(segmentDownloadTimeoutMins))) {
+        result.segmentTimeoutInMs = Number(segmentDownloadTimeoutMins) * 60 * 1000;
     }
     core.debug(`Use Azure SDK: ${result.useAzureSdk}`);
     core.debug(`Download concurrency: ${result.downloadConcurrency}`);
     core.debug(`Request timeout (ms): ${result.timeoutInMs}`);
+    core.debug(`Cache segment download timeout mins env var: ${process.env['SEGMENT_DOWNLOAD_TIMEOUT_MINS']}`);
+    core.debug(`Segment download timeout (ms): ${result.segmentTimeoutInMs}`);
     return result;
 }
 exports.getDownloadOptions = getDownloadOptions;
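Note the resolution order this hunk establishes for the segment timeout: the built-in default (1 hour) is overridden by a `segmentTimeoutInMs` option when one is passed, and the `SEGMENT_DOWNLOAD_TIMEOUT_MINS` environment variable, checked last, overrides both whenever it parses as a finite number. (The README hunks earlier spell the variable `SEGMENT_DOWNLOAD_TIMEOUT_MIN`; the code reads `SEGMENT_DOWNLOAD_TIMEOUT_MINS`, and that spelling is the one that takes effect. Users set it in the workflow's `env:` block.) A condensed standalone sketch of the precedence, assuming the same env var name as the code above:

```typescript
// Precedence: built-in default < explicit option < SEGMENT_DOWNLOAD_TIMEOUT_MINS env var.
function resolveSegmentTimeoutInMs(optionMs?: number): number {
    let timeoutMs = 3600000; // default: 60 minutes
    if (typeof optionMs === "number") {
        timeoutMs = optionMs; // explicit option overrides the default
    }
    const envMins = process.env["SEGMENT_DOWNLOAD_TIMEOUT_MINS"];
    if (envMins && !isNaN(Number(envMins)) && isFinite(Number(envMins))) {
        timeoutMs = Number(envMins) * 60 * 1000; // env var wins, minutes -> ms
    }
    return timeoutMs;
}
```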
@@ -46850,17 +46884,18 @@ function restoreCache(paths, primaryKey, restoreKeys, options) {
             checkKey(key);
         }
         const compressionMethod = yield utils.getCompressionMethod();
-        // path are needed to compute version
-        const cacheEntry = yield cacheHttpClient.getCacheEntry(keys, paths, {
-            compressionMethod
-        });
-        if (!(cacheEntry === null || cacheEntry === void 0 ? void 0 : cacheEntry.archiveLocation)) {
-            // Cache not found
-            return undefined;
-        }
-        const archivePath = path.join(yield utils.createTempDirectory(), utils.getCacheFileName(compressionMethod));
-        core.debug(`Archive Path: ${archivePath}`);
+        let archivePath = '';
         try {
+            // path are needed to compute version
+            const cacheEntry = yield cacheHttpClient.getCacheEntry(keys, paths, {
+                compressionMethod
+            });
+            if (!(cacheEntry === null || cacheEntry === void 0 ? void 0 : cacheEntry.archiveLocation)) {
+                // Cache not found
+                return undefined;
+            }
+            archivePath = path.join(yield utils.createTempDirectory(), utils.getCacheFileName(compressionMethod));
+            core.debug(`Archive Path: ${archivePath}`);
             // Download the cache from the cache entry
             yield cacheHttpClient.downloadCache(cacheEntry.archiveLocation, archivePath, options);
             if (core.isDebug()) {
@@ -46870,6 +46905,17 @@ function restoreCache(paths, primaryKey, restoreKeys, options) {
             core.info(`Cache Size: ~${Math.round(archiveFileSize / (1024 * 1024))} MB (${archiveFileSize} B)`);
             yield tar_1.extractTar(archivePath, compressionMethod);
             core.info('Cache restored successfully');
+            return cacheEntry.cacheKey;
+        }
+        catch (error) {
+            const typedError = error;
+            if (typedError.name === ValidationError.name) {
+                throw error;
+            }
+            else {
+                // Supress all non-validation cache related errors because caching should be optional
+                core.warning(`Failed to restore: ${error.message}`);
+            }
         }
         finally {
             // Try to delete the archive to save space
@@ -46880,7 +46926,7 @@ function restoreCache(paths, primaryKey, restoreKeys, options) {
                 core.debug(`Failed to delete archive: ${error}`);
             }
         }
-        return cacheEntry.cacheKey;
+        return undefined;
     });
 }
 exports.restoreCache = restoreCache;
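Taken together, the three `restoreCache` hunks change the function's contract: validation errors still propagate, every other failure is downgraded to a warning, and the function now returns `undefined` both on a cache miss and on a swallowed error. A sketch of what that means for a caller (illustrative only, not the action's code):

```typescript
import * as cache from "@actions/cache";

async function restoreOrContinue(paths: string[], key: string): Promise<void> {
    // After this change, a caller cannot distinguish "no cache entry"
    // from "download/extract failed": both surface as undefined.
    const restoredKey = await cache.restoreCache(paths, key, []);
    if (restoredKey === undefined) {
        console.log("Proceeding without a cache (miss or restore failure).");
        return;
    }
    console.log(`Restored from key: ${restoredKey}`);
}
```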
@@ -46898,7 +46944,7 @@ function saveCache(paths, key, options) {
         checkPaths(paths);
         checkKey(key);
         const compressionMethod = yield utils.getCompressionMethod();
-        let cacheId = null;
+        let cacheId = -1;
         const cachePaths = yield utils.resolvePaths(paths);
         core.debug('Cache Paths:');
         core.debug(`${JSON.stringify(cachePaths)}`);
@@ -46937,6 +46983,18 @@ function saveCache(paths, key, options) {
             core.debug(`Saving Cache (ID: ${cacheId})`);
             yield cacheHttpClient.saveCache(cacheId, archivePath, options);
         }
+        catch (error) {
+            const typedError = error;
+            if (typedError.name === ValidationError.name) {
+                throw error;
+            }
+            else if (typedError.name === ReserveCacheError.name) {
+                core.info(`Failed to save: ${typedError.message}`);
+            }
+            else {
+                core.warning(`Failed to save: ${typedError.message}`);
+            }
+        }
         finally {
             // Try to delete the archive to save space
             try {
@@ -48996,31 +49054,19 @@ function run() {
         const cachePaths = utils.getInputAsArray(constants_1.Inputs.Path, {
             required: true
         });
-        try {
-            const cacheKey = yield cache.restoreCache(cachePaths, primaryKey, restoreKeys);
-            if (!cacheKey) {
-                core.info(`Cache not found for input keys: ${[
-                    primaryKey,
-                    ...restoreKeys
-                ].join(", ")}`);
-                return;
-            }
-            // Store the matched cache key
-            utils.setCacheState(cacheKey);
-            const isExactKeyMatch = utils.isExactKeyMatch(primaryKey, cacheKey);
-            utils.setCacheHitOutput(isExactKeyMatch);
-            core.info(`Cache restored from key: ${cacheKey}`);
-        }
-        catch (error) {
-            const typedError = error;
-            if (typedError.name === cache.ValidationError.name) {
-                throw error;
-            }
-            else {
-                utils.logWarning(typedError.message);
-                utils.setCacheHitOutput(false);
-            }
+        const cacheKey = yield cache.restoreCache(cachePaths, primaryKey, restoreKeys);
+        if (!cacheKey) {
+            core.info(`Cache not found for input keys: ${[
+                primaryKey,
+                ...restoreKeys
+            ].join(", ")}`);
+            return;
         }
+        // Store the matched cache key
+        utils.setCacheState(cacheKey);
+        const isExactKeyMatch = utils.isExactKeyMatch(primaryKey, cacheKey);
+        utils.setCacheHitOutput(isExactKeyMatch);
+        core.info(`Cache restored from key: ${cacheKey}`);
     }
     catch (error) {
         core.setFailed(error.message);
dist/save/index.js (vendored): 196 changed lines
The bundled @actions/cache toolkit hunks in this file repeat the dist/restore/index.js hunks shown above verbatim: resolvePaths (-1113,7 +1113,13), the download timeout changes in downloadUtils (-5467,6 +5473,7; -5650,15 +5657,24; -5669,6 +5685,16), the tar/zstd changes (-37214,6 +37240,7; -37261,26 +37288,43; -37299,15 +37343,19; -37329,32 +37377,6), getDownloadOptions (-40789,7 +40811,8; -40801,10 +40824,21), and the restoreCache/saveCache error handling (here at -46948,17 +46970,18; -46968,6 +46991,17; -46978,7 +47012,7; -46996,7 +47030,7; -47035,6 +47069,18). The only hunk unique to this bundle is the save action's run():

@@ -46792,24 +46826,12 @@ function run() {
         const cachePaths = utils.getInputAsArray(constants_1.Inputs.Path, {
             required: true
         });
-        try {
-            yield cache.saveCache(cachePaths, primaryKey, {
-                uploadChunkSize: utils.getInputAsInt(constants_1.Inputs.UploadChunkSize)
-            });
+        const cacheId = yield cache.saveCache(cachePaths, primaryKey, {
+            uploadChunkSize: utils.getInputAsInt(constants_1.Inputs.UploadChunkSize)
+        });
+        if (cacheId != -1) {
             core.info(`Cache saved with key: ${primaryKey}`);
         }
-        catch (error) {
-            const typedError = error;
-            if (typedError.name === cache.ValidationError.name) {
-                throw error;
-            }
-            else if (typedError.name === cache.ReserveCacheError.name) {
-                core.info(typedError.message);
-            }
-            else {
-                utils.logWarning(typedError.message);
-            }
-        }
     }
     catch (error) {
         utils.logWarning(error.message);
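The save-side `run()` above pairs with the `let cacheId = -1` change in `saveCache`: `-1` is the sentinel for "nothing was saved" (for example, a reservation failure now handled inside the toolkit), so the action only reports success for a real cache ID. A minimal sketch of the pattern (hypothetical helper, not the repository's code):

```typescript
// -1 signals that the save call returned without uploading anything.
async function saveAndReport(save: () => Promise<number>, key: string): Promise<void> {
    const cacheId = await save();
    if (cacheId !== -1) {
        console.log(`Cache saved with key: ${key}`);
    } else {
        console.log("Cache was not saved; see the warnings logged by the toolkit.");
    }
}
```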
examples.md: 14 changed lines
@@ -1,6 +1,7 @@
 # Examples
 
 - [C# - NuGet](#c---nuget)
+- [Clojure - Lein Deps](#clojure---lein-deps)
 - [D - DUB](#d---dub)
   - [POSIX](#posix)
   - [Windows](#windows)
@@ -80,6 +81,19 @@ steps:
           ${{ runner.os }}-nuget-
 ```
 
+## Clojure - Lein Deps
+
+```yaml
+- name: Cache lein project dependencies
+  uses: actions/cache@v3
+  with:
+    path: ~/.m2/repository
+    key: ${{ runner.os }}-clojure-${{ hashFiles('**/project.clj') }}
+    restore-keys: |
+      ${{ runner.os }}-clojure
+```
+
 ## D - DUB
 
 ### POSIX
package-lock.json (generated): 18 changed lines
@@ -1,15 +1,15 @@
 {
   "name": "cache",
-  "version": "3.0.4",
+  "version": "3.0.8",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "cache",
-      "version": "3.0.4",
+      "version": "3.0.8",
       "license": "MIT",
       "dependencies": {
-        "@actions/cache": "^2.0.6",
+        "@actions/cache": "^3.0.4",
         "@actions/core": "^1.7.0",
         "@actions/exec": "^1.1.1",
         "@actions/io": "^1.1.2"
@@ -36,9 +36,9 @@
       }
     },
     "node_modules/@actions/cache": {
-      "version": "2.0.6",
-      "resolved": "https://registry.npmjs.org/@actions/cache/-/cache-2.0.6.tgz",
-      "integrity": "sha512-Z39ZrWaTRRPaV/AOQdY7hve+Iy/HloH5prpz+k+0lZgGQs/3SeO0UYSIakVuXOk2pdMZnl0Nv0PoK1rmh9YfGQ==",
+      "version": "3.0.4",
+      "resolved": "https://registry.npmjs.org/@actions/cache/-/cache-3.0.4.tgz",
+      "integrity": "sha512-9RwVL8/ISJoYWFNH1wR/C26E+M3HDkGPWmbFJMMCKwTkjbNZJreMT4XaR/EB1bheIvN4PREQxEQQVJ18IPnf/Q==",
       "dependencies": {
         "@actions/core": "^1.2.6",
         "@actions/exec": "^1.0.1",
@@ -9533,9 +9533,9 @@
     },
     "dependencies": {
       "@actions/cache": {
-        "version": "2.0.6",
-        "resolved": "https://registry.npmjs.org/@actions/cache/-/cache-2.0.6.tgz",
-        "integrity": "sha512-Z39ZrWaTRRPaV/AOQdY7hve+Iy/HloH5prpz+k+0lZgGQs/3SeO0UYSIakVuXOk2pdMZnl0Nv0PoK1rmh9YfGQ==",
+        "version": "3.0.4",
+        "resolved": "https://registry.npmjs.org/@actions/cache/-/cache-3.0.4.tgz",
+        "integrity": "sha512-9RwVL8/ISJoYWFNH1wR/C26E+M3HDkGPWmbFJMMCKwTkjbNZJreMT4XaR/EB1bheIvN4PREQxEQQVJ18IPnf/Q==",
         "requires": {
           "@actions/core": "^1.2.6",
           "@actions/exec": "^1.0.1",
package.json

@@ -1,6 +1,6 @@
 {
   "name": "cache",
-  "version": "3.0.4",
+  "version": "3.0.8",
   "private": true,
   "description": "Cache dependencies and build outputs",
   "main": "dist/restore/index.js",
@@ -23,7 +23,7 @@
   "author": "GitHub",
   "license": "MIT",
   "dependencies": {
-    "@actions/cache": "^2.0.6",
+    "@actions/cache": "^3.0.4",
     "@actions/core": "^1.7.0",
     "@actions/exec": "^1.1.1",
     "@actions/io": "^1.1.2"
@@ -48,4 +48,4 @@
     "ts-jest": "^28.0.2",
     "typescript": "^4.6.4"
   }
 }
src/restore.ts

@@ -29,38 +29,29 @@ async function run(): Promise<void> {
             required: true
         });
 
-        try {
-            const cacheKey = await cache.restoreCache(
-                cachePaths,
-                primaryKey,
-                restoreKeys
-            );
-            if (!cacheKey) {
-                core.info(
-                    `Cache not found for input keys: ${[
-                        primaryKey,
-                        ...restoreKeys
-                    ].join(", ")}`
-                );
-                return;
-            }
-
-            // Store the matched cache key
-            utils.setCacheState(cacheKey);
-
-            const isExactKeyMatch = utils.isExactKeyMatch(primaryKey, cacheKey);
-            utils.setCacheHitOutput(isExactKeyMatch);
-
-            core.info(`Cache restored from key: ${cacheKey}`);
-        } catch (error: unknown) {
-            const typedError = error as Error;
-            if (typedError.name === cache.ValidationError.name) {
-                throw error;
-            } else {
-                utils.logWarning(typedError.message);
-                utils.setCacheHitOutput(false);
-            }
+        const cacheKey = await cache.restoreCache(
+            cachePaths,
+            primaryKey,
+            restoreKeys
+        );
+
+        if (!cacheKey) {
+            core.info(
+                `Cache not found for input keys: ${[
+                    primaryKey,
+                    ...restoreKeys
+                ].join(", ")}`
+            );
+
+            return;
         }
+
+        // Store the matched cache key
+        utils.setCacheState(cacheKey);
+
+        const isExactKeyMatch = utils.isExactKeyMatch(primaryKey, cacheKey);
+        utils.setCacheHitOutput(isExactKeyMatch);
+
+        core.info(`Cache restored from key: ${cacheKey}`);
     } catch (error: unknown) {
         core.setFailed((error as Error).message);
     }
src/save.ts: 18 changed lines
@@ -44,20 +44,12 @@ async function run(): Promise<void> {
             required: true
         });
 
-        try {
-            await cache.saveCache(cachePaths, primaryKey, {
-                uploadChunkSize: utils.getInputAsInt(Inputs.UploadChunkSize)
-            });
+        const cacheId = await cache.saveCache(cachePaths, primaryKey, {
+            uploadChunkSize: utils.getInputAsInt(Inputs.UploadChunkSize)
+        });
+
+        if (cacheId != -1) {
             core.info(`Cache saved with key: ${primaryKey}`);
-        } catch (error: unknown) {
-            const typedError = error as Error;
-            if (typedError.name === cache.ValidationError.name) {
-                throw error;
-            } else if (typedError.name === cache.ReserveCacheError.name) {
-                core.info(typedError.message);
-            } else {
-                utils.logWarning(typedError.message);
-            }
         }
     } catch (error: unknown) {
         utils.logWarning((error as Error).message);