fix(dhcp): cannot renew an IP address
The DHCP server is systemd-networkd, and the dhcp plugin can request an IP but cannot renew it. systemd-networkd simply ignores the renew request. ``` 2024/09/14 21:46:00 no DHCP packet received within 10s 2024/09/14 21:46:00 retrying in 31.529038 seconds 2024/09/14 21:46:42 no DHCP packet received within 10s 2024/09/14 21:46:42 retrying in 63.150490 seconds 2024/09/14 21:47:45 98184616c91f15419f5cacd012697f85afaa2daeb5d3233e28b0ec21589fb45a/iot/eth1: no more tries 2024/09/14 21:47:45 98184616c91f15419f5cacd012697f85afaa2daeb5d3233e28b0ec21589fb45a/iot/eth1: renewal time expired, rebinding 2024/09/14 21:47:45 Link "eth1" down. Attempting to set up 2024/09/14 21:47:45 98184616c91f15419f5cacd012697f85afaa2daeb5d3233e28b0ec21589fb45a/iot/eth1: lease rebound, expiration is 2024-09-14 22:47:45.309270751 +0800 CST m=+11730.048516519 ``` Per https://datatracker.ietf.org/doc/html/rfc2131#section-4.3.6, the following options must not be sent in a renew request: - Requested IP Address - Server Identifier Since the upstream code has been inactive for 6 years, we should switch to another DHCPv4 library. The newly selected one is https://github.com/insomniacslk/dhcp. Signed-off-by: Songmin Li <lisongmin@protonmail.com>
This commit is contained in:

committed by
Casey Callendrello

parent
e4950728ce
commit
d61e7e5e1f
29
vendor/github.com/u-root/uio/LICENSE
generated
vendored
Normal file
29
vendor/github.com/u-root/uio/LICENSE
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
BSD 3-Clause License
|
||||
|
||||
Copyright (c) 2012-2021, u-root Authors
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
62
vendor/github.com/u-root/uio/rand/random.go
generated
vendored
Normal file
62
vendor/github.com/u-root/uio/rand/random.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
// Copyright 2019 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package rand implements cancelable reads from a cryptographically safe
|
||||
// random number source.
|
||||
package rand
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// Reader is a cryptographically safe random number source.
|
||||
var Reader = DefaultReaderWithContext(context.Background())
|
||||
|
||||
// Read blockingly reads from a random number source.
|
||||
func Read(b []byte) (int, error) {
|
||||
return Reader.Read(b)
|
||||
}
|
||||
|
||||
// ReadContext is a context-aware reader for random numbers.
|
||||
func ReadContext(ctx context.Context, b []byte) (int, error) {
|
||||
return Reader.ReadContext(ctx, b)
|
||||
}
|
||||
|
||||
// ContextReader is a cancelable io.Reader.
|
||||
type ContextReader interface {
|
||||
// Read behaves like a blocking io.Reader.Read.
|
||||
//
|
||||
// Read wraps ReadContext with a background context.
|
||||
Read(b []byte) (n int, err error)
|
||||
|
||||
// ReadContext is an io.Reader that blocks until data is available or
|
||||
// until ctx is done.
|
||||
ReadContext(ctx context.Context, b []byte) (n int, err error)
|
||||
}
|
||||
|
||||
// contextReader is a cancelable io.Reader.
|
||||
type contextReader interface {
|
||||
ReadContext(context.Context, []byte) (int, error)
|
||||
}
|
||||
|
||||
// ctxReader takes a contextReader and turns it into a ContextReader.
|
||||
type ctxReader struct {
|
||||
contextReader
|
||||
ctx context.Context //nolint:containedctx
|
||||
}
|
||||
|
||||
func (cr ctxReader) Read(b []byte) (int, error) {
|
||||
return cr.contextReader.ReadContext(cr.ctx, b)
|
||||
}
|
||||
|
||||
// DefaultReaderWithContext returns a context-aware io.Reader.
|
||||
//
|
||||
// Because this stores the context, only use this in situations where an
|
||||
// io.Reader is unavoidable.
|
||||
func DefaultReaderWithContext(ctx context.Context) ContextReader {
|
||||
return ctxReader{
|
||||
ctx: ctx,
|
||||
contextReader: defaultContextReader,
|
||||
}
|
||||
}
|
77
vendor/github.com/u-root/uio/rand/random_linux.go
generated
vendored
Normal file
77
vendor/github.com/u-root/uio/rand/random_linux.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
// Copyright 2019 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package rand
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var defaultContextReader = &getrandomReader{}
|
||||
|
||||
var backupReader = &urandomReader{}
|
||||
|
||||
type getrandomReader struct {
|
||||
once sync.Once
|
||||
backup bool
|
||||
}
|
||||
|
||||
// ReadContext implements a cancelable read from /dev/urandom.
|
||||
func (r *getrandomReader) ReadContext(ctx context.Context, b []byte) (int, error) {
|
||||
r.once.Do(func() {
|
||||
if os.Getenv("UROOT_NOHWRNG") != "" {
|
||||
r.backup = true
|
||||
return
|
||||
}
|
||||
if _, err := unix.Getrandom(b, unix.GRND_NONBLOCK); err == syscall.ENOSYS {
|
||||
r.backup = true
|
||||
}
|
||||
})
|
||||
if r.backup {
|
||||
return backupReader.ReadContext(ctx, b)
|
||||
}
|
||||
|
||||
for {
|
||||
// getrandom(2) with GRND_NONBLOCK uses the urandom number
|
||||
// source, but only returns numbers if the crng has been
|
||||
// initialized.
|
||||
//
|
||||
// This is preferrable to /dev/urandom, as /dev/urandom will
|
||||
// make up fake random numbers until the crng has been
|
||||
// initialized.
|
||||
n, err := unix.Getrandom(b, unix.GRND_NONBLOCK)
|
||||
if err == nil {
|
||||
return n, nil
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, ctx.Err()
|
||||
|
||||
default:
|
||||
if err != syscall.EAGAIN && err != syscall.EINTR {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ReadContextWithSlowLogs logs a helpful message if it takes a significant
|
||||
// amount of time (>2s) to produce random data.
|
||||
func (r *getrandomReader) ReadContextWithSlowLogs(ctx context.Context, b []byte) (int, error) {
|
||||
d := 2 * time.Second
|
||||
t := time.AfterFunc(d, func() {
|
||||
log.Printf("getrandom is taking a long time (>%v). "+
|
||||
"If running on hardware, consider enabling Linux's CONFIG_RANDOM_TRUST_CPU=y. "+
|
||||
"If running in a VM/emulator, try setting up virtio-rng.", d)
|
||||
})
|
||||
defer t.Stop()
|
||||
return r.ReadContext(ctx, b)
|
||||
}
|
32
vendor/github.com/u-root/uio/rand/random_std.go
generated
vendored
Normal file
32
vendor/github.com/u-root/uio/rand/random_std.go
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
// Copyright 2020 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build plan9 || windows
|
||||
// +build plan9 windows
|
||||
|
||||
package rand
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
)
|
||||
|
||||
var defaultContextReader = &cryptoRandReader{}
|
||||
|
||||
type cryptoRandReader struct{}
|
||||
|
||||
// ReadContext implements a cancelable read.
|
||||
func (r *cryptoRandReader) ReadContext(ctx context.Context, b []byte) (n int, err error) {
|
||||
ch := make(chan struct{})
|
||||
go func() {
|
||||
n, err = rand.Reader.Read(b)
|
||||
close(ch)
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, ctx.Err()
|
||||
case <-ch:
|
||||
return n, err
|
||||
}
|
||||
}
|
10
vendor/github.com/u-root/uio/rand/random_unix.go
generated
vendored
Normal file
10
vendor/github.com/u-root/uio/rand/random_unix.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
// Copyright 2019 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build aix || darwin || dragonfly || freebsd || nacl || netbsd || openbsd || solaris
|
||||
// +build aix darwin dragonfly freebsd nacl netbsd openbsd solaris
|
||||
|
||||
package rand
|
||||
|
||||
var defaultContextReader = &urandomReader{}
|
60
vendor/github.com/u-root/uio/rand/random_urandom.go
generated
vendored
Normal file
60
vendor/github.com/u-root/uio/rand/random_urandom.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
// Copyright 2019 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build aix || darwin || dragonfly || freebsd || nacl || netbsd || openbsd || solaris || linux
|
||||
// +build aix darwin dragonfly freebsd nacl netbsd openbsd solaris linux
|
||||
|
||||
package rand
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// urandomReader is a contextReader.
|
||||
type urandomReader struct {
|
||||
once sync.Once
|
||||
|
||||
// fd is expected to be non-blocking.
|
||||
fd int
|
||||
}
|
||||
|
||||
func (r *urandomReader) init() error {
|
||||
var realErr error
|
||||
r.once.Do(func() {
|
||||
fd, err := unix.Open("/dev/urandom", unix.O_RDONLY, 0)
|
||||
if err != nil {
|
||||
realErr = fmt.Errorf("open(/dev/urandom): %v", err)
|
||||
return
|
||||
}
|
||||
r.fd = fd
|
||||
})
|
||||
return realErr
|
||||
}
|
||||
|
||||
// ReadContext implements a cancelable read from /dev/urandom.
|
||||
func (r *urandomReader) ReadContext(ctx context.Context, b []byte) (int, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for {
|
||||
n, err := unix.Read(r.fd, b)
|
||||
if err == nil {
|
||||
return n, nil
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, ctx.Err()
|
||||
|
||||
default:
|
||||
if err != syscall.EAGAIN && err != syscall.EINTR {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
41
vendor/github.com/u-root/uio/uio/alignreader.go
generated
vendored
Normal file
41
vendor/github.com/u-root/uio/uio/alignreader.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
// Copyright 2019 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uio
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// AlignReader keeps track of how many bytes were read so the reader can be
|
||||
// aligned at a future time.
|
||||
type AlignReader struct {
|
||||
R io.Reader
|
||||
N int
|
||||
}
|
||||
|
||||
// Read reads from the underlying io.Reader.
|
||||
func (r *AlignReader) Read(b []byte) (int, error) {
|
||||
n, err := r.R.Read(b)
|
||||
r.N += n
|
||||
return n, err
|
||||
}
|
||||
|
||||
// ReadByte reads one byte from the underlying io.Reader.
|
||||
func (r *AlignReader) ReadByte() (byte, error) {
|
||||
b := make([]byte, 1)
|
||||
_, err := io.ReadFull(r, b)
|
||||
return b[0], err
|
||||
}
|
||||
|
||||
// Align aligns the reader to the given number of bytes and returns the
|
||||
// bytes read to pad it.
|
||||
func (r *AlignReader) Align(n int) ([]byte, error) {
|
||||
if r.N%n == 0 {
|
||||
return []byte{}, nil
|
||||
}
|
||||
pad := make([]byte, n-r.N%n)
|
||||
m, err := io.ReadFull(r, pad)
|
||||
return pad[:m], err
|
||||
}
|
34
vendor/github.com/u-root/uio/uio/alignwriter.go
generated
vendored
Normal file
34
vendor/github.com/u-root/uio/uio/alignwriter.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
// Copyright 2019 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// AlignWriter keeps track of how many bytes were written so the writer can be
|
||||
// aligned at a future time.
|
||||
type AlignWriter struct {
|
||||
W io.Writer
|
||||
N int
|
||||
}
|
||||
|
||||
// Write writes to the underlying io.Writew.
|
||||
func (w *AlignWriter) Write(b []byte) (int, error) {
|
||||
n, err := w.W.Write(b)
|
||||
w.N += n
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Align aligns the writer to the given number of bytes using the given pad
|
||||
// value.
|
||||
func (w *AlignWriter) Align(n int, pad byte) error {
|
||||
if w.N%n == 0 {
|
||||
return nil
|
||||
}
|
||||
_, err := w.Write(bytes.Repeat([]byte{pad}, n-w.N%n))
|
||||
return err
|
||||
}
|
89
vendor/github.com/u-root/uio/uio/archivereader.go
generated
vendored
Normal file
89
vendor/github.com/u-root/uio/uio/archivereader.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
// Copyright 2021 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"github.com/pierrec/lz4/v4"
|
||||
)
|
||||
|
||||
const (
|
||||
// preReadSizeBytes is the num of bytes pre-read from a io.Reader that will
|
||||
// be used to match against archive header.
|
||||
defaultArchivePreReadSizeBytes = 1024
|
||||
)
|
||||
|
||||
// ErrPreReadError indicates there was not enough underlying data to decompress.
|
||||
var ErrPreReadError = errors.New("pre-read nothing")
|
||||
|
||||
// ArchiveReader reads from a io.Reader, decompresses source bytes
|
||||
// when applicable.
|
||||
//
|
||||
// It allows probing for multiple archive format, while still able
|
||||
// to read from beginning, by pre-reading a small number of bytes.
|
||||
//
|
||||
// Always use newArchiveReader to initialize.
|
||||
type ArchiveReader struct {
|
||||
// src is where we read source bytes.
|
||||
src io.Reader
|
||||
|
||||
// buf stores pre-read bytes from original io.Reader. Archive format
|
||||
// detection will be done against it.
|
||||
buf []byte
|
||||
|
||||
// preReadSizeBytes is how many bytes we pre-read for magic number
|
||||
// matching for each archive type. This should be greater than or
|
||||
// equal to the largest header frame size of each supported archive
|
||||
// format.
|
||||
preReadSizeBytes int
|
||||
}
|
||||
|
||||
// NewArchiveReader is a decompression reader.
|
||||
func NewArchiveReader(r io.Reader) (ArchiveReader, error) {
|
||||
ar := ArchiveReader{
|
||||
src: r,
|
||||
// Randomly chosen, should be enough for most types:
|
||||
//
|
||||
// e.g. gzip with 10 byte header, lz4 with a header size
|
||||
// between 7 and 19 bytes.
|
||||
preReadSizeBytes: defaultArchivePreReadSizeBytes,
|
||||
}
|
||||
pbuf := make([]byte, ar.preReadSizeBytes)
|
||||
|
||||
nr, err := io.ReadFull(r, pbuf)
|
||||
// In case the image is smaller pre-read block size, 1kb for now.
|
||||
// Ever possible ? probably not in case a compression is needed!
|
||||
ar.buf = pbuf[:nr]
|
||||
if err == io.EOF {
|
||||
// If we could not pre-read anything, we can't determine if
|
||||
// it is a compressed file.
|
||||
ar.src = io.MultiReader(bytes.NewReader(pbuf[:nr]), r)
|
||||
return ar, ErrPreReadError
|
||||
}
|
||||
|
||||
// Try each supported compression type, return upon first match.
|
||||
|
||||
// Try lz4.
|
||||
// magic number error will be thrown if source is not a lz4 archive.
|
||||
// e.g. "lz4: bad magic number".
|
||||
if ok, err := lz4.ValidFrameHeader(ar.buf); err == nil && ok {
|
||||
ar.src = lz4.NewReader(io.MultiReader(bytes.NewReader(ar.buf), r))
|
||||
return ar, nil
|
||||
}
|
||||
|
||||
// Try other archive types here, gzip, xz, etc when needed.
|
||||
|
||||
// Last resort, read as is.
|
||||
ar.src = io.MultiReader(bytes.NewReader(ar.buf), r)
|
||||
return ar, nil
|
||||
}
|
||||
|
||||
// Read reads from the archive uncompressed.
|
||||
func (ar ArchiveReader) Read(p []byte) (n int, err error) {
|
||||
return ar.src.Read(p)
|
||||
}
|
380
vendor/github.com/u-root/uio/uio/buffer.go
generated
vendored
Normal file
380
vendor/github.com/u-root/uio/uio/buffer.go
generated
vendored
Normal file
@ -0,0 +1,380 @@
|
||||
// Copyright 2018 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uio
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Marshaler is the interface implemented by an object that can marshal itself
|
||||
// into binary form.
|
||||
//
|
||||
// Marshal appends data to the buffer b.
|
||||
type Marshaler interface {
|
||||
Marshal(l *Lexer)
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface implemented by an object that can unmarshal a
|
||||
// binary representation of itself.
|
||||
//
|
||||
// Unmarshal Consumes data from the buffer b.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal(l *Lexer) error
|
||||
}
|
||||
|
||||
// ToBytes marshals m in the given byte order.
|
||||
func ToBytes(m Marshaler, order binary.ByteOrder) []byte {
|
||||
l := NewLexer(NewBuffer(nil), order)
|
||||
m.Marshal(l)
|
||||
return l.Data()
|
||||
}
|
||||
|
||||
// FromBytes unmarshals b into obj in the given byte order.
|
||||
func FromBytes(obj Unmarshaler, b []byte, order binary.ByteOrder) error {
|
||||
l := NewLexer(NewBuffer(b), order)
|
||||
return obj.Unmarshal(l)
|
||||
}
|
||||
|
||||
// ToBigEndian marshals m to big endian byte order.
|
||||
func ToBigEndian(m Marshaler) []byte {
|
||||
l := NewBigEndianBuffer(nil)
|
||||
m.Marshal(l)
|
||||
return l.Data()
|
||||
}
|
||||
|
||||
// FromBigEndian unmarshals b into obj in big endian byte order.
|
||||
func FromBigEndian(obj Unmarshaler, b []byte) error {
|
||||
l := NewBigEndianBuffer(b)
|
||||
return obj.Unmarshal(l)
|
||||
}
|
||||
|
||||
// ToLittleEndian marshals m to little endian byte order.
|
||||
func ToLittleEndian(m Marshaler) []byte {
|
||||
l := NewLittleEndianBuffer(nil)
|
||||
m.Marshal(l)
|
||||
return l.Data()
|
||||
}
|
||||
|
||||
// FromLittleEndian unmarshals b into obj in little endian byte order.
|
||||
func FromLittleEndian(obj Unmarshaler, b []byte) error {
|
||||
l := NewLittleEndianBuffer(b)
|
||||
return obj.Unmarshal(l)
|
||||
}
|
||||
|
||||
// Buffer implements functions to manipulate byte slices in a zero-copy way.
|
||||
type Buffer struct {
|
||||
// data is the underlying data.
|
||||
data []byte
|
||||
|
||||
// byteCount keeps track of how many bytes have been consumed for
|
||||
// debugging.
|
||||
byteCount int
|
||||
}
|
||||
|
||||
// NewBuffer Consumes b for marshaling or unmarshaling in the given byte order.
|
||||
func NewBuffer(b []byte) *Buffer {
|
||||
return &Buffer{data: b}
|
||||
}
|
||||
|
||||
// Preallocate increases the capacity of the buffer by n bytes.
|
||||
func (b *Buffer) Preallocate(n int) {
|
||||
b.data = append(b.data, make([]byte, 0, n)...)
|
||||
}
|
||||
|
||||
// WriteN appends n bytes to the Buffer and returns a slice pointing to the
|
||||
// newly appended bytes.
|
||||
func (b *Buffer) WriteN(n int) []byte {
|
||||
b.data = append(b.data, make([]byte, n)...)
|
||||
return b.data[len(b.data)-n:]
|
||||
}
|
||||
|
||||
// ErrBufferTooShort is returned when a caller wants to read more bytes than
|
||||
// are available in the buffer.
|
||||
var ErrBufferTooShort = errors.New("buffer too short")
|
||||
|
||||
// ReadN consumes n bytes from the Buffer. It returns nil, false if there
|
||||
// aren't enough bytes left.
|
||||
func (b *Buffer) ReadN(n int) ([]byte, error) {
|
||||
if !b.Has(n) {
|
||||
return nil, fmt.Errorf("%w at position %d: have %d bytes, want %d bytes", ErrBufferTooShort, b.byteCount, b.Len(), n)
|
||||
}
|
||||
rval := b.data[:n]
|
||||
b.data = b.data[n:]
|
||||
b.byteCount += n
|
||||
return rval, nil
|
||||
}
|
||||
|
||||
// Data is unConsumed data remaining in the Buffer.
|
||||
func (b *Buffer) Data() []byte {
|
||||
return b.data
|
||||
}
|
||||
|
||||
// Has returns true if n bytes are available.
|
||||
func (b *Buffer) Has(n int) bool {
|
||||
return len(b.data) >= n
|
||||
}
|
||||
|
||||
// Len returns the length of the remaining bytes.
|
||||
func (b *Buffer) Len() int {
|
||||
return len(b.data)
|
||||
}
|
||||
|
||||
// Cap returns the available capacity.
|
||||
func (b *Buffer) Cap() int {
|
||||
return cap(b.data)
|
||||
}
|
||||
|
||||
// Lexer is a convenient encoder/decoder for buffers.
|
||||
//
|
||||
// Use:
|
||||
//
|
||||
// func (s *something) Unmarshal(l *Lexer) {
|
||||
// s.Foo = l.Read8()
|
||||
// s.Bar = l.Read8()
|
||||
// s.Baz = l.Read16()
|
||||
// return l.Error()
|
||||
// }
|
||||
type Lexer struct {
|
||||
*Buffer
|
||||
|
||||
// order is the byte order to write in / read in.
|
||||
order binary.ByteOrder
|
||||
|
||||
// err
|
||||
err error
|
||||
}
|
||||
|
||||
// NewLexer returns a new coder for buffers.
|
||||
func NewLexer(b *Buffer, order binary.ByteOrder) *Lexer {
|
||||
return &Lexer{
|
||||
Buffer: b,
|
||||
order: order,
|
||||
}
|
||||
}
|
||||
|
||||
// NewLittleEndianBuffer returns a new little endian coder for a new buffer.
|
||||
func NewLittleEndianBuffer(b []byte) *Lexer {
|
||||
return &Lexer{
|
||||
Buffer: NewBuffer(b),
|
||||
order: binary.LittleEndian,
|
||||
}
|
||||
}
|
||||
|
||||
// NewBigEndianBuffer returns a new big endian coder for a new buffer.
|
||||
func NewBigEndianBuffer(b []byte) *Lexer {
|
||||
return &Lexer{
|
||||
Buffer: NewBuffer(b),
|
||||
order: binary.BigEndian,
|
||||
}
|
||||
}
|
||||
|
||||
// NewNativeEndianBuffer returns a new native endian coder for a new buffer.
|
||||
func NewNativeEndianBuffer(b []byte) *Lexer {
|
||||
return &Lexer{
|
||||
Buffer: NewBuffer(b),
|
||||
order: binary.NativeEndian,
|
||||
}
|
||||
}
|
||||
|
||||
// SetError sets the error if no error has previously been set.
|
||||
//
|
||||
// The error can later be retried with Error or FinError methods.
|
||||
func (l *Lexer) SetError(err error) {
|
||||
if l.err == nil {
|
||||
l.err = err
|
||||
}
|
||||
}
|
||||
|
||||
// Consume returns a slice of the next n bytes from the buffer.
|
||||
//
|
||||
// Consume gives direct access to the underlying data.
|
||||
func (l *Lexer) Consume(n int) []byte {
|
||||
v, err := l.Buffer.ReadN(n)
|
||||
if err != nil {
|
||||
l.SetError(err)
|
||||
return nil
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (l *Lexer) append(n int) []byte {
|
||||
return l.Buffer.WriteN(n)
|
||||
}
|
||||
|
||||
// Error returns an error if an error occurred reading from the buffer.
|
||||
func (l *Lexer) Error() error {
|
||||
return l.err
|
||||
}
|
||||
|
||||
// ErrUnreadBytes is returned when there is more data left to read in the buffer.
|
||||
var ErrUnreadBytes = errors.New("buffer contains unread bytes")
|
||||
|
||||
// FinError returns an error if an error occurred or if there is more data left
|
||||
// to read in the buffer.
|
||||
func (l *Lexer) FinError() error {
|
||||
if l.err != nil {
|
||||
return l.err
|
||||
}
|
||||
if l.Buffer.Len() > 0 {
|
||||
return ErrUnreadBytes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read8 reads a byte from the Buffer.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) Read8() uint8 {
|
||||
v := l.Consume(1)
|
||||
if v == nil {
|
||||
return 0
|
||||
}
|
||||
return v[0]
|
||||
}
|
||||
|
||||
// Read16 reads a 16-bit value from the Buffer.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) Read16() uint16 {
|
||||
v := l.Consume(2)
|
||||
if v == nil {
|
||||
return 0
|
||||
}
|
||||
return l.order.Uint16(v)
|
||||
}
|
||||
|
||||
// Read32 reads a 32-bit value from the Buffer.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) Read32() uint32 {
|
||||
v := l.Consume(4)
|
||||
if v == nil {
|
||||
return 0
|
||||
}
|
||||
return l.order.Uint32(v)
|
||||
}
|
||||
|
||||
// Read64 reads a 64-bit value from the Buffer.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) Read64() uint64 {
|
||||
v := l.Consume(8)
|
||||
if v == nil {
|
||||
return 0
|
||||
}
|
||||
return l.order.Uint64(v)
|
||||
}
|
||||
|
||||
// CopyN returns a copy of the next n bytes.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) CopyN(n int) []byte {
|
||||
v := l.Consume(n)
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
p := make([]byte, n)
|
||||
m := copy(p, v)
|
||||
return p[:m]
|
||||
}
|
||||
|
||||
// ReadAll Consumes and returns a copy of all remaining bytes in the Buffer.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) ReadAll() []byte {
|
||||
return l.CopyN(l.Len())
|
||||
}
|
||||
|
||||
// ReadBytes reads exactly len(p) values from the Buffer.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) ReadBytes(p []byte) {
|
||||
copy(p, l.Consume(len(p)))
|
||||
}
|
||||
|
||||
// Read implements io.Reader.Read.
|
||||
func (l *Lexer) Read(p []byte) (int, error) {
|
||||
v := l.Consume(len(p))
|
||||
if v == nil {
|
||||
return 0, l.Error()
|
||||
}
|
||||
return copy(p, v), nil
|
||||
}
|
||||
|
||||
// ReadData reads the binary representation of data from the buffer.
|
||||
//
|
||||
// See binary.Read.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) ReadData(data interface{}) {
|
||||
l.SetError(binary.Read(l, l.order, data))
|
||||
}
|
||||
|
||||
// WriteData writes a binary representation of data to the buffer.
|
||||
//
|
||||
// See binary.Write.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) WriteData(data interface{}) {
|
||||
l.SetError(binary.Write(l, l.order, data))
|
||||
}
|
||||
|
||||
// Write8 writes a byte to the Buffer.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) Write8(v uint8) {
|
||||
l.append(1)[0] = v
|
||||
}
|
||||
|
||||
// Write16 writes a 16-bit value to the Buffer.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) Write16(v uint16) {
|
||||
l.order.PutUint16(l.append(2), v)
|
||||
}
|
||||
|
||||
// Write32 writes a 32-bit value to the Buffer.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) Write32(v uint32) {
|
||||
l.order.PutUint32(l.append(4), v)
|
||||
}
|
||||
|
||||
// Write64 writes a 64-bit value to the Buffer.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) Write64(v uint64) {
|
||||
l.order.PutUint64(l.append(8), v)
|
||||
}
|
||||
|
||||
// Append returns a newly appended n-size Buffer to write to.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) Append(n int) []byte {
|
||||
return l.append(n)
|
||||
}
|
||||
|
||||
// WriteBytes writes p to the Buffer.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) WriteBytes(p []byte) {
|
||||
copy(l.append(len(p)), p)
|
||||
}
|
||||
|
||||
// Write implements io.Writer.Write.
|
||||
//
|
||||
// If an error occurred, Error() will return a non-nil error.
|
||||
func (l *Lexer) Write(p []byte) (int, error) {
|
||||
return copy(l.append(len(p)), p), nil
|
||||
}
|
||||
|
||||
// Align appends bytes to align the length of the buffer to be divisible by n.
|
||||
func (l *Lexer) Align(n int) {
|
||||
pad := ((l.Len() + n - 1) &^ (n - 1)) - l.Len()
|
||||
l.Append(pad)
|
||||
}
|
98
vendor/github.com/u-root/uio/uio/cached.go
generated
vendored
Normal file
98
vendor/github.com/u-root/uio/uio/cached.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
||||
// Copyright 2018 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// CachingReader is a lazily caching wrapper of an io.Reader.
|
||||
//
|
||||
// The wrapped io.Reader is only read from on demand, not upfront.
|
||||
type CachingReader struct {
|
||||
buf bytes.Buffer
|
||||
r io.Reader
|
||||
pos int
|
||||
eof bool
|
||||
}
|
||||
|
||||
// NewCachingReader buffers reads from r.
|
||||
//
|
||||
// r is only read from when Read() is called.
|
||||
func NewCachingReader(r io.Reader) *CachingReader {
|
||||
return &CachingReader{
|
||||
r: r,
|
||||
}
|
||||
}
|
||||
|
||||
func (cr *CachingReader) read(p []byte) (int, error) {
|
||||
n, err := cr.r.Read(p)
|
||||
cr.buf.Write(p[:n])
|
||||
if err == io.EOF || (n == 0 && err == nil) {
|
||||
cr.eof = true
|
||||
return n, io.EOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// NewReader returns a new io.Reader that reads cr from offset 0.
|
||||
func (cr *CachingReader) NewReader() io.Reader {
|
||||
return Reader(cr)
|
||||
}
|
||||
|
||||
// Read reads from cr; implementing io.Reader.
|
||||
//
|
||||
// TODO(chrisko): Decide whether to keep this or only keep NewReader().
|
||||
func (cr *CachingReader) Read(p []byte) (int, error) {
|
||||
n, err := cr.ReadAt(p, int64(cr.pos))
|
||||
cr.pos += n
|
||||
return n, err
|
||||
}
|
||||
|
||||
// ReadAt reads from cr; implementing io.ReaderAt.
//
// Bytes up to off+len(p) are fetched from the underlying reader on demand
// and cached in cr.buf, so any offset that has been read once can be read
// again later.
func (cr *CachingReader) ReadAt(p []byte, off int64) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	end := int(off) + len(p)

	// Is the caller asking for some uncached bytes?
	unread := end - cr.buf.Len()
	if unread > 0 {
		// Avoiding allocations: use `p` to read more bytes.
		for unread > 0 {
			// Read the remainder modulo len(p) first so that the
			// final iteration uses all of p.
			toRead := unread % len(p)
			if toRead == 0 {
				toRead = len(p)
			}

			m, err := cr.read(p[:toRead])
			unread -= m
			if err == io.EOF {
				break
			}
			if err != nil {
				return 0, err
			}
		}
	}

	// If this is true, the entire file was read just to find out, but the
	// offset is beyond the end of the file.
	if off > int64(cr.buf.Len()) {
		return 0, io.EOF
	}

	var err error
	// Did the caller ask for more than was available?
	//
	// Note that any io.ReaderAt implementation *must* return an error for
	// short reads.
	if cr.eof && unread > 0 {
		err = io.EOF
	}
	return copy(p, cr.buf.Bytes()[off:]), err
}
|
165
vendor/github.com/u-root/uio/uio/lazy.go
generated
vendored
Normal file
165
vendor/github.com/u-root/uio/uio/lazy.go
generated
vendored
Normal file
@ -0,0 +1,165 @@
|
||||
// Copyright 2018 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ReadOneByte reads one byte from given io.ReaderAt.
//
// It returns an error if the read fails or yields anything other than
// exactly one byte.
func ReadOneByte(r io.ReaderAt) error {
	var buf [1]byte
	n, err := r.ReadAt(buf[:], 0)
	if err != nil {
		return err
	}
	if n != 1 {
		return fmt.Errorf("expected to read 1 byte, but got %d", n)
	}
	return nil
}
|
||||
|
||||
// LazyOpener is a lazy io.Reader.
//
// LazyOpener will use a given open function to derive an io.Reader when Read
// is first called on the LazyOpener.
type LazyOpener struct {
	r    io.Reader                 // underlying reader; nil until open() has been called
	s    string                    // display name returned by String()
	err  error                     // cached error from open(); sticky once set
	open func() (io.Reader, error) // produces the reader on first Read
}
|
||||
|
||||
// NewLazyOpener returns a lazy io.Reader based on `open`.
|
||||
func NewLazyOpener(filename string, open func() (io.Reader, error)) *LazyOpener {
|
||||
if len(filename) == 0 {
|
||||
return nil
|
||||
}
|
||||
return &LazyOpener{s: filename, open: open}
|
||||
}
|
||||
|
||||
// Read implements io.Reader.Read lazily.
//
// If called for the first time, the underlying reader will be obtained and
// then used for the first and subsequent calls to Read.
func (lr *LazyOpener) Read(p []byte) (int, error) {
	// Open exactly once; a failed open is cached in lr.err and returned on
	// every subsequent call rather than retried.
	if lr.r == nil && lr.err == nil {
		lr.r, lr.err = lr.open()
	}
	if lr.err != nil {
		return 0, lr.err
	}
	return lr.r.Read(p)
}
|
||||
|
||||
// String implements fmt.Stringer.
|
||||
func (lr *LazyOpener) String() string {
|
||||
if len(lr.s) > 0 {
|
||||
return lr.s
|
||||
}
|
||||
if lr.r != nil {
|
||||
return fmt.Sprintf("%v", lr.r)
|
||||
}
|
||||
return "unopened mystery file"
|
||||
}
|
||||
|
||||
// Close implements io.Closer.Close.
|
||||
func (lr *LazyOpener) Close() error {
|
||||
if c, ok := lr.r.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LazyOpenerAt is a lazy io.ReaderAt.
//
// LazyOpenerAt will use a given open function to derive an io.ReaderAt when
// ReadAt is first called.
type LazyOpenerAt struct {
	r     io.ReaderAt                 // underlying ReaderAt; nil until open() has been called
	s     string                      // display name returned by String()
	err   error                       // cached error from open(); sticky once set
	limit int64                       // read limit in bytes; <= 0 means unlimited
	open  func() (io.ReaderAt, error) // produces the ReaderAt on first ReadAt
}
|
||||
|
||||
// NewLazyFile returns a lazy ReaderAt opened from path.
|
||||
func NewLazyFile(path string) *LazyOpenerAt {
|
||||
if len(path) == 0 {
|
||||
return nil
|
||||
}
|
||||
return NewLazyOpenerAt(path, func() (io.ReaderAt, error) {
|
||||
return os.Open(path)
|
||||
})
|
||||
}
|
||||
|
||||
// NewLazyLimitFile returns a lazy ReaderAt opened from path with a limit reader on it.
|
||||
func NewLazyLimitFile(path string, limit int64) *LazyOpenerAt {
|
||||
if len(path) == 0 {
|
||||
return nil
|
||||
}
|
||||
return NewLazyLimitOpenerAt(path, limit, func() (io.ReaderAt, error) {
|
||||
return os.Open(path)
|
||||
})
|
||||
}
|
||||
|
||||
// NewLazyOpenerAt returns a lazy io.ReaderAt based on `open`.
|
||||
func NewLazyOpenerAt(filename string, open func() (io.ReaderAt, error)) *LazyOpenerAt {
|
||||
return &LazyOpenerAt{s: filename, open: open, limit: -1}
|
||||
}
|
||||
|
||||
// NewLazyLimitOpenerAt returns a lazy io.ReaderAt based on `open`.
|
||||
func NewLazyLimitOpenerAt(filename string, limit int64, open func() (io.ReaderAt, error)) *LazyOpenerAt {
|
||||
return &LazyOpenerAt{s: filename, open: open, limit: limit}
|
||||
}
|
||||
|
||||
// String implements fmt.Stringer.
|
||||
func (loa *LazyOpenerAt) String() string {
|
||||
if len(loa.s) > 0 {
|
||||
return loa.s
|
||||
}
|
||||
if loa.r != nil {
|
||||
return fmt.Sprintf("%v", loa.r)
|
||||
}
|
||||
return "unopened mystery file"
|
||||
}
|
||||
|
||||
// File returns the backend file of the io.ReaderAt if it
|
||||
// is backed by a os.File.
|
||||
func (loa *LazyOpenerAt) File() *os.File {
|
||||
if f, ok := loa.r.(*os.File); ok {
|
||||
return f
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadAt implements io.ReaderAt.ReadAt.
//
// The underlying ReaderAt is opened on the first call; a failed open is
// cached and returned by all subsequent calls.
func (loa *LazyOpenerAt) ReadAt(p []byte, off int64) (int, error) {
	if loa.r == nil && loa.err == nil {
		loa.r, loa.err = loa.open()
	}
	if loa.err != nil {
		return 0, loa.err
	}
	// A positive limit caps reads as if the source ended at loa.limit;
	// limit <= 0 means unlimited.
	if loa.limit > 0 {
		if off >= loa.limit {
			return 0, io.EOF
		}
		// Shrink p so the delegated read cannot cross the limit.
		if int64(len(p)) > loa.limit-off {
			p = p[0 : loa.limit-off]
		}
	}
	return loa.r.ReadAt(p, off)
}
|
||||
|
||||
// Close implements io.Closer.Close.
|
||||
func (loa *LazyOpenerAt) Close() error {
|
||||
if c, ok := loa.r.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
76
vendor/github.com/u-root/uio/uio/null.go
generated
vendored
Normal file
76
vendor/github.com/u-root/uio/uio/null.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
// Copyright 2012-2019 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Discard implementation copied from the Go project:
|
||||
// https://golang.org/src/io/ioutil/ioutil.go.
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
|
||||
package uio
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// devNull implements an io.Writer and io.ReaderFrom that discards any writes.
type devNull struct{}

// devNull implements ReaderFrom as an optimization so io.Copy to
// ioutil.Discard can avoid doing unnecessary work.
var _ io.ReaderFrom = devNull{}
|
||||
|
||||
// Write is an io.Writer.Write that discards data.
|
||||
func (devNull) Write(p []byte) (int, error) {
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// Name is like os.File.Name() and returns "null".
func (devNull) Name() string {
	return "null"
}
|
||||
|
||||
// WriteString implements io.StringWriter and discards given data.
|
||||
func (devNull) WriteString(s string) (int, error) {
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
// blackHolePool recycles the 8 KiB scratch buffers used by devNull.ReadFrom,
// so that high-rate discards do not allocate a fresh buffer per call.
var blackHolePool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 8192)
		return &b
	},
}
|
||||
|
||||
// ReadFrom implements io.ReaderFrom and discards data being read.
//
// It returns the total number of bytes drained from r; io.EOF is treated as
// normal termination, not an error.
func (devNull) ReadFrom(r io.Reader) (n int64, err error) {
	// Borrow a pooled scratch buffer so concurrent discards do not each
	// allocate; it is returned to the pool on every exit path.
	bufp := blackHolePool.Get().(*[]byte)
	var readSize int
	for {
		readSize, err = r.Read(*bufp)
		n += int64(readSize)
		if err != nil {
			blackHolePool.Put(bufp)
			if err == io.EOF {
				return n, nil
			}
			return
		}
	}
}
|
||||
|
||||
// Close does nothing; implementing io.Closer.
func (devNull) Close() error {
	return nil
}
|
||||
|
||||
// WriteNameCloser is the interface that groups Write, Close, and Name methods.
type WriteNameCloser interface {
	io.Writer
	io.Closer
	Name() string
}

// Discard is a WriteNameCloser on which all Write and Close calls succeed
// without doing anything, and the Name call returns "null".
var Discard WriteNameCloser = devNull{}
|
42
vendor/github.com/u-root/uio/uio/progress.go
generated
vendored
Normal file
42
vendor/github.com/u-root/uio/uio/progress.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
// Copyright 2019 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uio
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ProgressReadCloser implements io.ReadCloser and prints Symbol to W after every
// Interval bytes passes through RC.
type ProgressReadCloser struct {
	RC io.ReadCloser // wrapped reader whose progress is reported

	Symbol   string    // marker printed per Interval bytes (e.g. "#")
	Interval int       // number of bytes between printed symbols
	W        io.Writer // destination for the progress output

	counter int  // total bytes read so far
	written bool // whether at least one symbol has been printed
}
|
||||
|
||||
// Read implements io.Reader for ProgressReadCloser.
func (rc *ProgressReadCloser) Read(p []byte) (n int, err error) {
	defer func() {
		// Number of Interval boundaries this read crossed: carry the
		// remainder of previously counted bytes plus n new bytes.
		numSymbols := (rc.counter%rc.Interval + n) / rc.Interval
		_, _ = rc.W.Write([]byte(strings.Repeat(rc.Symbol, numSymbols)))
		rc.counter += n
		rc.written = (rc.written || numSymbols > 0)
		// Terminate the progress line at EOF, but only if anything
		// was printed.
		if err == io.EOF && rc.written {
			_, _ = rc.W.Write([]byte("\n"))
		}
	}()
	return rc.RC.Read(p)
}
|
||||
|
||||
// Close implements io.Closer for ProgressReader by closing the wrapped
// ReadCloser.
func (rc *ProgressReadCloser) Close() error {
	return rc.RC.Close()
}
|
67
vendor/github.com/u-root/uio/uio/reader.go
generated
vendored
Normal file
67
vendor/github.com/u-root/uio/uio/reader.go
generated
vendored
Normal file
@ -0,0 +1,67 @@
|
||||
// Copyright 2018 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// inMemReaderAt is satisfied by ReaderAts that already hold their entire
// contents in memory and can expose them without copying.
type inMemReaderAt interface {
	Bytes() []byte
}
|
||||
|
||||
// ReadAll reads everything that r contains.
|
||||
//
|
||||
// Callers *must* not modify bytes in the returned byte slice.
|
||||
//
|
||||
// If r is an in-memory representation, ReadAll will attempt to return a
|
||||
// pointer to those bytes directly.
|
||||
func ReadAll(r io.ReaderAt) ([]byte, error) {
|
||||
if imra, ok := r.(inMemReaderAt); ok {
|
||||
return imra.Bytes(), nil
|
||||
}
|
||||
return io.ReadAll(Reader(r))
|
||||
}
|
||||
|
||||
// Reader generates a Reader from a ReaderAt.
//
// The returned reader starts at offset 0 and is effectively unbounded.
func Reader(r io.ReaderAt) io.Reader {
	const unbounded = math.MaxInt64
	return io.NewSectionReader(r, 0, unbounded)
}
|
||||
|
||||
// ReaderAtEqual compares the contents of r1 and r2.
|
||||
func ReaderAtEqual(r1, r2 io.ReaderAt) bool {
|
||||
var c, d []byte
|
||||
var r1err, r2err error
|
||||
if r1 != nil {
|
||||
c, r1err = ReadAll(r1)
|
||||
}
|
||||
if r2 != nil {
|
||||
d, r2err = ReadAll(r2)
|
||||
}
|
||||
return bytes.Equal(c, d) && reflect.DeepEqual(r1err, r2err)
|
||||
}
|
||||
|
||||
// ReadIntoFile reads all from io.Reader into the file at given path.
//
// If the file at given path does not exist, a new file will be created.
// If the file exists at the given path, but not empty, it will be truncated.
func ReadIntoFile(r io.Reader, p string) error {
	f, err := os.OpenFile(p, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
	if err != nil {
		return err
	}
	// The deferred Close is a safety net for the error paths; the explicit
	// Close below reports any flush error on success.
	defer f.Close()

	if _, err := io.Copy(f, r); err != nil {
		return err
	}

	return f.Close()
}
|
9
vendor/github.com/u-root/uio/uio/uio.go
generated
vendored
Normal file
9
vendor/github.com/u-root/uio/uio/uio.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
// Copyright 2018 the u-root Authors. All rights reserved
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package uio unifies commonly used io utilities for u-root.
|
||||
//
|
||||
// uio's most used feature is the Buffer/Lexer combination to parse binary data
|
||||
// of arbitrary endianness into data structures.
|
||||
package uio
|
Reference in New Issue
Block a user