-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathseekable_buffer.go
More file actions
111 lines (103 loc) · 3.18 KB
/
seekable_buffer.go
File metadata and controls
111 lines (103 loc) · 3.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
package byte_utils
// This file defines an alternative to bytes.Buffer that supports the Seek
// method for writers.
import (
"fmt"
"io"
)
// SeekableBuffer is an in-memory buffer implementing io.Reader, io.Writer,
// and io.Seeker. Unlike bytes.Buffer, it supports seeking for writers: the
// buffer grows on demand and never shrinks.
type SeekableBuffer struct {
	// This will grow as needed, based on either the farthest write or seek
	// offset. Writing past the end of this will increase its size. Seeking
	// past the end will add zeros to the necessary size.
	Data []byte
	// The current read or write offset in the file. It's an error to read past
	// the end of the data, but writing past (using Write()) will expand the
	// capacity and zero-fill any previous unwritten space (if skipped using
	// Seek()).
	Offset int64
}
// NewSeekableBuffer returns an empty SeekableBuffer, positioned at offset 0,
// with a modest initial capacity to absorb small writes without reallocating.
func NewSeekableBuffer() *SeekableBuffer {
	b := &SeekableBuffer{}
	// Offset is already 0 from the zero value; only Data needs setup.
	b.Data = make([]byte, 0, 4096)
	return b
}
// expandToSize grows b.Data to exactly the given size, zero-filling any newly
// exposed bytes. Used internally by Seek and Write. Panics if the new size is
// smaller than the current length, since this buffer never shrinks.
func (b *SeekableBuffer) expandToSize(size int64) {
	if size < int64(len(b.Data)) {
		panic("Trying to reduce buffer size")
	}
	// Fast path: the backing array already has enough capacity, so just
	// re-slice. The bytes between the old length and the new size are still
	// zero, because allocations are zeroed and the buffer only ever grows.
	if size <= int64(cap(b.Data)) {
		b.Data = b.Data[:size]
		// Fix: the original fell through here and reallocated anyway,
		// copying the whole buffer and discarding the reserve capacity.
		return
	}
	// Allocate extra headroom (double the growth amount) to amortize future
	// reallocations.
	extraSize := 2 * (size - int64(len(b.Data)))
	// Limit the "bonus" allocation size to 1GB.
	extraSizeLimit := int64(1 << 30)
	if extraSize >= extraSizeLimit {
		extraSize = extraSizeLimit
	}
	newBuffer := make([]byte, size, size+extraSize)
	copy(newBuffer, b.Data)
	b.Data = newBuffer
}
// Seek implements io.Seeker. It sets the position used by the next Read or
// Write and returns the new offset. Seeking past the current end grows the
// underlying buffer with zeros. On error, the current offset is left
// unchanged and returned alongside the error.
func (b *SeekableBuffer) Seek(offset int64, whence int) (int64, error) {
	var base int64
	switch whence {
	case io.SeekStart:
		base = 0
	case io.SeekCurrent:
		base = b.Offset
	case io.SeekEnd:
		base = int64(len(b.Data))
	default:
		return b.Offset, fmt.Errorf("Invalid \"whence\" for seek: %d", whence)
	}
	target := base + offset
	// Negative absolute offsets are illegal.
	if target < 0 {
		return b.Offset, fmt.Errorf("Can't seek to negative offset: %d",
			target)
	}
	// Grow the buffer if the target lies beyond the current data; otherwise
	// no expansion is needed.
	if target > int64(len(b.Data)) {
		b.expandToSize(target)
	}
	b.Offset = target
	return target, nil
}
// Read implements io.Reader, copying bytes from the current offset into dst
// and advancing the offset. Returns io.EOF when the offset is at or beyond
// the end of the data; otherwise returns the number of bytes copied (which
// may be fewer than len(dst) near the end of the buffer).
func (b *SeekableBuffer) Read(dst []byte) (int, error) {
	size := int64(len(b.Data))
	if b.Offset >= size {
		return 0, io.EOF
	}
	// Clamp the read window to the available data.
	end := b.Offset + int64(len(dst))
	if end > size {
		end = size
	}
	n := copy(dst, b.Data[b.Offset:end])
	b.Offset = end
	return n, nil
}
// Write implements io.Writer. Writing past the current end expands the
// buffer, zero-filling any gap left by a prior Seek past the end. The write
// offset advances by len(data), per the struct's documented Offset semantics.
func (b *SeekableBuffer) Write(data []byte) (int, error) {
	start := b.Offset
	limit := start + int64(len(data))
	if limit > int64(len(b.Data)) {
		b.expandToSize(limit)
	}
	copy(b.Data[start:limit], data)
	// Fix: advance the offset so consecutive Writes append rather than
	// repeatedly overwriting the same region (the original left Offset
	// untouched, breaking io.Writer's sequential-write contract).
	b.Offset = limit
	return len(data), nil
}