path: root/sftp/pool.go
blob: 36126290657e6476ce07f20730c617df467ca9cd

package sftp

// bufPool provides a pool of byte slices to be reused in various parts of the package.
// It is safe for concurrent use when shared through a pointer.
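//
// Illustrative usage (the depth and buffer length below are arbitrary example
// values, not defaults of this package):
//
//	pool := newBufPool(64, 32*1024)
//	b := pool.Get()
//	// ... read into or write out of b ...
//	pool.Put(b)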
type bufPool struct {
	ch   chan []byte
	blen int
}

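// newBufPool returns a bufPool that retains at most depth buffers, each of
// length bufLen, for reuse.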
func newBufPool(depth, bufLen int) *bufPool {
	return &bufPool{
		ch:   make(chan []byte, depth),
		blen: bufLen,
	}
}

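// Get returns a buffer of length p.blen, reusing a pooled buffer with
// sufficient capacity when one is available and allocating a new one
// otherwise. It panics if p.blen is not positive.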
func (p *bufPool) Get() []byte {
	if p.blen <= 0 {
		panic("bufPool: new buffer creation length must be greater than zero")
	}

	for {
		select {
		case b := <-p.ch:
			if cap(b) < p.blen {
				// just in case: throw away any buffer with insufficient capacity.
				continue
			}

			return b[:p.blen]

		default:
			return make([]byte, p.blen)
		}
	}
}

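// Put offers b back to the pool. Buffers whose capacity is below p.blen or
// more than twice p.blen are dropped, as is anything offered to a nil pool or
// to a pool whose channel is already full.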
func (p *bufPool) Put(b []byte) {
	if p == nil {
		// functional default: no reuse.
		return
	}

	if cap(b) < p.blen || cap(b) > p.blen*2 {
		// DO NOT reuse buffers with insufficient capacity.
		// This could cause panics when resizing to p.blen.

		// DO NOT reuse buffers with excessive capacity.
		// This could cause memory leaks.
		return
	}

	select {
	case p.ch <- b:
	default:
	}
}

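// resChanPool provides a pool of reusable result channels (each buffered with
// capacity 1), so callers can avoid allocating a new channel on every Get.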
type resChanPool chan chan result

func newResChanPool(depth int) resChanPool {
	return make(chan chan result, depth)
}

func (p resChanPool) Get() chan result {
	select {
	case ch := <-p:
		return ch
	default:
		return make(chan result, 1)
	}
}

func (p resChanPool) Put(ch chan result) {
	select {
	case p <- ch:
	default:
	}
}