Documentation
Constants
const (
	APIPathUserQuery      = "/query"
	APIPathInternalQuery  = "/_query"
	APIPathUserStream     = "/stream"
	APIPathInternalStream = "/_stream"
	APIPathReplicate      = "/replicate"
	APIPathClusterState   = "/_clusterstate"
)
These are the store API URL paths.
Variables
var ErrNoSegmentsAvailable = errors.New("no segments available")
ErrNoSegmentsAvailable is returned by various methods to indicate no qualifying segments are currently available.
var ErrShortRead = errors.New("short read")
ErrShortRead is returned when a read is unexpectedly shortened.
Functions
This section is empty.
Types
type API
type API struct {
	// contains filtered or unexported fields
}
API serves the store API.
func NewAPI
func NewAPI(
	peer ClusterPeer,
	log Log,
	queryClient, streamClient Doer,
	replicatedSegments, replicatedBytes prometheus.Counter,
	duration *prometheus.HistogramVec,
	reporter EventReporter,
) *API
NewAPI returns a usable API.
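As a rough wiring sketch, the following constructs an API and serves it over HTTP. It assumes *http.Client satisfies Doer, that *API implements http.Handler, and that peer, log, and reporter are constructed elsewhere; the metric and label names are illustrative only, not prescribed by the package.

func serveStore(peer store.ClusterPeer, log store.Log, reporter store.EventReporter) error {
	// Illustrative metric names.
	replicatedSegments := prometheus.NewCounter(prometheus.CounterOpts{Name: "store_replicated_segments"})
	replicatedBytes := prometheus.NewCounter(prometheus.CounterOpts{Name: "store_replicated_bytes"})
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "store_api_request_duration_seconds"},
		[]string{"method", "path", "status_code"}, // hypothetical labels
	)
	api := store.NewAPI(
		peer, log,
		http.DefaultClient, http.DefaultClient, // queryClient, streamClient; assumes *http.Client satisfies Doer
		replicatedSegments, replicatedBytes,
		duration,
		reporter,
	)
	return http.ListenAndServe(":7650", api) // assumes *API implements http.Handler
}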
type ClusterPeer
ClusterPeer models cluster.Peer.
type Compacter
type Compacter struct {
	// contains filtered or unexported fields
}
Compacter is responsible for all post-flush segment mutation. That includes compacting highly-overlapping segments, compacting small and sequential segments, and enforcing the retention window.
func NewCompacter
func NewCompacter(
	log Log,
	segmentTargetSize int64,
	retain time.Duration,
	purge time.Duration,
	compactDuration *prometheus.HistogramVec,
	trashSegments, purgeSegments *prometheus.CounterVec,
	reporter EventReporter,
) *Compacter
NewCompacter creates a Compacter. Don't forget to Run it.
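A minimal start-up sketch, under the assumption that Run blocks and is meant to be launched in its own goroutine; the sizes, durations, metric names, and label names are illustrative.

func startCompacter(log store.Log, reporter store.EventReporter) *store.Compacter {
	compactDuration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "store_compact_duration_seconds"},
		[]string{"kind"}, // hypothetical label
	)
	trashSegments := prometheus.NewCounterVec(prometheus.CounterOpts{Name: "store_trash_segments"}, []string{"success"})
	purgeSegments := prometheus.NewCounterVec(prometheus.CounterOpts{Name: "store_purge_segments"}, []string{"success"})
	c := store.NewCompacter(
		log,
		128*1024*1024, // segmentTargetSize: 128 MiB, illustrative
		24*time.Hour,  // retain: how long segments stay queryable
		1*time.Hour,   // purge: grace period before trashed segments are hard deleted
		compactDuration,
		trashSegments, purgeSegments,
		reporter,
	)
	go c.Run() // "Don't forget to Run it."
	return c
}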
type Consumer
type Consumer struct {
	// contains filtered or unexported fields
}
Consumer reads segments from the ingesters, and replicates merged segments to the rest of the cluster. It's implemented as a state machine: gather segments, replicate, commit, and repeat. All failures invalidate the entire batch.
func NewConsumer
func NewConsumer(
	peer *cluster.Peer,
	client *http.Client,
	segmentTargetSize int64,
	segmentTargetAge time.Duration,
	replicationFactor int,
	consumedSegments, consumedBytes prometheus.Counter,
	replicatedSegments, replicatedBytes prometheus.Counter,
	reporter EventReporter,
) *Consumer
NewConsumer creates a consumer. Don't forget to Run it.
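The same pattern applies here, as a sketch with illustrative values; peer comes from cluster membership established elsewhere, and Run is again assumed to block.

func startConsumer(peer *cluster.Peer, reporter store.EventReporter) *store.Consumer {
	consumedSegments := prometheus.NewCounter(prometheus.CounterOpts{Name: "store_consumed_segments"})
	consumedBytes := prometheus.NewCounter(prometheus.CounterOpts{Name: "store_consumed_bytes"})
	replicatedSegments := prometheus.NewCounter(prometheus.CounterOpts{Name: "store_replicated_segments"})
	replicatedBytes := prometheus.NewCounter(prometheus.CounterOpts{Name: "store_replicated_bytes"})
	c := store.NewConsumer(
		peer,
		http.DefaultClient,
		16*1024*1024,  // segmentTargetSize, illustrative
		3*time.Second, // segmentTargetAge, illustrative
		2,             // replicationFactor, illustrative
		consumedSegments, consumedBytes,
		replicatedSegments, replicatedBytes,
		reporter,
	)
	go c.Run() // "Don't forget to Run it."
	return c
}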
type EventReporter
type EventReporter interface {
	ReportEvent(Event)
}
EventReporter can receive (and, presumably, do something with) Events.
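Any type with a ReportEvent(Event) method satisfies the interface. For example, a hypothetical reporter that forwards events to a channel without ever blocking the caller:

type chanReporter struct{ events chan<- store.Event }

func (r chanReporter) ReportEvent(e store.Event) {
	select {
	case r.events <- e:
	default: // drop the event rather than block the reporting goroutine
	}
}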
type Log
type Log interface {
	// Create a new segment for writes.
	Create() (WriteSegment, error)

	// Query written and closed segments.
	Query(qp QueryParams, statsOnly bool) (QueryResult, error)

	// Overlapping returns segments that have a high degree of time overlap and
	// can be compacted.
	Overlapping() ([]ReadSegment, error)

	// Sequential returns segments that are small and sequential and can be
	// compacted.
	Sequential() ([]ReadSegment, error)

	// Trashable segments are read segments whose newest record is older than
	// the given time. They may be trashed, i.e. made unavailable for querying.
	Trashable(oldestRecord time.Time) ([]ReadSegment, error)

	// Purgeable segments are trash segments whose modification time (i.e. the
	// time they were trashed) is older than the given time. They may be
	// purged, i.e. hard deleted.
	Purgeable(oldestModTime time.Time) ([]TrashSegment, error)

	// Stats of the current state of the store log.
	Stats() (LogStats, error)

	// Close the log, releasing any claimed lock.
	Close() error
}
Log is an abstraction for segments on a storage node.
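As a small usage sketch built only from the documented methods, here is a helper that prints a summary via Stats (the LogStats fields are documented below):

func logOverview(l store.Log) error {
	stats, err := l.Stats()
	if err != nil {
		return err
	}
	fmt.Printf("active: %d segments, %d bytes; flushed: %d segments, %d bytes; trashed: %d segments, %d bytes\n",
		stats.ActiveSegments, stats.ActiveBytes,
		stats.FlushedSegments, stats.FlushedBytes,
		stats.TrashedSegments, stats.TrashedBytes)
	return nil
}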
func NewFileLog
func NewFileLog(filesys fs.Filesystem, root string, segmentTargetSize, segmentBufferSize int64, reporter EventReporter) (Log, error)
NewFileLog returns a Log backed by the filesystem at path root. Note that we don't own segment files! They may disappear.
type LogReporter
LogReporter is a default implementation of EventReporter that logs events to the wrapped logger. Events are logged at Warning level by default; if an event's Err is non-nil, it is logged at Error level.
func (LogReporter) ReportEvent
func (r LogReporter) ReportEvent(e Event)
ReportEvent implements EventReporter.
type LogStats
type LogStats struct {
	ActiveSegments  int64
	ActiveBytes     int64
	FlushedSegments int64
	FlushedBytes    int64
	ReadingSegments int64
	ReadingBytes    int64
	TrashedSegments int64
	TrashedBytes    int64
}
LogStats describe the current state of the store log.
type QueryParams
type QueryParams struct {
	From  ulidOrTime `json:"from"`
	To    ulidOrTime `json:"to"`
	Q     string     `json:"q"`
	Regex bool       `json:"regex"`
}
QueryParams defines all dimensions of a query. StatsOnly is implied by the HTTP method.
func (*QueryParams) DecodeFrom
func (qp *QueryParams) DecodeFrom(u *url.URL, rb rangeBehavior) error
DecodeFrom populates a QueryParams from a URL.
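External callers can't invoke DecodeFrom directly (rangeBehavior is unexported); instead they encode the parameters into the request URL that the API decodes on arrival. Judging from the json tags, the query string likely uses the same keys, though both the keys and the value formats (ulidOrTime suggests a ULID or a timestamp) are assumptions here:

// A user query against APIPathUserQuery, with assumed parameter names.
u := url.URL{
	Path: store.APIPathUserQuery,
	RawQuery: url.Values{
		"from":  []string{"2017-01-01T00:00:00Z"}, // or a ULID
		"to":    []string{"2017-01-02T00:00:00Z"},
		"q":     []string{"error"},
		"regex": []string{"false"},
	}.Encode(),
}
fmt.Println(u.String()) // e.g. /query?from=...&q=error&regex=false&to=...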
type QueryResult
type QueryResult struct {
	Params          QueryParams   `json:"query"`
	NodesQueried    int           `json:"nodes_queried"`
	SegmentsQueried int           `json:"segments_queried"`
	MaxDataSetSize  int64         `json:"max_data_set_size"`
	ErrorCount      int           `json:"error_count,omitempty"`
	Duration        string        `json:"duration"`
	Records         io.ReadCloser // TODO(pb): audit to ensure closing is valid throughout
}
QueryResult contains statistics about, and matching records for, a query.
func (*QueryResult) DecodeFrom
func (qr *QueryResult) DecodeFrom(resp *http.Response) error
DecodeFrom decodes the QueryResult from the HTTP response.
func (*QueryResult) EncodeTo
func (qr *QueryResult) EncodeTo(w http.ResponseWriter)
EncodeTo encodes the QueryResult to the HTTP response writer. It also closes the records ReadCloser.
func (*QueryResult) Merge
func (qr *QueryResult) Merge(other QueryResult) error
Merge the other QueryResult into this one.
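Taken together, DecodeFrom, Merge, and EncodeTo suggest a scatter-gather flow: decode each per-node response, merge everything into one result, and encode the final result to the client. A sketch, with the fan-out that produces the responses elided:

func gather(w http.ResponseWriter, responses []*http.Response) error {
	var merged store.QueryResult
	for _, resp := range responses {
		var qr store.QueryResult
		if err := qr.DecodeFrom(resp); err != nil {
			return err
		}
		if err := merged.Merge(qr); err != nil {
			return err
		}
	}
	merged.EncodeTo(w) // also closes the merged Records ReadCloser
	return nil
}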
type ReadSegment
ReadSegment can be read from, reset (back to flushed state), trashed (made unavailable for queries), or purged (hard deleted).
type TrashSegment
type TrashSegment interface {
	Purge() error
}
TrashSegment may only be purged (hard deleted).
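Purging is driven from the Log: Purgeable selects trash segments older than a cutoff, and each one is then hard deleted via Purge. A sketch using only the documented methods:

func purgeOldTrash(l store.Log, grace time.Duration) error {
	segs, err := l.Purgeable(time.Now().Add(-grace))
	if err != nil {
		return err
	}
	for _, s := range segs {
		if err := s.Purge(); err != nil {
			return err
		}
	}
	return nil
}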