sql_engine.go, grafana v5.3.1. Source: Gitee mirror of https://github.com/grafana/grafana (synced daily from upstream).
package tsdb

import (
	"container/list"
	"context"
	"database/sql"
	"fmt"
	"math"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/go-xorm/core"
	"github.com/go-xorm/xorm"

	"github.com/grafana/grafana/pkg/components/null"
	"github.com/grafana/grafana/pkg/components/simplejson"
	"github.com/grafana/grafana/pkg/log"
	"github.com/grafana/grafana/pkg/models"
)

// SqlMacroEngine interpolates macros into sql. It takes in the Query to have access to query context and
// timeRange to be able to generate queries that use from and to.
type SqlMacroEngine interface {
	Interpolate(query *Query, timeRange *TimeRange, sql string) (string, error)
}

// SqlTableRowTransformer transforms a query result row to RowValues with proper types.
type SqlTableRowTransformer interface {
	Transform(columnTypes []*sql.ColumnType, rows *core.Rows) (RowValues, error)
}
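
// engineCacheType caches one xorm engine per datasource id, together with the
// datasource version the engine was built for, so that a changed datasource
// configuration causes the engine to be recreated.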
type engineCacheType struct {
	cache    map[int64]*xorm.Engine
	versions map[int64]int
	sync.Mutex
}

var engineCache = engineCacheType{
	cache:    make(map[int64]*xorm.Engine),
	versions: make(map[int64]int),
}

var sqlIntervalCalculator = NewIntervalCalculator(nil)

var NewXormEngine = func(driverName string, connectionString string) (*xorm.Engine, error) {
	return xorm.NewEngine(driverName, connectionString)
}
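
// sqlQueryEndpoint is a generic TsdbQueryEndpoint for SQL datasources. It holds
// the datasource-specific macro engine and row transformer plus the shared xorm
// engine, the column names treated as time columns, and the database types
// treated as metric name columns.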
type sqlQueryEndpoint struct {
	macroEngine       SqlMacroEngine
	rowTransformer    SqlTableRowTransformer
	engine            *xorm.Engine
	timeColumnNames   []string
	metricColumnTypes []string
	log               log.Logger
}

type SqlQueryEndpointConfiguration struct {
	DriverName        string
	Datasource        *models.DataSource
	ConnectionString  string
	TimeColumnNames   []string
	MetricColumnTypes []string
}
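
// NewSqlQueryEndpoint builds a sqlQueryEndpoint from the given configuration,
// reusing a cached xorm engine when one already exists for the datasource and
// its version has not changed. A datasource implementation would call it
// roughly like this (a sketch; myRowTransformer, myMacroEngine, cnnstr,
// datasource and logger are placeholders, not names from this package):
//
//	endpoint, err := NewSqlQueryEndpoint(&SqlQueryEndpointConfiguration{
//		DriverName:       "mysql",
//		ConnectionString: cnnstr,
//		Datasource:       datasource,
//	}, myRowTransformer, myMacroEngine, logger)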
var NewSqlQueryEndpoint = func(config *SqlQueryEndpointConfiguration, rowTransformer SqlTableRowTransformer, macroEngine SqlMacroEngine, log log.Logger) (TsdbQueryEndpoint, error) {
	queryEndpoint := sqlQueryEndpoint{
		rowTransformer:  rowTransformer,
		macroEngine:     macroEngine,
		timeColumnNames: []string{"time"},
		log:             log,
	}

	if len(config.TimeColumnNames) > 0 {
		queryEndpoint.timeColumnNames = config.TimeColumnNames
	}

	if len(config.MetricColumnTypes) > 0 {
		queryEndpoint.metricColumnTypes = config.MetricColumnTypes
	}

	engineCache.Lock()
	defer engineCache.Unlock()

	if engine, present := engineCache.cache[config.Datasource.Id]; present {
		if version := engineCache.versions[config.Datasource.Id]; version == config.Datasource.Version {
			queryEndpoint.engine = engine
			return &queryEndpoint, nil
		}
	}

	engine, err := NewXormEngine(config.DriverName, config.ConnectionString)
	if err != nil {
		return nil, err
	}

	engine.SetMaxOpenConns(10)
	engine.SetMaxIdleConns(10)

	engineCache.versions[config.Datasource.Id] = config.Datasource.Version
	engineCache.cache[config.Datasource.Id] = engine
	queryEndpoint.engine = engine

	return &queryEndpoint, nil
}
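
// rowLimit caps the number of rows processed for a single query; the transform
// functions return an error once the limit is exceeded.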
const rowLimit = 1000000

// Query is the main function for the SqlQueryEndpoint
func (e *sqlQueryEndpoint) Query(ctx context.Context, dsInfo *models.DataSource, tsdbQuery *TsdbQuery) (*Response, error) {
	result := &Response{
		Results: make(map[string]*QueryResult),
	}

	session := e.engine.NewSession()
	defer session.Close()
	db := session.DB()

	for _, query := range tsdbQuery.Queries {
		rawSQL := query.Model.Get("rawSql").MustString()
		if rawSQL == "" {
			continue
		}

		queryResult := &QueryResult{Meta: simplejson.New(), RefId: query.RefId}
		result.Results[query.RefId] = queryResult

		// global substitutions
		rawSQL, err := Interpolate(query, tsdbQuery.TimeRange, rawSQL)
		if err != nil {
			queryResult.Error = err
			continue
		}

		// datasource specific substitutions
		rawSQL, err = e.macroEngine.Interpolate(query, tsdbQuery.TimeRange, rawSQL)
		if err != nil {
			queryResult.Error = err
			continue
		}

		queryResult.Meta.Set("sql", rawSQL)

		rows, err := db.Query(rawSQL)
		if err != nil {
			queryResult.Error = err
			continue
		}
		defer rows.Close()

		format := query.Model.Get("format").MustString("time_series")

		switch format {
		case "time_series":
			err := e.transformToTimeSeries(query, rows, queryResult, tsdbQuery)
			if err != nil {
				queryResult.Error = err
				continue
			}
		case "table":
			err := e.transformToTable(query, rows, queryResult, tsdbQuery)
			if err != nil {
				queryResult.Error = err
				continue
			}
		}
	}

	return result, nil
}
// global macros/substitutions for all sql datasources
var Interpolate = func(query *Query, timeRange *TimeRange, sql string) (string, error) {
	minInterval, err := GetIntervalFrom(query.DataSource, query.Model, time.Second*60)
	if err != nil {
		return sql, nil
	}

	interval := sqlIntervalCalculator.Calculate(timeRange, minInterval)
	sql = strings.Replace(sql, "$__interval_ms", strconv.FormatInt(interval.Milliseconds(), 10), -1)
	sql = strings.Replace(sql, "$__interval", interval.Text, -1)

	return sql, nil
}
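
// transformToTable maps a result set to a single Table: every column becomes a
// TableColumn, every row is converted by the row transformer, and a recognized
// time column is normalized to epoch milliseconds.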
func (e *sqlQueryEndpoint) transformToTable(query *Query, rows *core.Rows, result *QueryResult, tsdbQuery *TsdbQuery) error {
	columnNames, err := rows.Columns()
	columnCount := len(columnNames)

	if err != nil {
		return err
	}

	rowCount := 0
	timeIndex := -1

	table := &Table{
		Columns: make([]TableColumn, columnCount),
		Rows:    make([]RowValues, 0),
	}

	for i, name := range columnNames {
		table.Columns[i].Text = name

		for _, tc := range e.timeColumnNames {
			if name == tc {
				timeIndex = i
				break
			}
		}
	}

	columnTypes, err := rows.ColumnTypes()
	if err != nil {
		return err
	}

	for ; rows.Next(); rowCount++ {
		if rowCount > rowLimit {
			return fmt.Errorf("query row limit exceeded, limit %d", rowLimit)
		}

		values, err := e.rowTransformer.Transform(columnTypes, rows)
		if err != nil {
			return err
		}

		// converts column named time to unix timestamp in milliseconds
		// to make native mssql datetime types and epoch dates work in
		// annotation and table queries.
		ConvertSqlTimeColumnToEpochMs(values, timeIndex)
		table.Rows = append(table.Rows, values)
	}

	result.Tables = append(result.Tables, table)
	result.Meta.Set("rowCount", rowCount)
	return nil
}
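
// transformToTimeSeries maps a result set to one or more time series. A time
// column is required. The series name comes from a column named "metric", from
// the first column whose database type is listed in metricColumnTypes, or, for
// plain value columns, from the column name itself; when a metric column is
// present and there are more than three columns in total, the metric value is
// used as a prefix to the value column name. If the query model enables fill,
// gaps between rows are padded according to fillMode.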
func (e *sqlQueryEndpoint) transformToTimeSeries(query *Query, rows *core.Rows, result *QueryResult, tsdbQuery *TsdbQuery) error {
	pointsBySeries := make(map[string]*TimeSeries)
	seriesByQueryOrder := list.New()

	columnNames, err := rows.Columns()
	if err != nil {
		return err
	}

	columnTypes, err := rows.ColumnTypes()
	if err != nil {
		return err
	}

	rowCount := 0
	timeIndex := -1
	metricIndex := -1
	metricPrefix := false
	var metricPrefixValue string

	// check columns of resultset: a column named time is mandatory
	// the first text column is treated as metric name unless a column named metric is present
	for i, col := range columnNames {
		for _, tc := range e.timeColumnNames {
			if col == tc {
				timeIndex = i
				continue
			}
		}
		switch col {
		case "metric":
			metricIndex = i
		default:
			if metricIndex == -1 {
				columnType := columnTypes[i].DatabaseTypeName()

				for _, mct := range e.metricColumnTypes {
					if columnType == mct {
						metricIndex = i
						continue
					}
				}
			}
		}
	}

	// use metric column as prefix with multiple value columns
	if metricIndex != -1 && len(columnNames) > 3 {
		metricPrefix = true
	}

	if timeIndex == -1 {
		return fmt.Errorf("Found no column named %s", strings.Join(e.timeColumnNames, " or "))
	}

	fillMissing := query.Model.Get("fill").MustBool(false)
	var fillInterval float64
	fillValue := null.Float{}
	fillPrevious := false

	if fillMissing {
		fillInterval = query.Model.Get("fillInterval").MustFloat64() * 1000
		switch query.Model.Get("fillMode").MustString() {
		case "null":
		case "previous":
			fillPrevious = true
		case "value":
			fillValue.Float64 = query.Model.Get("fillValue").MustFloat64()
			fillValue.Valid = true
		}
	}

	for rows.Next() {
		var timestamp float64
		var value null.Float
		var metric string

		if rowCount > rowLimit {
			return fmt.Errorf("query row limit exceeded, limit %d", rowLimit)
		}

		values, err := e.rowTransformer.Transform(columnTypes, rows)
		if err != nil {
			return err
		}

		// converts column named time to unix timestamp in milliseconds to make
		// native mysql datetime types and epoch dates work in
		// annotation and table queries.
		ConvertSqlTimeColumnToEpochMs(values, timeIndex)

		switch columnValue := values[timeIndex].(type) {
		case int64:
			timestamp = float64(columnValue)
		case float64:
			timestamp = columnValue
		default:
			return fmt.Errorf("Invalid type for column time, must be of type timestamp or unix timestamp, got: %T %v", columnValue, columnValue)
		}

		if metricIndex >= 0 {
			if columnValue, ok := values[metricIndex].(string); ok {
				if metricPrefix {
					metricPrefixValue = columnValue
				} else {
					metric = columnValue
				}
			} else {
				return fmt.Errorf("Column metric must be of type %s. metric column name: %s type: %s but datatype is %T", strings.Join(e.metricColumnTypes, ", "), columnNames[metricIndex], columnTypes[metricIndex].DatabaseTypeName(), values[metricIndex])
			}
		}

		for i, col := range columnNames {
			if i == timeIndex || i == metricIndex {
				continue
			}

			if value, err = ConvertSqlValueColumnToFloat(col, values[i]); err != nil {
				return err
			}

			if metricIndex == -1 {
				metric = col
			} else if metricPrefix {
				metric = metricPrefixValue + " " + col
			}

			series, exist := pointsBySeries[metric]
			if !exist {
				series = &TimeSeries{Name: metric}
				pointsBySeries[metric] = series
				seriesByQueryOrder.PushBack(metric)
			}

			if fillMissing {
				var intervalStart float64
				if !exist {
					intervalStart = float64(tsdbQuery.TimeRange.MustGetFrom().UnixNano() / 1e6)
				} else {
					intervalStart = series.Points[len(series.Points)-1][1].Float64 + fillInterval
				}

				if fillPrevious {
					if len(series.Points) > 0 {
						fillValue = series.Points[len(series.Points)-1][0]
					} else {
						fillValue.Valid = false
					}
				}

				// align interval start
				intervalStart = math.Floor(intervalStart/fillInterval) * fillInterval

				for i := intervalStart; i < timestamp; i += fillInterval {
					series.Points = append(series.Points, TimePoint{fillValue, null.FloatFrom(i)})
					rowCount++
				}
			}

			series.Points = append(series.Points, TimePoint{value, null.FloatFrom(timestamp)})

			e.log.Debug("Rows", "metric", metric, "time", timestamp, "value", value)
		}
	}

	for elem := seriesByQueryOrder.Front(); elem != nil; elem = elem.Next() {
		key := elem.Value.(string)
		result.Series = append(result.Series, pointsBySeries[key])

		if fillMissing {
			series := pointsBySeries[key]
			// fill in values from last fetched value till interval end
			intervalStart := series.Points[len(series.Points)-1][1].Float64
			intervalEnd := float64(tsdbQuery.TimeRange.MustGetTo().UnixNano() / 1e6)

			if fillPrevious {
				if len(series.Points) > 0 {
					fillValue = series.Points[len(series.Points)-1][0]
				} else {
					fillValue.Valid = false
				}
			}

			// align interval start
			intervalStart = math.Floor(intervalStart/fillInterval) * fillInterval

			for i := intervalStart + fillInterval; i < intervalEnd; i += fillInterval {
				series.Points = append(series.Points, TimePoint{fillValue, null.FloatFrom(i)})
				rowCount++
			}
		}
	}

	result.Meta.Set("rowCount", rowCount)
	return nil
}
// ConvertSqlTimeColumnToEpochMs converts column named time to unix timestamp in milliseconds
// to make native datetime types and epoch dates work in annotation and table queries.
func ConvertSqlTimeColumnToEpochMs(values RowValues, timeIndex int) {
	if timeIndex >= 0 {
		switch value := values[timeIndex].(type) {
		case time.Time:
			values[timeIndex] = float64(value.UnixNano()) / float64(time.Millisecond)
		case *time.Time:
			if value != nil {
				values[timeIndex] = float64((*value).UnixNano()) / float64(time.Millisecond)
			}
		case int64:
			values[timeIndex] = int64(EpochPrecisionToMs(float64(value)))
		case *int64:
			if value != nil {
				values[timeIndex] = int64(EpochPrecisionToMs(float64(*value)))
			}
		case uint64:
			values[timeIndex] = int64(EpochPrecisionToMs(float64(value)))
		case *uint64:
			if value != nil {
				values[timeIndex] = int64(EpochPrecisionToMs(float64(*value)))
			}
		case int32:
			values[timeIndex] = int64(EpochPrecisionToMs(float64(value)))
		case *int32:
			if value != nil {
				values[timeIndex] = int64(EpochPrecisionToMs(float64(*value)))
			}
		case uint32:
			values[timeIndex] = int64(EpochPrecisionToMs(float64(value)))
		case *uint32:
			if value != nil {
				values[timeIndex] = int64(EpochPrecisionToMs(float64(*value)))
			}
		case float64:
			values[timeIndex] = EpochPrecisionToMs(value)
		case *float64:
			if value != nil {
				values[timeIndex] = EpochPrecisionToMs(*value)
			}
		case float32:
			values[timeIndex] = EpochPrecisionToMs(float64(value))
		case *float32:
			if value != nil {
				values[timeIndex] = EpochPrecisionToMs(float64(*value))
			}
		}
	}
}
// ConvertSqlValueColumnToFloat converts timeseries value column to float.
func ConvertSqlValueColumnToFloat(columnName string, columnValue interface{}) (null.Float, error) {
	var value null.Float

	switch typedValue := columnValue.(type) {
	case int:
		value = null.FloatFrom(float64(typedValue))
	case *int:
		if typedValue == nil {
			value.Valid = false
		} else {
			value = null.FloatFrom(float64(*typedValue))
		}
	case int64:
		value = null.FloatFrom(float64(typedValue))
	case *int64:
		if typedValue == nil {
			value.Valid = false
		} else {
			value = null.FloatFrom(float64(*typedValue))
		}
	case int32:
		value = null.FloatFrom(float64(typedValue))
	case *int32:
		if typedValue == nil {
			value.Valid = false
		} else {
			value = null.FloatFrom(float64(*typedValue))
		}
	case int16:
		value = null.FloatFrom(float64(typedValue))
	case *int16:
		if typedValue == nil {
			value.Valid = false
		} else {
			value = null.FloatFrom(float64(*typedValue))
		}
	case int8:
		value = null.FloatFrom(float64(typedValue))
	case *int8:
		if typedValue == nil {
			value.Valid = false
		} else {
			value = null.FloatFrom(float64(*typedValue))
		}
	case uint:
		value = null.FloatFrom(float64(typedValue))
	case *uint:
		if typedValue == nil {
			value.Valid = false
		} else {
			value = null.FloatFrom(float64(*typedValue))
		}
	case uint64:
		value = null.FloatFrom(float64(typedValue))
	case *uint64:
		if typedValue == nil {
			value.Valid = false
		} else {
			value = null.FloatFrom(float64(*typedValue))
		}
	case uint32:
		value = null.FloatFrom(float64(typedValue))
	case *uint32:
		if typedValue == nil {
			value.Valid = false
		} else {
			value = null.FloatFrom(float64(*typedValue))
		}
	case uint16:
		value = null.FloatFrom(float64(typedValue))
	case *uint16:
		if typedValue == nil {
			value.Valid = false
		} else {
			value = null.FloatFrom(float64(*typedValue))
		}
	case uint8:
		value = null.FloatFrom(float64(typedValue))
	case *uint8:
		if typedValue == nil {
			value.Valid = false
		} else {
			value = null.FloatFrom(float64(*typedValue))
		}
	case float64:
		value = null.FloatFrom(typedValue)
	case *float64:
		value = null.FloatFromPtr(typedValue)
	case float32:
		value = null.FloatFrom(float64(typedValue))
	case *float32:
		if typedValue == nil {
			value.Valid = false
		} else {
			value = null.FloatFrom(float64(*typedValue))
		}
	case nil:
		value.Valid = false
	default:
		return null.NewFloat(0, false), fmt.Errorf("Value column must have numeric datatype, column: %s type: %T value: %v", columnName, typedValue, typedValue)
	}

	return value, nil
}
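
// SetupFillmode stores gap-filling settings on the query model: it enables
// fill, records the group-by interval in seconds, and interprets fillmode as
// "NULL", "previous", or a literal numeric fill value.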
func SetupFillmode(query *Query, interval time.Duration, fillmode string) error {
	query.Model.Set("fill", true)
	query.Model.Set("fillInterval", interval.Seconds())
	switch fillmode {
	case "NULL":
		query.Model.Set("fillMode", "null")
	case "previous":
		query.Model.Set("fillMode", "previous")
	default:
		query.Model.Set("fillMode", "value")
		floatVal, err := strconv.ParseFloat(fillmode, 64)
		if err != nil {
			return fmt.Errorf("error parsing fill value %v", fillmode)
		}
		query.Model.Set("fillValue", floatVal)
	}

	return nil
}
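
// SqlMacroEngineBase provides helper methods shared by SqlMacroEngine
// implementations.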
type SqlMacroEngineBase struct{}

func NewSqlMacroEngineBase() *SqlMacroEngineBase {
	return &SqlMacroEngineBase{}
}
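
// ReplaceAllStringSubmatchFunc works like regexp.ReplaceAllStringFunc, but
// calls repl with the full list of submatch groups (groups[0] is the whole
// match), so replacements can be built from capture groups.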
func (m *SqlMacroEngineBase) ReplaceAllStringSubmatchFunc(re *regexp.Regexp, str string, repl func([]string) string) string {
	result := ""
	lastIndex := 0

	for _, v := range re.FindAllSubmatchIndex([]byte(str), -1) {
		groups := []string{}
		for i := 0; i < len(v); i += 2 {
			groups = append(groups, str[v[i]:v[i+1]])
		}

		result += str[lastIndex:v[0]] + repl(groups)
		lastIndex = v[1]
	}

	return result + str[lastIndex:]
}