From 1e5d12886463c2eba35054b88e8e980cdbc721f8 Mon Sep 17 00:00:00 2001 From: guoguozhenhaowan <877801999@qq.com> Date: Tue, 7 Mar 2023 06:28:51 +0000 Subject: [PATCH 01/87] fix(cmd_executor): fixed bug for shlex of py37 --- dbmind/common/cmd_executor.py | 139 +++++++++++++++++++++++++++++++++- 1 file changed, 138 insertions(+), 1 deletion(-) diff --git a/dbmind/common/cmd_executor.py b/dbmind/common/cmd_executor.py index 7b5fde4..d298425 100644 --- a/dbmind/common/cmd_executor.py +++ b/dbmind/common/cmd_executor.py @@ -19,6 +19,7 @@ import subprocess import threading import time import os +import sys import paramiko @@ -29,6 +30,139 @@ n_stdout = 1 n_stderr = 2 +class shlex_py38(shlex.shlex): + """ + Use the read_token function of shlex in py38 to replace that in py37 to + fix the incompatibility of punctuation_chars and whitespace_split in py37. + """ + + def read_token(self): + quoted = False + escapedstate = ' ' + while True: + if self.punctuation_chars and self._pushback_chars: + nextchar = self._pushback_chars.pop() + else: + nextchar = self.instream.read(1) + if nextchar == '\n': + self.lineno += 1 + if self.state is None: + self.token = '' # past end of file + break + elif self.state == ' ': + if not nextchar: + self.state = None # end of file + break + elif nextchar in self.whitespace: + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + elif nextchar in self.commenters: + self.instream.readline() + self.lineno += 1 + elif self.posix and nextchar in self.escape: + escapedstate = 'a' + self.state = nextchar + elif nextchar in self.wordchars: + self.token = nextchar + self.state = 'a' + elif nextchar in self.punctuation_chars: + self.token = nextchar + self.state = 'c' + elif nextchar in self.quotes: + if not self.posix: + self.token = nextchar + self.state = nextchar + elif self.whitespace_split: + self.token = nextchar + self.state = 'a' + else: + self.token = nextchar + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + elif self.state in self.quotes: + quoted = True + if not nextchar: # end of file + raise ValueError("No closing quotation") + if nextchar == self.state: + if not self.posix: + self.token += nextchar + self.state = ' ' + break + else: + self.state = 'a' + elif (self.posix and nextchar in self.escape and self.state + in self.escapedquotes): + escapedstate = self.state + self.state = nextchar + else: + self.token += nextchar + elif self.state in self.escape: + if not nextchar: # end of file + raise ValueError("No escaped character") + # In posix shells, only the quote itself or the escape + # character may be escaped within quotes. 
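+                # e.g. in POSIX mode "a\"b" tokenizes to a"b (the escape is
+                # consumed), while "a\b" keeps the backslash and yields a\b,
+                # because b is neither the quote nor the escape character.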
+ if (escapedstate in self.quotes and + nextchar != self.state and nextchar != escapedstate): + self.token += self.state + self.token += nextchar + self.state = escapedstate + elif self.state in ('a', 'c'): + if not nextchar: + self.state = None # end of file + break + elif nextchar in self.whitespace: + self.state = ' ' + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + elif nextchar in self.commenters: + self.instream.readline() + self.lineno += 1 + if self.posix: + self.state = ' ' + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + elif self.state == 'c': + if nextchar in self.punctuation_chars: + self.token += nextchar + else: + if nextchar not in self.whitespace: + self._pushback_chars.append(nextchar) + self.state = ' ' + break + elif self.posix and nextchar in self.quotes: + self.state = nextchar + elif self.posix and nextchar in self.escape: + escapedstate = 'a' + self.state = nextchar + elif (nextchar in self.wordchars or nextchar in self.quotes + or (self.whitespace_split and + nextchar not in self.punctuation_chars)): + self.token += nextchar + else: + if self.punctuation_chars: + self._pushback_chars.append(nextchar) + else: + self.pushback.appendleft(nextchar) + self.state = ' ' + if self.token or (self.posix and quoted): + break # emit current token + else: + continue + result = self.token + self.token = '' + if self.posix and not quoted and result == '': + result = None + return result + + def bytes2text(bs): """ Converts bytes (or array-like of bytes) to text. @@ -302,7 +436,10 @@ def to_cmds(cmdline): escaped = '\\' def get_separators(s): - lex = shlex.shlex(s, punctuation_chars=True) + if sys.version_info < (3, 8): + lex = shlex_py38(s, punctuation_chars=True) + else: + lex = shlex.shlex(s, punctuation_chars=True) lex.whitespace_split = True tokens = list(lex) real_tokens = [] -- Gitee From 9c77a82229375069e593435c812ba9d65bed043b Mon Sep 17 00:00:00 2001 From: LK Date: Wed, 8 Mar 2023 03:16:18 +0000 Subject: [PATCH 02/87] feat(dbmind): support increment detection and update metrics * Add and refine metrics to support DBMind front-end; * Add API to provide usage size of database data directory; * Add API to get the latest sequences for metrics. 
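
An illustrative sketch of calling the new latest-sequence API (hedged: the
host, port, bearer token and label values below are placeholders, not part
of this patch; only the route and query parameters come from
dbmind/controllers/dbmind_core.py):

    import requests  # any HTTP client works; requests is only an example

    resp = requests.get(
        'http://127.0.0.1:8080/api/latest-sequence/os_disk_usage',  # assumed DBMind web address
        params={'instance': '127.0.0.1:6789',  # 'host:port' of the monitored instance
                'latest_minutes': 5,
                'fetch_all': True,             # return every matching sequence
                'labels': 'mountpoint=/'},     # filter format: 'key1=val1, key2=val2'
        headers={'Authorization': 'Bearer <token>'},  # endpoints require oauth2 token authentication
    )
    print(resp.json())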
--- .../diagnosis/query/slow_sql/query_feature.py | 7 +- .../query/slow_sql/query_info_source.py | 12 - dbmind/common/types/__init__.py | 2 +- dbmind/components/memory_check.py | 2 +- .../reprocessing_exporter.yml | 260 +++++++++++++----- dbmind/controllers/dbmind_core.py | 16 ++ dbmind/misc/metric_map.conf | 15 +- dbmind/service/dai.py | 24 +- dbmind/service/web.py | 53 +++- 9 files changed, 292 insertions(+), 99 deletions(-) diff --git a/dbmind/app/diagnosis/query/slow_sql/query_feature.py b/dbmind/app/diagnosis/query/slow_sql/query_feature.py index 9c690c2..c1edcdc 100644 --- a/dbmind/app/diagnosis/query/slow_sql/query_feature.py +++ b/dbmind/app/diagnosis/query/slow_sql/query_feature.py @@ -613,16 +613,13 @@ class QueryFeature: """Determine whether the current network packet loss is serious.""" node_network_transmit_drop = self.network_info.transmit_drop node_network_receive_drop = self.network_info.receive_drop - node_network_transmit_packets = self.network_info.transmit_packets - node_network_receive_packets = self.network_info.receive_packets if isinstance(node_network_transmit_drop, (list, tuple)) and \ isinstance(node_network_receive_drop, (list, tuple)): if _existing_spike(node_network_receive_drop) or _existing_spike(node_network_transmit_drop): self.detail['network_drop'] = "Found a positive spike in NETWORK-PACKET-DROP" else: - if node_network_receive_drop / node_network_receive_packets >= \ - monitoring.get_param('package_drop_rate_threshold') or \ - node_network_transmit_drop / node_network_transmit_packets >= \ + if node_network_receive_drop >= monitoring.get_param('package_drop_rate_threshold') or \ + node_network_transmit_drop >= \ monitoring.get_param('package_drop_rate_threshold'): self.detail['network_drop'] = "The current network packet loss rate is abnormal" if self.detail.get('network_drop'): diff --git a/dbmind/app/diagnosis/query/slow_sql/query_info_source.py b/dbmind/app/diagnosis/query/slow_sql/query_info_source.py index 554de7e..f0d08fc 100644 --- a/dbmind/app/diagnosis/query/slow_sql/query_info_source.py +++ b/dbmind/app/diagnosis/query/slow_sql/query_info_source.py @@ -724,22 +724,10 @@ class QueryContextFromTSDBAndRPC(QueryContext): self.query_start_time, self.query_end_time).from_server( f"{self.slow_sql_instance.db_host}").fetchone() - node_network_receive_packets_info = dai.get_metric_sequence('os_network_receive_packets', - self.query_start_time, - self.query_end_time).from_server( - f"{self.slow_sql_instance.db_host}").fetchone() - node_network_transmit_packets_info = dai.get_metric_sequence('os_network_transmit_packets', - self.query_start_time, - self.query_end_time).from_server( - f"{self.slow_sql_instance.db_host}").fetchone() if is_sequence_valid(node_network_receive_drop_info): network_info.receive_drop = _get_sequence_first_value(node_network_receive_drop_info, precision=4) if is_sequence_valid(node_network_transmit_drop_info): network_info.transmit_drop = _get_sequence_first_value(node_network_transmit_drop_info, precision=4) - if is_sequence_valid(node_network_receive_packets_info): - network_info.receive_packets = _get_sequence_first_value(node_network_receive_packets_info, precision=4) - if is_sequence_valid(node_network_transmit_packets_info): - network_info.transmit_packets = _get_sequence_first_value(node_network_transmit_packets_info, precision=4) return network_info diff --git a/dbmind/common/types/__init__.py b/dbmind/common/types/__init__.py index 54de800..0c0ef97 100644 --- a/dbmind/common/types/__init__.py +++ 
b/dbmind/common/types/__init__.py @@ -14,5 +14,5 @@ from .alarm import Alarm from .enums import ALARM_LEVEL, ALARM_TYPES, ANOMALY_TYPES from .misc import Log from .root_cause import RootCause -from .sequence import Sequence +from .sequence import Sequence, EMPTY_SEQUENCE from .ssl import SSLContext diff --git a/dbmind/components/memory_check.py b/dbmind/components/memory_check.py index f4c818d..b0b0df8 100644 --- a/dbmind/components/memory_check.py +++ b/dbmind/components/memory_check.py @@ -28,7 +28,7 @@ from dbmind.app.monitoring.generic_detection import AnomalyDetections from dbmind.cmd.edbmind import init_global_configs from dbmind.common.opengauss_driver import Driver from dbmind.common.types import Sequence -from dbmind.common.types.sequence import EMPTY_SEQUENCE +from dbmind.common.types import EMPTY_SEQUENCE from dbmind.common.utils.checking import path_type, date_type from dbmind.common.utils.cli import write_to_terminal from dbmind.common.utils import cached_property diff --git a/dbmind/components/reprocessing_exporter/reprocessing_exporter.yml b/dbmind/components/reprocessing_exporter/reprocessing_exporter.yml index fe69243..6bd530e 100644 --- a/dbmind/components/reprocessing_exporter/reprocessing_exporter.yml +++ b/dbmind/components/reprocessing_exporter/reprocessing_exporter.yml @@ -10,12 +10,12 @@ # EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, # MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. # See the Mulan PSL v2 for more details. - -os_cpu_iowait: +os_cpu_iowait_usage: desc: iowait promql: " label_replace( - (avg(irate(node_cpu_seconds_total{mode='iowait'}[5m])) by (job, instance)), + sum(irate(node_cpu_seconds_total{mode='iowait'}[3m])) by (job, instance) / + sum(irate(node_cpu_seconds_total[3m])) by (job, instance), 'instance', '$1', 'instance', '(.*):.*') " metrics: @@ -31,16 +31,40 @@ os_cpu_iowait: ttl: 10 timeout: 5 +os_cpu_system_usage: + desc: system + promql: " + label_replace( + sum(irate(node_cpu_seconds_total{mode='system'}[3m])) by (job, instance) / + sum(irate(node_cpu_seconds_total[3m])) by (job, instance), + 'instance', '$1', 'instance', '(.*):.*') + " + metrics: + - name: from_job + label: job + description: from job + usage: LABEL + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 10 + timeout: 5 -os_disk_iops: - desc: iops +os_cpu_user_usage: + desc: user promql: " label_replace( - sum by (instance) (irate(node_disk_reads_completed_total[3m])) + sum by (instance) - (irate(node_disk_writes_completed_total[3m])), + sum(irate(node_cpu_seconds_total{mode='user'}[3m])) by (job, instance) / + sum(irate(node_cpu_seconds_total[3m])) by (job, instance), 'instance', '$1', 'instance', '(.*):.*') " metrics: + - name: from_job + label: job + description: from job + usage: LABEL - name: from_instance label: instance description: from instance @@ -49,6 +73,49 @@ os_disk_iops: ttl: 10 timeout: 5 +os_cpu_idle_usage: + desc: idle + promql: " + label_replace( + sum(irate(node_cpu_seconds_total{mode='idle'}[3m])) by (job, instance) / + sum(irate(node_cpu_seconds_total[3m])) by (job, instance), + 'instance', '$1', 'instance', '(.*):.*') + " + metrics: + - name: from_job + label: job + description: from job + usage: LABEL + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 10 + timeout: 5 + +os_disk_iops: + desc: iops + promql: " + label_replace( + irate(node_disk_reads_completed_total[3m]) + 
irate(node_disk_writes_completed_total[3m]), + 'instance', '$1', 'instance', '(.*):.*') + " + metrics: + - name: device + label: device + description: device label + usage: LABEL + - name: from_job + label: job + description: from job + usage: LABEL + - name: from_instance + label: instance + description: from instance + usage: LABEL + status: enable + ttl: 10 os_disk_ioutils: desc: ioutils @@ -74,15 +141,22 @@ os_disk_ioutils: ttl: 10 timeout: 5 - -os_io_read_bytes: +os_disk_io_read_bytes: desc: io read bytes promql: " label_replace( - sum by (instance) (irate(node_disk_read_bytes_total[1m])) / 1024 / 1024, + irate(node_disk_read_bytes_total[3m]) / 1024 / 1024, 'instance', '$1', 'instance', '(.*):.*') " metrics: + - name: device + label: device + description: device label + usage: LABEL + - name: from_job + label: job + description: from job + usage: LABEL - name: from_instance label: instance description: from instance @@ -91,15 +165,22 @@ os_io_read_bytes: ttl: 10 timeout: 5 - -os_io_write_bytes: +os_disk_io_write_bytes: desc: io write bytes promql: " label_replace( - sum by (instance) (irate(node_disk_written_bytes_total[1m])) / 1024 / 1024, + irate(node_disk_written_bytes_total[3m]) / 1024 / 1024, 'instance', '$1', 'instance', '(.*):.*') " metrics: + - name: device + label: device + description: device label + usage: LABEL + - name: from_job + label: job + description: from job + usage: LABEL - name: from_instance label: instance description: from instance @@ -113,10 +194,18 @@ os_disk_iocapacity: desc: os_disk_iocapacity promql: " label_replace( - sum by (instance) (irate(node_disk_read_bytes_total[1m])) / 1024 / 1024 + sum by (instance) (irate(node_disk_written_bytes_total[1m])) / 1024 / 1024, + (irate(node_disk_read_bytes_total[3m]) / 1024 / 1024) + (irate(node_disk_written_bytes_total[3m]) / 1024 / 1024), 'instance', '$1', 'instance', '(.*):.*') " metrics: + - name: device + label: device + description: device label + usage: LABEL + - name: from_job + label: job + description: from job + usage: LABEL - name: from_instance label: instance description: from instance @@ -125,7 +214,6 @@ os_disk_iocapacity: ttl: 10 timeout: 5 - os_disk_usage: name: os_disk_usage desc: os_disk_usage @@ -159,13 +247,12 @@ os_disk_usage: ttl: 10 timeout: 5 - -os_io_queue_number: - name: os_io_queue_number +os_disk_io_queue_length: + name: io queue length desc: io queue number promql: " label_replace( - rate(node_disk_io_time_weighted_seconds_total[3m]), + irate(node_disk_io_time_weighted_seconds_total[3m]), 'instance', '$1', 'instance', '(.*):.*') " metrics: @@ -185,13 +272,12 @@ os_io_queue_number: ttl: 10 timeout: 5 - -os_io_read_delay_time: - name: os_io_read_delay_time +os_disk_io_read_delay: + name: io_read_delay_time desc: io read delay time promql: " label_replace( - rate(node_disk_read_time_seconds_total[3m]) / (rate(node_disk_reads_completed_total[3m]) + 0.00001) * 1000, + irate(node_disk_read_time_seconds_total[3m]) / (irate(node_disk_reads_completed_total[3m]) + 0.00001) * 1000, 'instance', '$1', 'instance', '(.*):.*') " metrics: @@ -207,17 +293,16 @@ os_io_read_delay_time: label: device description: device usage: LABEL - status: disable + status: enable ttl: 10 timeout: 5 - -os_io_write_delay_time: - name: os_io_write_delay_time +os_disk_io_write_delay: + name: io_write_delay_time desc: io write delay time promql: " label_replace( - rate(node_disk_write_time_seconds_total[3m]) / (rate(node_disk_writes_completed_total[3m]) + 0.00001) * 1000, + irate(node_disk_write_time_seconds_total[3m]) / 
(irate(node_disk_writes_completed_total[3m]) + 0.00001) * 1000,
       'instance', '$1', 'instance', '(.*):.*')
   "
   metrics:
@@ -233,12 +318,10 @@ os_io_write_delay_time:
       label: device
       description: device
       usage: LABEL
-
   status: enable
   ttl: 10
   timeout: 5
 
-
 os_cpu_processor_number:
   name: os_cpu_processor_number
   desc: os_cpu_processor_number
@@ -261,27 +344,6 @@ os_cpu_processor_number:
   timeout: 5
 
 
-os_cpu_usage:
-  name: os_cpu_usage
-  desc: used for CPU usage collection
-  promql: "
-    label_replace(
-      1 - (avg by(job, instance) (irate(node_cpu_seconds_total{mode='idle'}[3m]))),
-      'instance', '$1', 'instance', '(.*):.*')
-  "
-  metrics:
-    - name: from_job
-      label: job
-      description: from job
-      usage: LABEL
-    - name: from_instance
-      label: instance
-      description: from instance
-      usage: LABEL
-  status: enable
-  ttl: 1
-  timeout: 5
-
 gaussdb_cpu_usage:
   name: gaussdb_cpu_usage
   desc: used CPU usage only for GaussDB
@@ -358,7 +420,7 @@ gaussdb_errors_rate:
       label: from_instance
       description: from instance
       usage: LABEL
-  status: enable
+  status: disable
   ttl: 10
   timeout: 5
 
@@ -376,7 +438,7 @@ gaussdb_invalid_logins_rate:
       label: from_instance
       description: from instance
       usage: LABEL
-  status: enable
+  status: disable
   ttl: 10
   timeout: 5
 
@@ -399,6 +461,9 @@ gaussdb_dlp:
       label: from_instance
       description: from instance
       usage: LABEL
+  status: disable
+  ttl: 10
+  timeout: 5
 
 gaussdb_tup_fetched_rate:
   name: gaussdb_tup_fetched_rate
@@ -715,7 +780,7 @@ gaussdb_table_expansion_rate:
       label: from_instance
       description: from instance
       usage: LABEL
-  status: enable
+  status: disable
   ttl: 10
   timeout: 5
 
@@ -766,7 +831,7 @@ gaussdb_connections_used_ratio:
       label: from_instance
       description: from instance
       usage: LABEL
-  status: enable
+  status: disable
   ttl: 10
   timeout: 5
 
@@ -791,11 +856,12 @@ load_average1:
   timeout: 5
 
 # network
-os_network_receive_drop:
-  desc: os_network_receive_drop
+os_network_receive_error:
+  desc: os_network_receive_error
   promql: "
     label_replace(
-      sum(irate(node_network_receive_drop_total{device!~'bond.*?|lo'}[1m])) by (instance, job),
+      irate(node_network_receive_errs_total{device!~'bond.*?|lo'}[3m]) /
+      (irate(node_network_receive_packets_total{device!~'bond.*?|lo'}[3m]) > 0),
       'instance', '$1', 'instance', '(.*):.*')
   "
   metrics:
@@ -807,15 +873,43 @@ os_network_receive_drop:
       label: job
       description: from job
       usage: LABEL
+    - name: device
+      label: device
+      description: device
+      usage: LABEL
   status: enable
   ttl: 10
 
+os_network_transmit_error:
+  desc: os_network_transmit_error
+  promql: "
+    label_replace(
+      irate(node_network_transmit_errs_total{device!~'bond.*?|lo'}[3m]) /
+      (irate(node_network_transmit_packets_total{device!~'bond.*?|lo'}[3m]) > 0),
+      'instance', '$1', 'instance', '(.*):.*')
+  "
+  metrics:
+    - name: from_instance
+      label: instance
+      description: from instance
+      usage: LABEL
+    - name: from_job
+      label: job
+      description: from job
+      usage: LABEL
+    - name: device
+      label: device
+      description: device
+      usage: LABEL
+  status: enable
+  ttl: 10
 
 os_network_transmit_drop:
   desc: os_network_transmit_drop
   promql: "
     label_replace(
-      sum(irate(node_network_transmit_drop_total{device!~'bond.*?|lo'}[1m])) by (instance, job),
+      irate(node_network_transmit_drop_total{device!~'bond.*?|lo'}[3m]) /
+      (irate(node_network_transmit_packets_total{device!~'bond.*?|lo'}[3m]) > 0),
      'instance', '$1', 'instance', '(.*):.*')
   "
   metrics:
@@ -827,15 +921,43 @@ os_network_transmit_drop:
       label: job
       description: from job
       usage: LABEL
+    - name: device
+      label: device
+      description: device
+      usage: LABEL
+  status: enable
+  ttl: 10
+
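+# Note: the network error/drop metrics in this section are expressed as
+# ratios (errors or drops per received/transmitted packet) rather than raw
+# counter rates, which is why query_feature.py can now compare them directly
+# against the package_drop_rate_threshold parameter.
+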
+os_network_receive_drop: + desc: os_network_receive_drop + promql: " + label_replace( + irate(node_network_receive_drop_total{device!~'bond.*?|lo'}[3m]) / + (irate(node_network_receive_packets_total{device!~'bond.*?|lo'}[3m]) > 0), + 'instance', '$1', 'instance', '(.*):.*') + " + metrics: + - name: from_instance + label: instance + description: from instance + usage: LABEL + - name: from_job + label: job + description: from job + usage: LABEL + - name: device + label: device + description: device + usage: LABEL status: enable ttl: 10 -os_network_receive_packets: - desc: os_network_receive_packets +os_network_receive_bytes: + desc: os_network_receive_bytes promql: " label_replace( - sum(irate(node_network_receive_packets_total{device!~'bond.*?|lo'}[1m])) by (instance, job), + irate(node_network_receive_bytes_total{device!~'bond.*?|lo'}[3m]) / 1024 / 1024 > 0, 'instance', '$1', 'instance', '(.*):.*') " metrics: @@ -847,14 +969,18 @@ os_network_receive_packets: label: job description: from job usage: LABEL + - name: device + label: device + description: device + usage: LABEL status: enable ttl: 10 -os_network_transmit_packets: - desc: os_network_transmit_packets +os_network_transmit_bytes: + desc: os_network_transmit_bytes promql: " label_replace( - sum(irate(node_network_transmit_packets_total{device!~'bond.*?|lo'}[1m])) by (instance, job), + irate(node_network_transmit_bytes_total{device!~'bond.*?|lo'}[3m]) /1024 / 1024 > 0, 'instance', '$1', 'instance', '(.*):.*') " metrics: @@ -866,6 +992,10 @@ os_network_transmit_packets: label: job description: from job usage: LABEL + - name: device + label: device + description: device + usage: LABEL status: enable ttl: 10 diff --git a/dbmind/controllers/dbmind_core.py b/dbmind/controllers/dbmind_core.py index d9cf6bb..9bb1162 100644 --- a/dbmind/controllers/dbmind_core.py +++ b/dbmind/controllers/dbmind_core.py @@ -159,6 +159,15 @@ def get_metric_sequence(name, start: int = None, end: int = None, step: int = No return web.get_metric_sequence(name, start, end, step) +@request_mapping('/api/latest-sequence/{name}', methods=['GET'], api=True) +@oauth2.token_authentication() +@standardized_api_output +def get_latest_metric_sequence(instance, name, latest_minutes: int = None, + step: int = None, fetch_all: bool = False, labels: str = None): + return web.get_latest_metric_sequence(instance, name, latest_minutes, + step=step, fetch_all=fetch_all, labels=labels) + + @request_mapping('/api/alarm/history', methods=['GET'], api=True) @oauth2.token_authentication() @standardized_api_output @@ -576,3 +585,10 @@ def risk_analysis(metric, instance, warning_hours: int = 0, upper: int = 0, lowe @standardized_api_output def get_collection_system_status(): return web.get_collection_system_status() + + +@request_mapping('/api/app/data-directory/growth-rate', methods=['GET'], api=True) +@oauth2.token_authentication() +@standardized_api_output +def get_database_data_directory_growth_rate(instance: str, latest_minutes: int = 5): + return web.get_database_data_directory_growth_rate(instance, latest_minutes) diff --git a/dbmind/misc/metric_map.conf b/dbmind/misc/metric_map.conf index bdc5d55..bbff3a3 100644 --- a/dbmind/misc/metric_map.conf +++ b/dbmind/misc/metric_map.conf @@ -300,19 +300,22 @@ node_vmstat_pgpgin = node_vmstat_pgpgin node_vmstat_pgpgout = node_vmstat_pgpgout node_vmstat_pswpin = node_vmstat_pswpin node_vmstat_pswpout = node_vmstat_pswpout -os_cpu_iowait = os_cpu_iowait +os_cpu_iowait = os_cpu_iowait_usage os_cpu_processor_number = os_cpu_processor_number 
-os_cpu_usage = os_cpu_usage +os_cpu_usage = os_cpu_user_usage +os_cpu_system_usage = os_cpu_system_usage +os_cpu_idle_usage = os_cpu_idle_usage os_disk_iops = os_disk_iops os_disk_ioutils = os_disk_ioutils os_disk_usage = os_disk_usage -os_io_queue_number = os_io_queue_number -os_io_write_delay_time = os_io_write_delay_time +os_disk_io_queue_length = os_disk_io_queue_length +os_disk_io_write_delay = os_disk_io_write_delay +os_disk_io_read_delay = os_disk_io_read_delay os_mem_usage = os_mem_usage os_network_receive_drop = os_network_receive_drop -os_network_receive_packets = os_network_receive_packets +os_network_receive_bytes = os_network_receive_bytes os_network_transmit_drop = os_network_transmit_drop -os_network_transmit_packets = os_network_transmit_packets +os_network_transmit_bytes = os_network_transmit_bytes os_process_fds_rate = os_process_fds_rate pg_cpu_load_total_cpu = pg_cpu_load_total_cpu pg_database_all_size = pg_database_all_size diff --git a/dbmind/service/dai.py b/dbmind/service/dai.py index 27291b4..456503b 100644 --- a/dbmind/service/dai.py +++ b/dbmind/service/dai.py @@ -28,7 +28,7 @@ from dbmind.common.dispatcher.task_worker import get_mp_sync_manager from dbmind.common.platform import LINUX from dbmind.common.sequence_buffer import SequenceBufferPool from dbmind.common.tsdb import TsdbClientFactory -from dbmind.common.types import Sequence +from dbmind.common.types import Sequence, EMPTY_SEQUENCE from dbmind.common.utils import dbmind_assert from dbmind.metadatabase import dao from dbmind.service.utils import SequenceUtils @@ -664,3 +664,25 @@ def is_driver_result_valid(s): if isinstance(s, list) and len(s) > 0: return True return False + + +def get_database_data_directory_usage(instance, latest_minutes): + # get the size of the database data directory + data_directory_sequence = get_latest_metric_sequence('pg_node_info_uptime', latest_minutes).\ + from_server(instance).fetchone() + if not is_sequence_valid(data_directory_sequence): + return EMPTY_SEQUENCE + data_directory = data_directory_sequence.labels.get('datapath') + instance_with_no_port = instance.split(':')[0] + os_disk_usage_sequences = get_latest_metric_sequence('os_disk_usage', latest_minutes).\ + from_server(instance_with_no_port).fetchall() + if not is_sequence_valid(os_disk_usage_sequences): + return EMPTY_SEQUENCE + for sequence in os_disk_usage_sequences: + if not sequence.values: + continue + mount_point = sequence.labels.get('mountpoint') + # avoid mismatched, for example: mount_point is '/', data path is '/media/sdb/opengauss/data/dn' + if mount_point != '/' and mount_point in data_directory: + return sequence + return EMPTY_SEQUENCE diff --git a/dbmind/service/web.py b/dbmind/service/web.py index 99460d6..53e47f3 100644 --- a/dbmind/service/web.py +++ b/dbmind/service/web.py @@ -43,6 +43,7 @@ from dbmind.components.memory_check import memory_check from dbmind.common.utils import dbmind_assert, string_to_dict from dbmind.common.dispatcher import TimedTaskManager from dbmind.components.forecast import early_warning +from dbmind.common.algorithm.anomaly_detection.gradient_detector import linear_fitting from . 
import dai @@ -114,7 +115,7 @@ def _override_fetchone(self): return Sequence() -def get_metric_sequence_internal(metric_name, from_timestamp=None, to_timestamp=None, step=None): +def get_metric_sequence_internal(metric_name, from_timestamp=None, to_timestamp=None, step=None, instance=None): """Timestamps are microsecond level.""" if to_timestamp is None: to_timestamp = int(time.time() * 1000) @@ -123,9 +124,12 @@ def get_metric_sequence_internal(metric_name, from_timestamp=None, to_timestamp= from_datetime = datetime.datetime.fromtimestamp(from_timestamp / 1000) to_datetime = datetime.datetime.fromtimestamp(to_timestamp / 1000) fetcher = dai.get_metric_sequence(metric_name, from_datetime, to_datetime, step) - from_server_predicate = get_access_context(ACCESS_CONTEXT_NAME.TSDB_FROM_SERVERS_REGEX) - if from_server_predicate: - fetcher.from_server_like(from_server_predicate) + if instance is None: + from_server_predicate = get_access_context(ACCESS_CONTEXT_NAME.TSDB_FROM_SERVERS_REGEX) + if from_server_predicate: + fetcher.from_server_like(from_server_predicate) + else: + fetcher.from_server(instance) # monkeypatch trick setattr(fetcher, 'fetchall', lambda: _override_fetchall(fetcher)) @@ -134,12 +138,31 @@ def get_metric_sequence_internal(metric_name, from_timestamp=None, to_timestamp= def get_metric_sequence(metric_name, from_timestamp=None, to_timestamp=None, step=None): + # notes: 1) this method must ensure that the front-end and back-end time are consistent + # 2) this method will return the data of all nodes in the cluster, which is not friendly to some scenarios fetcher = get_metric_sequence_internal(metric_name, from_timestamp, to_timestamp, step) result = fetcher.fetchall() result.sort(key=lambda s: str(s.labels)) # Sorted by labels. return list(map(lambda s: s.jsonify(), result)) +def get_latest_metric_sequence(instance, metric_name, latest_minutes, step=None, fetch_all=False, labels=None): + # this function can actually be replaced by 'get_metric_sequence', but in order to avoid + # the hidden problems of that method, we add 'instance', 'fetch_all' and 'labels' to solve it + # notes: the format of labels is 'key1=val1, key2=val2, key3=val3, ...' + end_timestamp = int(time.time() * 1000) + start_timestamp = end_timestamp - latest_minutes * 60 * 1000 # transfer to ms + fetcher = get_metric_sequence_internal(metric_name, start_timestamp, end_timestamp, step=step, instance=instance) + if labels is not None: + labels = string_to_dict(labels, delimiter=',') + fetcher.filter(**labels) + if fetch_all: + result = fetcher.fetchall() + else: + result = [fetcher.fetchone()] + return list(map(lambda s: s.jsonify(), result)) + + def get_metric_value(metric_name): fetcher = dai.get_latest_metric_value(metric_name) from_server_predicate = get_access_context(ACCESS_CONTEXT_NAME.TSDB_FROM_SERVERS_REGEX) @@ -189,7 +212,6 @@ def get_metric_forecast_sequence(metric_name, from_timestamp=None, to_timestamp= return list(map(lambda _s: _s.jsonify(), future_sequences)) - def get_xact_status(): committed = get_metric_value('pg_db_xact_commit').fetchall() aborted = get_metric_value('pg_db_xact_rollback').fetchall() @@ -917,10 +939,14 @@ def get_killed_slow_queries_count(instance=None, query=None, start_time=None, en def get_slow_query_summary(pagesize=None, current=None): # Maybe multiple nodes, but we don't need to care. # Because that is an abnormal scenario. 
-    threshold = get_metric_value('pg_settings_setting') \
+    sequence = get_metric_value('pg_settings_setting') \
         .filter(name='log_min_duration_statement') \
-        .fetchone().values[0]
-
+        .fetchone()
+    # fix the error which occurs in the interface of DBMind when the sequence is invalid
+    if dai.is_sequence_valid(sequence):
+        threshold = sequence.values[-1]
+    else:
+        threshold = 'NaN'
     return {
         'nb_unique_slow_queries': dao.slow_queries.count_slow_queries(
             instance=get_access_context(ACCESS_CONTEXT_NAME.AGENT_INSTANCE_IP_WITH_PORT)),
@@ -1260,3 +1286,14 @@ def get_collection_system_status():
         suggestions = dai.diagnosis_exporter_status(detail['exporter'])
         detail['suggestions'] = suggestions
     return detail
+
+
+def get_database_data_directory_growth_rate(instance, latest_minutes):
+    # instance: address of the database, format is 'host:port'
+    # get the growth rate of disk usage where the database data directory is located
+    sequence = dai.get_database_data_directory_usage(instance, latest_minutes)
+    if not sequence.values:
+        return
+    growth_rate, _ = linear_fitting(list(range(0, len(sequence))), sequence.values)
+    return growth_rate
+
--
Gitee


From 3f37fcb3d8d2349ee95111afc0d1bb5131a0ec40 Mon Sep 17 00:00:00 2001
From: LK
Date: Thu, 9 Mar 2023 06:55:47 +0000
Subject: [PATCH 03/87] doc: add link for DBMind package in README.md

---
 README.md | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 0aa97d1..7ba8b37 100644
--- a/README.md
+++ b/README.md
@@ -55,9 +55,20 @@ source ~/.bashrc
 #### Option 2: Deploy with the installation package
 
 DBMind periodically publishes installation packages on the release page of the openGauss-DBMind project; DBMind can be installed and deployed by downloading such a package. The installer automatically extracts DBMind to the specified directory and configures the environment variables.
-The download address of the installation package is:
+The download addresses of the installation package and its checksum are:
 
-https://gitee.com/opengauss/openGauss-DBMind/releases
+| Name              | Download | Remarks |
+|-------------------|----------|---------|
+| DBMind X86        | [dbmind-installer-x86_64-python3.10.sh.tar.gz](https://opengauss.obs.cn-south-1.myhuaweicloud.com/dbmind/latest/x86/dbmind-installer-x86_64-python3.10.sh.tar.gz) | DBMind installer for the x86 architecture |
+| DBMind X86 SHA256 | [dbmind-installer-x86_64-python3.10.sh.tar.gz.sha256](https://opengauss.obs.cn-south-1.myhuaweicloud.com/dbmind/latest/x86/dbmind-installer-x86_64-python3.10.sh.tar.gz.sha256) | SHA256 checksum file for the DBMind x86 installer |
+| DBMind ARM        | [dbmind-installer-aarch64-python3.10.sh.tar.gz](https://opengauss.obs.cn-south-1.myhuaweicloud.com/dbmind/latest/arm/dbmind-installer-aarch64-python3.10.sh.tar.gz) | DBMind installer for the ARM architecture |
+| DBMind ARM SHA256 | [dbmind-installer-aarch64-python3.10.sh.tar.gz.sha256](https://opengauss.obs.cn-south-1.myhuaweicloud.com/dbmind/latest/arm/dbmind-installer-aarch64-python3.10.sh.tar.gz.sha256) | SHA256 checksum file for the DBMind ARM installer |
+
+Using the installation package:
+
+    Extract: tar zxvf dbmind-installer-x86_64-python3.10.sh.tar.gz
+
+    Install DBMind: sh dbmind-installer-x86_64-python3.10.sh
 
 #### About the Python runtime environment
--
Gitee


From f63838dd36a999e98ad6e7fe61ea37ea30aeb0b1 Mon Sep 17 00:00:00 2001
From: LK
Date: Thu, 9 Mar 2023 07:13:42 +0000
Subject: [PATCH 04/87] feat(dbmind): start inspections by default

---
 dbmind/common/dispatcher/task_scheduler.py | 22 +++++++++------
 dbmind/constants.py                        |  6 +++-
 dbmind/controllers/dbmind_core.py          | 25 ++--------------
 dbmind/service/web.py                      | 33 +---------------------
 4 files changed, 22 insertions(+), 64 deletions(-)

diff --git a/dbmind/common/dispatcher/task_scheduler.py b/dbmind/common/dispatcher/task_scheduler.py
index 
b259b08..03de67e 100644 --- a/dbmind/common/dispatcher/task_scheduler.py +++ b/dbmind/common/dispatcher/task_scheduler.py @@ -81,7 +81,8 @@ class _TimedTaskManager: if func.__name__ not in self.timers: self.timers[func.__name__] = RepeatedTimer(seconds, func) logging.info('Applied timed-task %s.', func.__name__) - logging.info("The timed-task %s has been already started.", func.__name__) + else: + logging.info("The timed-task %s has been already started.", func.__name__) self.task_table[func] = seconds def start(self, timed_task=None): @@ -104,7 +105,7 @@ class _TimedTaskManager: t.cancel() # remove from timer list self.timers.pop(timed_task) - logging.info("The time-task '%s' has been stopped.", timed_task) + logging.info("The timed-task '%s' has been stopped.", timed_task) def flush(self): # flush the timed_task which including: @@ -118,8 +119,12 @@ class _TimedTaskManager: logging.error("Timed-task '%s' not existed.", timed_task) else: func = global_vars.timed_task[timed_task]['object'] - seconds = global_vars.configs.getint('TIMED_TASK', f'{timed_task}_interval', - fallback=constants.TIMED_TASK_DEFAULT_INTERVAL) + if timed_task in (constants.DAILY_INSPECTION, + constants.WEEKLY_INSPECTION, constants.MONTHLY_INSPECTION): + seconds = global_vars.timed_task[timed_task].get('seconds') + else: + seconds = global_vars.configs.getint('TIMED_TASK', f'{timed_task}_interval', + fallback=constants.TIMED_TASK_DEFAULT_INTERVAL) if not self.check(timed_task): self.apply(func, seconds) self.start(timed_task) @@ -140,10 +145,11 @@ class _TimedTaskManager: def reset_interval(self, timed_task, seconds): # avoid task blocking caused by user interval setting too small, currently supported minimum interval is 30s seconds = minimal_timed_task_interval if seconds < minimal_timed_task_interval else seconds - t = self.timers.get(timed_task) - t.interval = seconds - # update running interval - global_vars.timed_task[timed_task]['seconds'] = seconds + if self.check(timed_task): + t = self.timers.get(timed_task) + t.interval = seconds + # update running interval + global_vars.timed_task[timed_task]['seconds'] = seconds def get_interval(self, timed_task): t = self.timers.get(timed_task) diff --git a/dbmind/constants.py b/dbmind/constants.py index 1309b47..457c04e 100644 --- a/dbmind/constants.py +++ b/dbmind/constants.py @@ -38,9 +38,13 @@ DBMIND_UI_DIRECTORY = os.path.join(DBMIND_PATH, '../ui/build') # User should not stop the 'DISCARD_EXPIRED_RESULTS' to avoid excessive disk usage. DISCARD_EXPIRED_RESULTS = 'discard_expired_results' ANOMALY_DETECTION_NAME = 'self_monitoring' +DAILY_INSPECTION = 'daily_inspection' +WEEKLY_INSPECTION = 'weekly_inspection' +MONTHLY_INSPECTION = 'monthly_inspection' # If the user does not provide a task run interval, the following default values will be used. TIMED_TASK_DEFAULT_INTERVAL = 24 * 60 * 60 -DEFAULT_TASK_NAMES = (ANOMALY_DETECTION_NAME, DISCARD_EXPIRED_RESULTS) +DEFAULT_TASK_NAMES = (ANOMALY_DETECTION_NAME, DISCARD_EXPIRED_RESULTS, + DAILY_INSPECTION, WEEKLY_INSPECTION, MONTHLY_INSPECTION) # Notice: 'DISTINGUISHING_INSTANCE_LABEL' is a magic string, i.e., our own name. 
# Thus, not all collection agents (such as Prometheus's openGauss-exporter) diff --git a/dbmind/controllers/dbmind_core.py b/dbmind/controllers/dbmind_core.py index 9bb1162..9a83a8c 100644 --- a/dbmind/controllers/dbmind_core.py +++ b/dbmind/controllers/dbmind_core.py @@ -538,35 +538,14 @@ def get_correlation_result(metric_name: str = None, host: str = None, start_time return web.get_correlation_result(metric_name, host, start_time, end_time) -@request_mapping('/api/toolkit/memory_check', methods=['POST'], api=True) +@request_mapping('/api/toolkit/memory_check', methods=['GET'], api=True) @oauth2.token_authentication() @standardized_api_output def memory_check(start_time: int = 0, end_time: int = 0): return web.check_memory_context(start_time, end_time) -@request_mapping('/api/app/start_timed_task', methods=['POST'], api=True) -@oauth2.token_authentication() -@standardized_api_output -def start_timed_app(funcname: str = None): - return web.start_timed_task(funcname) - - -@request_mapping('/api/app/stop_timed_task', methods=['POST'], api=True) -@oauth2.token_authentication() -@standardized_api_output -def start_timed_app(funcname: str = None): - return web.stop_timed_task(funcname) - - -@request_mapping('/api/app/reset_interval', methods=['POST'], api=True) -@oauth2.token_authentication() -@standardized_api_output -def reset_interval(funcname: str = None, seconds: int = 24 * 60 * 60): - return web.reset_timed_task_interval(funcname, seconds) - - -@request_mapping('/api/app/get_timed_task_status', methods=['GET'], api=True) +@request_mapping('/api/app/timed-task/status', methods=['GET'], api=True) @oauth2.token_authentication() @standardized_api_output def get_timed_task_status(): diff --git a/dbmind/service/web.py b/dbmind/service/web.py index 53e47f3..6b77734 100644 --- a/dbmind/service/web.py +++ b/dbmind/service/web.py @@ -25,7 +25,7 @@ from itertools import groupby import sqlparse -from dbmind import global_vars, constants +from dbmind import global_vars from dbmind.app.optimization import get_database_schemas, TemplateArgs from dbmind.app.optimization.index_recommendation import rpc_index_advise from dbmind.app.optimization.index_recommendation_rpc_executor import RpcExecutor @@ -1231,37 +1231,6 @@ def check_memory_context(start_time, end_time): return memory_check(start_time, end_time) -def start_timed_task(timed_task, seconds=None): - if timed_task not in global_vars.timed_task: - return {'state': 'failed', 'detail': f"The timed-task '{timed_task}' does not exist"} - if TimedTaskManager.check(timed_task): - return {'state': 'success', 'detail': f"The timed-task '{timed_task}' has already started"} - else: - func = global_vars.timed_task[timed_task]['object'] - seconds = constants.TIMED_TASK_DEFAULT_INTERVAL if seconds is None else seconds - TimedTaskManager.apply(func, seconds) - TimedTaskManager.start(timed_task) - return {'state': 'success', 'detail': f"The timed-task '{timed_task}' started successfully"} - - -def stop_timed_task(timed_task): - if timed_task not in global_vars.timed_task: - return {'state': 'fail', 'detail': f"The timed-task '{timed_task}' does not exist"} - if not TimedTaskManager.check(timed_task): - return {'state': 'fail', 'detail': f"The timed-task '{timed_task}' is not started"} - TimedTaskManager.stop(timed_task) - return {'state': 'success', 'detail': f"The timed-task '{timed_task}' stopped successfully"} - - -def reset_timed_task_interval(timed_task, seconds): - if timed_task not in global_vars.timed_task: - return {'state': 'fail', 'detail': f"The 
timed-task '{timed_task}' does not exist"}
-    if not TimedTaskManager.check(timed_task):
-        return {'state': 'fail', 'detail': f"The timed-task '{timed_task}' is not started"}
-    TimedTaskManager.stop(timed_task)
-    return {'state': 'success', 'detail': f"The timed-task '{timed_task}' stopped successfully"}
-
-
-def reset_timed_task_interval(timed_task, seconds):
-    if timed_task not in global_vars.timed_task:
-        return {'state': 'fail', 'detail': f"The timed-task '{timed_task}' does not exist"}
-    if not TimedTaskManager.check(timed_task):
-        return {'state': 'fail', 'detail': f"The timed-task '{timed_task}' is not started"}
-    TimedTaskManager.reset_interval(timed_task, seconds)
-    return {'state': 'success', 'detail': f"The interval of timed-task '{timed_task}' has been modified successfully"}
-
-
 def get_timed_task_status():
     detail = {}
     for timed_task, _ in global_vars.timed_task.items():
--
Gitee


From 00e0961ba116ca103ce325cbbfa20e568fc8592c Mon Sep 17 00:00:00 2001
From: yuchen886
Date: Thu, 9 Mar 2023 07:15:22 +0000
Subject: [PATCH 05/87] fix(increase_detector): automatically determine alpha

---
 .../anomaly_detection/increase_detector.py    | 46 ++++++++++++++-----
 tests/test_detector.py                        |  2 +-
 2 files changed, 35 insertions(+), 13 deletions(-)

diff --git a/dbmind/common/algorithm/anomaly_detection/increase_detector.py b/dbmind/common/algorithm/anomaly_detection/increase_detector.py
index bf3db32..7a767e0 100644
--- a/dbmind/common/algorithm/anomaly_detection/increase_detector.py
+++ b/dbmind/common/algorithm/anomaly_detection/increase_detector.py
@@ -17,6 +17,8 @@ import scipy.stats
 from ._abstract_detector import AbstractDetector
 from ...types import Sequence
 
+CDF_THRESHOLD = 1e-5
+
 
 class IncreaseDetector(AbstractDetector):
     """
@@ -45,34 +47,54 @@ class IncreaseDetector(AbstractDetector):
         alpha (float, optional): the significant level to accept the hypothesis that
             the data sequence has a trend. Defaults to 0.05.
     """
-    def __init__(self, side="positive", alpha=0.05):
+    def __init__(self, side="positive", alpha=None):
        self.side = side
         self.alpha = alpha
+        self.cdfs = None
 
-    def _fit(self, sequence: Sequence):
-        """Nothing to impl"""
+    def _fit(self, s: Sequence):
+        self.length = len(s.values)
+        self.half_n = int(self.length / 2)
+        if self.alpha is None:
+            self.cdfs = 2 * scipy.stats.binom.cdf(range(self.half_n + 1), self.length, 0.5)
+            idx = np.where(np.diff(self.cdfs) > CDF_THRESHOLD)[0][0]
+            self.alpha = self.cdfs[idx]
 
     def _predict(self, s: Sequence) -> Sequence:
         x, y = s.timestamps, s.values
         coef = np.polyfit(x, y, deg=1)[0]
-        half_n = int(len(y) / 2)
+
         n_pos = n_neg = 0
-        for i in range(half_n):
-            diff = y[i + half_n] - y[i]
+        for i in range(self.half_n):
+            diff = y[i + self.half_n] - y[i]
             if diff > 0:
                 n_pos += 1
             elif diff < 0:
                 n_neg += 1
-        n_diff = n_pos + n_neg
+
         if self.side == "positive":
-            p_value = 2 * scipy.stats.binom.cdf(n_neg, n_diff, 0.5)
+            if n_neg > n_pos:
+                return Sequence(timestamps=s.timestamps, values=[False] * self.length)
+
+            if self.cdfs is None:
+                p_value = 2 * scipy.stats.binom.cdf(n_neg, n_pos + n_neg, 0.5)
+            else:
+                p_value = self.cdfs[n_neg]
+
             if p_value < self.alpha and coef > 0:
-                return Sequence(timestamps=s.timestamps, values=[True] * len(y))
+                return Sequence(timestamps=s.timestamps, values=[True] * self.length)
         elif self.side == "negative":
-            p_value = 2 * scipy.stats.binom.cdf(n_pos, n_diff, 0.5)
+            if n_pos > n_neg:
+                return Sequence(timestamps=s.timestamps, values=[False] * self.length)
+
+            if self.cdfs is None:
+                p_value = 2 * scipy.stats.binom.cdf(n_pos, n_pos + n_neg, 0.5)
+            else:
+                p_value = self.cdfs[n_pos]
+
             if p_value < self.alpha and coef < 0:
-                return Sequence(timestamps=s.timestamps, values=[True] * len(y))
+                return Sequence(timestamps=s.timestamps, values=[True] * self.length)
 
-        return Sequence(timestamps=s.timestamps, values=[False] * len(y))
+        return Sequence(timestamps=s.timestamps, values=[False] * self.length)
diff --git a/tests/test_detector.py b/tests/test_detector.py
index 1657ce2..fcf64d9 100644
---
a/tests/test_detector.py
+++ b/tests/test_detector.py
@@ -465,7 +465,7 @@ def test_increase_detector():
     # test case: the increasing sequence.
     input_data = [i * 0.05 + 0.7 * random.randint(1, 5) for i in list(range(50))]
     raw_data = Sequence(timestamps=list(range(len(input_data))), values=input_data)
-    detector = anomaly_detection.IncreaseDetector(side="positive", alpha=0.05)
+    detector = anomaly_detection.IncreaseDetector(side="positive")
     res = detector.fit_predict(raw_data)
     correct_data = (True,) * 50
     assert res.values == correct_data
--
Gitee


From fe26b8f04edb20ec8206a74f29af676d82ef9a7c Mon Sep 17 00:00:00 2001
From: yuchen886
Date: Thu, 9 Mar 2023 09:08:22 +0000
Subject: [PATCH 06/87] fix(cmd_exporter): add a port to instance name

---
 dbmind/components/cmd_exporter/yamls/default.yml | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/dbmind/components/cmd_exporter/yamls/default.yml b/dbmind/components/cmd_exporter/yamls/default.yml
index 80a8f8d..ee3f55c 100644
--- a/dbmind/components/cmd_exporter/yamls/default.yml
+++ b/dbmind/components/cmd_exporter/yamls/default.yml
@@ -25,7 +25,7 @@ gaussdb_cluster:
-  query: "cm_ctl query -Cvi"
+  query: "cm_ctl query -Cvip"
   timeout: 5
   metrics:
     - state:
@@ -33,11 +33,15 @@ gaussdb_cluster:
         usage: "GAUGE"
         description: "cluster state, 0 meaning abnormal and 1 meaning normal"
     - primary:
-        subquery: grep -E "Primary" | grep -v "|" | awk '{print $3}'
+        subquery: grep -E "P Primary" | sed 's/|/\n/g' | grep -E "Primary" | awk '{printf "%s:%s\n",$3,$5}'
         usage: "LABEL"
         description: "primary node list"
     - standby:
-        subquery: grep -E "Standby" | grep -v "|" | awk '{print $3}' | sed -z 's/\n/,/g;s/,$/\n/g'
+        subquery: grep -E "P Primary" | sed 's/|/\n/g' | grep -E "Standby" | awk '{printf "%s:%s\n",$3,$5}' | sed -z 's/\n/,/g'
         usage: "LABEL"
         description: "standby node list"
+    - normal:
+        subquery: grep -E "P Primary" | sed 's/|/\n/g' | grep -E "Normal" | awk '{printf "%s:%s\n",$3,$5}' | sed -z 's/\n/,/g'
+        usage: "LABEL"
+        description: "normal node list"
--
Gitee


From f7ce599b8a84b5e84f82b1658699fc6109214c81 Mon Sep 17 00:00:00 2001
From: yuchen886
Date: Fri, 10 Mar 2023 09:39:41 +0000
Subject: [PATCH 07/87] fix(anomaly_analysis): add multi-processing to
 anomaly_analysis

---
 dbmind/components/anomaly_analysis.py |  1 +
 dbmind/service/web.py                 | 58 +++++++++++++++++++--------
 2 files changed, 43 insertions(+), 16 deletions(-)

diff --git a/dbmind/components/anomaly_analysis.py b/dbmind/components/anomaly_analysis.py
index 0a1e893..851b6ec 100644
--- a/dbmind/components/anomaly_analysis.py
+++ b/dbmind/components/anomaly_analysis.py
@@ -65,6 +65,7 @@ def get_sequences(arg):
             raise ValueError(f"Invalid instance: {instance}.")
         else:
             raise
+
     step = dai.get_metric_sequence(metric, start_datetime, end_datetime).step / 1000
     start_time = datetime.timestamp(start_datetime)
     end_time = datetime.timestamp(end_datetime)
diff --git a/dbmind/service/web.py b/dbmind/service/web.py
index 6b77734..82a29f2 100644
--- a/dbmind/service/web.py
+++ b/dbmind/service/web.py
@@ -38,7 +38,7 @@ from dbmind.components.sql_rewriter.sql_rewriter import rewrite_sql_api
 from dbmind.metadatabase import dao
 from dbmind.service.utils import SequenceUtils
 from dbmind.common.tsdb import TsdbClientFactory
-from dbmind.components.anomaly_analysis import single_process_correlation_calculation
+from dbmind.components.anomaly_analysis import get_sequences, get_correlations
 from dbmind.components.memory_check import memory_check
 from dbmind.common.utils import dbmind_assert, 
string_to_dict from dbmind.common.dispatcher import TimedTaskManager @@ -455,11 +455,11 @@ def toolkit_recommend_knobs_by_metrics(metric_pagesize, metric_current, offset=warning_offset, limit=warning_limit), field_names=('instance', 'level', 'comment') ) - + details = sqlalchemy_query_jsonify( dao.knob_recommendation.select_details( instance=get_access_context(ACCESS_CONTEXT_NAME.AGENT_INSTANCE_IP_WITH_PORT), - offset=knob_offset, limit=knob_limit), + offset=knob_offset, limit=knob_limit), field_names=('instance', 'name', 'current', 'recommend', 'min', 'max') ) return {"metric_snapshot": metric_snapshot, "warnings": warnings, "details": details} @@ -473,7 +473,7 @@ def get_knob_recommendation_snapshot(pagesize, current): offset=offset, limit=limit), field_names=('instance', 'metric', 'value')) - + def get_knob_recommendation_snapshot_count(): return dao.knob_recommendation.count_metric_snapshot( get_access_context(ACCESS_CONTEXT_NAME.AGENT_INSTANCE_IP_WITH_PORT)) @@ -620,7 +620,7 @@ def get_existing_indexes(pagesize, current): limit = pagesize filenames = ['db_name', 'tb_name', 'columns', 'index_stmt'] return sqlalchemy_query_jsonify(dao.index_recommendation.get_existing_indexes( - instance=get_access_context(ACCESS_CONTEXT_NAME.AGENT_INSTANCE_IP_WITH_PORT), + instance=get_access_context(ACCESS_CONTEXT_NAME.AGENT_INSTANCE_IP_WITH_PORT), offset=offset, limit=limit), filenames) @@ -813,7 +813,7 @@ def get_history_alarms(pagesize=None, current=None, instance=None, alarm_type=No return _sqlalchemy_query_union_records_logic( query_function=dao.alarms.select_history_alarm, instances=instances, - offset=offset, limit=limit, + offset=offset, limit=limit, alarm_type=alarm_type, alarm_level=alarm_level, group=group ) @@ -839,7 +839,7 @@ def get_future_alarms(pagesize=None, current=None, instance=None, metric_name=No return _sqlalchemy_query_union_records_logic( query_function=dao.alarms.select_future_alarm, instances=instances, - offset=offset, limit=limit, + offset=offset, limit=limit, metric_name=metric_name, start_at=start_at, group=group ) @@ -869,7 +869,7 @@ def get_healing_info(pagesize=None, current=None, instance=None, action=None, su return _sqlalchemy_query_union_records_logic( query_function=dao.healing_records.select_healing_records, instances=instances, - offset=offset, limit=limit, + offset=offset, limit=limit, action=action, success=success, min_occurrence=min_occurrence ) @@ -894,7 +894,7 @@ def get_slow_queries(pagesize=None, current=None, instance=None, query=None, sta limit = pagesize return _sqlalchemy_query_union_records_logic( query_function=dao.slow_queries.select_slow_queries, - instances=instances, only_with_port=True, + instances=instances, only_with_port=True, target_list=(), query=query, start_time=start_time, end_time=end_time, offset=offset, limit=limit, group=group ) @@ -906,8 +906,8 @@ def get_slow_queries_count(instance=None, distinct=False, query=None, start_time instances = None return _sqlalchemy_query_records_count_logic( count_function=dao.slow_queries.count_slow_queries, - instances=instances, only_with_port=True, - distinct=distinct, query=query, + instances=instances, only_with_port=True, + distinct=distinct, query=query, start_time=start_time, end_time=end_time, group=group) @@ -920,7 +920,7 @@ def get_killed_slow_queries(pagesize=None, current=None, instance=None, query=No limit = pagesize return _sqlalchemy_query_union_records_logic( query_function=dao.slow_queries.select_killed_slow_queries, - instances=instances, only_with_port=True, + 
instances=instances, only_with_port=True,
         query=query, start_time=start_time, end_time=end_time,
         offset=offset, limit=limit
     )
@@ -932,7 +932,7 @@ def get_killed_slow_queries_count(instance=None, query=None, start_time=None, en
         instances = None
     return _sqlalchemy_query_records_count_logic(
         count_function=dao.slow_queries.count_killed_slow_queries,
-        instances=instances, only_with_port=True, 
+        instances=instances, only_with_port=True,
         query=query, start_time=start_time, end_time=end_time)
 
@@ -1213,7 +1213,7 @@ def get_regular_inspections_count(inspection_type):
         inspection_type=inspection_type)
 
 
-def get_correlation_result(metric_name, host, start_time, end_time, corr_threshold=0.3, topk=10):
+def get_correlation_result(metric_name, host, start_time, end_time, topk=10):
     LEAST_WINDOW = int(7.2e3) * 1000
     client = TsdbClientFactory.get_tsdb_client()
     all_metrics = client.all_metrics
@@ -1222,8 +1222,34 @@ def get_correlation_result(metric_name, host, start_time, end_time, corr_thresho
     actual_start_time = min(start_time, end_time - LEAST_WINDOW)
     start_datetime = datetime.datetime.fromtimestamp(actual_start_time / 1000)
     end_datetime = datetime.datetime.fromtimestamp(end_time / 1000)
-    sequence_args = [(metric_name, host, start_datetime, end_datetime) for metric_name in all_metrics]
-    correlation_result = single_process_correlation_calculation(metric_name, sequence_args, corr_threshold=corr_threshold, topk=topk)
+    sequence_args = [((metric, host, start_datetime, end_datetime),) for metric in all_metrics]
+
+    these_sequences = get_sequences((metric_name, host, start_datetime, end_datetime))
+    if not these_sequences:
+        raise ValueError('The metric was not found.')
+
+    sequence_results = global_vars.worker.parallel_execute(
+        get_sequences, sequence_args
+    ) or []
+
+    if all(sequences is None for sequences in sequence_results):
+        raise ValueError('The sequence_results is all None.')
+
+    correlation_result = dict()
+    for this_name, this_sequence in these_sequences:
+        correlation_args = list()
+        for sequences in sequence_results:
+            for name, sequence in sequences:
+                correlation_args.append(((name, sequence, this_sequence),))
+
+        correlation_result[this_name] = global_vars.worker.parallel_execute(
+            get_correlations, correlation_args
+        ) or []
+
+    for this_name, this_sequence in correlation_result.items():
+        this_sequence.sort(key=lambda item: item[1], reverse=True)
+        del (this_sequence[topk:])
+
     return correlation_result
 
--
Gitee


From 475fc082af32098ec9aed25dbe3e7f8041766c7c Mon Sep 17 00:00:00 2001
From: guoguozhenhaowan <877801999@qq.com>
Date: Tue, 14 Mar 2023 02:58:46 +0000
Subject: [PATCH 08/87] fix(shared): update grafana json model according to
 the latest table structures

---
 dbmind/misc/shares/grafana/forecasting.json   | 18 ++++----
 dbmind/misc/shares/grafana/index-advisor.json | 43 +++++++++----------
 .../misc/shares/grafana/regular-checking.json | 30 +++----------
 3 files changed, 36 insertions(+), 55 deletions(-)

diff --git a/dbmind/misc/shares/grafana/forecasting.json b/dbmind/misc/shares/grafana/forecasting.json
index 1a9a09c..9c5e5f6 100644
--- a/dbmind/misc/shares/grafana/forecasting.json
+++ b/dbmind/misc/shares/grafana/forecasting.json
@@ -128,7 +128,7 @@
         "options": {
           "mode": "exclude",
           "names": [
-            "metric_value gaussdb_qps_by_instance"
+            "alarm_content gaussdb_qps_by_instance"
           ],
           "prefix": "All except:",
           "readOnly": true
@@ -176,19 +176,19 @@
           "group": [],
           "metricColumn": "none",
           "rawQuery": true,
-          "rawSql": "SELECT\n metric_time as time, metric_name, metric_value\nFROM 
tb_forecasting_metrics where metric_name='gaussdb_qps_by_instance' order by time;", + "rawSql": "SELECT\n start_at as time, metric_name, alarm_type, alarm_content\nFROM tb_future_alarms where metric_name='gaussdb_qps_by_instance' order by time;", "refId": "A", "select": [ [ { "params": [ - "metric_time" + "start_at" ], "type": "column" } ] ], - "table": "tb_forecasting_metrics", + "table": "tb_future_alarms", "timeColumn": "time", "where": [] } @@ -283,7 +283,7 @@ "group": [], "metricColumn": "none", "rawQuery": true, - "rawSql": "SELECT\n metric_time as time, metric_value\nFROM tb_forecasting_metrics where metric_name='os_disk_iops' order by time;\n", + "rawSql": "SELECT\n start_at as time, alarm_content\nFROM tb_future_alarms where metric_name='os_disk_iops' order by time;\n", "refId": "A", "select": [ [ @@ -394,7 +394,7 @@ "group": [], "metricColumn": "none", "rawQuery": true, - "rawSql": "SELECT\n metric_time as time, metric_value\nFROM tb_forecasting_metrics where metric_name='statement_responsetime_percentile_p80' order by time;\n", + "rawSql": "SELECT\n start_at as time, alarm_content\nFROM tb_future_alarms where metric_name='statement_responsetime_percentile_p80' order by time;\n", "refId": "A", "select": [ [ @@ -510,7 +510,7 @@ "group": [], "metricColumn": "none", "rawQuery": true, - "rawSql": "SELECT\n metric_time as time, metric_name, metric_value\nFROM tb_forecasting_metrics where metric_name in ('os_mem_usage', 'os_cpu_usage', 'os_disk_usage') and pg_catalog.random() < 0.05 order by time;\n", + "rawSql": "SELECT\n start_at as time, metric_name, alarm_content\nFROM tb_future_alarms where metric_name in ('os_mem_usage', 'os_cpu_usage', 'os_disk_usage') and pg_catalog.random() < 0.05 order by time;\n", "refId": "A", "select": [ [ @@ -595,7 +595,7 @@ "group": [], "metricColumn": "none", "rawQuery": true, - "rawSql": "SELECT\n start_at as time, *\nFROM tb_future_alarms\nORDER BY time desc, host;", + "rawSql": "SELECT\n start_at as time, *\nFROM tb_future_alarms\nORDER BY time desc, instance;", "refId": "A", "select": [ [ @@ -718,7 +718,7 @@ "group": [], "metricColumn": "none", "rawQuery": true, - "rawSql": "SELECT\n occurrence_at as time, * \nFROM tb_history_alarms order by time desc;", + "rawSql": "SELECT\n start_at as time, * \nFROM tb_history_alarms order by time desc;", "refId": "A", "select": [ [ diff --git a/dbmind/misc/shares/grafana/index-advisor.json b/dbmind/misc/shares/grafana/index-advisor.json index 548f1da..91403c1 100644 --- a/dbmind/misc/shares/grafana/index-advisor.json +++ b/dbmind/misc/shares/grafana/index-advisor.json @@ -94,7 +94,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "fieldConfig": { "defaults": { @@ -143,13 +143,13 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "format": "table", "group": [], "metricColumn": "none", "rawQuery": true, - "rawSql": "select db_name, pg_catalog.count(1) from \n(SELECT\n distinct on (host, db_name, schema_name) db_name, schema_name\nFROM tb_index_recommendation) \ngroup by db_name;", + "rawSql": "select db_name, pg_catalog.count(1) from \n(SELECT\n distinct on (db_name, schema_name) db_name, schema_name\nFROM tb_index_recommendation) \ngroup by db_name;", "refId": "A", "select": [ [ @@ -172,7 +172,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "fieldConfig": { "defaults": { @@ -222,13 +222,13 @@ { "datasource": { "type": 
"postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "format": "table", "group": [], "metricColumn": "none", "rawQuery": true, - "rawSql": "SELECT\n count(distinct (host, db_name, schema_name, tb_name))\nFROM tb_index_recommendation;\n", + "rawSql": "SELECT\n count(distinct (db_name, schema_name, tb_name))\nFROM tb_index_recommendation;\n", "refId": "A", "select": [ [ @@ -256,7 +256,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "fieldConfig": { "defaults": { @@ -304,7 +304,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "format": "table", "group": [], @@ -333,7 +333,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "fieldConfig": { "defaults": { @@ -382,7 +382,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "format": "table", "group": [], @@ -416,7 +416,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "fieldConfig": { "defaults": { @@ -493,13 +493,13 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "format": "time_series", "group": [], "metricColumn": "none", "rawQuery": true, - "rawSql": "SELECT\n occurrence_time as time, sum(recommend_index_count) as advised_indexes, sum(invalid_index_count) as invalid_indexes, sum(redundant_index_count) as redundant_indexes \nFROM tb_index_recommendation_stats group by occurrence_time;\n", + "rawSql": "SELECT\n occurrence_time as time, sum(recommend_index_count) as advised_indexes \nFROM tb_index_recommendation_stats group by occurrence_time;\n", "refId": "A", "select": [ [ @@ -522,7 +522,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "fieldConfig": { "defaults": { @@ -700,7 +700,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "format": "table", "group": [], @@ -769,7 +769,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "description": "", "fieldConfig": { @@ -839,7 +839,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "format": "table", "group": [], @@ -1048,7 +1048,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "fieldConfig": { "defaults": { @@ -1141,7 +1141,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "format": "table", "group": [], @@ -1209,7 +1209,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "fieldConfig": { "defaults": { @@ -1314,7 +1314,7 @@ { "datasource": { "type": "postgres", - "uid": "${DS_METADATABASE5-GUOW115}" + "uid": "${DS_METADATABASE}" }, "format": "table", "group": [], @@ -1370,7 +1370,6 @@ "timepicker": {}, "timezone": "", "title": "DBMind-index-advisor", - "uid": "3w1zfNK4z", "version": 39, "weekStart": "" } \ No newline at end of file diff --git a/dbmind/misc/shares/grafana/regular-checking.json b/dbmind/misc/shares/grafana/regular-checking.json index 707c5d8..141aaa7 100644 --- a/dbmind/misc/shares/grafana/regular-checking.json +++ 
b/dbmind/misc/shares/grafana/regular-checking.json @@ -82,7 +82,7 @@ "options": { "dedupStrategy": "exact", "enableLogDetails": true, - "prettifyLogMessage": true, + "prettifyLogMessage": false, "showCommonLabels": false, "showLabels": false, "showTime": true, @@ -100,7 +100,7 @@ "group": [], "metricColumn": "none", "rawQuery": true, - "rawSql": "SELECT\n report,start as time\nFROM tb_regular_inspections where inspection_type='daily check' order by time desc limit 1;", + "rawSql": "SELECT\n report,start as time\nFROM tb_regular_inspections where inspection_type='daily_check' order by time desc limit 1;", "refId": "A", "select": [ [ @@ -159,7 +159,7 @@ "group": [], "metricColumn": "none", "rawQuery": true, - "rawSql": "SELECT\n start as time, conclusion\nFROM tb_regular_inspections order by time desc limit 1;\n", + "rawSql": "SELECT\n start as time, conclusion\nFROM tb_regular_inspections where inspection_type='daily_check' order by time desc limit 1;\n", "refId": "A", "select": [ [ @@ -282,7 +282,7 @@ "group": [], "metricColumn": "none", "rawQuery": true, - "rawSql": "SELECT\n date as time, * \nFROM tb_stat_one_month order by host, time;", + "rawSql": "SELECT\n report,start as time\nFROM tb_regular_inspections where inspection_type='monthly_check' order by time desc limit 1;", "refId": "A", "select": [ [ @@ -294,31 +294,13 @@ } ] ], - "table": "tb_stat_one_month", + "table": "tb_regular_inspections", "timeColumn": "time", "where": [] } ], "title": "Month Statistics", - "transformations": [ - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "metric_name", - "host", - "avg", - "min", - "max", - "the_95_quantile", - "time" - ] - } - } - } - ], - "type": "table" + "type": "logs" } ], "schemaVersion": 37, -- Gitee From a31ea35d82e5ae62d8aa99f3f06ef0f38b43d45b Mon Sep 17 00:00:00 2001 From: LK Date: Tue, 14 Mar 2023 13:19:17 +0800 Subject: [PATCH 09/87] feat: back-end adaptation and function optimization. 
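* Normalize metric units across the exporter templates and inspection reports;
* Rework the latest-sequence and risk-analysis APIs: optional instance selection plus regex-style instance and label filters;
* Add overview, instance-status, agent-status and data-directory-status APIs for the DBMind front end;
* Enable the os_disk_io_read_bytes and os_disk_io_write_bytes metrics in the reprocessing exporter.

A rough usage sketch of the reworked latest-sequence endpoint (the listen address and bearer token below are illustrative placeholders, not values defined by this patch):

    import requests

    # hypothetical DBMind REST address and OAuth2 bearer token
    BASE_URL = 'http://127.0.0.1:8080'
    HEADERS = {'Authorization': 'Bearer <token>'}

    # fetch the last 3 minutes of os_cpu_usage for one instance; regrex=True
    # makes the service append ':?.*' so any exporter port will match
    response = requests.get(
        f'{BASE_URL}/api/latest-sequence/os_cpu_usage',
        params={'instance': '127.0.0.1', 'latest_minutes': 3,
                'regrex': True, 'fetch_all': True},
        headers=HEADERS,
    )
    print(response.json())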
--- dbmind/app/monitoring/regular_inspection.py | 4 +- dbmind/common/utils/base.py | 2 +- .../opengauss_exporter/yamls/default.yml | 4 +- .../reprocessing_exporter.yml | 4 +- dbmind/controllers/dbmind_core.py | 47 +++-- dbmind/service/dai.py | 144 +++++++++++++--- dbmind/service/web.py | 160 ++++++++++++++---- 7 files changed, 286 insertions(+), 79 deletions(-) diff --git a/dbmind/app/monitoring/regular_inspection.py b/dbmind/app/monitoring/regular_inspection.py index ebfef6b..1d5956f 100644 --- a/dbmind/app/monitoring/regular_inspection.py +++ b/dbmind/app/monitoring/regular_inspection.py @@ -143,8 +143,8 @@ class DailyInspection: datname = sequence.labels.get('datname', 'UNKNOWN') indexsize_seq = dai.get_metric_sequence('pg_tables_size_indexsize', self._start, self._end).\ from_server(self._agent_instance).filter(nspname=schema, relname=relname).fetchone() - rv['rows'].append((datname, schema, relname, round(get_sequence_value(sequence, max) / 1024 / 1024, 2), - round(get_sequence_value(indexsize_seq, max) / 1024 / 1024, 2))) + rv['rows'].append((datname, schema, relname, round(get_sequence_value(sequence, max), 2), + round(get_sequence_value(indexsize_seq, max), 2))) return rv @property diff --git a/dbmind/common/utils/base.py b/dbmind/common/utils/base.py index 466a284..0224e1e 100644 --- a/dbmind/common/utils/base.py +++ b/dbmind/common/utils/base.py @@ -441,5 +441,5 @@ def string_to_dict(values, delimiter=','): name, value = pair.split('=') d[name.strip()] = value.strip() except Exception as e: - raise e + return d return d diff --git a/dbmind/components/opengauss_exporter/yamls/default.yml b/dbmind/components/opengauss_exporter/yamls/default.yml index 0abef70..1f7ef88 100644 --- a/dbmind/components/opengauss_exporter/yamls/default.yml +++ b/dbmind/components/opengauss_exporter/yamls/default.yml @@ -511,7 +511,7 @@ pg_database: - name: pg_database sql: |- SELECT datname, - pg_catalog.pg_database_size(pg_database.datname) as size_bytes + pg_catalog.pg_database_size(pg_database.datname) / 1024 / 1024 as size_bytes FROM pg_database where datname NOT IN ('template0','template1'); version: '>=0.0.0' @@ -1136,7 +1136,7 @@ statement_responsetime_percentile: desc: statement responsetime percentile query: - name: statement_responsetime_percentile - sql: SELECT p80, p95 FROM dbe_perf.statement_responsetime_percentile; + sql: SELECT p80 / 1000 as p80, p95 / 1000 as p95 FROM dbe_perf.statement_responsetime_percentile; version: '>=0.0.0' timeout: 1 ttl: 10 diff --git a/dbmind/components/reprocessing_exporter/reprocessing_exporter.yml b/dbmind/components/reprocessing_exporter/reprocessing_exporter.yml index 6bd530e..b4a983a 100644 --- a/dbmind/components/reprocessing_exporter/reprocessing_exporter.yml +++ b/dbmind/components/reprocessing_exporter/reprocessing_exporter.yml @@ -161,7 +161,7 @@ os_disk_io_read_bytes: label: instance description: from instance usage: LABEL - status: disable + status: enable ttl: 10 timeout: 5 @@ -185,7 +185,7 @@ os_disk_io_write_bytes: label: instance description: from instance usage: LABEL - status: disable + status: enable ttl: 10 timeout: 5 diff --git a/dbmind/controllers/dbmind_core.py b/dbmind/controllers/dbmind_core.py index 9a83a8c..779af13 100644 --- a/dbmind/controllers/dbmind_core.py +++ b/dbmind/controllers/dbmind_core.py @@ -162,10 +162,14 @@ def get_metric_sequence(name, start: int = None, end: int = None, step: int = No @request_mapping('/api/latest-sequence/{name}', methods=['GET'], api=True) @oauth2.token_authentication() @standardized_api_output -def 
get_latest_metric_sequence(instance, name, latest_minutes: int = None, - step: int = None, fetch_all: bool = False, labels: str = None): - return web.get_latest_metric_sequence(instance, name, latest_minutes, - step=step, fetch_all=fetch_all, labels=labels) +def get_latest_metric_sequence(name, instance: str = None, latest_minutes: int = None, + step: int = None, fetch_all: bool = False, + regrex: bool = False, labels: str = None, + regrex_labels: str = None): + return web.get_latest_metric_sequence(name, instance, latest_minutes, + step=step, fetch_all=fetch_all, + regrex=regrex, labels=labels, + regrex_labels=regrex_labels) @@ -552,11 +556,12 @@ def get_timed_task_status(): return web.get_timed_task_status() -@request_mapping('/api/toolkit/risk_analysis', methods=['GET'], api=True) +@request_mapping('/api/toolkit/risk-analysis/{metric}', methods=['GET'], api=True) @oauth2.token_authentication() @standardized_api_output -def risk_analysis(metric, instance, warning_hours: int = 0, upper: int = 0, lower: int = 0, labels: str = None): - return web.risk_analysis(metric, instance, warning_hours, upper, lower, labels) +def risk_analysis(metric, instance: str = None, warning_hours: int = 1, upper: str = None, + lower: str = None, labels: str = None): + return web.risk_analysis(metric, instance, warning_hours, upper=upper, lower=lower, labels=labels) @request_mapping('/api/collection/status', methods=['GET'], api=True) @@ -566,8 +571,30 @@ def get_collection_system_status(): return web.get_collection_system_status() -@request_mapping('/api/app/data-directory/growth-rate', methods=['GET'], api=True) +@request_mapping('/api/data-directory/status', methods=['GET'], api=True) @oauth2.token_authentication() @standardized_api_output -def get_database_data_directory_growth_rate(instance: str, latest_minutes: int = 5): - return web.get_database_data_directory_growth_rate(instance, latest_minutes) +def get_database_data_directory_status(instance: str, latest_minutes: int = 5): + return web.get_database_data_directory_status(instance, latest_minutes) + + +@request_mapping('/api/overview', methods=['GET'], api=True) +@oauth2.token_authentication() +@standardized_api_output +def get_front_overview(latest_minutes: int = 3): + return web.get_front_overview(latest_minutes=latest_minutes) + + +@request_mapping('/api/instance/status', methods=['GET'], api=True) +@oauth2.token_authentication() +@standardized_api_output +def get_current_instance_status(): + return web.get_current_instance_status() + + +@request_mapping('/api/agent/status', methods=['GET'], api=True) +@oauth2.token_authentication() +@standardized_api_output +def get_agent_status(): + return web.get_agent_status() + diff --git a/dbmind/service/dai.py b/dbmind/service/dai.py index 456503b..09e8fbb 100644 --- a/dbmind/service/dai.py +++ b/dbmind/service/dai.py @@ -34,6 +34,7 @@ from dbmind.metadatabase import dao from dbmind.service.utils import SequenceUtils from dbmind.constants import (DISTINGUISHING_INSTANCE_LABEL, EXPORTER_INSTANCE_LABEL) +from dbmind.common.algorithm.anomaly_detection.gradient_detector import linear_fitting if LINUX: mp_shared_buffer = get_mp_sync_manager().defaultdict(dict) @@ -573,37 +574,49 @@ def check_tsdb_status(): def check_exporter_status(): - detail = {'opengauss_exporter': [], 'reprocessing_exporter': [], 'node_exporter': []} + # notes: if the scope is not specified, the global_vars.agent_proxy.current_cluster_instances() + # may return 'None' in most scenarios, therefore this method is limited to + # calling when implementing the API for the front end or when we only have one agent + detail = {'opengauss_exporter': [], 'reprocessing_exporter': [], 'node_exporter': [], 'cmd_exporter': []} client = TsdbClientFactory.get_tsdb_client() if not client.check_connection(): detail['opengauss_exporter'].append({'status': 'down', 'listen_address': 'unknown', 'instance': 'unknown'}) detail['reprocessing_exporter'].append({'status': 'down', 'listen_address': 'unknown', 'instance': 'unknown'}) detail['node_exporter'].append({'status': 'down', 'listen_address': 'unknown', 'instance': 'unknown'}) + detail['cmd_exporter'].append({'status': 'down', 'listen_address': 'unknown', 'instance': 'unknown'}) return detail self_exporters = {'opengauss_exporter': 'pg_node_info_uptime', 'reprocessing_exporter': 'os_cpu_usage', - 'node_exporter': 'node_boot_time_seconds'} + 'node_exporter': 'node_boot_time_seconds', 'cmd_exporter': 'gaussdb_cluster_state'} instance_with_port = global_vars.agent_proxy.current_cluster_instances() instance_with_no_port = [item.split(':')[0] for item in instance_with_port] for exporter, metric in self_exporters.items(): - # add cmd_exporter here later - if exporter in ('opengauss_exporter', ): + if exporter in ('opengauss_exporter', 'cmd_exporter'): instances = instance_with_port else: instances = instance_with_no_port for instance in instances: if exporter == 'node_exporter': instance_regex = instance + ':?.*' - sequences = get_latest_metric_value('node_boot_time_seconds').\ + sequences = get_latest_metric_value(metric).\ from_server_like(instance_regex).fetchall() + elif exporter == 'cmd_exporter': + instance_regrex = instance.split(':')[0] + ':?.*' + # since the cluster state may change, it will be matched again + # on the 'primary' after the matching fails on the 'standby' to ensure no exporter is missed + sequences = get_latest_metric_value(metric).\ + filter_like(instance=instance_regrex, standby=f".*{instance}.*").fetchall() + if not is_sequence_valid(sequences): + sequences = get_latest_metric_value(metric).\ + filter_like(instance=instance_regrex).filter(primary=instance).fetchall() else: sequences = get_latest_metric_value(metric).from_server(instance).fetchall() if is_sequence_valid(sequences): for sequence in sequences: listen_address = sequence.labels.get('instance') if exporter == 'reprocessing_exporter': - if listen_address not in (item['listen_address'] for item in detail[exporter]): - detail[exporter].append( - {'instance': instance, 'listen_address': listen_address, 'status': 'up'}) + #if listen_address not in (item['listen_address'] for item in detail[exporter]): + detail[exporter].append( + {'instance': instance, 'listen_address': listen_address, 'status': 'up'}) else: detail[exporter].append( {'instance': instance, 'listen_address': listen_address, 'status': 'up'}) @@ -640,16 +653,18 @@ def diagnosis_exporter_status(exporter_status): exporter_status['reprocessing_exporter']))) if number_of_reprocessing_number > 1: suggestions.append("Only need to start one reprocessing exporter component.") - + if number_of_reprocessing_number < 1: + suggestions.append("It is found that the instance has not deployed reprocessing_exporter or some exception occurred.") + # 5) check whether too many node_exporters are deployed number_of_alive_node_exporter = len(set([item['instance'] for item in exporter_status['node_exporter'] if item['status'] == 'up'])) - # 5) check whether too many node_exporters are deployed if 
number_of_alive_node_exporter > len(instance_with_no_port): suggestions.append("Too many node_exporter on instance, " - "it is recommended to deploy at most one opengauss_exporter on each instance.") + "it is recommended to deploy one node_exporter on each instance.") # 6) check if some nodes do not deploy exporter if number_of_alive_node_exporter < len(instance_with_no_port): - suggestions.append("Is is recommended to deploy one node_exporter on each instance node.") + suggestions.append("It is found that some nodes have not deployed node_exporter, " + "it is recommended to deploy one node_exporter on each instance.") return suggestions @@ -666,23 +681,96 @@ def is_driver_result_valid(s): return False -def get_database_data_directory_usage(instance, latest_minutes): - # get the size of the database data directory - data_directory_sequence = get_latest_metric_sequence('pg_node_info_uptime', latest_minutes).\ - from_server(instance).fetchone() +def get_database_data_directory_status(instance, latest_minutes): + # return the data-directory information of the current cluster + detail = {} + data_directory_sequence = get_latest_metric_value('pg_node_info_uptime').from_server(instance).fetchone() if not is_sequence_valid(data_directory_sequence): return EMPTY_SEQUENCE + # the data directory is the same on every node in the cluster data_directory = data_directory_sequence.labels.get('datapath') - instance_with_no_port = instance.split(':')[0] - os_disk_usage_sequences = get_latest_metric_sequence('os_disk_usage', latest_minutes).\ - from_server(instance_with_no_port).fetchall() - if not is_sequence_valid(os_disk_usage_sequences): - return EMPTY_SEQUENCE - for sequence in os_disk_usage_sequences: - if not sequence.values: + instances = global_vars.agent_proxy.get_all_agents()[instance] + for instance in instances: + instance_with_no_port = instance.split(':')[0] + instance_regrex = instance_with_no_port + ':?.*' + filesystem_total_size_sequences = get_latest_metric_value('node_filesystem_size_bytes').\ + filter_like(instance=instance_regrex).fetchall() + os_disk_usage_sequences = get_latest_metric_sequence('os_disk_usage', latest_minutes).\ + from_server(instance_with_no_port).fetchall() + if not is_sequence_valid(filesystem_total_size_sequences): continue - mount_point = sequence.labels.get('mountpoint') - # avoid mismatched, for example: mount_point is '/', data path is '/media/sdb/opengauss/data/dn' - if mount_point != '/' and mount_point in data_directory: - return sequence - return EMPTY_SEQUENCE + if not is_sequence_valid(os_disk_usage_sequences): + continue + data_directory_related_sequences = [sequence for sequence in filesystem_total_size_sequences if + data_directory.startswith(sequence.labels['mountpoint'])] + disk_usage_related_sequences = [sequence for sequence in os_disk_usage_sequences if + data_directory.startswith(sequence.labels['mountpoint'])] + # transfer bytes to GB + total_space = '' if not is_sequence_valid(data_directory_related_sequences) else round(data_directory_related_sequences[0].values[-1] / 1024 / 1024 / 1024, 2) + usage_rate = '' if not is_sequence_valid(disk_usage_related_sequences) else disk_usage_related_sequences[0].values + tilt_rate, used_space, free_space = '', '', '' + if total_space and usage_rate: + tilt_rate, _ = linear_fitting(range(0, len(usage_rate)), usage_rate) + # convert the tilt rate into absolute disk growth (unit: MB) + tilt_rate = round(total_space * tilt_rate * 1024, 2) + used_space = round(total_space * usage_rate[-1], 2) + free_space = round(total_space - used_space, 2) + detail[instance] = {'total_space': total_space, + 'tilt_rate': tilt_rate, + 'usage_rate': round(usage_rate[-1], 2) if usage_rate else '', + 'used_space': used_space, 'free_space': free_space} + return detail + + +def check_instance_status(): + # there are two scenarios, which are 'centralized' and 'single', the judgment method is as follows: + # 1) centralized: judging by 'gaussdb_cluster_state' which is fetched by 'cmd_exporter' + # 2) single: judging by 'pg_node_info_uptime' which is fetched by 'opengauss_exporter' + # notes: if the scope is not specified, the global_vars.agent_proxy.current_cluster_instances() + # may return 'None' in most scenarios, therefore this method is limited to + # calling when implementing the API for the front end or when we only have one agent + detail = {'status': 'unknown', 'deployment_mode': 'unknown', 'primary': '', 'standby': [], 'abnormal': []} + cluster = global_vars.agent_proxy.current_cluster_instances() + if len(cluster) == 1: + detail['deployment_mode'] = 'single' + detail['primary'] = cluster[0] + sequence = get_latest_metric_value('pg_node_info_uptime').from_server(cluster[0]).fetchone() + if is_sequence_valid(sequence): + detail['status'] = 'normal' + else: + detail['status'] = 'abnormal' + elif len(cluster) > 1: + detail['deployment_mode'] = 'centralized' + # since the state of the cluster may change and we do not know the latest situation of the instance, + # we try all nodes in turn to ensure no key information is missed + for instance in cluster: + cluster_sequence = get_latest_metric_value('gaussdb_cluster_state').filter_like(standby=f'.*{instance}.*').fetchone() + if not is_sequence_valid(cluster_sequence): + cluster_sequence = get_latest_metric_value('gaussdb_cluster_state').filter(primary=instance).fetchone() + if is_sequence_valid(cluster_sequence): + detail['status'] = 'normal' if cluster_sequence.values[-1] == 1 else 'abnormal' + detail['primary'] = cluster_sequence.labels['primary'] + detail['standby'] = cluster_sequence.labels['standby'].strip(',').split(',') + normal = cluster_sequence.labels['normal'].strip(',').split(',') + detail['abnormal'] = list(set([detail['primary']] + detail['standby']) - set(normal)) + detail['status'] = 'abnormal' if detail['abnormal'] else 'normal' + break + return detail + + +def check_agent_status(): + # we judge the status of the agent by executing a statement; if the result is correct, + # it proves that the agent is normal, otherwise it is abnormal + # notes: if the scope is not specified, the global_vars.agent_proxy.current_agent_addr() + # may return 'None' in most scenarios, therefore this method is limited to + # calling when implementing the API for the front end or when we only have one agent + detail = {'status': 'unknown'} + detail['agent_address'] = global_vars.agent_proxy.current_agent_addr() + try: + res = global_vars.agent_proxy.call('query_in_database', 'select 1', None, return_tuples=True) + if res and res[0] and res[0][0] == 1: + detail['status'] = 'up' + except Exception: + detail['status'] = 'down' + return detail + diff --git a/dbmind/service/web.py b/dbmind/service/web.py index 6b77734..c0ef042 100644 --- a/dbmind/service/web.py +++ b/dbmind/service/web.py @@ -20,7 +20,7 @@ import os import sys import threading import time -from collections import defaultdict +from collections import defaultdict, Counter from itertools import groupby import sqlparse @@ -30,7 +30,7 @@ from dbmind.app.optimization import get_database_schemas, TemplateArgs from 
dbmind.app.optimization.index_recommendation import rpc_index_advise from dbmind.app.optimization.index_recommendation_rpc_executor import RpcExecutor from dbmind.common.algorithm.forecasting import quickly_forecast -from dbmind.common.types import ALARM_TYPES +from dbmind.common.types import ALARM_TYPES, ALARM_LEVEL from dbmind.common.types import Sequence from dbmind.components.extract_log import get_workload_template from dbmind.components.slow_query_diagnosis import analyze_slow_query_with_rpc @@ -40,10 +40,9 @@ from dbmind.service.utils import SequenceUtils from dbmind.common.tsdb import TsdbClientFactory from dbmind.components.anomaly_analysis import single_process_correlation_calculation from dbmind.components.memory_check import memory_check -from dbmind.common.utils import dbmind_assert, string_to_dict +from dbmind.common.utils import dbmind_assert, string_to_dict, cast_to_int_or_float from dbmind.common.dispatcher import TimedTaskManager from dbmind.components.forecast import early_warning -from dbmind.common.algorithm.anomaly_detection.gradient_detector import linear_fitting from . import dai @@ -115,7 +114,7 @@ def _override_fetchone(self): return Sequence() -def get_metric_sequence_internal(metric_name, from_timestamp=None, to_timestamp=None, step=None, instance=None): +def get_metric_sequence_internal(metric_name, from_timestamp=None, to_timestamp=None, step=None): """Timestamps are microsecond level.""" if to_timestamp is None: to_timestamp = int(time.time() * 1000) @@ -124,12 +123,9 @@ def get_metric_sequence_internal(metric_name, from_timestamp=None, to_timestamp= from_datetime = datetime.datetime.fromtimestamp(from_timestamp / 1000) to_datetime = datetime.datetime.fromtimestamp(to_timestamp / 1000) fetcher = dai.get_metric_sequence(metric_name, from_datetime, to_datetime, step) - if instance is None: - from_server_predicate = get_access_context(ACCESS_CONTEXT_NAME.TSDB_FROM_SERVERS_REGEX) - if from_server_predicate: - fetcher.from_server_like(from_server_predicate) - else: - fetcher.from_server(instance) + from_server_predicate = get_access_context(ACCESS_CONTEXT_NAME.TSDB_FROM_SERVERS_REGEX) + if from_server_predicate: + fetcher.from_server_like(from_server_predicate) # monkeypatch trick setattr(fetcher, 'fetchall', lambda: _override_fetchall(fetcher)) @@ -146,16 +142,33 @@ def get_metric_sequence(metric_name, from_timestamp=None, to_timestamp=None, ste return list(map(lambda s: s.jsonify(), result)) -def get_latest_metric_sequence(instance, metric_name, latest_minutes, step=None, fetch_all=False, labels=None): +def get_latest_metric_sequence(metric_name, instance, latest_minutes, step=None, fetch_all=False, regrex=False, + labels=None, regrex_labels=None): # this function can actually be replaced by 'get_metric_sequence', but in order to avoid # the hidden problems of that method, we add 'instance', 'fetch_all' and 'labels' to solve it # notes: the format of labels is 'key1=val1, key2=val2, key3=val3, ...' 
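# e.g. labels='datname=postgres,state=active' is parsed by string_to_dict() into # {'datname': 'postgres', 'state': 'active'} and applied through fetcher.filter(), # while regrex_labels is parsed the same way and applied through fetcher.filter_like()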
- end_timestamp = int(time.time() * 1000) - start_timestamp = end_timestamp - latest_minutes * 60 * 1000 # transfer to ms - fetcher = get_metric_sequence_internal(metric_name, start_timestamp, end_timestamp, step=step, instance=instance) + if latest_minutes is None or latest_minutes <= 0: + fetcher = dai.get_latest_metric_value(metric_name) + else: + fetcher = dai.get_latest_metric_sequence(metric_name, latest_minutes, step=step) + if instance is None: + from_server_predicate = get_access_context(ACCESS_CONTEXT_NAME.TSDB_FROM_SERVERS_REGEX) + if from_server_predicate: + fetcher.from_server_like(from_server_predicate) + else: + if regrex: + instance = instance + ':?.*' + fetcher.from_server_like(instance) + else: + fetcher.from_server(instance) + if labels is not None: labels = string_to_dict(labels, delimiter=',') fetcher.filter(**labels) + if regrex_labels is not None: + regrex_labels = string_to_dict(regrex_labels, delimiter=',') + fetcher.filter_like(**regrex_labels) + if fetch_all: result = fetcher.fetchall() else: @@ -1232,37 +1245,116 @@ def check_memory_context(start_time, end_time): def get_timed_task_status(): - detail = {} + detail = {'header': ['name', 'current_status', 'running_interval'], 'rows': []} + rows_list = [] for timed_task, _ in global_vars.timed_task.items(): - detail[timed_task] = {} + detail_list = [timed_task] if TimedTaskManager.check(timed_task): - detail[timed_task]['status'] = TimedTaskManager.is_alive(timed_task) - detail[timed_task]['interval'] = TimedTaskManager.get_interval(timed_task) + if TimedTaskManager.is_alive(timed_task): + detail_list.append('Running') + detail_list.append(TimedTaskManager.get_interval(timed_task)) else: - detail[timed_task]['status'] = 'not start' + detail_list.append('Stopping') + detail_list.append(0) + rows_list.append(detail_list) + detail['rows'] = rows_list return detail def risk_analysis(metric, instance, warning_hours, upper, lower, labels): - retroactive_period = warning_hours * 3 labels = string_to_dict(labels, delimiter=',') - warnings = early_warning(metric, instance, retroactive_period, warning_hours, upper, lower, labels) + upper = cast_to_int_or_float(upper) + lower = cast_to_int_or_float(lower) + warnings = early_warning(metric, instance, None, warning_hours, upper, lower, labels) return warnings -def get_collection_system_status(): - detail = {'tsdb': dai.check_tsdb_status(), 'exporter': dai.check_exporter_status()} - suggestions = dai.diagnosis_exporter_status(detail['exporter']) - detail['suggestions'] = suggestions +def get_database_data_directory_status(instance, latest_minutes): + # instance: address of the instance agent, format is 'host:port' + # get the disk usage status of the directory where the database data is located + return dai.get_database_data_directory_status(instance, latest_minutes) + + +def get_front_overview(latest_minutes=5): + overview_detail = {'status': 'stopping', 'strength_version': 'unknown', 'deployment_mode': 'unknown', + 'operating_system': 'unknown', 'general_risk': 0, 'major_risk': 0, 'high_risk': 0, 'low_risk': 0} + # this method can be used by the front end + # instance = get_access_context(ACCESS_CONTEXT_NAME.AGENT_INSTANCE_IP_WITH_PORT) + instance = global_vars.agent_proxy.current_agent_addr() + if instance is None: + return overview_detail + instance_with_no_port = instance.split(':')[0] + instance_regrex = instance_with_no_port + ':?.*' + + # get the status of the instance + overview_detail['status'] = dai.check_instance_status().get('status') + + # get version of opengauss + version_sequence = dai.get_latest_metric_value('pg_node_info_uptime').from_server(instance).fetchone() + if dai.is_sequence_valid(version_sequence): + overview_detail['strength_version'] = version_sequence.labels['version'] + + # get the operating system version + operating_system_sequence = dai.get_latest_metric_value('node_uname_info').filter_like(instance=instance_regrex).fetchone() + if dai.is_sequence_valid(operating_system_sequence): + overview_detail['operating_system'] = operating_system_sequence.labels['machine'] + + # get summary of alarms between start_time and end_time + end_time = int(time.time() * 1000) + start_time = end_time - latest_minutes * 60 * 1000 + history_alarms = dao.alarms.select_history_alarm(instance=instance, start_at=start_time, end_at=end_time).all() + alarm_level = [item.alarm_level for item in history_alarms] + count = Counter(alarm_level) + for item, times in count.items(): + if times >= 0 and item == ALARM_LEVEL.WARNING.value: + overview_detail['general_risk'] = times + elif times >= 0 and item == ALARM_LEVEL.CRITICAL.value: + overview_detail['major_risk'] = times + elif times >= 0 and item == ALARM_LEVEL.ERROR.value: + overview_detail['high_risk'] = times + elif times >= 0 and item == ALARM_LEVEL.INFO.value: + overview_detail['low_risk'] = times + + # get deployment mode of instance + # need to change to: clusters = get_access_context(ACCESS_CONTEXT_NAME.INSTANCE_IP_WITH_PORT_LIST) + clusters = global_vars.agent_proxy.current_cluster_instances() + if len(clusters) == 1: + overview_detail['deployment_mode'] = 'single' + elif len(clusters) > 1: + overview_detail['deployment_mode'] = 'centralized' + + return overview_detail + + +def get_agent_status(): + agent_status = dai.check_agent_status() + agent_status['status'] = True if agent_status['status'] == 'up' else False + return agent_status + + +def get_current_instance_status(): + detail = {'header': ['instance', 'role', 'state'], 'rows': []} + instance_status = dai.check_instance_status() + detail['rows'].append([instance_status['primary'], 'primary', True if + instance_status['primary'] not in + instance_status['abnormal'] else False]) + for instance in instance_status['standby']: + detail['rows'].append([instance, 'standby', True if instance not in instance_status['abnormal'] else False]) return detail -def get_database_data_directory_growth_rate(instance, latest_minutes): - # instance: address of database, format is 'host:port' - # get the growth_rate of disk usage where the database data directory is located - sequence = dai.get_database_data_directory_usage(instance, latest_minutes) - if not sequence.values: - return - growth_rate, _ = linear_fitting(list(range(0, len(sequence))), sequence.values) - return growth_rate +def get_collection_system_status(): + collection_detail = {'header': ['component', 'listen_address', 'is_alive'], 'rows': [], 'suggestions': []} + tsdb_status = dai.check_tsdb_status() + exporter_status = dai.check_exporter_status() + collection_detail['suggestions'] = dai.diagnosis_exporter_status(exporter_status) + for component, details in exporter_status.items(): + for detail in details: + listen_address = detail['listen_address'] + status = True if detail['status'] == 'up' else False + collection_detail['rows'].append([component, listen_address, status]) + collection_detail['rows'].append([tsdb_status['name'], + tsdb_status['listen_address'], + True if tsdb_status['status'] == 'up' else False]) + return collection_detail -- Gitee From 8c4e30fe1cb274f1949a1f1a97f9255caf76b8bf Mon Sep 17 00:00:00 
2001 From: liumiao Date: Tue, 14 Mar 2023 14:10:00 +0800 Subject: [PATCH 10/87] feat(web): back-end adaptation and function optimization. --- ui/src/api/autonomousManagement.js | 2 +- ui/src/api/overview.js | 58 +++- ui/src/assets/css/common.css | 8 +- ui/src/assets/css/main/overview.css | 92 +++++ ui/src/assets/imgs/Available.png | Bin 0 -> 507 bytes ui/src/assets/imgs/Average Queue Length.png | Bin 0 -> 464 bytes ui/src/assets/imgs/Average request delay.png | Bin 0 -> 862 bytes ui/src/assets/imgs/Average request size.png | Bin 0 -> 481 bytes ui/src/assets/imgs/Bandwidth Utilization.png | Bin 0 -> 672 bytes ui/src/assets/imgs/Buffer.png | Bin 0 -> 728 bytes ui/src/assets/imgs/Cache.png | Bin 0 -> 641 bytes ui/src/assets/imgs/Current Receive Rate.png | Bin 0 -> 778 bytes ui/src/assets/imgs/Current Sending Rate.png | Bin 0 -> 722 bytes ui/src/assets/imgs/Empty.png | Bin 0 -> 568 bytes ui/src/assets/imgs/Error packet.png | Bin 0 -> 777 bytes ui/src/assets/imgs/Help.png | Bin 0 -> 491 bytes ui/src/assets/imgs/Initiate.png | Bin 0 -> 253 bytes ui/src/assets/imgs/Packet loss.png | Bin 0 -> 806 bytes ui/src/assets/imgs/Pause.png | Bin 0 -> 227 bytes ui/src/assets/imgs/Read rate.png | Bin 0 -> 479 bytes ui/src/assets/imgs/Refresh.png | Bin 0 -> 555 bytes ui/src/assets/imgs/Single Read Time.png | Bin 0 -> 509 bytes ui/src/assets/imgs/Single Write Time.png | Bin 0 -> 399 bytes ui/src/assets/imgs/Svctm.png | Bin 0 -> 542 bytes ui/src/assets/imgs/System.png | Bin 0 -> 338 bytes ui/src/assets/imgs/Tps.png | Bin 0 -> 664 bytes ui/src/assets/imgs/Used.png | Bin 0 -> 500 bytes ui/src/assets/imgs/User.png | Bin 0 -> 474 bytes ui/src/assets/imgs/Wait.png | Bin 0 -> 622 bytes ui/src/assets/imgs/Write rate.png | Bin 0 -> 484 bytes ui/src/assets/imgs/alarm1.png | Bin 0 -> 567 bytes ui/src/assets/imgs/alarm2.png | Bin 0 -> 499 bytes ui/src/assets/imgs/alarm3.png | Bin 0 -> 530 bytes ui/src/assets/imgs/alarm4.png | Bin 0 -> 572 bytes ui/src/assets/imgs/icon1.png | Bin 0 -> 632 bytes ui/src/assets/imgs/icon2.png | Bin 0 -> 730 bytes ui/src/assets/imgs/icon3.png | Bin 0 -> 434 bytes ui/src/assets/imgs/icon4.png | Bin 0 -> 599 bytes ui/src/assets/imgs/icon5.png | Bin 0 -> 552 bytes ui/src/assets/imgs/icon6.png | Bin 0 -> 145 bytes ui/src/assets/imgs/icon7.png | Bin 0 -> 149 bytes ui/src/assets/imgs/icon8.png | Bin 0 -> 145 bytes ui/src/assets/imgs/iconok.png | Bin 0 -> 369 bytes ui/src/assets/imgs/iconstop.png | Bin 0 -> 382 bytes ui/src/assets/imgs/not.png | Bin 0 -> 1255 bytes ui/src/assets/imgs/run.png | Bin 0 -> 1051 bytes ui/src/assets/imgs/stop.png | Bin 0 -> 1178 bytes ui/src/assets/imgs/update.png | Bin 0 -> 276 bytes ui/src/components/AiToolkit/IndexAdvisor.jsx | 16 +- ui/src/components/AiToolkit/RiskAnalysis.jsx | 69 ++-- ui/src/components/Overview/Alert.jsx | 78 ----- .../components/Overview/CollectionTable.jsx | 92 +++++ .../components/Overview/ConnectionCharts.jsx | 163 +++++++++ ui/src/components/Overview/DataDiskCharts.jsx | 197 +++++++++++ .../components/Overview/DatabaseSizeChart.jsx | 152 +++++++++ .../Overview/ExporterInformationChart.jsx | 89 ----- ui/src/components/Overview/Instance.jsx | 79 +++++ ui/src/components/Overview/NodeTable.jsx | 91 +++++ ui/src/components/Overview/Proxy.jsx | 49 +++ .../components/Overview/ResponseTimeChart.jsx | 152 --------- .../Overview/ResponseTimeCharts.jsx | 164 +++++++++ .../Overview/RunningStatusChart.jsx | 154 --------- .../Overview/ScheduledTaskTable.jsx | 173 ++++++++++ .../Overview/SqlDistributionChart.jsx | 234 +++++++++++++ 
ui/src/components/Overview/SystemCpuChart.jsx | 146 -------- .../components/Overview/SystemDiskChart.jsx | 140 -------- ui/src/components/Overview/SystemMemChart.jsx | 147 -------- .../components/Overview/ThroughputChart.jsx | 144 -------- .../Overview/TransactionStateChart.jsx | 321 ++++++++---------- ui/src/pages/Overview.jsx | 126 +++---- 70 files changed, 1809 insertions(+), 1327 deletions(-) create mode 100644 ui/src/assets/imgs/Available.png create mode 100644 ui/src/assets/imgs/Average Queue Length.png create mode 100644 ui/src/assets/imgs/Average request delay.png create mode 100644 ui/src/assets/imgs/Average request size.png create mode 100644 ui/src/assets/imgs/Bandwidth Utilization.png create mode 100644 ui/src/assets/imgs/Buffer.png create mode 100644 ui/src/assets/imgs/Cache.png create mode 100644 ui/src/assets/imgs/Current Receive Rate.png create mode 100644 ui/src/assets/imgs/Current Sending Rate.png create mode 100644 ui/src/assets/imgs/Empty.png create mode 100644 ui/src/assets/imgs/Error packet.png create mode 100644 ui/src/assets/imgs/Help.png create mode 100644 ui/src/assets/imgs/Initiate.png create mode 100644 ui/src/assets/imgs/Packet loss.png create mode 100644 ui/src/assets/imgs/Pause.png create mode 100644 ui/src/assets/imgs/Read rate.png create mode 100644 ui/src/assets/imgs/Refresh.png create mode 100644 ui/src/assets/imgs/Single Read Time.png create mode 100644 ui/src/assets/imgs/Single Write Time.png create mode 100644 ui/src/assets/imgs/Svctm.png create mode 100644 ui/src/assets/imgs/System.png create mode 100644 ui/src/assets/imgs/Tps.png create mode 100644 ui/src/assets/imgs/Used.png create mode 100644 ui/src/assets/imgs/User.png create mode 100644 ui/src/assets/imgs/Wait.png create mode 100644 ui/src/assets/imgs/Write rate.png create mode 100644 ui/src/assets/imgs/alarm1.png create mode 100644 ui/src/assets/imgs/alarm2.png create mode 100644 ui/src/assets/imgs/alarm3.png create mode 100644 ui/src/assets/imgs/alarm4.png create mode 100644 ui/src/assets/imgs/icon1.png create mode 100644 ui/src/assets/imgs/icon2.png create mode 100644 ui/src/assets/imgs/icon3.png create mode 100644 ui/src/assets/imgs/icon4.png create mode 100644 ui/src/assets/imgs/icon5.png create mode 100644 ui/src/assets/imgs/icon6.png create mode 100644 ui/src/assets/imgs/icon7.png create mode 100644 ui/src/assets/imgs/icon8.png create mode 100644 ui/src/assets/imgs/iconok.png create mode 100644 ui/src/assets/imgs/iconstop.png create mode 100644 ui/src/assets/imgs/not.png create mode 100644 ui/src/assets/imgs/run.png create mode 100644 ui/src/assets/imgs/stop.png create mode 100644 ui/src/assets/imgs/update.png delete mode 100644 ui/src/components/Overview/Alert.jsx create mode 100644 ui/src/components/Overview/CollectionTable.jsx create mode 100644 ui/src/components/Overview/ConnectionCharts.jsx create mode 100644 ui/src/components/Overview/DataDiskCharts.jsx create mode 100644 ui/src/components/Overview/DatabaseSizeChart.jsx delete mode 100644 ui/src/components/Overview/ExporterInformationChart.jsx create mode 100644 ui/src/components/Overview/Instance.jsx create mode 100644 ui/src/components/Overview/NodeTable.jsx create mode 100644 ui/src/components/Overview/Proxy.jsx delete mode 100644 ui/src/components/Overview/ResponseTimeChart.jsx create mode 100644 ui/src/components/Overview/ResponseTimeCharts.jsx delete mode 100644 ui/src/components/Overview/RunningStatusChart.jsx create mode 100644 ui/src/components/Overview/ScheduledTaskTable.jsx create mode 100644 
ui/src/components/Overview/SqlDistributionChart.jsx delete mode 100644 ui/src/components/Overview/SystemCpuChart.jsx delete mode 100644 ui/src/components/Overview/SystemDiskChart.jsx delete mode 100644 ui/src/components/Overview/SystemMemChart.jsx delete mode 100644 ui/src/components/Overview/ThroughputChart.jsx diff --git a/ui/src/api/autonomousManagement.js b/ui/src/api/autonomousManagement.js index 6ee33cf..08fd1f2 100644 --- a/ui/src/api/autonomousManagement.js +++ b/ui/src/api/autonomousManagement.js @@ -12,7 +12,7 @@ export const getSearchMetricInterface = () => { return get('/list/metric'); }; export const getForecastInterface = (data) => { - return get(`/toolkit/risk_analysis?instance_name=${data.instance_name}&metric_name=${data.metric_name}&filter_name=${data.labels}&warning_hours=${data.warning_hours}&upper=${data.upper}&lower=${data.lower}`); + return get(`/toolkit/risk-analysis?metric=${data.metric_name}&instance=${data.instance_name}&warning_hours=${data.warning_hours}&upper=${data.upper}&lower=${data.lower}&labels=${data.labels}`); }; // ----Alarms diff --git a/ui/src/api/overview.js b/ui/src/api/overview.js index 9f811b8..1c9fcaf 100644 --- a/ui/src/api/overview.js +++ b/ui/src/api/overview.js @@ -1,4 +1,4 @@ -import { get } from './request'; +import { get, post } from './request'; // Transaction State export const getTransactionStateInterface = () => { @@ -19,6 +19,62 @@ export const getRunningStatusInterface = () => { export const getAlertInterface = () => { return get('/status/alert'); }; + export const getQpsInterface = (data) => { return get(`/sequence/${data.name}`, data.time); }; + +export const getInterface = () => { + return get('/overview'); +}; + +export const getResponseTime = (data) => { + return get(`/latest-sequence/${data.label}?latest_minutes=3&instance=${data.instance}`); +}; + +export const getConnection = (data) => { + return get(`/latest-sequence/${data.label}?&latest_minutes=3&instance=${data.instance}`); +}; + +export const getProxy = () => { + return get('/agent/status'); +}; + +export const getDistribution = (data) => { + return get(`/latest-sequence/${data.label}?&latest_minutes=0&instance=${data.instance}`); +}; + +export const getTransaction = (data) => { + return get(`/latest-sequence/${data.label}?latest_minutes=0&fetch_all=True&instance=${data.instance}`); +}; + +export const getDatabaseSize = (data) => { + return get(`/latest-sequence/${data.label}?latest_minutes=0&fetch_all=True&instance=${data.instance}`); +}; + +export const getCollectionTable = () => { + return get('/collection/status'); +}; +export const getNode = () => { + return get('/instance/status'); +}; + +export const getDataDisk = (data) => { + return get(`/data-directory/status?latest_minutes=3&instance=${data}`); +}; + +export const getTimedTaskStatus = () => { + return get('/app/timed-task/status'); +}; + +export const getStopTimed = (data) => { + return post(`/app/stop_timed_task?funcname=${data}`) +}; + +export const getStartTimed = (data) => { + return post(`/app/start_timed_task?funcname=${data}`) +}; + +export const getResetInterval = (data) => { + return post(`/app/reset_interval?funcname=${data.funcname}&seconds=${data.seconds}`) +}; \ No newline at end of file diff --git a/ui/src/assets/css/common.css b/ui/src/assets/css/common.css index 39781b6..3c5bcfc 100644 --- a/ui/src/assets/css/common.css +++ b/ui/src/assets/css/common.css @@ -15,6 +15,9 @@ ul,li{ list-style: none; } +.mb-10 { + margin-bottom: 10px; +} .mb-20 { margin-bottom: 20px; } @@ -24,10 +27,13 @@ ul,li{ 
.mr-20{ margin-right: 20px; } +.selfthealing { + display: block; +} .contentWrap { border: 1px solid #f2f2f2; border-top: none; - background: #f1f1f1; + background: #F5F5F5; padding:20px 20px 0 20px; } .plr-0{ diff --git a/ui/src/assets/css/main/overview.css b/ui/src/assets/css/main/overview.css index f064787..e5ebfb0 100644 --- a/ui/src/assets/css/main/overview.css +++ b/ui/src/assets/css/main/overview.css @@ -28,4 +28,96 @@ #alertrolling1 li { margin-bottom: 6px; color:#ff4d4f +} +.instancename .ant-card-head { + font-size: 18px; + border-bottom: none; + padding: 0 20px; +} +.instancename .ant-card-head-title { + padding: 18px 0 10px 0; +} +.instancename .ant-card-body { + padding: 0 20px 20px 20px; +} +.instancename .ant-card-bordered{ + border: none; +} +.instancefontsize{ + border-radius: 6px ; + background: #fbf9ffff ; +} +.instancefontsize .ant-card-head{ + font-size: 14px ; + color: #737a80 ; + padding: 0 20px; +} +.ant-col-5 { + display: block; + flex: 0 0 20%; + max-width: 20%; +} +.bgcolor{ + background: #ffffff; +} +.bgcolor .ant-card-extra{ + padding: 12px 0 0 0; +} +.textstyle { + font-size:18px; + color:#272727; + font-family:Arial; + text-align:left; + font-weight:Bold; + text-overflow: ellipsis; + white-space: nowrap; + overflow: hidden; + width: 200px; + display: inline-block; +} +.iconstyle{ + padding: 2px 4px 4px 0; +} +.alarmstyle{ + padding: 0px 0px 6px 0; +} +.textstyle .numstyle{ + width: 30px; + display: inline-block; + text-align: center; +} +.proxystyle{ + padding:0px 30px 10px 30px +} +.procolorblue .ant-progress-inner{ + border : 1px solid #5990fdff; +} +.procolorred .ant-progress-inner{ + border : 1px solid #e54545; +} +.overviewTable .ant-table-thead > tr > th{ + background:#f2f5fcff; +} +.overviewTable .ant-table.ant-table-bordered > .ant-table-container{ + border-left: none; +} +.overviewTable .ant-table.ant-table-bordered > .ant-table-container > .ant-table-content > table > tbody > tr > td{ + border-right: none; +} +.point { + width: 4px; + height: 4px; + border: 1px solid #ff6c04; + border-radius: 50%; + background-color: #ff6c04; + display: inline-block; + margin: 0 4px 2px 0; +} +.grayimg { + -webkit-filter: grayscale(100%); + -moz-filter: grayscale(100%); + -ms-filter: grayscale(100%); + -o-filter: grayscale(100%); + filter: grayscale(100%); + filter: gray; } \ No newline at end of file diff --git a/ui/src/assets/imgs/Available.png b/ui/src/assets/imgs/Available.png new file mode 100644 index 0000000000000000000000000000000000000000..5110d56cc3362ac2f61ddf20b3cafd62f99bc438 GIT binary patch literal 507 zcmV@K6b}C>7X|K4L$~4oCLuJL6i>Z5AXpxbWkX` z+f>(*3a*0Si#qF~O=%aU=qAo8#pZZ!OQK0{LYt5rF86%j{m!}fy9gmqvXr@S(AiN) z$;o1MrSKSpVi=iLGVTE=1Auv1v{QwLP_pr2IzFVvt`KlSBoV(+59a|9)xW-#`R9$J zH<9Zp(=4L~4s6=1=DC>YMVVGK7D0tWJ7rG#$hb#TN#!wN8T$(2Zd;f5iRC1bl^;}f zbI-*|$^K79mFedB$at{LpqvLJf{Jd2Zj76!aV7pFYItMWN}81^Bf_*xz@?SBKeJ*i zRmP1!LBzC7nE(V!6@Be&P(T^MH9|J4z*H5r*2s^oe#DX-T|g9BTN&rOI9NV&FliY# zyknvimA@7dsKVZuFEr!zEU1hD4Md8|ltmv6zcN7Firme< z&zTG8c0jZ@`8b9OsEGFr^bpp0Jp6s%*o=@lKCDa`vqAPMYJII??M0o002ovPDHLkV1lY~-+}-D literal 0 HcmV?d00001 diff --git a/ui/src/assets/imgs/Average Queue Length.png b/ui/src/assets/imgs/Average Queue Length.png new file mode 100644 index 0000000000000000000000000000000000000000..a0cb335ae6d01df0a6aa6df2160c6935c0f66c0f GIT binary patch literal 464 zcmV;>0WbcEP)8%4={aT!*rHsG%zwS z){|=3e}?~!S0ZOL;4tj}$Nvlr6G$-h0MM`lSPcW&1a$fYo}cXT4F4Ggh&2@ET@-JE z39Y^yi6T$1`y6WGGpds+sq823Pft6SilK(t{n=a`78f+Ggm%Q%w;If)jXyGEa;!P6+<>tYti7`=0 
zw>=qbt0=9;#MqWV&w5PAS81O$@X1`;e0sKt2d#Q0&;u>R**X7juPHO-R?1hm5ar9{#! z<+A0FP!I;hQv?u0!l;!FzVpxDCzj93$Lyq|wZj2%FO#27Bb}*JpK~ncteD58CWli zNv<7M2Tp1~pPDm;6iwVVi~Gc~(|WKUNN3DqIG<9dYp27!p=^7%s5RH)TW5OdTILIjb+MlLd;dmqpn*58`zQ?R?0l;h!sZdb0E|WC13~@ zlys_E(1pzvUww3Pp0HPYeDo&y0(cTj8y{+Cli5AwISnd66AZMtcM|;cYO2dB00X$o zw}-$jPBT?q*q2&HKH{%VJRTgZ{FWDVA@li+xK7FD`)p2e*-(@5TKuu~W$@V(Iv}Qh zaCwu?uGz%qEgrNeWthO_kG}U^g447@$F>y6<^_1S$P0 z01-l57ztMSe3<90Az&XQyrSnzW!8scUWSG*NBheEu5D6nkk+Dau0=X8w{>i>MixQ1`_dl?fnQ5jp zdy%KTtMaqZ~~bVFcYo~#;ryOy)mR4eOXNtd>VCOZJ@idUcP+qRWtp^#YI@dcX8EN(-e^l z?AMsd(>f8F3ri4m{ix{N0oZsD_l2l~jnG<}(2Amdz76~;vJYE3BrUedG6R4nrj^sw>G~&Zr4t!V@=Q&Rk>)okIY2SY6T)m8y<+m>+qgdT1;h ztG$KnEmdE4UR_eR@*21rswVmK&feNlw}-LYJs+lIOK}{SO~gOO3{j|S;)jE{hNw8F z_q25St(om=rs8TG^6si9#{UG}E>)Ke>%4ATvvtO*5OD%kz07L9#Z93L`p+)HiNg7pbW5qHpOpQHS$q08Tnx@AA;3?9EZIEPp)QKM2ObtDZk9 z0{p`#D7!L6%NjWgUi*uKMn%Qh_Y5IE3P#~SgB=6;+xy>@c08$oPh}_o0000}8p5TBVPN+sOWf|69yNf*ncODCPB`~%9-sgUQsmk&!o z{{bxRuuCUp?6AX1$`tyJSVhC`II}NHE+CEDaLnz^{^s|w2pd+oN!B0_R!501^#BL1VvMOc=gtk7 z^N2*>>9dX<8wO;(!)zBEOyWjszZVm8Z`Tv;6XQb$D9Xl5kNl0iKh4~)v9;Tmf!6{| zJVR3_3C#rZEEYneLaE6JO$6Fu<_p<0mxg6TEQt_j0RBU^xEN}~0OoC0)!#J%28TcQ znNWDZIv|fkjAZ8m&MuyRgZqmON!4hDL(SOW0$~Q!& zryh79Vp*j|k*QTstWXCMiRI7xlOHwq)c1go5uv|HO&R$mF^`da`uz2Ssd6q9yisIJ z6KatNtngA50)`u{A%x5OnHTkycts{oXrhp>YNT4cK}I`>GRB6rhD-(UB2ZLA$NXQ; zG%2Ef1NFfpw$e^{J17&?WO@YTKdq_h=?S3!npZOb63=1uNDS_}&tNY6%!Nns8I&3P z7g2v0eHO{rEH4v)ggw6EJ(W720K;5k#3C|ZI>c!{0g)o(prO`UnpVe+Ud!jNaK}{^ z&QuDcx<@<0000< KMNUMnLSTY2cS>9U literal 0 HcmV?d00001 diff --git a/ui/src/assets/imgs/Cache.png b/ui/src/assets/imgs/Cache.png new file mode 100644 index 0000000000000000000000000000000000000000..792a7812ac019a72ff8b34100dd4c6c77acf6e83 GIT binary patch literal 641 zcmV-{0)G98P){?k&WU}o+B>75riPHxe&4OJC_r+yIr>tm?!H@Sv)g)tzxaU0&|QB>Ct zpi+wnG`sf^nIAK#X`e?`ZMOsHfSnxugdXjK*0CrGa)~ujR;jfSpbNa8+!#Tq^OHlN z1D4)-Mk$5ix!jG^=6#qsy`Z4N*MwkX5`Xts1saS@+0@x!**|Z|r{|Q$0)g(@K-Mkx zE*N=@^w*~d_r#nqHk&=-gZ`E$;t!i|t=j%wB$lrf-h*wEINW|Lp8Dvq3L?t%wO)9y z-l5HhIQlk0CePYb6~jDr0}?d5_W_(uP5Yb_No9V&F*Ec*>v)gDtw+f2z7qwf0mU%h zmvl)Xt730MT`N#}Y}mA<>d}7J)JlOF817kz5ucTUsKnP&>dMW(xA_4At>e-G;+fSN)eS@N(GYWwv46P)b-D4&mB(YnGt24a=QuxJ0+XhV zjX-%W!FO#G+(+dW2jjUl;?sq}cZ`z}pTTkJOF0(xnMOhBmqg8Ln}D)6&v~Ha&+(l1 bZO8rrm+DcUk@md{00000NkvXXu0mjfHPbJ+ literal 0 HcmV?d00001 diff --git a/ui/src/assets/imgs/Current Receive Rate.png b/ui/src/assets/imgs/Current Receive Rate.png new file mode 100644 index 0000000000000000000000000000000000000000..c79275cd1ceeac8d0996752a8f07175fe45a8f42 GIT binary patch literal 778 zcmV+l1NHogP)sFL}un4QZeH%Gz!m4c1%l?AheJ^_ElN?o42Cp!z6QE_g<+Y&k$H*b|v!mUHGavfUk^EeQ$(nMPkL>*ITDno2cU@&j ztTN&?u;mRRm;#QB1n5|RYw0HQ9Lg!>q(Hd7=*oB0=G^1#{cv?I>|m=l6f#JBL8Lxx z*1`%)@DSiPhW6VPWK$T+X6buL&^wg1$YlX=fDim&dAI)?#s~Bou}jLEA8lcrk2#aD zZKiWzsv2&!NnPF@=k*Kay-i_kG(7E31nr`<*1V}m8z78s6=pR=XxgtP?IUwd0$}6T zVVqclu(euhv!+aOZVy#DsgS8exgDCgBHz~)fPvh=Ewg?#Ta>3otVmaKRRYq2UgOv& zEg7FseiZISA=(Q-r=7r!2)LuttJR#=RB(fG4sl%`iReCe!fW1pf}W91p2xbZVVI{- zNTxuF7suwG4=wO0#nXN@#)Ch>1({u)O)av(3t!W|M2lBbf>fHth7Fj*I#7(2oagl1 z9m_F2r-%Qn1Y zm=Fed*!3}v`e8bdsH$iQxgo6VQBHMrgKz}+e=g%wJ2$J#UlO1|UC)PtIsgCw07*qo IM6N<$fWMNkDOg%^aXDf?&1mnoQ_C z;z?3~r;#K_XlAXoo}O)LG)(r<1zc!>r9FFH)7tQz>2g1D4h8Ze@K#wQ!qh^*wAaia zVo}$XfEfcc2<76!03Jh)c1X}(s*5?U^ykzmu4z_lK=EFnKY(@`5l#hb5y>K&kNRf{ z(*=802!=@DGav-C1Ba(&H%wRB&mU&Uc9yZX& z0@h!tYTZ}?BVGGCZuykgoEJ3jI0O$h3y(LIIYcVg=w@rfwMupWnduz zor*VlvRk81>4iJ0Wk%b6(TaIyh+nDS7S-eGeb?ORCLIYDIHY{phr*8qsitYo8?Zxy zxL;Y{E4i`OkoVe(V4-8GV8@YYR9}{=>M|Bt(chcD0Q_b~qv4UG0{{R307*qoM6N<$ Ef(SZGCIA2c 
literal 0
HcmV?d00001
diff --git a/ui/src/assets/imgs/Empty.png b/ui/src/assets/imgs/Empty.png
new file mode 100644
index 0000000000000000000000000000000000000000..a5788fdcd5c9bc12e502916ff97fe957702bafcb
GIT binary patch (literal 568; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/Help.png b/ui/src/assets/imgs/Help.png
new file mode 100644
index 0000000000000000000000000000000000000000..6a02f8b4e4f98f478799d2bc11db09efb2ea156b
GIT binary patch (literal 491; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/Initiate.png b/ui/src/assets/imgs/Initiate.png
new file mode 100644
index 0000000000000000000000000000000000000000..e1f2b6c4246e61d51284fdcd0415e4b98f12b326
GIT binary patch (literal 253; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/Pause.png b/ui/src/assets/imgs/Pause.png
new file mode 100644
index 0000000000000000000000000000000000000000..3cd6d0860e53f2568374e81ba9b0afc51e7c9a5a
GIT binary patch (literal 227; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/Read rate.png b/ui/src/assets/imgs/Read rate.png
new file mode 100644
index 0000000000000000000000000000000000000000..b50929aee8f6d56c0c8a7be248a8650af1351c90
GIT binary patch (literal 479; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/Single Write Time.png b/ui/src/assets/imgs/Single Write Time.png
new file mode 100644
index 0000000000000000000000000000000000000000..59ef4c4d5755d557443fe44e7db7a39f9b7c48b3
GIT binary patch (literal 399; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/Tps.png b/ui/src/assets/imgs/Tps.png
new file mode 100644
index 0000000000000000000000000000000000000000..df8ec1d7ca2ce04593e0a5ba2019f56b1f330a91
GIT binary patch (literal 664; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/User.png b/ui/src/assets/imgs/User.png
new file mode 100644
index 0000000000000000000000000000000000000000..f5088129ab36f56354dd7da97f3d516c5583b10a
GIT binary patch (literal 474; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/Wait.png b/ui/src/assets/imgs/Wait.png
new file mode 100644
index 0000000000000000000000000000000000000000..11698107e4f7144da325721b731b708cd39d297f
GIT binary patch (literal 622; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/alarm2.png b/ui/src/assets/imgs/alarm2.png
new file mode 100644
index 0000000000000000000000000000000000000000..408483ef3d0184596e8438ace478c83133eb6048
GIT binary patch (literal 499; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/alarm4.png b/ui/src/assets/imgs/alarm4.png
new file mode 100644
index 0000000000000000000000000000000000000000..fe3c9d7acb3279572d2d1e32508f7f208ab76fbc
GIT binary patch (literal 572; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/icon3.png b/ui/src/assets/imgs/icon3.png
new file mode 100644
index 0000000000000000000000000000000000000000..5fa5789dcc4f2c8cc7f0876d166e46e3ce5946c7
GIT binary patch (literal 434; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/icon4.png b/ui/src/assets/imgs/icon4.png
new file mode 100644
index 0000000000000000000000000000000000000000..b9a2aed8c490181993fab0b5d4631be243eaa42f
GIT binary patch (literal 599; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/icon5.png b/ui/src/assets/imgs/icon5.png
new file mode 100644
index 0000000000000000000000000000000000000000..2c38e0ebf6e8b449c67f3f7a56d7e40ebbe8811c
GIT binary patch (literal 552; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/iconok.png b/ui/src/assets/imgs/iconok.png
new file mode 100644
index 0000000000000000000000000000000000000000..b2e417e9fe20bab00c41dc86f489d03729931ef5
GIT binary patch (literal 369; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/iconstop.png b/ui/src/assets/imgs/iconstop.png
new file mode 100644
index 0000000000000000000000000000000000000000..66c9514c8cb8e66a3ffc59e544d57f13ba4cf982
GIT binary patch (literal 382; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/run.png b/ui/src/assets/imgs/run.png
new file mode 100644
index 0000000000000000000000000000000000000000..0d51a07371e3c3a62349dc3a468029192f42bad5
GIT binary patch (literal 1051; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/stop.png b/ui/src/assets/imgs/stop.png
new file mode 100644
index 0000000000000000000000000000000000000000..509a61cc7fbfbb373b27d1d0483c7001782283bd
GIT binary patch (literal 1178; base85 PNG payload omitted)
diff --git a/ui/src/assets/imgs/update.png b/ui/src/assets/imgs/update.png
new file mode 100644
index 0000000000000000000000000000000000000000..2c75b0af64bb3c4a19ecc7f64b99487e7c1d828b
GIT binary patch (literal 276; base85 PNG payload omitted)
diff --git a/ui/src/components/AiToolkit/IndexAdvisor.jsx b/ui/src/components/AiToolkit/IndexAdvisor.jsx
index 91075f1..4b83748 100644
--- a/ui/src/components/AiToolkit/IndexAdvisor.jsx
+++ b/ui/src/components/AiToolkit/IndexAdvisor.jsx
@@ -40,6 +40,7 @@ export default class IndexAdvisor extends Component {
       maxIndexStorage: 100,
       minImprovedRate: '3.0',
       isDetailsVisible:false,
+      isSettingVisible:false,
       selValue: '',
       textareaVal: '',
       options: [],
@@ -85,8 +86,8 @@ export default class IndexAdvisor extends Component {
     const { success, data, msg } = await getListIndexAdvisorInterface(params)
     if (success) {
       let advisorColumObj = {},advisorHeader = ["index","improve_rate","index_size","templates","select","delete","update","insert"],advisorTableHeader = [],
-        redundantColumObj = [],redundantHeader = ["schemaName","tbname","columns","statement","related_indexes"],redundantTableHeader = [],
-        uselessColumObj = [],uselessHeader = ["schemaName","tbname","columns","statement"],uselessTableHeader = [],widthArray = ['34%','12%','12%','12%','5%','5%','5%','15%']
+        redundantColumObj = [],redundantHeader = ["schemaName","tbName","columns","statement","existingIndex"],redundantTableHeader = [],
+        uselessColumObj = [],uselessHeader = ["schemaName","tbName","columns","statement"],uselessTableHeader = [],widthArray = ['34%','12%','12%','12%','5%','5%','5%','15%']
       advisorHeader.forEach((item,Index) => {
         advisorColumObj = {
           title: formatTableTitle(item),
@@ -112,6 +113,13 @@ export default class IndexAdvisor extends Component {
           dataIndex: item,
           key: item,
           ellipsis: true,
+          render: (row, record) => {
+            if(item === 'existingIndex'){
+              return row.toString()
+            } else {
+              return row
+            }
+          }
         }
         redundantTableHeader.push(redundantColumObj)
       })
@@ -373,7 +381,7 @@ export default class IndexAdvisor extends Component {
 # SELECT * FROM t1 WHERE t1.id > 100`} />
-
+
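Note on the render callback added in the @@ -112,6 +113,13 hunk above: the new
existingIndex field arrives as an array, and the callback flattens it with
toString(), presumably so the cell displays as a comma-separated string. A
minimal standalone sketch of the same idea — the sample values and the
Array.isArray guard are illustrative, not part of this patch:

    // Hypothetical cell values for the redundant-index table.
    const existingIndexCell = ['idx_t1_id', 'idx_t1_id_name'];

    // Array.prototype.toString() joins elements with commas, which is
    // the behavior the render callback relies on for existingIndex.
    function renderCell(value) {
      return Array.isArray(value) ? value.toString() : value;
    }

    console.log(renderCell(existingIndexCell)); // "idx_t1_id,idx_t1_id_name"
    console.log(renderCell('statement'));       // "statement"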
@@ -400,4 +408,4 @@ export default class IndexAdvisor extends Component {
     )
   }
-}
\ No newline at end of file
+}
diff --git a/ui/src/components/AiToolkit/RiskAnalysis.jsx b/ui/src/components/AiToolkit/RiskAnalysis.jsx
index d3e79a1..847f55f 100644
--- a/ui/src/components/AiToolkit/RiskAnalysis.jsx
+++ b/ui/src/components/AiToolkit/RiskAnalysis.jsx
@@ -19,7 +19,7 @@ export default class RiskAnalysis extends Component {
       xdata: [],
       optionsSel: [],
       options: [],
-      showType: 0,
+      showType: 2,
       newSelectValue: '',
       newSelValue: '',
       instanceName: '',
@@ -55,9 +55,7 @@ export default class RiskAnalysis extends Component {
       if (success) {
         this.setState(() => ({
           optionsSel: optionArr, instanceName: optionArr[0]
-        }), () => {
-          this.getWorkloadForecast()
-        })
+        }))
       } else {
         message.error(msg)
       }
@@ -67,7 +65,7 @@ export default class RiskAnalysis extends Component {
     let params = {
       instance_name: this.state.instanceName ? this.state.instanceName : null,
       metric_name: this.state.metricName ? this.state.metricName : null,
-      labels: this.state.labels ? this.state.labels : '',
+      labels: this.state.labels ? this.state.labels : null,
       warning_hours: this.state.warningHours ? this.state.warningHours : null,
       upper: this.state.upper ? this.state.upper : null,
       lower: this.state.lower ? this.state.lower : null,
@@ -85,49 +83,54 @@ export default class RiskAnalysis extends Component {
           data[key][i].timestamps.forEach(ele => {
             formatTimeData.push(formatTimestamp(ele))
           });
-          data[key][i].forecast_timestmaps.forEach(ele => {
-            forecastFormatTimeData.push(formatTimestamp(ele))
-          });
-          formatTimeData = formatTimeData.concat(forecastFormatTimeData)
+          if(data[key][i].forecast_timestmaps){
+            data[key][i].forecast_timestmaps.forEach(ele => {
+              forecastFormatTimeData.push(formatTimestamp(ele))
+            });
+            formatTimeData = formatTimeData.concat(forecastFormatTimeData)
+          }
           // Process the Y-axis data
           let colors = ['#5c7bd9', '#91cc75', '#fac858', '#007acc', '#fb542f', '#c586c0', '#1890ff', '#d69439', '#b03a5b', '#eb8f53', '#c5c63e', '#1e1e1e', '#5470c6', '#91cc75', '#fac858', '#007acc', '#fb542f', '#34a853', '#d69439', '#b03a5b', '#eb8f53', '#c5c63e', '#1e1e1e', '#5470c6', '#91cc75', '#fac858', '#007acc', '#fb542f', '34a853', '#d69439', '#b03a5b', '#eb8f53', '#c5c63e']
           data[key].forEach((item, index) => {
-            let ydata = [], nametooltip = '', legendDataFlag = []
+            let ydata = [], nametooltip = '', legendDataFlag = [], timeTotal = '', solidLine = [], dataArray = [], dashedLine = [], forecastSeriesItem = {}
             Object.keys(item.labels).forEach(function (key) {
               nametooltip += `${key}:${item.labels[key] ? item.labels[key] : '-'} `
             })
-            let timeTotal = item.values.length + item.forecast_values.length;
-            let solidLine = item.values.concat(Array(timeTotal - item.values.length).fill(''));
-            let dataArray = [...item.values].fill('',0,item.values.length-1)
-            let dashedLine = dataArray.concat(item.forecast_values)
-            let seriesItem = {
-              data: solidLine,
-              type: 'line',
-              smooth: true,
-              name: nametooltip,
-              symbol: 'none',
-              color: colors[index],
+            if(item.forecast_values){
+              timeTotal = item.values.length + item.forecast_values.length;
+              solidLine = item.values.concat(Array(timeTotal - item.values.length).fill(''));
+              dataArray = [...item.values].fill('',0,item.values.length-1)
+              dashedLine = dataArray.concat(item.forecast_values)
+              forecastSeriesItem = {
+                data: dashedLine,
+                type: 'line',
+                smooth: true,
+                name: nametooltip,
+                symbol: 'none',
+                color: colors[index],
+                lineStyle: {
+                  type: 'dashed'
+                }
+              }
             }
-            let forecastSeriesItem = {
-              data: dashedLine,
+            let seriesItem = {
+              data: item.forecast_values ? solidLine : item.values,
               type: 'line',
               smooth: true,
               name: nametooltip,
               symbol: 'none',
               color: colors[index],
-              lineStyle: {
-                type: 'dashed'
-              }
             }
             legendDataFlag.push(nametooltip)
             ydata.push(seriesItem)
-            ydata.push(forecastSeriesItem)
+            if(item.forecast_values){
+              ydata.push(forecastSeriesItem)
+            }
             let param = {
               xdata:formatTimeData,
               seriesData:ydata,
               legendData:legendDataFlag,
-              warning:(item.abnormal_detail).join("\n"),
-              textAreaRows:item.abnormal_detail.length ? item.abnormal_detail.length : 1
+              warning:item.abnormal_detail,
             }
             instanceAllData.push(param);
           })
@@ -373,13 +376,13 @@ export default class RiskAnalysis extends Component {
             this.handleNumChange(e)} value={this.state.warningHours} style={{ width:60 }}/>
-            this.handleUpperChange(e)} stringMode value={this.state.upper} style={{ width:60 }}/>
+            this.handleUpperChange(e)} value={this.state.upper} style={{ width:60 }}/>
-            this.handleLowerChange(e)} stringMode value={this.state.lower} style={{ width:60 }}/>
+            this.handleLowerChange(e)} value={this.state.lower} style={{ width:60 }}/>
-
+
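Note on the forecast plotting in the @@ -85,49 +83,54 hunk above: history is
drawn as a solid series and the forecast as a dashed series on one shared
x-axis by padding each array with empty strings, which ECharts renders as
gaps. A runnable sketch of just the padding arithmetic — the sample values
are made up; the concat/fill logic mirrors the patch:

    // Made-up history and forecast values for illustration.
    const values = [1, 2, 3];
    const forecastValues = [4, 5];

    const timeTotal = values.length + forecastValues.length;

    // Solid series: real history, then blanks over the forecast window.
    const solidLine = values.concat(Array(timeTotal - values.length).fill(''));

    // Dashed series: blanks over the history except its last point, so the
    // dashed line visually continues from where the solid line ends.
    const dataArray = [...values].fill('', 0, values.length - 1);
    const dashedLine = dataArray.concat(forecastValues);

    console.log(solidLine);  // [1, 2, 3, '', '']
    console.log(dashedLine); // ['', '', 3, 4, 5]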
@@ -388,7 +391,6 @@ export default class RiskAnalysis extends Component {
           this.state.allDataRegular.map((item,index) => {
             return (
               <>
-