- {{ $t('message.addition') }}
+ {{ $t("message.addition") }}
-
+
-
-
-
+
+
+
+
+
+ {{ record.product_name }}
+
+
+
+
+
+
+ {{ $t("message.inside") }}
+ {{ $t("message.outside") }}
+
+
+
+ {{ record.source === 1 ? $t("message.inside") : $t("message.outside") }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
- {{ text }}
- {{ text===1 ? '内部' : '外部' }}
+ {{ text }}
- {{$t('message.save')}}
+ {{ $t("message.save") }}
- {{$t('message.cancel')}}
+ {{ $t("message.cancel") }}
- {{$t('message.edit')}}
- {{ $t('message.alarmHistory') }}
-
- {{$t('message.delete')}}
+ {{ $t("message.edit") }}
+ {{ $t("message.alarmHistory") }}
+
+ {{ $t("message.delete") }}
-
-
-
+
+
-
-
+
+
-
+
- {{ $t('message.inside') }}
- {{ $t('message.outside') }}
+ {{ $t("message.inside") }}
+ {{ $t("message.outside") }}
-
-
+
+
-
-
+
+
-
-
+
+
+
+
+
+
-
@@ -83,18 +113,19 @@
diff --git a/ant-vue/src/views/feishuRobot/index.vue b/ant-vue/src/views/feishuRobot/index.vue
new file mode 100644
index 0000000000000000000000000000000000000000..36ad0f33bad4063e180a61143b069b8c23e66e82
--- /dev/null
+++ b/ant-vue/src/views/feishuRobot/index.vue
@@ -0,0 +1,179 @@
+
+
+
+ {{ $t("message.addition") }}
+
+
+
+
+ {{ $t("message.feishuContent") }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{ text }}
+
+
+
+
+
+
+ {{ $t("message.save") }}
+
+ {{ $t("message.cancel") }}
+
+
+
+ {{ $t("message.edit") }}
+
+ {{ $t("message.delete") }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ant-vue/src/views/http/index.vue b/ant-vue/src/views/http/index.vue
new file mode 100644
index 0000000000000000000000000000000000000000..8b80e99aa43f4110e33e6be3b8dedda4267eb5b8
--- /dev/null
+++ b/ant-vue/src/views/http/index.vue
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
diff --git a/ant-vue/src/views/influxdb2/index.vue b/ant-vue/src/views/influxdb2/index.vue
new file mode 100644
index 0000000000000000000000000000000000000000..d7f36ed96d6e06ecf500d10e026880d3cd22eb72
--- /dev/null
+++ b/ant-vue/src/views/influxdb2/index.vue
@@ -0,0 +1,144 @@
+
+
+
+ {{ $t("message.addition") }}
+
+
+
+
+
+
+
+
+
+ {{ text }}
+
+
+
+
+
+
+ {{ $t("message.save") }}
+
+ {{ $t("message.cancel") }}
+
+
+
+ {{ $t("message.edit") }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ant-vue/src/views/message-list/index.vue b/ant-vue/src/views/message-list/index.vue
index 80a8633bb33ee50071e98aa396fb7a96f027a168..3d3dce04a190d6b9b0171b858243e0a15088259c 100644
--- a/ant-vue/src/views/message-list/index.vue
+++ b/ant-vue/src/views/message-list/index.vue
@@ -1,87 +1,5 @@
-
+
-
-
\ No newline at end of file
+
+
diff --git a/ant-vue/src/views/mongo/index.vue b/ant-vue/src/views/mongo/index.vue
new file mode 100644
index 0000000000000000000000000000000000000000..891317c1bb68ccfe73f85a2b9f61eb840def028e
--- /dev/null
+++ b/ant-vue/src/views/mongo/index.vue
@@ -0,0 +1,153 @@
+
+
+
+ {{ $t("message.addition") }}
+
+
+
+
+
+
+
+
+
+ {{ text }}
+
+
+
+
+
+
+ {{ $t("message.save") }}
+
+ {{ $t("message.cancel") }}
+
+
+
+ {{ $t("message.edit") }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ant-vue/src/views/mqtt-management/index.vue b/ant-vue/src/views/mqtt-management/index.vue
index fbc744c30c4f570d52602dfa14670b69b005f7e9..dcec4d049be49dc87cd616b7f2e4a4b3b24754d3 100644
--- a/ant-vue/src/views/mqtt-management/index.vue
+++ b/ant-vue/src/views/mqtt-management/index.vue
@@ -191,7 +191,7 @@ let rules: Record
= {
subtopic: [{ required: true, message: t('message.pleaseTopic'), trigger: "blur" }],
payload: [{ required: true, message: t('message.pleaseMessage'), trigger: "blur" }],
qos: [{ required: true, message: t('message.pleaseService'), trigger: "change" }],
- topic: [{ required: true, message: t('message.pleaseTopic'), trigger: "blur" }],
+ topic: [{ required: true, message: t('message.pleaseTopic'), trigger: "change" }],
retained: [{ required: true, message: t('message.pleaseChooseMessage'), trigger: "change" }],
};
const routerStore = useRouterNameStore();
@@ -349,6 +349,7 @@ const onCopy = async () => {
var result = {
"Time": Math.floor(Date.now() / 1000),
"DataRows": dataRows,
+ "IdentificationCode": "${scriptId.value}",
"DeviceUid": "${scriptId.value}",
"Nc": nc
};
@@ -364,8 +365,8 @@ const cancel = (key: string) => {
};
const onSignal = (id: string) => {
- routerStore.setRouterName("/signal-configuration");
- jump.routeJump({ path: "/signal-configuration", query: { mqtt_client_id: id } });
+ routerStore.setRouterName("/signal-configuration/index");
+ jump.routeJump({ path: "/signal-configuration/index", query: { mqtt_client_id: id,protocol :"mqtt" } });
};
const save = async (key: string) => {
Object.assign(list.value.filter((item) => key === item.key)[0], editableData[key]);
diff --git a/ant-vue/src/views/mysql/index.vue b/ant-vue/src/views/mysql/index.vue
new file mode 100644
index 0000000000000000000000000000000000000000..75a2038e14419d11f5b5794e2a9b4c92ae97b7aa
--- /dev/null
+++ b/ant-vue/src/views/mysql/index.vue
@@ -0,0 +1,153 @@
+
+
+
+ {{ $t("message.addition") }}
+
+
+
+
+
+
+
+
+
+ {{ text }}
+
+
+
+
+
+
+ {{ $t("message.save") }}
+
+ {{ $t("message.cancel") }}
+
+
+
+ {{ $t("message.edit") }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ant-vue/src/views/product/index.vue b/ant-vue/src/views/product/index.vue
index f8eb0d0d302a33ea508424e8e144fcd345332ff7..b5b7b4960e1f2bd6a88f4f87e388d6ba551cf6b6 100644
--- a/ant-vue/src/views/product/index.vue
+++ b/ant-vue/src/views/product/index.vue
@@ -16,7 +16,7 @@
@@ -90,7 +90,6 @@ import {ProductCreate, ProductDelete, ProductPage, ProductUpdate} from "@/api";
import {useI18n} from "vue-i18n";
import {Rule} from "ant-design-vue/es/form";
import {message} from "ant-design-vue";
-import { cloneDeep } from "lodash-es";
import Upload from '@/components/upload/index.vue'
import Tag from '@/components/Tag/index.vue'
@@ -171,6 +170,7 @@ const columns = ref([
}
]);
const title = ref(t('message.addition'))
+const VITE_APP_API_URL = import.meta.env.VITE_APP_API_URL
let rules: Record = {
name: [{ required: true, message: t('message.pleaseName'), trigger: "blur" }],
diff --git a/ant-vue/src/views/production-plans/index.vue b/ant-vue/src/views/production-plans/index.vue
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..78c511e4f71e0d055783dfa1d1f4c06182a91627 100644
--- a/ant-vue/src/views/production-plans/index.vue
+++ b/ant-vue/src/views/production-plans/index.vue
@@ -0,0 +1,324 @@
+
+
+
+
+
+
+
+
+ {{ $t('message.search') }}
+
+
+ {{ $t('message.addition') }}
+
+
+
+
+ {{formatDate(text)}}
+
+
+ {{formatDate(text)}}
+
+
+
+
+ {{$t('message.save')}}
+
+ {{$t('message.cancel')}}
+
+
+
+ {{$t('message.edit')}}
+
+ {{$t('message.delete')}}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{ $t('message.preparing') }}
+ {{ $t('message.ongoing') }}
+ {{ $t('message.finished') }}
+
+
+
+
+
+
+
+
+
+
({{item.unit}})
+
+
+
-
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/ant-vue/src/views/repair-records/index.vue b/ant-vue/src/views/repair-records/index.vue
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..e90f2d926ab54628d1f049e5acde929790b17576 100644
--- a/ant-vue/src/views/repair-records/index.vue
+++ b/ant-vue/src/views/repair-records/index.vue
@@ -0,0 +1,6 @@
+
+ 维修记录
+
+
+
+
diff --git a/ant-vue/src/views/role/index.vue b/ant-vue/src/views/role/index.vue
index f47936dde1eb2dd4e8de5aaea07bcd24dd38d210..a3d3bb06bdb1538c52813d7703e486dea7eeef54 100644
--- a/ant-vue/src/views/role/index.vue
+++ b/ant-vue/src/views/role/index.vue
@@ -46,7 +46,7 @@
-
+
@@ -172,6 +172,11 @@ const confirm = async (id: string) => {
});
};
+const handleCancel = ()=>{
+ formRef.value?.resetFields();
+}
+
+
const onAddData = async() => {
(formRef.value as HTMLFormElement)
.validate()
diff --git a/ant-vue/src/views/script-alarm-parameters/index.vue b/ant-vue/src/views/script-alarm-parameters/index.vue
index beba2ac96eeb483384b51dfc25b3f46d027ef6db..f970fb48360258aa445bfcd98cc0b4163a8844eb 100644
--- a/ant-vue/src/views/script-alarm-parameters/index.vue
+++ b/ant-vue/src/views/script-alarm-parameters/index.vue
@@ -13,24 +13,24 @@
-
+
-
+
- {{ record.mqtt_client_name }}
+ {{ record.mqtt_client_name }}
{{ record.signal_name }}
{{ text }}
@@ -60,11 +60,19 @@
-
-
+
+
+ mqtt
+
+
+
+
-
+
+
+
+
@@ -87,7 +95,7 @@ import {useI18n} from "vue-i18n";
interface DataItem {
name: string;
- mqtt_client_id: string;
+ device_uid: string;
signal_id: string;
}
const { t,locale } = useI18n();
@@ -110,7 +118,7 @@ let rules: Record = {
trigger: "blur",
},
],
- mqtt_client_id: [{ required: true, message: t('message.pleaseSelectClientID'), trigger: "change" }],
+ device_uid: [{ required: true, message: t('message.pleaseSelectClientID'), trigger: "change" }],
signal_id: [{ required: true, message: t('message.pleaseSignalName'), trigger: "change" }],
};
const title = ref(t('message.addition'));
@@ -125,11 +133,19 @@ const columns = ref([
},
{
title: t('message.clientID'),
- dataIndex: "mqtt_client_id",
+ dataIndex: "device_uid",
render: ({ record }: any) => {
return record.mqtt_client_name;
},
},
+ {
+ title: 'device_uid',
+ dataIndex: "device_uid",
+ },
+ {
+ title: 'identification_code',
+ dataIndex: "identification_code",
+ },
{
title: t('message.signalName'),
dataIndex: "signal_id",
@@ -150,10 +166,12 @@ const editableData: UnwrapRef> = reactive({});
const form = reactive({
id: "",
name: "",
- mqtt_client_id: "",
+ device_uid: "",
+ protocol: "",
signal_delay_waring_id: "",
signal_name: "",
signal_id: "",
+ identification_code: ""
});
const formState = reactive({ name: "" });
const signalName = ref("");
@@ -165,9 +183,9 @@ watch(
},
);
watch(
- () => form.mqtt_client_id,
+ () => form.device_uid,
() => {
- (formRef.value as HTMLFormElement).clearValidate("mqtt_client_id");
+ (formRef.value as HTMLFormElement).clearValidate("device_uid");
},
);
watch(
@@ -188,11 +206,19 @@ watch(locale, () => {
},
{
title: t('message.clientID'),
- dataIndex: "mqtt_client_id",
+ dataIndex: "device_uid",
render: ({ record }) => {
return record.mqtt_client_name;
},
},
+ {
+ title: 'device_uid',
+ dataIndex: "device_uid",
+ },
+ {
+ title: 'identification_code',
+ dataIndex: "identification_code",
+ },
{
title: t('message.signalName'),
dataIndex: "signal_id",
@@ -224,12 +250,16 @@ watch(locale, () => {
trigger: "blur",
},
],
- mqtt_client_id: [{ required: true, message: t('message.pleaseSelectClientID'), trigger: "change" }],
+ device_uid: [{ required: true, message: t('message.pleaseSelectClientID'), trigger: "change" }],
signal_id: [{ required: true, message: t('message.pleaseSignalName'), trigger: "change" }]
}
});
const onAdd = () => {
+ if (!form.signal_delay_waring_id) {
+ message.error(`${t('message.pleaseCreateScriptAlarmRule')}`);
+ return;
+ }
modalVisible.value = true;
title.value = t('message.addition');
};
@@ -246,10 +276,11 @@ const pageList = async () => {
key: index,
ID: item.ID,
name: item.name,
- mqtt_client_id: item.mqtt_client_id,
+ device_uid: item.device_uid,
mqtt_client_name: item.mqtt_client_name,
signal_name: item.signal_name,
signal_id: item.signal_id,
+ identification_code: item.identification_code
}));
};
const edit = (key: string) => {
@@ -266,6 +297,10 @@ const handleTableChange = async (page: any) => {
await pageList();
};
const onAddData = () => {
+ if (!form.signal_delay_waring_id) {
+ message.error(`${t('message.pleaseCreateScriptAlarmRule')}`);
+ return;
+ }
(formRef.value as HTMLFormElement)
.validate()
.then(() => {
@@ -312,7 +347,7 @@ const save = async (key: string) => {
Object.assign(list.value.filter((item) => key === item.key)[0], editableData[key]);
const data = list.value.filter((item) => key === item.key)[0];
delete editableData[key];
- if (!data.mqtt_client_id || !data.signal_name) {
+ if (!data.device_uid || !data.signal_name) {
message.error(t('message.clientSignal'));
return;
}
diff --git a/ant-vue/src/views/script-alarm/index.vue b/ant-vue/src/views/script-alarm/index.vue
index e4157c2f856ea3eff5844704081efce4c68dc143..2b829064cefe1df498973c8eb9a05fc7d0278d86 100644
--- a/ant-vue/src/views/script-alarm/index.vue
+++ b/ant-vue/src/views/script-alarm/index.vue
@@ -327,8 +327,8 @@ const onAddUpdateData = () => {
};
const onGo = (id: string) => {
- routerStore.setRouterName("/script-alarm-parameters");
- jump.routeJump({ path: "/script-alarm-parameters", query: { signal_delay_waring_id: id } });
+ routerStore.setRouterName("/script-alarm-parameters/index");
+ jump.routeJump({ path: "/script-alarm-parameters/index", query: { signal_delay_waring_id: id } });
};
const handleCancel = () => {
modalVisible.value = false;
diff --git a/ant-vue/src/views/signal-configuration/index.vue b/ant-vue/src/views/signal-configuration/index.vue
index 779e88ce151e4869cdadf0687730767c7420ef5f..ed2641cfc158ce52cc72a9a1f738c9d21439e00a 100644
--- a/ant-vue/src/views/signal-configuration/index.vue
+++ b/ant-vue/src/views/signal-configuration/index.vue
@@ -3,6 +3,11 @@
+
+
+ mqtt
+
+
@@ -43,9 +48,9 @@
- {{ $t('message.check') }}
+ {{ $t('message.check') }}
{{$t('message.edit')}}
- {{ $t('message.SignalAlarmConfig') }}
+ {{ $t('message.SignalAlarmConfig') }}
{{ $t('message.historicalData') }}
{{$t('message.delete')}}
@@ -64,6 +69,15 @@
+
+
+
+
+
+
+
+
+
{{ $t('message.text') }}
@@ -134,15 +148,19 @@ let rules: Record = {
alias: [{ required: true, message: t('message.pleaseAlias'), trigger: "blur" }],
cache_size: [{ required: true, message: t('message.pleaseCacheSize'), trigger: "blur" }],
unit: [{ required: true, message: t('message.pleaseUnit'), trigger: "blur" }],
+ protocol: [{ required: true, message: t('message.pleaseAlias'), trigger: "change" }],
+ device_uid: [{ required: true, message: t('message.pleaseAlias'), trigger: "change" }],
+ identification_code: [{ required: true, message: t('message.pleaseAlias'), trigger: "change" }],
};
const jump = useRouteJump();
const formRef = ref(null);
const routerStore = useRouterNameStore();
const route = useRoute();
const value = ref("");
+const protocol = ref(route.query.protocol|| 'mqtt');
const modalVisible = ref(false);
const modalView = ref(false);
-const form = reactive({ mqtt_client_id: Number(route.query.id) || "", name: "", type: "", alias: "", cache_size: 1, unit: "" });
+const form = reactive({ mqtt_client_id: Number(route.query.id) || "", name: "", type: "", alias: "", cache_size: 1, unit: "",protocol:'mqtt',identification_code:String(route.query.mqtt_client_id), device_uid:Number(route.query.mqtt_client_id)});
let columns = [
{
title: t('message.uniCode'),
@@ -156,6 +174,18 @@ let columns = [
title: t('message.type'),
dataIndex: "type",
},
+ {
+ title: 'IdentificationCode',
+ dataIndex:'identification_code'
+ },
+ {
+ title: 'protocol',
+ dataIndex:'protocol'
+ },
+ {
+ title: 'device_uid',
+ dataIndex:'device_uid'
+ },
{
title: t('message.alias'),
dataIndex: "alias",
@@ -278,13 +308,16 @@ const confirm = async (id: string) => {
};
const pageList = async () => {
- const { data } = await SignalPage({ mqtt_client_id: value.value, page: pagination.current, page_size: pagination.pageSize });
+ const { data } = await SignalPage({ device_uid: value.value, protocol : route.query.protocol, page: pagination.current, page_size: pagination.pageSize });
pagination.total = data.data?.total || 0;
list.value = data.data.data?.map((item: any, index: number) => ({
key: index,
ID: item.ID,
mqtt_client_id: item.mqtt_client_id,
mqtt_client_name: item.mqtt_client_name,
+ device_uid: item.device_uid,
+ identification_code: item.identification_code,
+ protocol: item.protocol,
name: item.name,
type: item.type,
alias: item.alias,
@@ -293,8 +326,8 @@ const pageList = async () => {
}));
};
const onSignal = (id: string, mqtt_client_id: string) => {
- routerStore.setRouterName("/signal");
- jump.routeJump({ path: "/signal", query: { id, mqtt_client_id } });
+ routerStore.setRouterName("/signal/index");
+ jump.routeJump({ path: "/signal/index", query: { id, mqtt_client_id } });
};
const handleTableChange = async (page: any) => {
@@ -303,15 +336,17 @@ const handleTableChange = async (page: any) => {
await pageList();
};
-const onView = (id: number, rowId: number, alias: string, unit: string, type: string) => {
+const onView = (rowId: number, alias: string, unit: string, type: string, protocol: string,device_uid:number,identification_code:string) => {
const time = dayjs();
const data = type === "数字" ? QueryInfluxdb : QueryStrInfluxdb;
showSpinning.value = true;
data({
- measurement: String(id),
+ measurement: protocol+'_'+device_uid+'_'+identification_code,
fields: [String(rowId), "storage_time", "push_time"],
start_time: time.subtract(30, "day").unix(),
end_time: time.unix(),
+ device_uid: Number(route.query.mqtt_client_id) || device_uid,
+ protocol:route.query.protocol || protocol,
aggregation: {
every: 1,
function: type === "数字" ? "mean" : "first",
diff --git a/ant-vue/src/views/signal/index.vue b/ant-vue/src/views/signal/index.vue
index e403bdc84eabe4fd18bdf56a07793ebf4713535e..b2239816097100f8a5ecf6ca4b861e5344968ff0 100644
--- a/ant-vue/src/views/signal/index.vue
+++ b/ant-vue/src/views/signal/index.vue
@@ -2,11 +2,16 @@
+
+
+ mqtt
+
+
-
+
-
+
{{ $t('message.search') }}
@@ -84,6 +89,9 @@
+
+
+
@@ -119,7 +127,8 @@ const formRefTime = ref(null);
const modalVisible = ref(false);
const modalTime = ref(false);
const modalHistory = ref(false);
-const form = reactive({ mqtt_client_id: "", signal_id: "", max: "", min: "", in_or_out: 1, checked: true });
+const protocol = ref('mqtt');
+const form = reactive({ device_uid: "", signal_id: "", max: "", min: "", in_or_out: 1, checked: true,identification_code:'',protocol:protocol.value});
let columns = [
{
title: t('message.uniCode'),
@@ -151,6 +160,7 @@ let rules: Record = {
min: [{ required: true, message: t('message.pleaseMinimum'), trigger: "blur" }],
max: [{ required: true, message: t('message.pleaseMaximum'), trigger: "blur" }],
checked: [{ required: true, message: t('message.pleaseAlarm'), trigger: "change" }],
+ identification_code: [{ required: true, message: '请输入identification_code', trigger: "change" }],
date: [{ required: true, message: t('message.pleaseTime'), trigger: "change" }],
};
const showSpinning = ref(false);
@@ -178,7 +188,7 @@ const columnsResult = ref([
},
]);
-watch([() => form.mqtt_client_id, () => form.signal_id], async ([newParam1, newParam2], []) => {
+watch([() => form.device_uid, () => form.signal_id], async ([newParam1, newParam2], []) => {
if (newParam1 && newParam2) {
await pageList();
}
@@ -235,7 +245,7 @@ const onAddData = () => {
(formRef.value as HTMLFormElement)
.validate()
.then(() => {
- if (!form.signal_id || !form.mqtt_client_id) {
+ if (!form.signal_id || !form.device_uid) {
message.error(t('message.clientSignal'));
return;
}
@@ -294,6 +304,7 @@ const save = async (key: string) => {
// eslint-disable-next-line @typescript-eslint/no-dynamic-delete
delete editableData[key];
data.in_or_out = data.in_or_out ? 1 : 0;
+ data.protocol = protocol.value;
await SignalWaringConfigUpdate(data);
await pageList();
};
@@ -314,12 +325,12 @@ const confirm = async (id: string) => {
};
const pageList = async () => {
- const { data } = await SignalWaringConfigPage({ mqtt_client_id: form.mqtt_client_id, signal_id: form.signal_id, page: paginations.current, page_size: paginations.pageSize });
+ const { data } = await SignalWaringConfigPage({ device_uid: form.device_uid, protocol:protocol.value, signal_id: form.signal_id, page: paginations.current, page_size: paginations.pageSize });
paginations.total = data.data?.total || 0;
list.value = data.data.data?.map((item: any, index: number) => ({
key: index,
ID: item.ID,
- mqtt_client_id: item.mqtt_client_id,
+ device_uid: item.device_uid,
signal: item.signal,
signal_id: item.signal_id,
in_or_out: item.in_or_out === 1,
diff --git a/ant-vue/src/views/tcp/index.vue b/ant-vue/src/views/tcp/index.vue
new file mode 100644
index 0000000000000000000000000000000000000000..8b80e99aa43f4110e33e6be3b8dedda4267eb5b8
--- /dev/null
+++ b/ant-vue/src/views/tcp/index.vue
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
diff --git a/ant-vue/src/views/user/index.vue b/ant-vue/src/views/user/index.vue
index 02f444d7e28be405857253da4e17849e62506c0d..17a8be0e44ae104ccbd195468c730cc92311c318 100644
--- a/ant-vue/src/views/user/index.vue
+++ b/ant-vue/src/views/user/index.vue
@@ -13,12 +13,13 @@
-
+
- {{ text }}
+ *****
+ {{ text }}
@@ -33,6 +34,7 @@
{{$t('message.edit')}}
{{$t('message.assigningRoles')}}
+ {{$t('message.assigningDept')}}
{{$t('message.delete')}}
@@ -43,7 +45,7 @@
-
+
@@ -71,16 +73,31 @@
@selectChange="handleSelectChange"
/>
+
+
+
+
+
+
+
diff --git a/deploy/IotAdminVue.Dockerfile b/deploy/IotAdminVue.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..5b10f1b11158cdc878498ab5abf2bffc92e8da21
--- /dev/null
+++ b/deploy/IotAdminVue.Dockerfile
@@ -0,0 +1,20 @@
+FROM node:18.19.0-alpine3.18 as build
+
+
+WORKDIR /app
+COPY ../ant-vue ./ant-vue
+
+RUN cd ant-vue && npm install --registry=https://registry.npmmirror.com && npm run build-docker
+
+
+
+FROM nginx:stable-alpine3.17
+
+
+COPY ../ant-vue/nginx.conf /etc/nginx/nginx.conf
+WORKDIR /app
+COPY --from=build /app/ant-vue/dist /app/iot/project/html
+
+RUN mkdir /var/log/nginx/iot
+
+CMD ["nginx", "-g", "daemon off;"]
diff --git a/deploy/IotGoMQ.Dockerfile b/deploy/IotGoMQ.Dockerfile
index 0f3ea03778af2e8409fec60058a93d23ab0b557e..2c3bc0feb42d4c27d0591e48eaedbe07c2b4336b 100644
--- a/deploy/IotGoMQ.Dockerfile
+++ b/deploy/IotGoMQ.Dockerfile
@@ -9,7 +9,6 @@ WORKDIR /app
COPY ../go-iot-mq ./go-iot-mq
COPY ../notice ./notice
COPY ../transmit ./transmit
-COPY ../go-iot ./go-iot
#
RUN cd go-iot-mq && go mod tidy && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o main .
@@ -32,4 +31,4 @@ ENV GIN_MODE=release \
EXPOSE 8080
#fixme: 配置需要动态调整
-ENTRYPOINT ["/app/main", "-config", "/app/app-local.yml"]
+ENTRYPOINT ["/app/main","-config","/app/app-local.yml"]
diff --git a/deploy/IotGoProject.Dockerfile b/deploy/IotGoProject.Dockerfile
index 6eea5a24912134fa61de5f0b4b24d2b1f9a0f01d..6999c8bf0ff268523c7dc0cbe76678d1e6d583f7 100644
--- a/deploy/IotGoProject.Dockerfile
+++ b/deploy/IotGoProject.Dockerfile
@@ -10,8 +10,7 @@ COPY ../iot-go-project ./iot-go-project
COPY ../notice ./notice
COPY ../transmit ./transmit
-
-RUN cd iot-go-project && go mod tidy && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o main .
+RUN cd iot-go-project && go install github.com/swaggo/swag/cmd/swag@latest && swag init --parseDependency --parseInternal --parseDepth 5 --instanceName "swagger" && go mod tidy && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o main .
RUN chmod +x /app/iot-go-project/main
# 运行阶段指定 scratch 作为基础镜像
diff --git "a/doc/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210.md" "b/doc/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210.md"
new file mode 100644
index 0000000000000000000000000000000000000000..3711c950c25d7f8e4351685d46f3e1fcae39a301
--- /dev/null
+++ "b/doc/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210.md"
@@ -0,0 +1,107 @@
+# MQTT 客户端管理方案
+
+在物联网项目开发中,MQTT 协议是应用层协议,而MQTT客户端在其中承担了数据传输的职责,因此客户端的管理就显得尤为重要。
+
+
+## 限制
+
+1. 一个应用程序能够承载的MQTT客户端数量是有限量的。以Java为例,默认情况下,创建一个MQTT客户端在不做任何操作的情况下需要使用5个线程。
+
+
+
+
+
+> **不考虑系统限制,可以通过如下公式计算,得出最大线程数量**
+> 线程数量=(机器本身可用内存-JVM分配的堆内存)/Xss的值,比如我们的容器本身大小是8G,堆大小是4096M,走-Xss默认值,可以得出 最大线程数量:4096个。
+
+
+2. 每个MQTT客户端存活的时间跨度大,物联网项目中一个MQTT客户端往往会持续运行几年,因此在方案设计阶段需要考虑到未来的扩容问题。
+
+## 方案设计
+
+针对限制内容最重要的设计元素有如下内容:
+
+1. 通过配置文件限制当前应用能够创建的最大MQTT客户端数量。
+2. 能够创建多个应用实例,并且支持负载均衡、故障转移。
+
+
+上述两点中第一点是较为容易实现的,通过配置文件限制当前应用能够创建的最大MQTT客户端数量。最大的难度在于第二点,如何实现多个应用实例之间负载均衡、故障转移。
+
+
+
+### 负载均衡
+
+本负载均衡方案有一个前置条件:认为每个MQTT客户端做的行为相同,不会出现某些MQTT客户端执行内容过重的情况。
+
+在这个前置条件下,负载均衡策略可以设计得极为简单:从所有的应用实例中选择使用数量最小的节点创建MQTT客户端。
+
+> 使用数量=已创建MQTT客户端数量(对应地,剩余可用数量=最大MQTT客户端数量-已创建MQTT客户端数量)
+
+请不要忘了MQTT客户端的执行内容可以相同,但什么时候执行却是不相同的。原因是物理设备在上报数据的时候会存在时间差异。因此一个复杂的负载均衡算法可以通过如下内容进行权重组合。
+
+
+
+$$
+\text{剩余容量比例} = \frac{\text{最大MQTT客户端数量} - \text{当前MQTT客户端数量}}{\text{最大MQTT客户端数量}}
+$$
+
+
+
+$$
+\text{CPU负载比例} = \frac{\text{当前CPU使用率}}{100}
+$$
+
+
+
+
+$$
+ \text{内存负载比例} = \frac{\text{当前内存使用量}}{\text{最大内存容量}}
+$$
+
+$$
+\text{负载分数} = w_1 \times \text{剩余容量比例} + w_2 \times \text{CPU负载比例} + w_3 \times \text{内存负载比例}
+$$
+
+其中: $w_1$,$w_2$和 $w_3$ 分别是剩余容量比例、CPU负载比例和内存负载比例的权重。最终可以根据计算出的负载分数,选择分数最低的应用实例作为新MQTT客户端的创建节点。
+
+
+
+
+
+### 横向扩容
+
+在负载均衡方案中,我们假设了MQTT客户端的行为是相同的,因此当需要扩容的时候,只需要增加应用实例的数量即可。
+
+横向扩容时机:单个应用实例中MQTT客户端使用量占到80%的时候触发扩容。
+
+> 注意: 应用实例中可以采用消息通知机制告知运维人员或者自动化程序创建新的应用实例。
+
+
+
+
+
+注意:
+1. 图中门面可以是Nginx、应用程序等任何可以与MQTT客户端管理实例产生网络通讯的组件。
+2. 选择机制是在MQTT客户端管理实例中的,并不是一个单独的程序。
+
+
+### 故障转移
+
+故障转移是为了在出现意外的时候保证MQTT客户端的正常运行。这里主要假设两种故障的情况:
+1. 使用 EMQX 将MQTT客户端剔除(认为这是一个误操作,只针对在集群内部创建出来的MQTT是误操作)。有关误操作主要使用MQTT客户端的离线通知机制,以Java程序为例,`MqttCallback#connectionLost`方法会在MQTT客户端断开连接的时候被调用,因此可以作为判断是否是误操作的依据。
+
+2. 应用实例意外死亡,导致该应用实例中的MQTT客户端全部被销毁。针对此项问题可以进行的操作有如下几种实现方案:
+ 1. 通过外部程序对应用实例进行监控,如果发现应用实例死亡则触发故障转移机制。注意:此方案需要额外维护一个监控程序,并且需要保证监控程序的正常运行。
+ 2. 使用类似Redis的工具实现过期监听,当应用实例中MQTT客户端过期后,触发故障转移机制。注意:每个应用实例需要有一个额外的定时任务周期性的写入心跳数据到Redis中,会增加一个线程消耗。
+
+
+
+
+
+
+
+
+
+
+
+
diff --git "a/doc/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210/MQTT\345\256\242\346\210\267\347\253\257\344\277\235\346\264\273.drawio.png" "b/doc/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210/MQTT\345\256\242\346\210\267\347\253\257\344\277\235\346\264\273.drawio.png"
new file mode 100644
index 0000000000000000000000000000000000000000..ab396064b2204455459591c871904446763d7cf9
Binary files /dev/null and "b/doc/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210/MQTT\345\256\242\346\210\267\347\253\257\344\277\235\346\264\273.drawio.png" differ
diff --git "a/doc/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210.drawio.png" "b/doc/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210.drawio.png"
new file mode 100644
index 0000000000000000000000000000000000000000..68b48ac197534ce2ef2504aabfbc7f1c3050f118
Binary files /dev/null and "b/doc/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210.drawio.png" differ
diff --git "a/doc/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210/image-20240423094241762.png" "b/doc/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210/image-20240423094241762.png"
new file mode 100644
index 0000000000000000000000000000000000000000..8a98f74233f6c8781f872485f9aaa5803b65c3a1
Binary files /dev/null and "b/doc/MQTT\345\256\242\346\210\267\347\253\257\347\256\241\347\220\206\346\226\271\346\241\210/image-20240423094241762.png" differ
diff --git a/doc/readme.md b/doc/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..42eaa839a76cef41ace29dfe0e7556ee9eef9e1d
--- /dev/null
+++ b/doc/readme.md
@@ -0,0 +1,6 @@
+# Go IoT 开发平台文档库
+
+1. [MQTT客户端管理方案](MQTT客户端管理方案.md)
+1. [数据流转链路](数据流转链路.md)
+1. [报警](报警.md)
+1. [部署](部署.md)
\ No newline at end of file
diff --git "a/doc/\346\212\245\350\255\246.md" "b/doc/\346\212\245\350\255\246.md"
new file mode 100644
index 0000000000000000000000000000000000000000..f0ba7a74fd68455d1bf6be013a7d93756659b084
--- /dev/null
+++ "b/doc/\346\212\245\350\255\246.md"
@@ -0,0 +1,81 @@
+# 数据报警设计
+
+
+
+## 场景概括
+
+
+
+
+数据报警在物联网项目中是一个十分重要的应用场景。在实际业务开发过程中我们可能遇到的报警形式存在如下可能:
+
+1. 判断当前信号是否在一个特定的范围内,可以是在范围内报警或者是不在范围内报警。
+
+> **案例**:
+>
+> **温度监控报警**:在工业环境中,设备可能需要在特定的温度范围内运行以保证效率和安全。如果设备的温度超过了这个范围,系统会触发一个高温报警,提醒操作人员检查设备或采取冷却措施。
+>
+> - **在范围内报警**:设备温度在40°C到60°C之间是正常的,如果设备温度持续在这一范围内,系统可能不会发出报警。
+> - **不在范围内报警**:如果设备温度超过60°C,系统会发出高温报警;如果低于40°C,可能会发出低温报警。
+
+
+
+
+
+
+2. 趋势性比较,当前数据与前一个数据进行比较,是否在特定范围内(百分比范围)。在这个模式下也可能是多个历史数据比较而非只是前一个。
+
+> **案例**:
+>
+> **设备性能下降报警**:在生产过程中,设备的性能可能会随时间逐渐下降。通过比较当前的性能指标与前一个周期的性能指标,可以设置一个性能下降的阈值。
+>
+> - 如果设备的性能指标(如产量、速度等)在连续几个周期内下降超过设定的百分比(例如5%),则系统会发出性能下降报警。
+
+
+3. 多设备联动报警,在这种模式下参与报警计算的会存在多个设备,这些设备会在现实场景中存在一定的关联性。当其中一个指标满足报警条件时,会继续观测其他设备指标,前者是后者的参考,如果满足条件则报警。
+
+
+
+> **案例:**
+>
+> **环境监测设备联动报警**:在环境监测中,不同的监测设备可能监测同一环境的不同参数。
+>
+> - 例如,如果某个区域的空气质量监测站检测到污染物浓度突然上升,系统会检查该区域的气象站数据,看是否有不利于污染物扩散的气象条件。如果气象条件确实不利于扩散,系统会发出联动报警,提示可能存在严重的空气质量问题。
+
+
+
+## 工程设计
+
+面对前文提到的场景我们对整个报警系统进行了抽象设计。分为两大类报警模式:
+1. 数值报警
+2. 脚本报警
+
+### 数值报警
+
+数值报警是数据报警的典型场景,用户仅需要创建信号、阈值区间、报警规则(区间内报警、区间外报警)。
+
+程序侧实现逻辑: 设备数据被解析后在信号配置表中找到对应的数据进行数值比较,如果满足阈值条件则触发报警。
+
+
+
+
+
+
+
+
+
+
+
+### 脚本报警
+
+脚本报警是数据报警的另一种场景,用户需要创建信号、缓存大小(存储多少条信号历史数据)、报警脚本、脚本参数。
+
+程序执行链路如下:
+1. 设备数据在上报完成后进行数据缓存,此时缓存的数据是具备容量限制的(容量限制:缓存大小),这里缓存的数据会被后续的报警脚本使用。
+2. 在完成缓存数据的存储后会触发报警脚本的执行,报警脚本是一个用JavaScript编写的程序,返回boolean数据。
+
+
+
+
+
+
diff --git "a/doc/\346\212\245\350\255\246/\346\225\260\345\200\274\346\212\245\350\255\246.drawio.png" "b/doc/\346\212\245\350\255\246/\346\225\260\345\200\274\346\212\245\350\255\246.drawio.png"
new file mode 100644
index 0000000000000000000000000000000000000000..2f64243b92f5cfc8f35dc1e62c466d6d70dfcd9e
Binary files /dev/null and "b/doc/\346\212\245\350\255\246/\346\225\260\345\200\274\346\212\245\350\255\246.drawio.png" differ
diff --git "a/doc/\346\212\245\350\255\246/\350\204\232\346\234\254\346\212\245\350\255\246.drawio.png" "b/doc/\346\212\245\350\255\246/\350\204\232\346\234\254\346\212\245\350\255\246.drawio.png"
new file mode 100644
index 0000000000000000000000000000000000000000..bee2170acbc4b554ec8d5ee69a0089883ab0e69d
Binary files /dev/null and "b/doc/\346\212\245\350\255\246/\350\204\232\346\234\254\346\212\245\350\255\246.drawio.png" differ
diff --git "a/doc/\346\225\260\346\215\256\346\265\201\350\275\254\351\223\276\350\267\257.md" "b/doc/\346\225\260\346\215\256\346\265\201\350\275\254\351\223\276\350\267\257.md"
new file mode 100644
index 0000000000000000000000000000000000000000..0b1e26a716bdb0470331536b5eba62361cefb9e8
--- /dev/null
+++ "b/doc/\346\225\260\346\215\256\346\265\201\350\275\254\351\223\276\350\267\257.md"
@@ -0,0 +1,88 @@
+# 数据流转链路
+
+
+## 核心数据结构
+
+物联网项目的现场安装方式:
+
+1. 物理设备可以直接进行网络通讯,将数据进行上报
+2. 物理设备需要通过网关设备才能够完成数据上报
+
+针对这两种安装模式,提出本系统内部的核心数据结构。
+
+```go
+type DataRowList struct {
+ Time int64 `json:"time"` // 秒级时间戳
+ DeviceUid string `json:"device_uid"` // 能够产生网络通讯的唯一编码
+ IdentificationCode string `json:"identification_code"` // 设备标识码
+ DataRows []DataRow `json:"data"`
+ Nc string `json:"nc"`
+}
+type DataRow struct {
+ Name string `json:"name"`
+ Value string `json:"value"`
+}
+
+
+```
+
+第一种安装方式下 `DeviceUid` 和 `IdentificationCode` 相同,第二种安装方式下则不相同。
+
+
+> 注意: 这个数据结构并不是设备上报的数据结构,而是内部流转的数据结构。设备上报后需要通过解析脚本解析为本数据结构。
+
+
+
+
+
+下面以MQTT客户端上报数据为例,`DeviceUid` 这个字段是本系统中MQTT客户端的唯一标识。这个标识在创建MQTT客户端的时候就已经确定,因此用户只需要拷贝相关唯一标识即可。另外如果不采用MQTT
+的通配符订阅模式,`IdentificationCode` 需要和 `DeviceUid` 设置为相同的。其他协议处理模式相同。
+
+
+
+
+
+## 核心数据流转链路
+
+下图为设备上报数据到处理数据的整个完整过程。
+
+1. 设备通过COAP、MQTT、TCP/IP、WebSocket和HTTP发送数据到指定的服务器
+2. 服务器在收到数据后会根据不同的上报协议转发到不同的消息队列中。
+
+| 上报协议 | 消息队列 |
+| --------- | ---------------- |
+| COAP | pre_coap_handler |
+| MQTT | pre_handler |
+| TCP/IP | pre_tcp_handler |
+| WebSocket | pre_ws_handler |
+| HTTP | pre_http_handler |
+
+
+
+3. 消息队列会找到对应的解析脚本将原始报文转换为 `DataRowList` 数据。`pre_xxx_handler` 中会完成数据存储
+4. 经过 `pre_xxx_handler` 的队列后将数据放入到 `waring_handler` 、`waring_delay_handler` 和 `transmit_handler` 消息队列完成后续的数据处理。其中 `waring_handler` 判断为报警的数据会放入通知队列 `waring_notice`
+
+
+
+
+
+
+
+
+
+
+
+## 数据存储
+
+本项目采用Influxdb2作为数据存储工具。整体设计方案如下。
+
+1. `bucket` 为配置文件内容
+2. `measurement` 计算规则: `协议_${DeviceUid}_${IdentificationCode}`
+3. `field`详细内容如下
+
+| 数据字段 | 数据值 |
+| ------------------------- | -------------------------------------------------------- |
+| storage_time | 系统存储时间,此数据为系统生成,写入influxdb之前的时间。 |
+| push_time | 数据上报时间,此数据从`${DataRowList.Time}`中获取 |
+| DataRowList.DataRows.Name | DataRowList.DataRows.Value |
+
diff --git "a/doc/\346\225\260\346\215\256\346\265\201\350\275\254\351\223\276\350\267\257/\350\256\276\345\244\207\346\225\260\346\215\256\346\265\201\350\275\254\351\223\276\350\267\257.drawio.png" "b/doc/\346\225\260\346\215\256\346\265\201\350\275\254\351\223\276\350\267\257/\350\256\276\345\244\207\346\225\260\346\215\256\346\265\201\350\275\254\351\223\276\350\267\257.drawio.png"
new file mode 100644
index 0000000000000000000000000000000000000000..f2fad07e2765ea853bfa88381cd1271e93f70aa8
Binary files /dev/null and "b/doc/\346\225\260\346\215\256\346\265\201\350\275\254\351\223\276\350\267\257/\350\256\276\345\244\207\346\225\260\346\215\256\346\265\201\350\275\254\351\223\276\350\267\257.drawio.png" differ
diff --git "a/doc/\351\203\250\347\275\262.md" "b/doc/\351\203\250\347\275\262.md"
new file mode 100644
index 0000000000000000000000000000000000000000..1c63a294ea2796a33fcfac99b272a11d77062341
--- /dev/null
+++ "b/doc/\351\203\250\347\275\262.md"
@@ -0,0 +1,58 @@
+# 部署文档
+
+
+
+## 前置条件
+- [x] 安装 Docker
+- [x] 安装 Docker Compose
+- [x] 下载本项目
+
+## 环境搭建步骤
+
+```shell
+cd $project_path/docker
+sh env-start.sh
+```
+
+## 项目部署步骤
+
+```shell
+cd $project_path/docker
+sh app-start.sh
+```
+
+部署完成后访问: http://localhost:80/ 即可看到项目
+
+## 端口使用情况
+
+### 环境相关端口
+| 服务名称 | 容器端口 | 宿主机端口 | 备注 |
+|----------------|-----------|-----------|--------------|
+| influxdb | 8086 | 8086 | InfluxDB 数据库 |
+| mongodb | 27017 | 27017 | MongoDB 数据库 |
+| mongo-express | 8081 | 8181 | MongoDB 管理界面 |
+| emqx1 | 1883 | 1883 | EMQ X MQTT Broker |
+| emqx1 | 8083 | 8083 | EMQ X Dashboard |
+| emqx1 | 8084 | 8084 | EMQ X WebSocket |
+| emqx1 | 8883 | 8883 | EMQ X MQTTS (Secure) |
+| emqx1 | 18083 | 18083 | EMQ X Dashboard Secure |
+| mysql | 3306 | 3306 | MySQL 数据库 |
+| rabbitmq | 5672 | 5672 | RabbitMQ AMQP |
+| rabbitmq | 15672 | 15672 | RabbitMQ Management Console |
+| redis | 6379 | 6379 | Redis 数据库 |
+
+
+### 应用相关端口
+
+
+| 服务名称 | 容器端口 | 宿主机端口 | 备注 |
+|----------------------|-----------|-----------|--------------------|
+| iotgomqtt1 | 8006 | 8006 | Go Iot MQTT Service |
+| iotgomqtt2 | 8007 | 8007 | Go Iot MQTT Service |
+| iotgomqtt3 | 8008 | 8008 | Go Iot MQTT Service |
+| iotgomq-pre_handler | 29002 | 8001 | Go Iot MQ Pre Handler |
+| iotgomq-calc_handler | 29001 | 8002 | Go Iot MQ Calc Handler |
+| iotgomq-waring_handler | 29003 | 8003 | Go Iot MQ Waring Handler |
+| iotgomq-wd_handler | 29004 | 8004 | Go Iot MQ WD Handler |
+| iotgoproject | 8080 | 8005 | Go Iot Project |
+| iot-admin-vue | 80 | 8080 | Go Iot Admin Vue |
\ No newline at end of file
diff --git a/docker/app-start.sh b/docker/app-start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..36048d931f8c7af4f2a39799b02bfbc5b0ccd4f4
--- /dev/null
+++ b/docker/app-start.sh
@@ -0,0 +1,5 @@
+#!/bin/zsh
+docker-compose -f ./app/docker-compose.yml down
+docker rmi go-iot-project:latest go-iot-admin-vue:latest go-iot-mq:latest go-iot-mqtt:latest
+docker-compose -f ./app/docker-compose.yml up -d
+echo "项目已启动"
diff --git a/docker/app/docker-compose.yml b/docker/app/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..20d222992641fb37df4e6ad80eea095d4d15fc1d
--- /dev/null
+++ b/docker/app/docker-compose.yml
@@ -0,0 +1,120 @@
+services:
+ iotgomqtt1:
+ build:
+ context: ../../
+ dockerfile: deploy/IotMQTT.Dockerfile
+ image: go-iot-mqtt:latest
+ entrypoint: ["/app/main", "-config", "/app/app-local.yml"]
+ environment:
+ - TZ=Asia/Shanghai
+ volumes:
+ - ./mqtt/config/app-local.yml:/app/app-local.yml
+ ports:
+ - 8006:8006
+ networks:
+ - iot-net
+ iotgomqtt2:
+ image: go-iot-mqtt:latest
+ ports:
+ - 8007:8007
+ entrypoint: ["/app/main", "-config", "/app/app-local2.yml"]
+ environment:
+ - TZ=Asia/Shanghai
+ volumes:
+ - ./mqtt/config/app-local2.yml:/app/app-local2.yml
+ networks:
+ - iot-net
+ iotgomqtt3:
+ image: go-iot-mqtt:latest
+ ports:
+ - 8008:8008
+ entrypoint: ["/app/main", "-config", "/app/app-local3.yml"]
+ environment:
+ - TZ=Asia/Shanghai
+ volumes:
+ - ./mqtt/config/app-local3.yml:/app/app-local3.yml
+ networks:
+ - iot-net
+ iotgomq-pre_handler:
+ build:
+ context: ../../
+ dockerfile: deploy/IotGoMQ.Dockerfile
+ image: go-iot-mq:latest
+ ports:
+ - 8001:29002
+ volumes:
+ - ./mq/config/app-local-pre_handler.yml:/app/app-local-pre_handler.yml
+ entrypoint: ["/app/main", "-config", "/app/app-local-pre_handler.yml"]
+ environment:
+ - TZ=Asia/Shanghai
+ depends_on:
+ - iotgomqtt1
+ networks:
+ - iot-net
+ iotgomq-calc_handler:
+ image: go-iot-mq:latest
+ ports:
+ - 8002:29001
+ volumes:
+ - ./mq/config/app-local-calc.yml:/app/app-local-calc.yml
+ entrypoint: ["/app/main", "-config", "/app/app-local-calc.yml"]
+ environment:
+ - TZ=Asia/Shanghai
+ depends_on:
+ - iotgomq-pre_handler
+ networks:
+ - iot-net
+ iotgomq-waring_handler:
+ image: go-iot-mq:latest
+ ports:
+ - 8003:29003
+ volumes:
+ - ./mq/config/app-local-waring_handler.yml:/app/app-local-waring_handler.yml
+ entrypoint: ["/app/main", "-config", "/app/app-local-waring_handler.yml"]
+ environment:
+ - TZ=Asia/Shanghai
+ depends_on:
+ - iotgomq-calc_handler
+ networks:
+ - iot-net
+ iotgomq-wd_handler:
+ image: go-iot-mq:latest
+ ports:
+ - 8004:29004
+ volumes:
+ - ./mq/config/app-local-wd.yml:/app/app-local-wd.yml
+ entrypoint: ["/app/main", "-config", "/app/app-local-wd.yml"]
+ environment:
+ - TZ=Asia/Shanghai
+ depends_on:
+ - iotgomq-calc_handler
+ networks:
+ - iot-net
+ iotgoproject:
+ build:
+ context: ../../
+ dockerfile: deploy/IotGoProject.Dockerfile
+ image: go-iot-project:latest
+ volumes:
+ - ./iot-project/config/app-local.yml:/app/app-local.yml
+ - ./iot-project/fileupdate:/app/fileupdate
+ environment:
+ - TZ=Asia/Shanghai
+ ports:
+ - 8005:8080
+ networks:
+ - iot-net
+ iot-admin-vue:
+ build:
+ context: ../../
+ dockerfile: deploy/IotAdminVue.Dockerfile
+ image: go-iot-admin-vue:latest
+ environment:
+ - TZ=Asia/Shanghai
+ ports:
+ - 8080:80
+ networks:
+ - iot-net
+networks:
+ iot-net:
+ driver: bridge
diff --git a/docker/app/iot-project/config/app-local.yml b/docker/app/iot-project/config/app-local.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7b396e4b6deb0f019a2b6805f3ec56e8437e4cf1
--- /dev/null
+++ b/docker/app/iot-project/config/app-local.yml
@@ -0,0 +1,35 @@
+node_info:
+ port: 8080
+redis_config:
+ host: 172.17.0.1
+ port: 6379
+ db: 10
+ password: eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81
+
+mq_config:
+ host: 172.17.0.1
+ port: 5672
+ username: guest
+ password: guest
+influx_config:
+ host: 172.17.0.1
+ port: 8086
+ token: mytoken
+ org: myorg
+ bucket: mybucket
+
+mysql_config:
+ username: app
+ password: iot123456
+ host: 172.17.0.1
+ port: 3306
+ dbname: iot
+mongo_config:
+ host: 172.17.0.1
+ port: 27017
+ username: admin
+ password: admin
+ db: iot
+ collection: calc
+ waring_collection: waring
+ script_waring_collection: script_waring
diff --git a/docker/app/mq/config/app-local-calc.yml b/docker/app/mq/config/app-local-calc.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2e895eebed3669fbde0774a7639e30f78941c1e8
--- /dev/null
+++ b/docker/app/mq/config/app-local-calc.yml
@@ -0,0 +1,35 @@
+node_info:
+ host: 172.17.0.1
+ port: 29001
+ name: mq1
+ type: calc_queue # pre_handler、 waring_handler、 calc_queue、waring_delay_handler
+
+
+redis_config:
+ host: 172.17.0.1
+ port: 6379
+ db: 10
+ password: eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81
+
+
+
+mq_config:
+ host: 172.17.0.1
+ port: 5672
+ username: guest
+ password: guest
+influx_config:
+ host: 172.17.0.1
+ port: 8086
+ token: mytoken
+ org: myorg
+ bucket: mybucket
+mongo_config:
+ host: 172.17.0.1
+ port: 27017
+ username: admin
+ password: admin
+ db: iot
+ collection: calc
+ waring_collection: waring
+ script_waring_collection: script_waring
diff --git a/docker/app/mq/config/app-local-pre_handler.yml b/docker/app/mq/config/app-local-pre_handler.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9ee3bdc4f180329cb8a02449dc1718abb61523ca
--- /dev/null
+++ b/docker/app/mq/config/app-local-pre_handler.yml
@@ -0,0 +1,34 @@
+node_info:
+ host: 172.17.0.1
+ port: 29002
+ name: mq1
+ type: pre_handler # pre_handler、 waring_handler、 calc_queue、waring_delay_handler
+
+
+redis_config:
+ host: 172.17.0.1
+ port: 6379
+ db: 10
+ password: eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81
+
+
+mq_config:
+ host: 172.17.0.1
+ port: 5672
+ username: guest
+ password: guest
+influx_config:
+ host: 172.17.0.1
+ port: 8086
+ token: mytoken
+ org: myorg
+ bucket: mybucket
+mongo_config:
+ host: 172.17.0.1
+ port: 27017
+ username: admin
+ password: admin
+ db: iot
+ collection: calc
+ waring_collection: waring
+ script_waring_collection: script_waring
diff --git a/docker/app/mq/config/app-local-waring_handler.yml b/docker/app/mq/config/app-local-waring_handler.yml
new file mode 100644
index 0000000000000000000000000000000000000000..dddf354e0f5f6fc489a071ad955de0aad63636cd
--- /dev/null
+++ b/docker/app/mq/config/app-local-waring_handler.yml
@@ -0,0 +1,34 @@
+node_info:
+ host: 172.17.0.1
+ port: 29003
+ name: mq1
+ type: waring_handler # pre_handler、 waring_handler、 calc_queue、waring_delay_handler
+
+
+redis_config:
+ host: 172.17.0.1
+ port: 6379
+ db: 10
+ password: eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81
+
+
+mq_config:
+ host: 172.17.0.1
+ port: 5672
+ username: guest
+ password: guest
+influx_config:
+ host: 172.17.0.1
+ port: 8086
+ token: mytoken
+ org: myorg
+ bucket: mybucket
+mongo_config:
+ host: 172.17.0.1
+ port: 27017
+ username: admin
+ password: admin
+ db: iot
+ collection: calc
+ waring_collection: waring
+ script_waring_collection: script_waring
diff --git a/docker/app/mq/config/app-local-wd.yml b/docker/app/mq/config/app-local-wd.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c1e44417b92fe31b34e6065cb2fb30bc7efb5ead
--- /dev/null
+++ b/docker/app/mq/config/app-local-wd.yml
@@ -0,0 +1,34 @@
+node_info:
+ host: 172.17.0.1
+ port: 29004
+ name: mq1
+ type: waring_delay_handler # pre_handler、 waring_handler、 calc_queue、waring_delay_handler
+
+
+redis_config:
+ host: 172.17.0.1
+ port: 6379
+ db: 10
+ password: eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81
+
+
+mq_config:
+ host: 172.17.0.1
+ port: 5672
+ username: guest
+ password: guest
+influx_config:
+ host: 172.17.0.1
+ port: 8086
+ token: mytoken
+ org: myorg
+ bucket: mybucket
+mongo_config:
+ host: 172.17.0.1
+ port: 27017
+ username: admin
+ password: admin
+ db: iot
+ collection: calc
+ waring_collection: waring
+ script_waring_collection: script_waring
diff --git a/docker/app/mqtt/config/app-local.yml b/docker/app/mqtt/config/app-local.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2351dcd076092f0678e06729fca73fdcee52b5f7
--- /dev/null
+++ b/docker/app/mqtt/config/app-local.yml
@@ -0,0 +1,19 @@
+node_info:
+ host: 172.17.0.1
+ port: 8006
+ name: m1
+ type: mqtt
+ size: 3
+redis_config:
+ host: 172.17.0.1
+ port: 6379
+ db: 10
+ password: eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81
+
+
+
+mq_config:
+ host: 172.17.0.1
+ port: 5672
+ username: guest
+ password: guest
diff --git a/docker/app/mqtt/config/app-local2.yml b/docker/app/mqtt/config/app-local2.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6d7c78b31b3b6cba0e598b11d5bc68f280cdfad8
--- /dev/null
+++ b/docker/app/mqtt/config/app-local2.yml
@@ -0,0 +1,19 @@
+node_info:
+ host: 172.17.0.1
+ port: 8007
+ name: m2
+ type: mqtt
+ size: 3
+redis_config:
+ host: 172.17.0.1
+ port: 6379
+ db: 10
+ password: eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81
+
+
+
+mq_config:
+ host: 172.17.0.1
+ port: 5672
+ username: guest
+ password: guest
diff --git a/docker/app/mqtt/config/app-local3.yml b/docker/app/mqtt/config/app-local3.yml
new file mode 100644
index 0000000000000000000000000000000000000000..04b27def3a5ce8684af386836b6b44779b31326d
--- /dev/null
+++ b/docker/app/mqtt/config/app-local3.yml
@@ -0,0 +1,19 @@
+node_info:
+ host: 172.17.0.1
+ port: 8008
+ name: m3
+ type: mqtt
+ size: 3
+redis_config:
+ host: 172.17.0.1
+ port: 6379
+ db: 10
+ password: eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81
+
+
+
+mq_config:
+ host: 172.17.0.1
+ port: 5672
+ username: guest
+ password: guest
diff --git a/docker/env-start.sh b/docker/env-start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f8c263450326ff90a14464c83da70f65dc3c82aa
--- /dev/null
+++ b/docker/env-start.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+docker-compose -f ./env/base-env-docker-compose.yml up -d
+echo "环境后台准备中...,请稍后"
diff --git a/docker/Cassandra/docker-compose.yml b/docker/env/Cassandra/docker-compose.yml
similarity index 100%
rename from docker/Cassandra/docker-compose.yml
rename to docker/env/Cassandra/docker-compose.yml
diff --git a/docker/Pulsar/docker-compose.yml b/docker/env/Pulsar/docker-compose.yml
similarity index 100%
rename from docker/Pulsar/docker-compose.yml
rename to docker/env/Pulsar/docker-compose.yml
diff --git a/docker/env/base-env-docker-compose.yml b/docker/env/base-env-docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..079992dfc3e327ed956b994211ea6340591df006
--- /dev/null
+++ b/docker/env/base-env-docker-compose.yml
@@ -0,0 +1,134 @@
+version: '3'
+
+services:
+ influxdb:
+ image: influxdb:2.6-alpine
+ env_file:
+ - ./influx/influxv2.env
+ volumes:
+ # Mount for influxdb data directory and configuration
+ - influxdbv2:/var/lib/influxdb2:rw
+ environment:
+ - TZ=Asia/Shanghai
+ ports:
+ - "8086:8086"
+ networks:
+ - iot-net
+ telegraf:
+ image: telegraf:1.25
+ depends_on:
+ - influxdb
+ volumes:
+ # Mount for telegraf config
+ - ./influx/telegraf/mytelegraf.conf:/etc/telegraf/telegraf.conf:ro
+ environment:
+ - TZ=Asia/Shanghai
+ env_file:
+ - ./influx/influxv2.env
+ networks:
+ - iot-net
+ mongodb:
+ image: mongo
+ container_name: mongodb
+ ports:
+ - 27017:27017
+ volumes:
+ - ./mongo/database:/data/db
+ - ./mongo/init-mongo.js:/docker-entrypoint-initdb.d/init-mongo.js
+ environment:
+ - MONGO_INITDB_ROOT_USERNAME=admin
+ - MONGO_INITDB_ROOT_PASSWORD=admin
+ - TZ=Asia/Shanghai
+ networks:
+ - iot-net
+ mongo-express:
+ image: mongo-express
+ container_name: mongo-express
+ restart: always
+ ports:
+ - 8181:8081
+ environment:
+ - ME_CONFIG_MONGODB_ADMINUSERNAME=admin
+ - ME_CONFIG_MONGODB_ADMINPASSWORD=admin
+ - ME_CONFIG_MONGODB_SERVER=mongodb
+ - TZ=Asia/Shanghai
+ networks:
+ - iot-net
+ emqx1:
+ image: emqx:5.4.1
+ container_name: emqx1
+ environment:
+ - "EMQX_NODE_NAME=emqx@node1.emqx.io"
+ - "EMQX_CLUSTER__DISCOVERY_STRATEGY=static"
+ - "EMQX_CLUSTER__STATIC__SEEDS=[emqx@node1.emqx.io]"
+ - TZ=Asia/Shanghai
+ healthcheck:
+      test: [ "CMD", "/opt/emqx/bin/emqx", "ctl", "status" ]
+ interval: 5s
+ timeout: 25s
+ retries: 5
+ networks:
+ iot-net:
+ aliases:
+ - node1.emqx.io
+ ports:
+ - 1883:1883
+ - 8083:8083
+ - 8084:8084
+ - 8883:8883
+ - 18083:18083
+ mysql:
+ image: mysql:8.0
+ container_name: mysql8
+ restart: always
+ environment:
+ MYSQL_ROOT_PASSWORD: root123
+ MYSQL_DATABASE: iot
+ MYSQL_USER: app
+ MYSQL_PASSWORD: iot123456
+ TZ: "Asia/Shanghai"
+ volumes:
+ - ./mysql/data:/var/lib/mysql
+ ports:
+ - "3306:3306"
+ command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --binlog-format=ROW
+ networks:
+ - iot-net
+ rabbitmq:
+ image: rabbitmq:3.13.3-management
+ container_name: 'rabbitmq'
+ ports:
+ - 5672:5672
+ - 15672:15672
+ volumes:
+ - ./rabbitmq/data/:/var/lib/rabbitmq/
+ - ./rabbitmq/log/:/var/log/rabbitmq
+ - ./rabbitmq/plugins:/usr/lib/rabbitmq/plugins
+ - ./rabbitmq/enabled_plugins:/etc/rabbitmq/enabled_plugins:rw
+ environment:
+ - RABBITMQ_PLUGINS_DIR=/opt/rabbitmq/plugins:/usr/lib/rabbitmq/plugins
+ - TZ=Asia/Shanghai
+ networks:
+ - iot-net
+
+ redis:
+ image: redis:6.2-alpine
+ restart: always
+ ports:
+ - '6379:6379'
+ command: redis-server --save 20 1 --loglevel warning --requirepass eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81
+ environment:
+ - TZ=Asia/Shanghai
+ volumes:
+ - ./redis/data:/data
+ - ./redis/conf:/usr/local/etc/redis
+ - ./redis/log:/var/log/redis
+ networks:
+ - iot-net
+volumes:
+ influxdbv2:
+
+networks:
+ iot-net:
+ driver: bridge
+
diff --git a/docker/env/big-data-env-docker-compose.yml b/docker/env/big-data-env-docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8b3f94df3eea883792c5c5a6113aa3a5d997cb41
--- /dev/null
+++ b/docker/env/big-data-env-docker-compose.yml
@@ -0,0 +1,228 @@
+version: '3'
+services:
+ cassandra:
+ image: cassandra:latest
+ container_name: cassandra-container
+ ports:
+ - "9042:9042"
+ environment:
+ - CASSANDRA_USER=admin
+ - CASSANDRA_PASSWORD=admin
+ volumes:
+ - ./dataa:/var/lib/cassandra
+ clickhouse:
+ image: yandex/clickhouse-server:22.1.3.7
+ container_name: clickhouse
+ restart: always
+ ports:
+ - "8123:8123"
+ - "9000:9000"
+ volumes:
+      # 默认配置
+      - ./clickhouse/config/docker_related_config.xml:/etc/clickhouse-server/config.d/docker_related_config.xml:rw
+      - ./clickhouse/config/config.xml:/etc/clickhouse-server/config.xml:rw
+      - ./clickhouse/config/users.xml:/etc/clickhouse-server/users.xml:rw
+      - /etc/localtime:/etc/localtime:ro
+      # 运行日志
+      - ./clickhouse/log:/var/log/clickhouse-server
+      # 数据持久
+      - ./clickhouse/data:/var/lib/clickhouse:rw
+ iotdb-service:
+ image: apache/iotdb:1.3.0-standalone
+ hostname: iotdb-service
+ container_name: iotdb-service
+ ports:
+ - "6667:6667"
+ environment:
+ - cn_internal_address=iotdb-service
+ - cn_internal_port=10710
+ - cn_consensus_port=10720
+ - cn_seed_config_node=iotdb-service:10710
+ - dn_rpc_address=iotdb-service
+ - dn_internal_address=iotdb-service
+ - dn_rpc_port=6667
+ - dn_mpp_data_exchange_port=10740
+ - dn_schema_region_consensus_port=10750
+ - dn_data_region_consensus_port=10760
+ - dn_seed_config_node=iotdb-service:10710
+ volumes:
+      - ./iotdb/data:/iotdb/data
+      - ./iotdb/logs:/iotdb/logs
+ networks:
+ iot-env:
+ zookeeper:
+ image: apachepulsar/pulsar:latest
+ container_name: zookeeper
+ restart: on-failure
+ networks:
+ - pulsar
+ volumes:
+      - ./Pulsar/data/zookeeper:/pulsar/data/zookeeper
+ environment:
+ - metadataStoreUrl=zk:zookeeper:2181
+ - PULSAR_MEM=-Xms256m -Xmx256m -XX:MaxDirectMemorySize=256m
+ command: >
+ bash -c "bin/apply-config-from-env.py conf/zookeeper.conf && \
+ bin/generate-zookeeper-config.sh conf/zookeeper.conf && \
+ exec bin/pulsar zookeeper"
+ healthcheck:
+ test: ["CMD", "bin/pulsar-zookeeper-ruok.sh"]
+ interval: 10s
+ timeout: 5s
+ retries: 30
+
+ # Init cluster metadata
+ pulsar-init:
+ container_name: pulsar-init
+ hostname: pulsar-init
+ image: apachepulsar/pulsar:latest
+ networks:
+ - pulsar
+ command: >
+ bin/pulsar initialize-cluster-metadata \
+ --cluster cluster-a \
+ --zookeeper zookeeper:2181 \
+ --configuration-store zookeeper:2181 \
+ --web-service-url http://broker:8080 \
+ --broker-service-url pulsar://broker:6650
+ depends_on:
+ zookeeper:
+ condition: service_healthy
+
+ # Start bookie
+ bookie:
+ image: apachepulsar/pulsar:latest
+ container_name: bookie
+ restart: on-failure
+ networks:
+ - pulsar
+ environment:
+ - clusterName=cluster-a
+ - zkServers=zookeeper:2181
+ - metadataServiceUri=metadata-store:zk:zookeeper:2181
+      # otherwise every time we run docker compose up or down we fail to start due to Cookie
+ # See: https://github.com/apache/bookkeeper/blob/405e72acf42bb1104296447ea8840d805094c787/bookkeeper-server/src/main/java/org/apache/bookkeeper/bookie/Cookie.java#L57-68
+ - advertisedAddress=bookie
+ - BOOKIE_MEM=-Xms512m -Xmx512m -XX:MaxDirectMemorySize=256m
+ depends_on:
+ zookeeper:
+ condition: service_healthy
+ pulsar-init:
+ condition: service_completed_successfully
+ # Map the local directory to the container to avoid bookie startup failure due to insufficient container disks.
+ volumes:
+      - ./Pulsar/data/bookkeeper:/pulsar/data/bookkeeper
+ command: bash -c "bin/apply-config-from-env.py conf/bookkeeper.conf && exec bin/pulsar bookie"
+
+ # Start broker
+ broker:
+ image: apachepulsar/pulsar:latest
+ container_name: broker
+ hostname: broker
+ restart: on-failure
+ networks:
+ - pulsar
+ environment:
+ - metadataStoreUrl=zk:zookeeper:2181
+ - zookeeperServers=zookeeper:2181
+ - clusterName=cluster-a
+ - managedLedgerDefaultEnsembleSize=1
+ - managedLedgerDefaultWriteQuorum=1
+ - managedLedgerDefaultAckQuorum=1
+ - advertisedAddress=broker
+ - advertisedListeners=external:pulsar://127.0.0.1:6650
+ - PULSAR_MEM=-Xms512m -Xmx512m -XX:MaxDirectMemorySize=256m
+ depends_on:
+ zookeeper:
+ condition: service_healthy
+ bookie:
+ condition: service_started
+ ports:
+ - "6650:6650"
+ - "18080:8080"
+ command: bash -c "bin/apply-config-from-env.py conf/broker.conf && exec bin/pulsar broker"
+
+ kafka:
+ image: wurstmeister/kafka
+ container_name: kafka
+ volumes:
+ - /etc/localtime:/etc/localtime
+ ports:
+ - "9092:9092"
+ environment:
+ KAFKA_ADVERTISED_HOST_NAME: 127.0.0.1
+ KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+ KAFKA_ADVERTISED_PORT: 9092
+ KAFKA_LOG_RETENTION_HOURS: 120
+ KAFKA_MESSAGE_MAX_BYTES: 10000000
+ KAFKA_REPLICA_FETCH_MAX_BYTES: 10000000
+ KAFKA_GROUP_MAX_SESSION_TIMEOUT_MS: 60000
+ KAFKA_NUM_PARTITIONS: 3
+ KAFKA_DELETE_RETENTION_MS: 1000
+ kafka-manager:
+ image: sheepkiller/kafka-manager
+ container_name: kafka-manager
+ environment:
+ ZK_HOSTS: 127.0.0.1
+ ports:
+ - "9009:9000"
+
+ rmqnamesrv:
+ image: foxiswho/rocketmq:server
+ restart: always
+ container_name: rmqnamesrv
+ ports:
+ - 9876:9876
+ volumes:
+      - ./rocketmq/rmqnamesrv/logs:/opt/logs
+      - ./rocketmq/rmqnamesrv/store:/opt/store
+ networks:
+ rmq:
+ aliases:
+ - rmqnamesrv
+
+ rmqbroker:
+ image: foxiswho/rocketmq:broker
+ restart: always
+ container_name: rmqbroker
+ ports:
+ - 10909:10909
+ - 10911:10911
+ volumes:
+      - ./rocketmq/rmqbroker/logs:/opt/logs
+      - ./rocketmq/rmqbroker/store:/opt/store
+      - ./rocketmq/rmqbroker/conf/broker.conf:/etc/rocketmq/broker.conf
+ environment:
+ NAMESRV_ADDR: "rmqnamesrv:9876"
+ JAVA_OPTS: " -Duser.home=/opt"
+ JAVA_OPT_EXT: "-server -Xms128m -Xmx128m -Xmn128m"
+ command: mqbroker -c /etc/rocketmq/broker.conf
+ depends_on:
+ - rmqnamesrv
+ networks:
+ rmq:
+ aliases:
+ - rmqbroker
+
+ rmqconsole:
+ image: styletang/rocketmq-console-ng
+ restart: always
+ container_name: rmqconsole
+ ports:
+ - 18080:8080
+ environment:
+ JAVA_OPTS: "-Drocketmq.namesrv.addr=rmqnamesrv:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false"
+ depends_on:
+ - rmqnamesrv
+ networks:
+ rmq:
+ aliases:
+ - rmqconsole
+
+volumes:
+ cassandra-data:
+networks:
+ iot-env:
+ driver: bridge
+ rmq:
+ driver: bridge
diff --git a/docker/clickhouse/config/config.xml b/docker/env/clickhouse/config/config.xml
similarity index 100%
rename from docker/clickhouse/config/config.xml
rename to docker/env/clickhouse/config/config.xml
diff --git a/docker/clickhouse/config/docker_related_config.xml b/docker/env/clickhouse/config/docker_related_config.xml
similarity index 100%
rename from docker/clickhouse/config/docker_related_config.xml
rename to docker/env/clickhouse/config/docker_related_config.xml
diff --git a/docker/clickhouse/config/users.xml b/docker/env/clickhouse/config/users.xml
similarity index 100%
rename from docker/clickhouse/config/users.xml
rename to docker/env/clickhouse/config/users.xml
diff --git a/docker/clickhouse/docker-compose.yml b/docker/env/clickhouse/docker-compose.yml
similarity index 100%
rename from docker/clickhouse/docker-compose.yml
rename to docker/env/clickhouse/docker-compose.yml
diff --git a/docker/influx/docker-compose.yml b/docker/env/influx/docker-compose.yml
similarity index 100%
rename from docker/influx/docker-compose.yml
rename to docker/env/influx/docker-compose.yml
diff --git a/docker/influx/influxv2.env b/docker/env/influx/influxv2.env
similarity index 100%
rename from docker/influx/influxv2.env
rename to docker/env/influx/influxv2.env
diff --git a/docker/influx/telegraf/mytelegraf.conf b/docker/env/influx/telegraf/mytelegraf.conf
similarity index 100%
rename from docker/influx/telegraf/mytelegraf.conf
rename to docker/env/influx/telegraf/mytelegraf.conf
diff --git a/docker/iotdb/docker-compose.yml b/docker/env/iotdb/docker-compose.yml
similarity index 100%
rename from docker/iotdb/docker-compose.yml
rename to docker/env/iotdb/docker-compose.yml
diff --git a/docker/kafka/docker-compose.yml b/docker/env/kafka/docker-compose.yml
similarity index 100%
rename from docker/kafka/docker-compose.yml
rename to docker/env/kafka/docker-compose.yml
diff --git a/docker/mongo/docker-compose.yml b/docker/env/mongo/docker-compose.yml
similarity index 100%
rename from docker/mongo/docker-compose.yml
rename to docker/env/mongo/docker-compose.yml
diff --git a/docker/env/mongo/init-mongo.js b/docker/env/mongo/init-mongo.js
new file mode 100644
index 0000000000000000000000000000000000000000..36173931bef5658642053c2aee930108ecc02f6a
--- /dev/null
+++ b/docker/env/mongo/init-mongo.js
@@ -0,0 +1,22 @@
+// dbAdmin = db.getSiblingDB("admin");
+// dbAdmin.createUser({
+// user: "iot",
+// pwd: "iot123",
+// roles: [{ role: "userAdminAnyDatabase", db: "admin" }],
+// mechanisms: ["SCRAM-SHA-1"],
+// });
+//
+// // Authenticate user
+// dbAdmin.auth({
+// user: "iot",
+// pwd: "iot123",
+// mechanisms: ["SCRAM-SHA-1"],
+// digestPassword: true,
+// });
+
+use iot;
+db.createCollection("calc");
+db.createCollection("waring");
+db.createCollection("script_waring");
+
+// todo 导入数据可以在此处操作,或者新建一个单独的用户来操作有权限的库
diff --git a/docker/mqtt/docker-compose.yml b/docker/env/mqtt/docker-compose.yml
similarity index 100%
rename from docker/mqtt/docker-compose.yml
rename to docker/env/mqtt/docker-compose.yml
diff --git a/docker/mqtt/mock/Dockerfile b/docker/env/mqtt/mock/Dockerfile
similarity index 100%
rename from docker/mqtt/mock/Dockerfile
rename to docker/env/mqtt/mock/Dockerfile
diff --git a/docker/mqtt/mock/go.mod b/docker/env/mqtt/mock/go.mod
similarity index 100%
rename from docker/mqtt/mock/go.mod
rename to docker/env/mqtt/mock/go.mod
diff --git a/docker/mqtt/mock/go.sum b/docker/env/mqtt/mock/go.sum
similarity index 100%
rename from docker/mqtt/mock/go.sum
rename to docker/env/mqtt/mock/go.sum
diff --git a/docker/mqtt/mock/main.go b/docker/env/mqtt/mock/main.go
similarity index 100%
rename from docker/mqtt/mock/main.go
rename to docker/env/mqtt/mock/main.go
diff --git a/docker/mqtt/mock/mqtt.yml b/docker/env/mqtt/mock/mqtt.yml
similarity index 100%
rename from docker/mqtt/mock/mqtt.yml
rename to docker/env/mqtt/mock/mqtt.yml
diff --git a/docker/mqtt/mock/start.sh b/docker/env/mqtt/mock/start.sh
similarity index 100%
rename from docker/mqtt/mock/start.sh
rename to docker/env/mqtt/mock/start.sh
diff --git a/docker/mqtt/readme.md b/docker/env/mqtt/readme.md
similarity index 100%
rename from docker/mqtt/readme.md
rename to docker/env/mqtt/readme.md
diff --git a/docker/env/mysql/docker-compose.yml b/docker/env/mysql/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..275ff8e495d74a7eed88d637d6aecedcb507ff1b
--- /dev/null
+++ b/docker/env/mysql/docker-compose.yml
@@ -0,0 +1,18 @@
+version: '3'
+
+services:
+ mysql:
+ image: mysql:8.0.38
+ container_name: mysql8
+ restart: always
+ environment:
+ MYSQL_ROOT_PASSWORD: root123
+ MYSQL_DATABASE: iot
+ MYSQL_USER: app
+ MYSQL_PASSWORD: iot123456
+ TZ: "Asia/Shanghai"
+ volumes:
+ - ./data:/var/lib/mysql
+ ports:
+ - "3306:3306"
+ command: --default-authentication-plugin=mysql_native_password --character-set-server=utf8mb4 --binlog-format=ROW
diff --git a/docker/rabbitmq/Dockerfile b/docker/env/rabbitmq/Dockerfile
similarity index 100%
rename from docker/rabbitmq/Dockerfile
rename to docker/env/rabbitmq/Dockerfile
diff --git a/docker/rabbitmq/docker-compose.yml b/docker/env/rabbitmq/docker-compose.yml
similarity index 100%
rename from docker/rabbitmq/docker-compose.yml
rename to docker/env/rabbitmq/docker-compose.yml
diff --git a/docker/rabbitmq/enabled_plugins b/docker/env/rabbitmq/enabled_plugins
similarity index 100%
rename from docker/rabbitmq/enabled_plugins
rename to docker/env/rabbitmq/enabled_plugins
diff --git a/docker/rabbitmq/plugins/rabbitmq_delayed_message_exchange-3.10.2.ez b/docker/env/rabbitmq/plugins/rabbitmq_delayed_message_exchange-3.10.2.ez
similarity index 100%
rename from docker/rabbitmq/plugins/rabbitmq_delayed_message_exchange-3.10.2.ez
rename to docker/env/rabbitmq/plugins/rabbitmq_delayed_message_exchange-3.10.2.ez
diff --git a/docker/env/redis/conf/redis.conf b/docker/env/redis/conf/redis.conf
new file mode 100644
index 0000000000000000000000000000000000000000..867d5513daa0bd14a959693a4c09e3b5004d041a
--- /dev/null
+++ b/docker/env/redis/conf/redis.conf
@@ -0,0 +1,1053 @@
+# Redis configuration file example.
+#
+# Note that in order to read the configuration file, Redis must be
+# started with the file path as first argument:
+#
+# ./redis-server /path/to/redis.conf
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
+################################## NETWORK #####################################
+
+# By default, if no "bind" configuration directive is specified, Redis listens
+# for connections from all the network interfaces available on the server.
+# It is possible to listen to just one or multiple selected interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1
+# bind 127.0.0.1 ::1
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only into
+# the IPv4 loopback interface address (this means Redis will be able to
+# accept connections only from clients running into the same computer it
+# is running).
+#
+# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
+# JUST COMMENT THE FOLLOWING LINE.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# bind 127.0.0.1
+
+# Protected mode is a layer of security protection, in order to avoid that
+# Redis instances left open on the internet are accessed and exploited.
+#
+# When protected mode is on and if:
+#
+# 1) The server is not binding explicitly to a set of addresses using the
+# "bind" directive.
+# 2) No password is configured.
+#
+# The server only accepts connections from clients connecting from the
+# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
+# sockets.
+#
+# By default protected mode is enabled. You should disable it only if
+# you are sure you want clients from other hosts to connect to Redis
+# even if no authentication is configured, nor a specific set of interfaces
+# are explicitly listed using the "bind" directive.
+protected-mode yes
+
+# Accept connections on the specified port, default is 6379 (IANA #815344).
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 6379
+
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need a high backlog in order
+# to avoid slow clients connections issues. Note that the Linux kernel
+# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
+# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
+# in order to get the desired effect.
+tcp-backlog 511
+
+# Unix socket.
+#
+# Specify the path for the Unix socket that will be used to listen for
+# incoming connections. There is no default, so Redis will not listen
+# on a unix socket when not specified.
+#
+# unixsocket /tmp/redis.sock
+# unixsocketperm 700
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 0
+
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Take the connection alive from the point of view of network
+# equipment in the middle.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection the double of the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 300 seconds, which is the new
+# Redis default starting with Redis 3.2.1.
+tcp-keepalive 300
+
+################################# GENERAL #####################################
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+daemonize no
+
+# If you run Redis from upstart or systemd, Redis can interact with your
+# supervision tree. Options:
+# supervised no - no supervision interaction
+# supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
+# supervised auto - detect upstart or systemd method based on
+# UPSTART_JOB or NOTIFY_SOCKET environment variables
+# Note: these supervision methods only signal "process is ready."
+# They do not enable continuous liveness pings back to your supervisor.
+supervised no
+
+# If a pid file is specified, Redis writes it where specified at startup
+# and removes it at exit.
+#
+# When the server runs non daemonized, no pid file is created if none is
+# specified in the configuration. When the server is daemonized, the pid file
+# is used even if not specified, defaulting to "/var/run/redis.pid".
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+pidfile /var/run/redis_6379.pid
+
+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel notice
+
+# Specify the log file name. Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile /var/log/redis/redis.log
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+################################ SNAPSHOTTING ################################
+#
+# Save the DB on disk:
+#
+# save <seconds> <changes>
+#
+# Will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# In the example below the behaviour will be to save:
+# after 900 sec (15 min) if at least 1 key changed
+# after 300 sec (5 min) if at least 10 keys changed
+# after 60 sec if at least 10000 keys changed
+#
+# Note: you can disable saving completely by commenting out all "save" lines.
+#
+# It is also possible to remove all the previously configured save
+# points by adding a save directive with a single empty string argument
+# like in the following example:
+#
+# save ""
+
+save 900 1
+save 300 10
+save 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
+# Compress string objects using LZF when dump .rdb databases?
+# For default that's set to 'yes' as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes
+
+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performances.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
+# The filename where to dump the DB
+dbfilename dump.rdb
+
+# The working directory.
+#
+# The DB will be written inside this directory, with the filename specified
+# above using the 'dbfilename' configuration directive.
+#
+# The Append Only File will also be created inside this directory.
+#
+# Note that you must specify a directory here, not a file name.
+dir /data
+
+################################# REPLICATION #################################
+
+# Master-Slave replication. Use slaveof to make a Redis instance a copy of
+# another Redis server. A few things to understand ASAP about Redis replication.
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+# stop accepting writes if it appears to be not connected with at least
+# a given number of slaves.
+# 2) Redis slaves are able to perform a partial resynchronization with the
+# master if the replication link is lost for a relatively small amount of
+# time. You may want to configure the replication backlog size (see the next
+# sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+# network partition slaves automatically try to reconnect to masters
+# and resynchronize with them.
+#
+# slaveof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the slave to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the slave request.
+#
+# masterauth <master-password>
+
+# When a slave loses its connection with the master, or when the replication
+# is still in progress, the slave can act in two different ways:
+#
+# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+# still reply to client requests, possibly with out of date data, or the
+# data set may just be empty if this is the first synchronization.
+#
+# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+# an error "SYNC with master in progress" to all the kind of commands
+# but to INFO and SLAVEOF.
+#
+slave-serve-stale-data yes
+
+# You can configure a slave instance to accept writes or not. Writing against
+# a slave instance may be useful to store some ephemeral data (because data
+# written on a slave will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# -------------------------------------------------------
+# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
+# -------------------------------------------------------
+#
+# New slaves and reconnecting slaves that are not able to continue the replication
+# process just receiving differences, need to do what is called a "full
+# synchronization". An RDB file is transmitted from the master to the slaves.
+# The transmission can happen in two different ways:
+#
+# 1) Disk-backed: The Redis master creates a new process that writes the RDB
+# file on disk. Later the file is transferred by the parent
+# process to the slaves incrementally.
+# 2) Diskless: The Redis master creates a new process that directly writes the
+# RDB file to slave sockets, without touching the disk at all.
+#
+# With disk-backed replication, while the RDB file is generated, more slaves
+# can be queued and served with the RDB file as soon as the current child producing
+# the RDB file finishes its work. With diskless replication instead once
+# the transfer starts, new slaves arriving will be queued and a new transfer
+# will start when the current one terminates.
+#
+# When diskless replication is used, the master waits a configurable amount of
+# time (in seconds) before starting the transfer in the hope that multiple slaves
+# will arrive and the transfer can be parallelized.
+#
+# With slow disks and fast (large bandwidth) networks, diskless replication
+# works better.
+repl-diskless-sync no
+
+# When diskless replication is enabled, it is possible to configure the delay
+# the server waits in order to spawn the child that transfers the RDB via socket
+# to the slaves.
+#
+# This is important since once the transfer starts, it is not possible to serve
+# new slaves arriving, that will be queued for the next RDB transfer, so the server
+# waits a delay in order to let more slaves arrive.
+#
+# The delay is specified in seconds, and by default is 5 seconds. To disable
+# it entirely just set it to 0 seconds and the transfer will start ASAP.
+repl-diskless-sync-delay 5
+
+# Slaves send PINGs to server in a predefined interval. It's possible to change
+# this interval with the repl_ping_slave_period option. The default value is 10
+# seconds.
+#
+# repl-ping-slave-period 10
+
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
+# 2) Master timeout from the point of view of slaves (data, pings).
+# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-slave-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the slave.
+#
+# repl-timeout 60
+
+# Disable TCP_NODELAY on the slave socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to slaves. But this can add a delay for
+# the data to appear on the slave side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the slave side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and slaves are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay no
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# slave data when slaves are disconnected for some time, so that when a slave
+# wants to reconnect again, often a full resync is not needed, but a partial
+# resync is enough, just passing the portion of data the slave missed while
+# disconnected.
+#
+# The bigger the replication backlog, the longer the time the slave can be
+# disconnected and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated once there is at least a slave connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has no longer connected slaves for some time, the backlog
+# will be freed. The following option configures the amount of seconds that
+# need to elapse, starting from the time the last slave disconnected, for
+# the backlog buffer to be freed.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The slave priority is an integer number published by Redis in the INFO output.
+# It is used by Redis Sentinel in order to select a slave to promote into a
+# master if the master is no longer working correctly.
+#
+# A slave with a low priority number is considered better for promotion, so
+# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
+# pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the slave as not able to perform the
+# role of master, so a slave with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+slave-priority 100
+
+# It is possible for a master to stop accepting writes if there are less than
+# N slaves connected, having a lag less or equal than M seconds.
+#
+# The N slaves need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the slave, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough slaves
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 slaves with a lag <= 10 seconds use:
+#
+# min-slaves-to-write 3
+# min-slaves-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-slaves-to-write is set to 0 (feature disabled) and
+# min-slaves-max-lag is set to 10.
+
+# A Redis master is able to list the address and port of the attached
+# slaves in different ways. For example the "INFO replication" section
+# offers this information, which is used, among other tools, by
+# Redis Sentinel in order to discover slave instances.
+# Another place where this info is available is in the output of the
+# "ROLE" command of a master.
+#
+# The listed IP and address normally reported by a slave is obtained
+# in the following way:
+#
+# IP: The address is auto detected by checking the peer address
+# of the socket used by the slave to connect with the master.
+#
+# Port: The port is communicated by the slave during the replication
+# handshake, and is normally the port that the slave is using to
+# listen for connections.
+#
+# However when port forwarding or Network Address Translation (NAT) is
+# used, the slave may be actually reachable via different IP and port
+# pairs. The following two options can be used by a slave in order to
+# report to its master a specific set of IP and port, so that both INFO
+# and ROLE will report those values.
+#
+# There is no need to use both the options if you need to override just
+# the port or the IP address.
+#
+# slave-announce-ip 5.5.5.5
+# slave-announce-port 1234
+
+################################## SECURITY ###################################
+
+# Require clients to issue AUTH before processing any other
+# commands. This might be useful in environments in which you do not trust
+# others with access to the host running redis-server.
+#
+# This should stay commented out for backward compatibility and because most
+# people do not need auth (e.g. they run their own servers).
+#
+# Warning: since Redis is pretty fast an outside user can try up to
+# 150k passwords per second against a good box. This means that you should
+# use a very strong password otherwise it will be very easy to break.
+#
+# requirepass foobared
+requirepass eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81
+
+# Command renaming.
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to slaves may cause problems.
+
+################################### LIMITS ####################################
+
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+#
+# maxclients 10000
+
+# Don't use more memory than the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU cache, or to set
+# a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have slaves attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the slaves are subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of slaves is full with DELs of keys evicted triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have slaves attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for slave
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select among five behaviors:
+#
+# volatile-lru -> remove the key with an expire set using an LRU algorithm
+# allkeys-lru -> remove any key according to the LRU algorithm
+# volatile-random -> remove a random key with an expire set
+# allkeys-random -> remove a random key, any key
+# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
+# noeviction -> don't expire at all, just return an error on write operations
+#
+# Note: with any of the above policies, Redis will return an error on write
+# operations, when there are no suitable keys for eviction.
+#
+# At the date of writing these commands are: set setnx setex append
+# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+# getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy noeviction
+
+# LRU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can tune it for speed or
+# accuracy. For default Redis will check five keys and pick the one that was
+# used less recently, you can change the sample size using the following
+# configuration directive.
+#
+# The default of 5 produces good enough results. 10 Approximates very closely
+# true LRU but costs a bit more CPU. 3 is very fast but not very accurate.
+#
+# maxmemory-samples 5
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The name of the append only file (default: "appendonly.aof")
+
+appendfilename "appendonly.aof"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OS will really flush
+# data on disk, some other OS will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, Safest.
+# everysec: fsync only one time every second. Compromise.
+#
+# The default is "everysec", as that's usually the right compromise between
+# speed and data safety. It's up to you to understand if you can relax this to
+# "no" that will let the operating system flush the output buffer when
+# it wants, for better performances (but if you can live with the idea of
+# some data loss consider the default persistence mode that's snapshotting),
+# or on the contrary, use "always" that's very slow but a bit safer than
+# everysec.
+#
+# More details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
+# If unsure, use "everysec".
+
+# appendfsync always
+appendfsync everysec
+# appendfsync no
+
+# When the AOF fsync policy is set to always or everysec, and a background
+# saving process (a background save or AOF log background rewriting) is
+# performing a lot of I/O against the disk, in some Linux configurations
+# Redis may block too long on the fsync() call. Note that there is no fix for
+# this currently, as even performing fsync in a different thread will block
+# our synchronous write(2) call.
+#
+# In order to mitigate this problem it's possible to use the following option
+# that will prevent fsync() from being called in the main process while a
+# BGSAVE or BGREWRITEAOF is in progress.
+#
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
+# default Linux settings).
+#
+# If you have latency problems turn this to "yes". Otherwise leave it as
+# "no" that is the safest pick from the point of view of durability.
+
+no-appendfsync-on-rewrite no
+
+# Automatic rewrite of the append only file.
+# Redis is able to automatically rewrite the log file implicitly calling
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
+#
+# This is how it works: Redis remembers the size of the AOF file after the
+# latest rewrite (if no rewrite has happened since the restart, the size of
+# the AOF at startup is used).
+#
+# This base size is compared to the current size. If the current size is
+# bigger than the specified percentage, the rewrite is triggered. Also
+# you need to specify a minimal size for the AOF file to be rewritten, this
+# is useful to avoid rewriting the AOF file even if the percentage increase
+# is reached but it is still pretty small.
+#
+# Specify a percentage of zero in order to disable the automatic AOF
+# rewrite feature.
+
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
+
+# An AOF file may be found to be truncated at the end during the Redis
+# startup process, when the AOF data gets loaded back into memory.
+# This may happen when the system where Redis is running
+# crashes, especially when an ext4 filesystem is mounted without the
+# data=ordered option (however this can't happen when Redis itself
+# crashes or aborts but the operating system still works correctly).
+#
+# Redis can either exit with an error when this happens, or load as much
+# data as possible (the default now) and start if the AOF file is found
+# to be truncated at the end. The following option controls this behavior.
+#
+# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
+# the Redis server starts emitting a log to inform the user of the event.
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user requires
+# to fix the AOF file using the "redis-check-aof" utility before to restart
+# the server.
+#
+# Note that if the AOF file will be found to be corrupted in the middle
+# the server will still exit with an error. This option only applies when
+# Redis will try to read more data from the AOF file but not enough bytes
+# will be found.
+aof-load-truncated yes
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet called write commands. The second
+# is the only way to shut down the server in the case a write command was
+# already issued by the script but the user doesn't want to wait for the natural
+# termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
+################################ REDIS CLUSTER ###############################
+#
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
+# in order to mark it as "mature" we need to wait for a non trivial percentage
+# of users to deploy it in production.
+# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+#
+# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node enable the cluster support uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# Cluster node timeout is the amount of milliseconds a node must be unreachable
+# for it to be considered in failure state.
+# Most other internal time limits are multiple of the node timeout.
+#
+# cluster-node-timeout 15000
+
+# A slave of a failing master will avoid to start a failover if its data
+# looks too old.
+#
+# There is no simple way for a slave to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple slaves able to failover, they exchange messages
+# in order to try to give an advantage to the slave with the best
+# replication offset (more data from the master processed).
+# Slaves will try to get their rank by offset, and apply to the start
+# of the failover a delay proportional to their rank.
+#
+# 2) Every single slave computes the time of the last interaction with
+# its master. This can be the last ping or command received (if the master
+# is still in the "connected" state), or the time that elapsed since the
+# disconnection with the master (if the replication link is currently down).
+# If the last interaction is too old, the slave will not try to failover
+# at all.
+#
+# The point "2" can be tuned by user. Specifically a slave will not perform
+# the failover if, since the last interaction with the master, the time
+# elapsed is greater than:
+#
+# (node-timeout * slave-validity-factor) + repl-ping-slave-period
+#
+# So for example if node-timeout is 30 seconds, and the slave-validity-factor
+# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
+# slave will not try to failover if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large slave-validity-factor may allow slaves with too old data to failover
+# a master, while a too small value may prevent the cluster from being able to
+# elect a slave at all.
+#
+# For maximum availability, it is possible to set the slave-validity-factor
+# to a value of 0, which means, that slaves will always try to failover the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+#
+# Zero is the only value able to guarantee that when all the partitions heal
+# the cluster will always be able to continue.
+#
+# cluster-slave-validity-factor 10
+
+# Cluster slaves are able to migrate to orphaned masters, that are masters
+# that are left without working slaves. This improves the cluster ability
+# to resist to failures as otherwise an orphaned master can't be failed over
+# in case of failure if it has no working slaves.
+#
+# Slaves migrate to orphaned masters only if there are still at least a
+# given number of other working slaves for their old master. This number
+# is the "migration barrier". A migration barrier of 1 means that a slave
+# will migrate only if there is at least 1 other working slave for its master
+# and so forth. It usually reflects the number of slaves you want for every
+# master in your cluster.
+#
+# Default is 1 (slaves migrate only if their masters remain with at least
+# one slave). To disable migration just set it to a very large value.
+# A value of 0 can be set but is useful only for debugging and dangerous
+# in production.
+#
+# cluster-migration-barrier 1
+
+# By default Redis Cluster nodes stop accepting queries if they detect there
+# is at least an hash slot uncovered (no available node is serving it).
+# This way if the cluster is partially down (for example a range of hash slots
+# are no longer covered) all the cluster becomes, eventually, unavailable.
+# It automatically returns available as soon as all the slots are covered again.
+#
+# However sometimes you want the subset of the cluster which is working,
+# to continue to accept queries for the part of the key space that is still
+# covered. In order to do so, just set the cluster-require-full-coverage
+# option to no.
+#
+# cluster-require-full-coverage yes
+
+# In order to setup your cluster make sure to read the documentation
+# available at http://redis.io web site.
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold " if needed.
+latency-monitor-threshold 0
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# A Alias for g$lshzxe, so that the "AKE" string means all the events.
+#
+# The "notify-keyspace-events" takes as argument a string that is composed
+# of zero or multiple characters. The empty string means that notifications
+# are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
+#
+notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+# notify-keyspace-events ""
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb <-- not recommended for normal workloads
+# -4: max size: 32 Kb <-- not recommended
+# -3: max size: 16 Kb <-- probably not recommended
+# -2: max size: 8 Kb <-- good
+# -1: max size: 4 Kb <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-ziplist-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations. Settings are:
+# 0: disable all list compression
+# 1: depth 1 means "don't start compressing until after 1 node into the list,
+# going from either the head or tail"
+# So: [head]->node->node->...->node->[tail]
+# [head], [tail] will always be uncompressed; inner nodes will compress.
+# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
+# 2 here means: don't compress head or head->next or tail->prev or tail,
+# but compress all nodes between them.
+# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
+# etc.
+list-compress-depth 0
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happen to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit in the size of the
+# set in order to use this special memory saving encoding.
+set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+
+# HyperLogLog sparse representation bytes limit. The limit includes the
+# 16 bytes header. When an HyperLogLog using the sparse representation crosses
+# this limit, it is converted into the dense representation.
+#
+# A value greater than 16000 is totally useless, since at that point the
+# dense representation is more memory efficient.
+#
+# The suggested value is ~ 3000 in order to have the benefits of
+# the space efficient encoding without slowing down too much PFADD,
+# which is O(N) with the sparse encoding. The value can be raised to
+# ~ 10000 when CPU is not a concern, but space is, and the data set is
+# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
+hll-sparse-max-bytes 3000
+
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operation you run into a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# slave -> slave clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
diff --git a/docker/redis/docker-compose.yml b/docker/env/redis/docker-compose.yml
similarity index 70%
rename from docker/redis/docker-compose.yml
rename to docker/env/redis/docker-compose.yml
index c2dbfd54c98b2af36908a58fedeb3413244f19b6..afb4110598ee67ca5d63c7bff0981b3697c17e54 100644
--- a/docker/redis/docker-compose.yml
+++ b/docker/env/redis/docker-compose.yml
@@ -7,7 +7,9 @@ services:
- '6379:6379'
command: redis-server --save 20 1 --loglevel warning --requirepass eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81
volumes:
- - cache:/data
+ - ./data:/data
+ - ./conf:/usr/local/etc/redis
+ - ./log:/var/log/redis
volumes:
cache:
- driver: local
\ No newline at end of file
+ driver: local
diff --git a/docker/rocketmq/docker-compose.yml b/docker/env/rocketmq/docker-compose.yml
similarity index 100%
rename from docker/rocketmq/docker-compose.yml
rename to docker/env/rocketmq/docker-compose.yml
diff --git a/docker/metricsMonitor/grafana/config/datasources.json b/docker/metricsMonitor/grafana/config/datasources.json
new file mode 100644
index 0000000000000000000000000000000000000000..b60be7d3e19f3c783cfb5cf8671a6752505dab81
--- /dev/null
+++ b/docker/metricsMonitor/grafana/config/datasources.json
@@ -0,0 +1,11 @@
+{
+ "apiVersion": 1,
+ "datasources": [
+ {
+ "name": "iotPrometheus",
+ "type": "prometheus",
+ "url": "http://172.17.0.1:9090",
+ "isDefault": true
+ }
+ ]
+}
diff --git a/docker/metricsMonitor/grafana/config/grafana.ini b/docker/metricsMonitor/grafana/config/grafana.ini
new file mode 100644
index 0000000000000000000000000000000000000000..90185674a1bf39145f6b9f36c2342f1346cdc7e6
--- /dev/null
+++ b/docker/metricsMonitor/grafana/config/grafana.ini
@@ -0,0 +1,2 @@
+[security]
+admin_user = admin
diff --git a/docker/metricsMonitor/metrics-monitor-docker-compose.yml b/docker/metricsMonitor/metrics-monitor-docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..91c5e15fb16ae2fe793b8d3ebc8f7abe1fc7171e
--- /dev/null
+++ b/docker/metricsMonitor/metrics-monitor-docker-compose.yml
@@ -0,0 +1,29 @@
+version: '3'
+services:
+ prometheus:
+ image: bitnami/prometheus:latest
+ environment:
+ - TZ=Asia/Shanghai
+ ports:
+ - '9090:9090'
+ volumes:
+ - ./prometheus/prometheus.yml:/opt/bitnami/prometheus/conf/prometheus.yml
+ networks:
+ - iot-metrics-monitor
+ grafana:
+ image: grafana/grafana
+ environment:
+ - TZ=Asia/Shanghai
+ - GF_AUTH_ADMIN_PASSWORD=iot123456
+ ports:
+ - '9091:3000'
+ volumes:
+ - ./grafana/config/grafana.ini:/etc/grafana/grafana.ini
+ # command: >
+# bash -c "/usr/sbin/grafana-cli --config /etc/grafana/grafana.ini datasources import /etc/grafana/provisioning/datasources/datasources.json &&
+# /run.sh"
+ networks:
+ - iot-metrics-monitor
+networks:
+ iot-metrics-monitor:
+ driver: bridge
diff --git a/docker/metricsMonitor/prometheus/prometheus.yml b/docker/metricsMonitor/prometheus/prometheus.yml
new file mode 100644
index 0000000000000000000000000000000000000000..02924631fee4707b237d367f6ef7bc442ef91346
--- /dev/null
+++ b/docker/metricsMonitor/prometheus/prometheus.yml
@@ -0,0 +1,65 @@
+global:
+ scrape_interval: 15s
+ evaluation_interval: 15s
+
+
+scrape_configs:
+# - job_name: prometheus
+# static_configs:
+# - targets: ['172.17.0.1:9090']
+ - job_name: iotgoproject
+ static_configs:
+ - targets: ['172.17.0.1:8005']
+ labels:
+ namespace: iot-go-project
+ pod: iot-go-project
+ instance: iot-go-project
+ - job_name: iotgomqtt1
+ static_configs:
+ - targets: ['172.17.0.1:8006']
+ labels:
+ namespace: iot-mqtt
+ pod: iot-mqtt1
+ instance: iot-mqtt1
+ - job_name: iotgomqtt2
+ static_configs:
+ - targets: ['172.17.0.1:8007']
+ labels:
+ namespace: iot-mqtt
+ pod: iot-mqtt2
+ instance: iot-mqtt2
+ - job_name: iotgomqtt3
+ static_configs:
+ - targets: ['172.17.0.1:8008']
+ labels:
+ namespace: iot-mqtt
+ pod: iot-mqtt3
+ instance: iot-mqtt3
+ - job_name: iotgomq-pre_handler
+ static_configs:
+ - targets: ['172.17.0.1:8001']
+ labels:
+ namespace: iot-mq
+ pod: iot-mq-pre_handler
+ instance: iot-mq-pre_handler
+ - job_name: calc_handler
+ static_configs:
+ - targets: ['172.17.0.1:8002']
+ labels:
+ namespace: iot-mq
+ pod: iot-mq-calc_handler
+ instance: iot-mq-calc_handler
+ - job_name: iotgomq-waring_handler
+ static_configs:
+ - targets: ['172.17.0.1:8003']
+ labels:
+ namespace: iot-mq
+ pod: iot-mq-waring_handler
+ instance: iot-mq-waring_handler
+ - job_name: iotgomq-wd_handler
+ static_configs:
+ - targets: ['172.17.0.1:8004']
+ labels:
+ namespace: iot-mq
+ pod: iot-mq-wd_handler
+ instance: iot-mq-wd_handler
diff --git a/go-iot-mq/MongoUt.go b/go-iot-mq/MongoUt.go
new file mode 100644
index 0000000000000000000000000000000000000000..9db6ca1e1effacd64961aa32f365402e6add6606
--- /dev/null
+++ b/go-iot-mq/MongoUt.go
@@ -0,0 +1,49 @@
+package main
+
+import (
+ "context"
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+ "go.uber.org/zap"
+ "strconv"
+)
+
+// CalcCollectionName derives a sharded collection name by suffixing the
+// prefix with the id taken modulo 100 (i.e. "<prefix>_<id%100>").
+func CalcCollectionName(prefix string, id uint) string {
+	return prefix + "_" + strconv.FormatUint(uint64(id%100), 10)
+}
+// CheckCollectionAndCreate creates collectionName in the configured Mongo
+// database if it does not already exist. The existence check lists the
+// collections whose names start with prefix (case-insensitively).
+func CheckCollectionAndCreate(prefix, collectionName string) {
+	db := GMongoClient.Database(globalConfig.MongoConfig.Db)
+
+	// "i" makes the prefix match case-insensitive.
+	regex := primitive.Regex{Pattern: "^" + prefix, Options: "i"}
+	filter := bson.M{"name": regex}
+
+	collectionNames, err := db.ListCollectionNames(context.TODO(), filter)
+	if err != nil {
+		zap.S().Fatal(err)
+	}
+
+	collectionExists := false
+	for _, name := range collectionNames {
+		if name == collectionName {
+			collectionExists = true
+			break
+		}
+	}
+	zap.S().Infof("collection %s exists: %v", collectionName, collectionExists)
+
+	// BUG FIX: create the collection when it does NOT exist; the original
+	// condition was inverted and only created already-existing collections.
+	if !collectionExists {
+		if err := db.CreateCollection(context.TODO(), collectionName); err != nil {
+			// Fatal exits the process, so no explicit return is needed.
+			zap.S().Fatal(err)
+		}
+	}
+
+}
diff --git a/go-iot-mq/go.mod b/go-iot-mq/go.mod
index b33938c2c4a25bdc9c79826ee6e64748d91776e3..3773c5541c0e99e8b0c3ae1d7fd2c95539aeba73 100644
--- a/go-iot-mq/go.mod
+++ b/go-iot-mq/go.mod
@@ -11,6 +11,8 @@ require iot-notice v0.0.0
replace iot-notice => ../notice
require (
+ github.com/CatchZeng/feishu v1.3.2
+ github.com/blinkbean/dingtalk v1.1.3
github.com/dop251/goja v0.0.0-20240627195025-eb1f15ee67d2
github.com/influxdata/influxdb-client-go/v2 v2.13.0
github.com/prometheus/client_golang v1.19.1
@@ -20,34 +22,54 @@ require (
go.mongodb.org/mongo-driver v1.16.0
go.uber.org/zap v1.27.0
gopkg.in/yaml.v3 v3.0.1
+ gorm.io/gorm v1.25.11
)
require (
+ filippo.io/edwards25519 v1.1.0 // indirect
+ github.com/ClickHouse/ch-go v0.61.5 // indirect
+ github.com/ClickHouse/clickhouse-go/v2 v2.26.0 // indirect
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
+ github.com/andybalholm/brotli v1.1.0 // indirect
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
- github.com/go-ole/go-ole v1.2.4 // indirect
+ github.com/go-faster/city v1.0.1 // indirect
+ github.com/go-faster/errors v0.7.1 // indirect
+ github.com/go-ole/go-ole v1.2.6 // indirect
+ github.com/go-resty/resty/v2 v2.7.0 // indirect
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
+ github.com/go-sql-driver/mysql v1.8.1 // indirect
+ github.com/gocql/gocql v1.6.0 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/gorilla/websocket v1.4.1 // indirect
+ github.com/gorilla/websocket v1.4.2 // indirect
+ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
+ github.com/jinzhu/inflection v1.0.0 // indirect
+ github.com/jinzhu/now v1.1.5 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/oapi-codegen/runtime v1.0.0 // indirect
+ github.com/paulmach/orb v0.11.1 // indirect
+ github.com/pierrec/lz4/v4 v4.1.21 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
- github.com/shirou/gopsutil v2.19.11+incompatible // indirect
+ github.com/segmentio/asm v1.2.0 // indirect
+ github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 // indirect
+ github.com/shopspring/decimal v1.4.0 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
+ go.opentelemetry.io/otel v1.28.0 // indirect
+ go.opentelemetry.io/otel/trace v1.28.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/net v0.25.0 // indirect
@@ -55,4 +77,5 @@ require (
golang.org/x/sys v0.22.0 // indirect
golang.org/x/text v0.15.0 // indirect
google.golang.org/protobuf v1.34.1 // indirect
+ gopkg.in/inf.v0 v0.9.1 // indirect
)
diff --git a/go-iot-mq/go.sum b/go-iot-mq/go.sum
index 01dff0c549cb8da94f60c16fe6770c3cc32c580f..54f0485f51eaae77092597692765c5447627a5d0 100644
--- a/go-iot-mq/go.sum
+++ b/go-iot-mq/go.sum
@@ -1,10 +1,24 @@
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/CatchZeng/feishu v1.3.2 h1:3A51zYAvxGwPHSy5MGL0thM7o7v/+ufgGfVEbKkd1Ps=
+github.com/CatchZeng/feishu v1.3.2/go.mod h1:osX8HjZ4feBHq6F0ggxor4/VSNM3CpyqlWKZZ4IVsdw=
+github.com/ClickHouse/ch-go v0.61.5 h1:zwR8QbYI0tsMiEcze/uIMK+Tz1D3XZXLdNrlaOpeEI4=
+github.com/ClickHouse/ch-go v0.61.5/go.mod h1:s1LJW/F/LcFs5HJnuogFMta50kKDO0lf9zzfrbl0RQg=
+github.com/ClickHouse/clickhouse-go/v2 v2.26.0 h1:j4/y6NYaCcFkJwN/TU700ebW+nmsIy34RmUAAcZKy9w=
+github.com/ClickHouse/clickhouse-go/v2 v2.26.0/go.mod h1:iDTViXk2Fgvf1jn2dbJd1ys+fBkdD1UMRnXlwmhijhQ=
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
+github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
+github.com/blinkbean/dingtalk v1.1.3 h1:MbidFZYom7DTFHD/YIs+eaI7kRy52kmWE/sy0xjo6E4=
+github.com/blinkbean/dingtalk v1.1.3/go.mod h1:9BaLuGSBqY3vT5hstValh48DbsKO7vaHaJnG9pXwbto=
github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
@@ -24,29 +38,59 @@ github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnm
github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja v0.0.0-20240220182346-e401ed450204 h1:O7I1iuzEA7SG+dK8ocOBSlYAA9jBUmCYl/Qa7ey7JAM=
github.com/dop251/goja v0.0.0-20240220182346-e401ed450204/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
+github.com/dop251/goja v0.0.0-20240627195025-eb1f15ee67d2 h1:4Ew88p5s9dwIk5/woUyqI9BD89NgZoUNH4/rM/h2UDg=
github.com/dop251/goja v0.0.0-20240627195025-eb1f15ee67d2/go.mod h1:o31y53rb/qiIAONF7w3FHJZRqqP3fzHUr1HqanthByw=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM=
+github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
+github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
+github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg=
+github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
+github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/gocql/gocql v1.6.0 h1:IdFdOTbnpbd0pDhl4REKQDM+Q0SzKXQ1Yh+YZZ8T/qU=
+github.com/gocql/gocql v1.6.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/influxdata/influxdb-client-go/v2 v2.13.0 h1:ioBbLmR5NMbAjP4UVA5r9b5xGjpABD7j65pI8kFphDM=
github.com/influxdata/influxdb-client-go/v2 v2.13.0/go.mod h1:k+spCbt9hcvqvUiz0sr5D8LolXHqAAOfPw9v/RIRHl4=
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU=
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
+github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -58,9 +102,17 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/oapi-codegen/runtime v1.0.0 h1:P4rqFX5fMFWqRzY9M/3YF9+aPSPPB06IzP2P7oOxrWo=
github.com/oapi-codegen/runtime v1.0.0/go.mod h1:LmCUMQuPB4M/nLXilQXhHw+BLZdDb18B34OO356yJ/A=
+github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU=
+github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
+github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
@@ -77,70 +129,105 @@ github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzuk
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8=
github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
+github.com/redis/go-redis/v9 v9.5.4 h1:vOFYDKKVgrI5u++QvnMT7DksSMYg7Aw/Np4vLJLKLwY=
github.com/redis/go-redis/v9 v9.5.4/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
+github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
+github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
+github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
+github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc=
go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
+go.mongodb.org/mongo-driver v1.16.0 h1:tpRsfBJMROVHKpdGyc1BBEzzjDUWjItxbVSZ8Ls4BQ4=
go.mongodb.org/mongo-driver v1.16.0/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw=
+go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
+go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
+go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
+golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -148,6 +235,7 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -158,25 +246,36 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/gorm v1.25.11 h1:/Wfyg1B/je1hnDx3sMkX+gAlxrlZpn6X0BXRlwXlvHg=
+gorm.io/gorm v1.25.11/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
diff --git a/go-iot-mq/handler_calc.go b/go-iot-mq/handler_calc.go
index 10b37b69be0d35b96e029ecfcfcd4d42ca6b9f03..a16992f68a668da54cfa446050c179083ff7e3f2 100644
--- a/go-iot-mq/handler_calc.go
+++ b/go-iot-mq/handler_calc.go
@@ -39,6 +39,9 @@ func HandlerCalc(messages <-chan amqp.Delivery) {
zap.S().Infof(" [*] Waiting for messages. To exit press CTRL+C")
}
+func calcMeasurement(deviceUid int , IdentificationCode, protocol string) string {
+ return protocol + "_" + strconv.Itoa(deviceUid) + "_" + IdentificationCode
+}
func HandlerCalcStr(d amqp.Delivery) bool {
var myMap map[string]int64
@@ -74,13 +77,13 @@ func HandlerCalcStr(d amqp.Delivery) bool {
}
var m = make(map[string]any)
for _, cache := range ccc.Param {
- // fixme: 支持一下直接查询原始数据
+ // todo: 查询逻辑调整。 Measurement 不能直接和 MQTT_CLIENT_ID 对应了
if "原始" == cache.Reduce {
var fd []string
fd = append(fd, strconv.Itoa(cache.SignalId))
config := InfluxQueryConfig{}
config.Bucket = globalConfig.InfluxConfig.Bucket
- config.Measurement = strconv.Itoa(cache.MqttClientId)
+ config.Measurement = calcMeasurement(cache.DeviceUid,cache.IdentificationCode,cache.Protocol)
config.Fields = fd
config.Aggregation = AggregationConfig{
Every: 1,
@@ -116,7 +119,7 @@ func HandlerCalcStr(d amqp.Delivery) bool {
config := InfluxQueryConfig{}
config.Bucket = globalConfig.InfluxConfig.Bucket
- config.Measurement = strconv.Itoa(cache.MqttClientId)
+ config.Measurement = calcMeasurement(cache.DeviceUid,cache.IdentificationCode,cache.Protocol)
config.Fields = fd
config.StartTime = preTime - ccc.Offset
@@ -150,7 +153,10 @@ func HandlerCalcStr(d amqp.Delivery) bool {
// 获取数据库和集合
db := GMongoClient.Database(globalConfig.MongoConfig.Db)
- collection := db.Collection(globalConfig.MongoConfig.Collection)
+ // fixme: 暂时使用固定集合名,后续需要改成根据规则ID动态获取
+ name := CalcCollectionName(globalConfig.MongoConfig.Collection, ccc.ID)
+ CheckCollectionAndCreate(globalConfig.MongoConfig.Collection,name)
+ collection := db.Collection(name)
// 插入数据
insertResult, err := collection.InsertOne(context.Background(), bson.M{
@@ -206,19 +212,35 @@ func HandlerCalcStr(d amqp.Delivery) bool {
// map[string]interface{}类型,表示JavaScript脚本执行后的结果,其中键为结果名,值为结果值
func runCalcScript(param map[string]any, script string) map[string]interface{} {
vm := goja.New()
+
+ // 执行 JavaScript 脚本
_, err := vm.RunString(script)
if err != nil {
- zap.S().Error("JS代码有问题!")
+ zap.S().Error("JS代码有问题!", zap.Error(err))
return nil
}
- var fn func(string2 map[string]any) map[string]interface{}
+
+ // 将 JavaScript 中的 main 函数映射到 Go 的 fn 函数
+ var fn func(map[string]any) map[string]interface{}
err = vm.ExportTo(vm.Get("main"), &fn)
if err != nil {
- zap.S().Error("Js函数映射到 Go 函数失败!")
+ zap.S().Error("Js函数映射到 Go 函数失败!", zap.Error(err))
return nil
}
- a := fn(param)
- return a
+
+ // 使用 defer 和 recover 来捕获 fn 函数中的 panic
+ result := make(map[string]interface{})
+ defer func() {
+ if r := recover(); r != nil {
+ zap.S().Error("在执行 JavaScript 函数时发生 panic:", zap.Any("panic value", r))
+ // 可以选择返回空的 map 或者包含错误信息的 map
+ result["error"] = fmt.Sprintf("panic occurred: %v", r)
+ }
+ }()
+
+ // 调用映射的函数
+ result = fn(param)
+ return result
}
// getNextTime 获取下一次执行时间(秒)
diff --git a/go-iot-mq/handler_coap_storage.go b/go-iot-mq/handler_coap_storage.go
new file mode 100644
index 0000000000000000000000000000000000000000..1be13e18acdc2ca632b63df529655a6f4ae088d5
--- /dev/null
+++ b/go-iot-mq/handler_coap_storage.go
@@ -0,0 +1,147 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ amqp "github.com/rabbitmq/amqp091-go"
+ "github.com/redis/go-redis/v9"
+ "go.uber.org/zap"
+ "strconv"
+ "time"
+)
+
+// CoapMessage 用于处理coap转发后的数据
+type CoapMessage struct {
+ Uid string `json:"uid"`
+ Message string `json:"message"`
+}
+
+// HandlerCoapDataStorage 函数处理从AMQP通道接收到的coap消息数据
+// 参数:
+//
+// messages <-chan amqp.Delivery:接收AMQP消息的通道
+//
+// 返回值:
+//
+// 无
+func HandlerCoapDataStorage(messages <-chan amqp.Delivery) {
+
+ go func() {
+
+ for d := range messages {
+ HandlerDataCoapStorageString(d)
+ err := d.Ack(false)
+ if err != nil {
+ zap.S().Errorf("消息确认异常:%+v", err)
+
+ }
+ }
+ }()
+
+ zap.S().Infof(" [*] Waiting for messages. To exit press CTRL+C")
+}
+
+func HandlerDataCoapStorageString(d amqp.Delivery) {
+ var msg CoapMessage
+ err := json.Unmarshal(d.Body, &msg)
+ if err != nil {
+ zap.S().Infof("Failed to unmarshal message: %s", err)
+ return
+ }
+ zap.S().Infof("处理 pre_coap_handler 数据 : %+v", msg)
+
+ script := GetScriptRedisForCoap(msg.Uid)
+ if script != "" {
+ data := runScript(msg.Message, script)
+ if data == nil {
+ zap.S().Infof("执行脚本为空")
+ return
+ }
+ for i := 0; i < len(*data); i++ {
+ row := (*data)[i]
+ StorageDataRowList(row,"coap")
+ }
+ zap.S().Debugf("DataRowList: %+v", data)
+
+ jsonData, err := json.Marshal(data)
+ if err != nil {
+ zap.S().Errorf("推送报警原始数据异常 %s", err)
+ return
+ }
+ zap.S().Infof("推送报警原始数据: %s", jsonData)
+ HandlerCoapLastTime(*data)
+ PushToQueue("waring_handler", jsonData)
+ PushToQueue("waring_delay_handler", jsonData)
+ PushToQueue("transmit_handler", jsonData)
+ } else {
+ zap.S().Infof("执行脚本为空")
+ }
+
+}
+
+// HandlerCoapLastTime 和上一次推送事件进行对比,判断是否超过阈值,如果超过则发送额外的消息通知
+func HandlerCoapLastTime(data []DataRowList) {
+ if len(data) == 0 {
+ return
+ }
+
+ var deviceUid = data[0].DeviceUid
+ key := "last_push_time:" + deviceUid
+ // 1. 从redis中获取这个设备上次推送的时间
+ lastTime, err := globalRedisClient.Get(context.Background(), key).Result()
+ if err != nil && !errors.Is(err, redis.Nil) {
+ zap.S().Errorf("获取设备上次推送时间异常:%+v", err)
+ return
+ }
+ now := time.Now().Unix()
+
+ // 如果没有这个时间则设置时间(当前时间)
+ if errors.Is(err, redis.Nil) {
+ err := globalRedisClient.Set(context.Background(), key, now, 0).Err()
+ if err != nil {
+ zap.S().Errorf("设置设备上次推送时间异常:%+v", err)
+ return
+ }
+ lastTime = fmt.Sprintf("%d", now)
+ }
+
+ if lastTime != fmt.Sprintf("%d", now) {
+
+ val := globalRedisClient.LRange(context.Background(), "coap_bind_device_info:"+deviceUid, 0, -1).Val()
+
+ for _, s := range val {
+ handlerCoapOne(s)
+ }
+
+ }
+
+}
+
+func handlerCoapOne(deviceUid string) bool {
+ val := globalRedisClient.Get(context.Background(), "coap_bind_device_info:"+deviceUid).Val()
+ if val == "" {
+ return true
+ }
+ parseUint, _ := strconv.ParseUint(val, 10, 64)
+ withRedis := FindByIdWithRedis(parseUint)
+ if withRedis == nil {
+ return true
+ }
+ globalRedisClient.Expire(context.Background(), "Device_Off_Message:"+deviceUid, time.Duration(withRedis.PushInterval)*time.Second)
+ return false
+}
+
+// GetScriptRedisForCoap 根据 http 的设备ID从Redis中获取对应的脚本
+// 参数:
+//
+// tcp id string - tcp id
+//
+// 返回值:
+//
+// string - 对应的脚本
+func GetScriptRedisForCoap(tcpId string) string {
+ val := globalRedisClient.HGet(context.Background(), "struct:Coap", tcpId).Val()
+ return val
+}
diff --git a/go-iot-mq/handler_http_storage.go b/go-iot-mq/handler_http_storage.go
new file mode 100644
index 0000000000000000000000000000000000000000..f75a3f14cb396e180bea8925c231817135e91911
--- /dev/null
+++ b/go-iot-mq/handler_http_storage.go
@@ -0,0 +1,147 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ amqp "github.com/rabbitmq/amqp091-go"
+ "github.com/redis/go-redis/v9"
+ "go.uber.org/zap"
+ "strconv"
+ "time"
+)
+
+// HttpMessage 用于处理http转发后的数据
+type HttpMessage struct {
+ Uid string `json:"uid"`
+ Message string `json:"message"`
+}
+
+// HandlerHttpDataStorage 函数处理从AMQP通道接收到的HTTP消息数据
+// 参数:
+//
+// messages <-chan amqp.Delivery:接收AMQP消息的通道
+//
+// 返回值:
+//
+// 无
+func HandlerHttpDataStorage(messages <-chan amqp.Delivery) {
+
+ go func() {
+
+ for d := range messages {
+ HandlerDataHttpStorageString(d)
+ err := d.Ack(false)
+ if err != nil {
+ zap.S().Errorf("消息确认异常:%+v", err)
+
+ }
+ }
+ }()
+
+ zap.S().Infof(" [*] Waiting for messages. To exit press CTRL+C")
+}
+
+func HandlerDataHttpStorageString(d amqp.Delivery) {
+ var msg HttpMessage
+ err := json.Unmarshal(d.Body, &msg)
+ if err != nil {
+ zap.S().Infof("Failed to unmarshal message: %s", err)
+ return
+ }
+ zap.S().Infof("处理 pre_http_handler 数据 : %+v", msg)
+
+ script := GetScriptRedisForHttp(msg.Uid)
+ if script != "" {
+ data := runScript(msg.Message, script)
+ if data == nil {
+ zap.S().Infof("执行脚本为空")
+ return
+ }
+ for i := 0; i < len(*data); i++ {
+ row := (*data)[i]
+ StorageDataRowList(row,"http")
+ }
+ zap.S().Debugf("DataRowList: %+v", data)
+
+ jsonData, err := json.Marshal(data)
+ if err != nil {
+ zap.S().Errorf("推送报警原始数据异常 %s", err)
+ return
+ }
+ zap.S().Infof("推送报警原始数据: %s", jsonData)
+ HandlerHttpLastTime(*data)
+ PushToQueue("waring_handler", jsonData)
+ PushToQueue("waring_delay_handler", jsonData)
+ PushToQueue("transmit_handler", jsonData)
+ } else {
+ zap.S().Infof("执行脚本为空")
+ }
+
+}
+
+// HandlerHttpLastTime 和上一次推送事件进行对比,判断是否超过阈值,如果超过则发送额外的消息通知
+func HandlerHttpLastTime(data []DataRowList) {
+ if len(data) == 0 {
+ return
+ }
+
+ var deviceUid = data[0].DeviceUid
+ key := "last_push_time:" + deviceUid
+ // 1. 从redis中获取这个设备上次推送的时间
+ lastTime, err := globalRedisClient.Get(context.Background(), key).Result()
+ if err != nil && !errors.Is(err, redis.Nil) {
+ zap.S().Errorf("获取设备上次推送时间异常:%+v", err)
+ return
+ }
+ now := time.Now().Unix()
+
+ // 如果没有这个时间则设置时间(当前时间)
+ if errors.Is(err, redis.Nil) {
+ err := globalRedisClient.Set(context.Background(), key, now, 0).Err()
+ if err != nil {
+ zap.S().Errorf("设置设备上次推送时间异常:%+v", err)
+ return
+ }
+ lastTime = fmt.Sprintf("%d", now)
+ }
+
+ if lastTime != fmt.Sprintf("%d", now) {
+
+ val := globalRedisClient.LRange(context.Background(), "http_bind_device_info:"+deviceUid, 0, -1).Val()
+
+ for _, s := range val {
+ handlerHttpOne(s)
+ }
+
+ }
+
+}
+
+func handlerHttpOne(deviceUid string) bool {
+ val := globalRedisClient.Get(context.Background(), "http_bind_device_info:"+deviceUid).Val()
+ if val == "" {
+ return true
+ }
+ parseUint, _ := strconv.ParseUint(val, 10, 64)
+ withRedis := FindByIdWithRedis(parseUint)
+ if withRedis == nil {
+ return true
+ }
+ globalRedisClient.Expire(context.Background(), "Device_Off_Message:"+deviceUid, time.Duration(withRedis.PushInterval)*time.Second)
+ return false
+}
+
+// GetScriptRedisForHttp 根据 http 的设备ID从Redis中获取对应的脚本
+// 参数:
+//
+// tcp id string - tcp id
+//
+// 返回值:
+//
+// string - 对应的脚本
+func GetScriptRedisForHttp(tcpId string) string {
+ val := globalRedisClient.HGet(context.Background(), "struct:Http", tcpId).Val()
+ return val
+}
diff --git a/go-iot-mq/handler_storage.go b/go-iot-mq/handler_mqtt_storage.go
similarity index 63%
rename from go-iot-mq/handler_storage.go
rename to go-iot-mq/handler_mqtt_storage.go
index 721f8fa481d1f844d477d8f9d11bf340ed9b328f..60ce1aee6d0f361865cca725c1a19c5295512c48 100644
--- a/go-iot-mq/handler_storage.go
+++ b/go-iot-mq/handler_mqtt_storage.go
@@ -1,3 +1,5 @@
+// 用于处理MQTT转发过来的数据
+
package main
import (
@@ -61,9 +63,14 @@ func HandlerDataStorageString(d amqp.Delivery) {
script := GetScriptRedis(msg.MQTTClientID)
if script != "" {
data := runScript(msg.Message, script)
+
+ if data == nil {
+ zap.S().Infof("执行脚本为空")
+ return
+ }
for i := 0; i < len(*data); i++ {
row := (*data)[i]
- StorageDataRowList(row)
+ StorageDataRowList(row, "mqtt")
}
zap.S().Debugf("DataRowList: %+v", data)
@@ -73,8 +80,7 @@ func HandlerDataStorageString(d amqp.Delivery) {
return
}
zap.S().Infof("推送报警原始数据: %s", jsonData)
- writeAPI.Flush()
- HandlerLastTime(*data)
+ HandlerMqttLastTime(*data)
PushToQueue("waring_handler", jsonData)
PushToQueue("waring_delay_handler", jsonData)
PushToQueue("transmit_handler", jsonData)
@@ -84,8 +90,8 @@ func HandlerDataStorageString(d amqp.Delivery) {
}
-// HandlerLastTime 和上一次推送事件进行对比,判断是否超过阈值,如果超过则发送额外的消息通知
-func HandlerLastTime(data []DataRowList) {
+// HandlerMqttLastTime 和上一次推送事件进行对比,判断是否超过阈值,如果超过则发送额外的消息通知
+func HandlerMqttLastTime(data []DataRowList) {
if len(data) == 0 {
return
}
@@ -146,12 +152,17 @@ func FindByIdWithRedis(id uint64) *DeviceInfo {
return &res
}
-func genMeasurement(dt DataRowList) string {
- if dt.DeviceUid == dt.IdentificationCode {
- return "mqtt_" + dt.DeviceUid
- } else {
- return "mqtt_" + dt.DeviceUid + dt.IdentificationCode
- }
+func genMeasurement(dt DataRowList, protocol string) string {
+ return protocol + "_" + dt.DeviceUid + "_" + dt.IdentificationCode
+}
+
+// CalcBucketName 函数根据前缀、协议和id计算桶名
+// prefix: 桶名前缀
+// protocol: 使用的协议
+// id: 桶的ID
+// 返回值: 计算得到的桶名
+func CalcBucketName(prefix, protocol string, id uint) string {
+ return prefix + "_" + protocol + "_" + strconv.Itoa(int(id%100))
}
// StorageDataRowList 函数将DataRowList类型指针dt中的数据写入InfluxDB数据库
@@ -162,16 +173,29 @@ func genMeasurement(dt DataRowList) string {
// 返回值:
//
// 无
-func StorageDataRowList(dt DataRowList) {
- signal2 := GetMqttClientSignal2(dt.DeviceUid)
+func StorageDataRowList(dt DataRowList, protocol string) {
+ signal2 := GetMqttClientSignal2(dt.DeviceUid, dt.IdentificationCode)
+ zap.S().Infof("获取的mqtt信号数据signal2: %+v", signal2)
+ zap.S().Infof("当前的DataRowList数据: %+v", dt)
timeFromUnix := time.Unix(dt.Time, 0)
- p := influxdb2.NewPointWithMeasurement(genMeasurement(dt)).
+ i, err := strconv.Atoi(dt.DeviceUid)
+ if err != nil {
+ fmt.Println("转换错误:", err)
+ } else {
+ fmt.Println("转换后的整数:", i)
+ }
+
+ writeAPI := GlobalInfluxDbClient.WriteAPI(globalConfig.InfluxConfig.Org,
+ CalcBucketName(globalConfig.InfluxConfig.Bucket, protocol, uint(i)))
+
+ p := influxdb2.NewPointWithMeasurement(genMeasurement(dt, protocol)).
AddField("storage_time", time.Now().Unix()).
AddField("push_time", dt.Time).
SetTime(timeFromUnix)
for _, row := range dt.DataRows {
+
b := signal2[row.Name].Numb
if b {
float, _ := strconv.ParseFloat(row.Value, 64)
@@ -181,32 +205,46 @@ func StorageDataRowList(dt DataRowList) {
p.AddField(strconv.Itoa(signal2[row.Name].ID), row.Value)
}
-
+ zap.S().Infof("当前信号的的CacheSize:%+v=============rowName:%+v", signal2[row.Name].CacheSize, row.Name)
if signal2[row.Name].CacheSize > 0 {
// 获取当前 ZSet 的大小
- currentSize := globalRedisClient.ZCard(context.Background(), "signal_delay_warning:"+dt.DeviceUid+":"+strconv.Itoa(signal2[row.Name].ID)).Val()
-
+ currentSize := globalRedisClient.ZCard(context.Background(),
+ "signal_delay_warning:"+dt.DeviceUid+":" + dt.IdentificationCode+ ":"+strconv.Itoa(signal2[row.Name].ID)).Val()
+ zap.S().Infof("当前signal_delay_warning的大小: %+v", currentSize)
// 如果 ZSet 的大小已经达到或超过配置的缓存大小,则移除第一个元素
if currentSize >= signal2[row.Name].CacheSize {
+ zap.S().Infof("当前signal_delay_warning的currentSize大于等于配置大小:%+v", signal2[row.Name].CacheSize)
// 移除 ZSet 中分数最低的元素,即最早的元素
i := signal2[row.Name].CacheSize + 1 - currentSize
+ zap.S().Infof("计算后的i: %+v", i)
if i == 1 {
-
+ zap.S().Infof("计算后的i的值为1")
} else {
- err := globalRedisClient.ZRemRangeByRank(context.Background(), "signal_delay_warning:"+dt.DeviceUid+":"+strconv.Itoa(signal2[row.Name].ID), 0, i).Err()
+ zap.S().Infof("开始移除之前的元素")
+ err := globalRedisClient.ZRemRangeByRank(context.Background(),
+ "signal_delay_warning:"+dt.DeviceUid+":" +dt.IdentificationCode+ ":"+strconv.Itoa(
+ signal2[row.Name].ID), 0,
+ i-1).Err()
if err != nil {
// 处理错误
zap.S().Errorf("移除 ZSet 元素异常:%+v", err)
}
}
+ } else {
+ zap.S().Infof("当前大小未超过配置大小,写入缓存")
+ // 写入缓存
+ // 根据zset的特效,如果value一致的话,则会修改score,此处体现为修改了该值的时间,也就是说最新的值和之前的值相同的话只会保留最新时间的这一份
+ err := globalRedisClient.ZAdd(context.Background(),
+ "signal_delay_warning:"+dt.DeviceUid+":" + dt.IdentificationCode +":"+strconv.Itoa(signal2[row.
+ Name].
+ ID),
+ redis.Z{Score: float64(dt.Time), Member: row.Value}).Err()
+ if err != nil {
+ // 处理错误
+ zap.S().Errorf("写入 ZSet 元素异常:%+v", err)
+ }
}
- // 写入缓存
- err := globalRedisClient.ZAdd(context.Background(), "signal_delay_warning:"+dt.DeviceUid+":"+strconv.Itoa(signal2[row.Name].ID), redis.Z{Score: float64(dt.Time), Member: row.Value}).Err()
- if err != nil {
- // 处理错误
- zap.S().Errorf("写入 ZSet 元素异常:%+v", err)
- }
}
}
@@ -224,60 +262,41 @@ func StorageDataRowList(dt DataRowList) {
// 返回值:
// *DataRowList 类型指针,JS 脚本执行后的结果,如果执行失败则返回 nil
func runScript(param string, script string) *[]DataRowList {
-
vm := goja.New()
+
+ // 执行 JavaScript 脚本
_, err := vm.RunString(script)
if err != nil {
- zap.S().Errorf("JS代码有问题!")
+ zap.S().Errorf("JS代码有问题: %v", err)
return nil
}
- var fn func(string2 string) *[]DataRowList
+
+ // 将 JavaScript 中的 main 函数映射到 Go 的 fn 函数
+ var fn func(string) *[]DataRowList
err = vm.ExportTo(vm.Get("main"), &fn)
if err != nil {
- zap.S().Errorf("Js函数映射到 Go 函数失败!")
+ zap.S().Errorf("Js函数映射到 Go 函数失败: %v", err)
return nil
}
- a := fn(param)
- return a
-
-}
-// GetMqttClientSignal 函数根据MQTT客户端ID获取对应的信号映射表
-// 参数:
-//
-// mqtt_client_id string - MQTT客户端ID
-//
-// 返回值:
-//
-// map[string]bool - 信号映射表,其中key为信号名称,value表示信号类型是否为数字类型(忽略大小写)
-func GetMqttClientSignal(mqttClientId string) (map[int]bool, map[int]int64, map[string]int) {
- background := context.Background()
- result, err := globalRedisClient.LRange(background, "signal:"+mqttClientId, 0, -1).Result()
- if err != nil {
- // 处理错误,例如记录日志或返回错误
- zap.S().Errorf("获取信号映射表失败:%+v", err)
- }
- mapping := make(map[int]bool)
- mappingName := make(map[string]int)
- cacheSizeMapping := make(map[int]int64)
- for _, strSignal := range result {
- var signal Signal
- err := json.Unmarshal([]byte(strSignal), &signal)
- if err != nil {
- continue // 如果反序列化失败,跳过当前信号
+ // 使用 defer 和 recover 来捕获 fn 函数中的 panic
+ var result *[]DataRowList
+ defer func() {
+ if r := recover(); r != nil {
+ zap.S().Errorf("在执行 JavaScript 函数时发生 panic: %v", r)
+ // 这里可以进行一些清理工作或者返回一个特定的错误结果
+ result = nil // 或者设置为一个特定的错误结果
}
+ }()
- mapping[signal.ID] = strings.EqualFold(signal.Type, "数字")
- cacheSizeMapping[signal.ID] = signal.CacheSize
- mappingName[signal.Name] = signal.ID
- }
- return mapping, cacheSizeMapping, mappingName
-
+ // 调用映射的函数
+ result = fn(param)
+ return result
}
-func GetMqttClientSignal2(mqttClientId string) map[string]signalMapping {
+func GetMqttClientSignal2(mqttClientId, IdentificationCode string) map[string]signalMapping {
background := context.Background()
- result, err := globalRedisClient.LRange(background, "signal:"+mqttClientId, 0, -1).Result()
+ result, err := globalRedisClient.LRange(background, "signal:"+mqttClientId+":"+IdentificationCode, 0, -1).Result()
if err != nil {
// 处理错误,例如记录日志或返回错误
zap.S().Errorf("获取信号映射表失败:%+v", err)
diff --git a/go-iot-mq/handler_tcp_storage.go b/go-iot-mq/handler_tcp_storage.go
new file mode 100644
index 0000000000000000000000000000000000000000..16535ad6995499446088c616fb7d8feff4385dad
--- /dev/null
+++ b/go-iot-mq/handler_tcp_storage.go
@@ -0,0 +1,147 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ amqp "github.com/rabbitmq/amqp091-go"
+ "github.com/redis/go-redis/v9"
+ "go.uber.org/zap"
+ "strconv"
+ "time"
+)
+
+// TcpMessage 用于处理tcp转发后的数据
+type TcpMessage struct {
+ Uid string `json:"uid"`
+ Message string `json:"message"`
+}
+
+// HandlerTcpDataStorage 函数处理从AMQP通道接收到的TCP消息数据
+// 参数:
+//
+// messages <-chan amqp.Delivery:接收AMQP消息的通道
+//
+// 返回值:
+//
+// 无
+func HandlerTcpDataStorage(messages <-chan amqp.Delivery) {
+
+ go func() {
+
+ for d := range messages {
+ HandlerDataTcpStorageString(d)
+ err := d.Ack(false)
+ if err != nil {
+ zap.S().Errorf("消息确认异常:%+v", err)
+
+ }
+ }
+ }()
+
+ zap.S().Infof(" [*] Waiting for messages. To exit press CTRL+C")
+}
+
+func HandlerDataTcpStorageString(d amqp.Delivery) {
+ var msg TcpMessage
+ err := json.Unmarshal(d.Body, &msg)
+ if err != nil {
+ zap.S().Infof("Failed to unmarshal message: %s", err)
+ return
+ }
+ zap.S().Infof("处理 pre_tcp_handler 数据 : %+v", msg)
+
+ script := GetScriptRedisForTcp(msg.Uid)
+ if script != "" {
+ data := runScript(msg.Message, script)
+ if data == nil {
+ zap.S().Infof("执行脚本为空")
+ return
+ }
+ for i := 0; i < len(*data); i++ {
+ row := (*data)[i]
+ StorageDataRowList(row,"tcp")
+ }
+ zap.S().Debugf("DataRowList: %+v", data)
+
+ jsonData, err := json.Marshal(data)
+ if err != nil {
+ zap.S().Errorf("推送报警原始数据异常 %s", err)
+ return
+ }
+ zap.S().Infof("推送报警原始数据: %s", jsonData)
+ HandlerTcpLastTime(*data)
+ PushToQueue("waring_handler", jsonData)
+ PushToQueue("waring_delay_handler", jsonData)
+ PushToQueue("transmit_handler", jsonData)
+ } else {
+ zap.S().Infof("执行脚本为空")
+ }
+
+}
+
+// HandlerTcpLastTime 和上一次推送事件进行对比,判断是否超过阈值,如果超过则发送额外的消息通知
+func HandlerTcpLastTime(data []DataRowList) {
+ if len(data) == 0 {
+ return
+ }
+
+ var deviceUid = data[0].DeviceUid
+ key := "last_push_time:" + deviceUid
+ // 1. 从redis中获取这个设备上次推送的时间
+ lastTime, err := globalRedisClient.Get(context.Background(), key).Result()
+ if err != nil && !errors.Is(err, redis.Nil) {
+ zap.S().Errorf("获取设备上次推送时间异常:%+v", err)
+ return
+ }
+ now := time.Now().Unix()
+
+ // 如果没有这个时间则设置时间(当前时间)
+ if errors.Is(err, redis.Nil) {
+ err := globalRedisClient.Set(context.Background(), key, now, 0).Err()
+ if err != nil {
+ zap.S().Errorf("设置设备上次推送时间异常:%+v", err)
+ return
+ }
+ lastTime = fmt.Sprintf("%d", now)
+ }
+
+ if lastTime != fmt.Sprintf("%d", now) {
+
+ val := globalRedisClient.LRange(context.Background(), "tcp_bind_device_info:"+deviceUid, 0, -1).Val()
+
+ for _, s := range val {
+ handlerTcpOne(s)
+ }
+
+ }
+
+}
+
+func handlerTcpOne(deviceUid string) bool {
+ val := globalRedisClient.Get(context.Background(), "tcp_bind_device_info:"+deviceUid).Val()
+ if val == "" {
+ return true
+ }
+ parseUint, _ := strconv.ParseUint(val, 10, 64)
+ withRedis := FindByIdWithRedis(parseUint)
+ if withRedis == nil {
+ return true
+ }
+ globalRedisClient.Expire(context.Background(), "Device_Off_Message:"+deviceUid, time.Duration(withRedis.PushInterval)*time.Second)
+ return false
+}
+
+// GetScriptRedisForTcp 根据tcp 的设备ID从Redis中获取对应的脚本
+// 参数:
+//
+// tcp id string - tcp id
+//
+// 返回值:
+//
+// string - 对应的脚本
+func GetScriptRedisForTcp(tcpId string) string {
+ val := globalRedisClient.HGet(context.Background(), "struct:tcp", tcpId).Val()
+ return val
+}
diff --git a/go-iot-mq/handler_transmit.go b/go-iot-mq/handler_transmit.go
index e6876bba37f4dffe63d8dfbfe082d9871fc141d3..5d6f5f2bfd4681ac67c77ea83062eb72cc124ed0 100644
--- a/go-iot-mq/handler_transmit.go
+++ b/go-iot-mq/handler_transmit.go
@@ -23,7 +23,7 @@ func HandlerTransmit(messages <-chan amqp.Delivery) {
zap.S().Error("处理cassandra数据失败", zap.Error(err))
}
- transmitCacheBiz.Run(globalRedisClient, data[0].DeviceUid, data)
+ transmitCacheBiz.Run(globalRedisClient, data)
err = d.Ack(false)
if err != nil {
zap.S().Errorf("消息确认异常:%+v", err)
diff --git a/go-iot-mq/handler_waring.go b/go-iot-mq/handler_waring.go
index b1e1880515ab67278d762fa0c42a7e29397022c1..400d9211d065cf43cc4582c336b5ce2acad89244 100644
--- a/go-iot-mq/handler_waring.go
+++ b/go-iot-mq/handler_waring.go
@@ -70,11 +70,9 @@ func handlerWaringOnce(msg DataRowList) {
uid := msg.DeviceUid
// 1. 根据设备UID(mqtt客户端ID)获取所有信号
- mapping := getMqttClientMappingSignalWarningConfig(uid)
+ mapping := getMqttClientMappingSignalWarningConfig(uid,msg.IdentificationCode)
db := GMongoClient.Database(globalConfig.MongoConfig.Db)
- collection := db.Collection(globalConfig.MongoConfig.WaringCollection)
- var toInsert []interface{}
for _, row := range msg.DataRows {
configs := mapping[row.Name]
@@ -92,7 +90,7 @@ func handlerWaringOnce(msg DataRowList) {
if config.Min <= floatValue && floatValue <= config.Max {
// 在范围内,根据需求执行操作
zap.S().Infof("当前信号 %s 值在范围内: %+v 命中规则ID %d", row.Name, floatValue, config.ID)
- toInsert = append(toInsert, bson.M{
+ m := bson.M{
"device_uid": uid,
"signal_name": row.Name,
"signal_id": config.SignalId,
@@ -100,13 +98,22 @@ func handlerWaringOnce(msg DataRowList) {
"rule_id": config.ID,
"insert_time": time.Now().Unix(),
"up_time": msg.Time,
- })
+ }
+ name := CalcCollectionName(globalConfig.MongoConfig.WaringCollection, uint(config.ID))
+ collection := db.Collection(name)
+ one, err := collection.InsertOne(context.TODO(), m)
+ if err != nil {
+ zap.S().Errorf("插入数据失败: %+v", err)
+ } else {
+ zap.S().Infof("插入数据成功: %s", one.InsertedID)
+ }
}
+
} else {
if floatValue < config.Min || floatValue > config.Max {
// 范围外报警
zap.S().Infof("当前信号 %s 范围外报警: %+v 命中规则ID %d", row.Name, floatValue, config.ID)
- toInsert = append(toInsert, bson.M{
+ m := bson.M{
"device_uid": uid,
"signal_name": row.Name,
"signal_id": config.SignalId,
@@ -114,15 +121,23 @@ func handlerWaringOnce(msg DataRowList) {
"rule_id": config.ID,
"insert_time": time.Now().Unix(),
"up_time": msg.Time,
- })
+ }
+
+ name := CalcCollectionName(globalConfig.MongoConfig.WaringCollection, uint(config.ID))
+ collection := db.Collection(name)
+ one, err := collection.InsertOne(context.TODO(), m)
+ if err != nil {
+ zap.S().Errorf("插入数据失败: %+v", err)
+ } else {
+ zap.S().Infof("插入数据成功: %s", one.InsertedID)
+ }
}
}
-
// fixme: 将报警元数据分发到不同的数据推送通道。
- mt := models.MessageTemplate{
+ mt := models.MessageTemplate{
GeneratorTime: msg.Time,
- DeviceUid: uid,
+ DeviceUid: uid,
SignalId: config.SignalId,
MqttClientId: uid,
SignalName: row.Name,
@@ -140,14 +155,10 @@ func handlerWaringOnce(msg DataRowList) {
}
PushToQueue("waring_notice", jsonData)
-
}
}
- _, err := collection.InsertMany(context.Background(), toInsert)
- if err != nil {
- zap.S().Errorf("消息确认异常:%+v", err)
- }
+
}
// getMqttClientMappingSignalWarningConfig 根据 MQTT 客户端 ID 获取信号警告配置的映射
@@ -158,9 +169,9 @@ func handlerWaringOnce(msg DataRowList) {
// 返回值:
//
// map[string][]SignalWaringConfig - 信号名称到信号警告配置切片的映射
-func getMqttClientMappingSignalWarningConfig(mqttClientId string) map[string][]SignalWaringConfig {
+func getMqttClientMappingSignalWarningConfig(mqttClientId string, code string) map[string][]SignalWaringConfig {
background := context.Background()
- result, err := globalRedisClient.LRange(background, "signal:"+mqttClientId, 0, -1).Result()
+ result, err := globalRedisClient.LRange(background, "signal:"+mqttClientId +":"+ code, 0, -1).Result()
if err != nil {
// 处理错误,例如记录日志或返回错误
zap.S().Errorf("获取信号列表失败: %+v", err)
diff --git a/go-iot-mq/handler_waring_delay.go b/go-iot-mq/handler_waring_delay.go
index 5a8d24f6baad54c5ddb77c9067f2ce3f1f9bea68..4237d201c895a994ce9aa1349699a76e4e8c0bb2 100644
--- a/go-iot-mq/handler_waring_delay.go
+++ b/go-iot-mq/handler_waring_delay.go
@@ -57,11 +57,13 @@ func HandlerWaringDelayStr(d amqp.Delivery) bool {
func handlerWaringDelayOnce(msg DataRowList) {
zap.S().Infof("处理 handlerWaringDelayOnce 数据: %+v", msg)
uid := msg.DeviceUid
- mapping := getDelayParam(uid, msg.DataRows)
+ mapping := getDelayParam(uid , msg.IdentificationCode, msg.DataRows)
+ zap.S().Infof("getDelayParam 数据: %+v", mapping)
background := context.Background()
var scriptParam = make(map[string][]Tv)
for _, param := range mapping {
- key := "signal_delay_warning:" + strconv.Itoa(param.MqttClientId) + ":" + strconv.Itoa(param.SignalId)
+
+ key := "signal_delay_warning:" + strconv.Itoa(param.DeviceUid) +":" +param.IdentificationCode + ":" + strconv.Itoa(param.SignalId)
zap.S().Infof("key = %s", key)
members, _ := globalRedisClient.ZRevRangeWithScores(background, key, 0, -1).Result()
var vs []Tv
@@ -74,14 +76,19 @@ func handlerWaringDelayOnce(msg DataRowList) {
}
script := getDelayScript(mapping)
+ zap.S().Infof("getDelayScript结果: %+v", script)
zap.S().Infof("脚本报警参数 = %+v", scriptParam)
db := GMongoClient.Database(globalConfig.MongoConfig.Db)
- collection := db.Collection(globalConfig.MongoConfig.ScriptWaringCollection)
- var toInsert []interface{}
+
+
+
+
for _, waring := range script {
zap.S().Infof("key = %+v", waring)
delayScript := runWaringDelayScript(waring.Script, scriptParam)
- toInsert = append(toInsert, bson.M{
+ zap.S().Infof("runWaringDelayScript 执行后数据: %+v", delayScript)
+
+ v := bson.M{
"device_uid": uid,
"param": scriptParam,
"script": waring.Script,
@@ -89,17 +96,16 @@ func handlerWaringDelayOnce(msg DataRowList) {
"rule_id": waring.ID,
"insert_time": time.Now().Unix(),
"up_time": msg.Time,
- })
- }
- if toInsert != nil {
-
- one, err := collection.InsertMany(context.Background(), toInsert)
+ }
+ name := CalcCollectionName(globalConfig.MongoConfig.ScriptWaringCollection, uint(waring.ID))
+ CheckCollectionAndCreate(globalConfig.MongoConfig.ScriptWaringCollection,name)
+ collection := db.Collection(name)
+ one, err := collection.InsertOne(context.Background(), v)
if err != nil {
- zap.S().Errorf("插入数据失败 %+v", err)
+ zap.S().Errorf("插入数据异常: %+v", err)
} else {
- zap.S().Infof("插入数据成功 %+v", one)
+ zap.S().Infof("插入数据成功: %+v", one)
}
- return
}
}
@@ -114,20 +120,34 @@ func handlerWaringDelayOnce(msg DataRowList) {
// bool - JavaScript脚本执行后返回的结果
func runWaringDelayScript(script string, param map[string][]Tv) bool {
vm := goja.New()
+
+ // 执行 JavaScript 脚本
_, err := vm.RunString(script)
if err != nil {
fmt.Println("JS代码有问题!")
+ return false // 直接返回 false 表示执行失败
}
- var fn func(string2 map[string][]Tv) bool
+
+ // 将 JavaScript 中的 main 函数映射到 Go 的 fn 函数
+ var fn func(map[string][]Tv) bool
err = vm.ExportTo(vm.Get("main"), &fn)
if err != nil {
fmt.Println("Js函数映射到 Go 函数失败!")
- panic(err)
+ return false // 直接返回 false 表示映射失败
}
+
+ // 使用 defer 和 recover 来捕获 fn 函数中的 panic
+ defer func() {
+ if r := recover(); r != nil {
+ fmt.Println("在执行 JavaScript 函数时发生 panic:", r)
+ // 这里可以根据需要进行错误处理,例如记录日志等
+ }
+ }()
+
+ // 调用映射的函数
a := fn(param)
return a
}
-
// getDelayScript 从Redis中获取SignalDelayWaring信息列表
// 参数:
//
@@ -150,7 +170,19 @@ func getDelayScript(mapping []SignalDelayWaringParam) []SignalDelayWaring {
}
res = append(res, singw)
}
- return res
+ // 使用map来存储已经出现过的ID
+ idMap := make(map[int]bool)
+
+ var uniqueRes []SignalDelayWaring
+ for _, item := range res {
+ if _, exists := idMap[item.ID]; !exists {
+ // 如果ID在map中不存在,则添加到结果数组中
+ uniqueRes = append(uniqueRes, item)
+ // 将ID添加到map中,标记为已存在
+ idMap[item.ID] = true
+ }
+ }
+ return uniqueRes
}
// getDelayParam 函数根据用户UID和DataRow切片从Redis中获取延迟报警参数
@@ -161,7 +193,7 @@ func getDelayScript(mapping []SignalDelayWaringParam) []SignalDelayWaring {
//
// 返回值:
// []SignalDelayWaringParam - SignalDelayWaringParam切片,包含符合要求的延迟报警参数
-func getDelayParam(uid string, rows []DataRow) []SignalDelayWaringParam {
+func getDelayParam(uid string, code string, rows []DataRow) []SignalDelayWaringParam {
val := globalRedisClient.LRange(context.Background(), "delay_param", 0, -1).Val()
var mapping []SignalDelayWaringParam
for _, s := range val {
@@ -170,7 +202,7 @@ func getDelayParam(uid string, rows []DataRow) []SignalDelayWaringParam {
if err != nil {
continue // 如果反序列化失败,跳过当前信号
}
- if strconv.Itoa(param.MqttClientId) == uid && nameInDataRow(param.SignalName, rows) {
+ if strconv.Itoa(param.DeviceUid) == uid && code == param.IdentificationCode && nameInDataRow(param.SignalName, rows) {
mapping = append(mapping, param)
}
diff --git a/go-iot-mq/handler_ws_storage.go b/go-iot-mq/handler_ws_storage.go
new file mode 100644
index 0000000000000000000000000000000000000000..ce46d1d7a1f628358dd98b2ed7bb13f33ee9e4e9
--- /dev/null
+++ b/go-iot-mq/handler_ws_storage.go
@@ -0,0 +1,147 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ amqp "github.com/rabbitmq/amqp091-go"
+ "github.com/redis/go-redis/v9"
+ "go.uber.org/zap"
+ "strconv"
+ "time"
+)
+
+// WsMessage 用于处理ws转发后的数据
+type WsMessage struct {
+ Uid string `json:"uid"`
+ Message string `json:"message"`
+}
+
+// HandlerWsDataStorage 函数处理从AMQP通道接收到的websocket消息数据
+// 参数:
+//
+// messages <-chan amqp.Delivery:接收AMQP消息的通道
+//
+// 返回值:
+//
+// 无
+func HandlerWsDataStorage(messages <-chan amqp.Delivery) {
+
+ go func() {
+
+ for d := range messages {
+ HandlerDataWsStorageString(d)
+ err := d.Ack(false)
+ if err != nil {
+ zap.S().Errorf("消息确认异常:%+v", err)
+
+ }
+ }
+ }()
+
+ zap.S().Infof(" [*] Waiting for messages. To exit press CTRL+C")
+}
+
+func HandlerDataWsStorageString(d amqp.Delivery) {
+ var msg WsMessage
+ err := json.Unmarshal(d.Body, &msg)
+ if err != nil {
+ zap.S().Infof("Failed to unmarshal message: %s", err)
+ return
+ }
+ zap.S().Infof("处理 pre_ws_handler 数据 : %+v", msg)
+
+ script := GetScriptRedisForWs(msg.Uid)
+ if script != "" {
+ data := runScript(msg.Message, script)
+ if data == nil {
+ zap.S().Infof("执行脚本为空")
+ return
+ }
+ for i := 0; i < len(*data); i++ {
+ row := (*data)[i]
+ StorageDataRowList(row,"websocket")
+ }
+ zap.S().Debugf("DataRowList: %+v", data)
+
+ jsonData, err := json.Marshal(data)
+ if err != nil {
+ zap.S().Errorf("推送报警原始数据异常 %s", err)
+ return
+ }
+ zap.S().Infof("推送报警原始数据: %s", jsonData)
+ HandlerWebsocketLastTime(*data)
+ PushToQueue("waring_handler", jsonData)
+ PushToQueue("waring_delay_handler", jsonData)
+ PushToQueue("transmit_handler", jsonData)
+ } else {
+ zap.S().Infof("执行脚本为空")
+ }
+
+}
+
+// HandlerWebsocketLastTime 和上一次推送事件进行对比,判断是否超过阈值,如果超过则发送额外的消息通知
+func HandlerWebsocketLastTime(data []DataRowList) {
+ if len(data) == 0 {
+ return
+ }
+
+ var deviceUid = data[0].DeviceUid
+ key := "last_push_time:" + deviceUid
+ // 1. 从redis中获取这个设备上次推送的时间
+ lastTime, err := globalRedisClient.Get(context.Background(), key).Result()
+ if err != nil && !errors.Is(err, redis.Nil) {
+ zap.S().Errorf("获取设备上次推送时间异常:%+v", err)
+ return
+ }
+ now := time.Now().Unix()
+
+ // 如果没有这个时间则设置时间(当前时间)
+ if errors.Is(err, redis.Nil) {
+ err := globalRedisClient.Set(context.Background(), key, now, 0).Err()
+ if err != nil {
+ zap.S().Errorf("设置设备上次推送时间异常:%+v", err)
+ return
+ }
+ lastTime = fmt.Sprintf("%d", now)
+ }
+
+ if lastTime != fmt.Sprintf("%d", now) {
+
+ val := globalRedisClient.LRange(context.Background(), "ws_bind_device_info:"+deviceUid, 0, -1).Val()
+
+ for _, s := range val {
+ handlerWebsocketOne(s)
+ }
+
+ }
+
+}
+
+func handlerWebsocketOne(deviceUid string) bool {
+ val := globalRedisClient.Get(context.Background(), "ws_bind_device_info:"+deviceUid).Val()
+ if val == "" {
+ return true
+ }
+ parseUint, _ := strconv.ParseUint(val, 10, 64)
+ withRedis := FindByIdWithRedis(parseUint)
+ if withRedis == nil {
+ return true
+ }
+ globalRedisClient.Expire(context.Background(), "Device_Off_Message:"+deviceUid, time.Duration(withRedis.PushInterval)*time.Second)
+ return false
+}
+
+// GetScriptRedisForWs 根据 http 的设备ID从Redis中获取对应的脚本
+// 参数:
+//
+// tcp id string - tcp id
+//
+// 返回值:
+//
+// string - 对应的脚本
+func GetScriptRedisForWs(tcpId string) string {
+ val := globalRedisClient.HGet(context.Background(), "struct:Websocket", tcpId).Val()
+ return val
+}
diff --git a/go-iot-mq/log.go b/go-iot-mq/log.go
index b2e6a90062e4a06639fee63d4b5c533dffd8150b..b79af217d0502b7194d86f0ea5b772cc809368f1 100644
--- a/go-iot-mq/log.go
+++ b/go-iot-mq/log.go
@@ -1,11 +1,9 @@
package main
import (
- "errors"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"os"
- "syscall"
"time"
)
@@ -41,12 +39,12 @@ func InitLog() {
zap.ReplaceGlobals(logger) // 替换全局 Logger
// 确保日志被刷新
- defer func(logger *zap.Logger) {
- err := logger.Sync()
- if err != nil && !errors.Is(err, syscall.ENOTTY) {
- zap.S().Errorf("日志同步失败 %+v", err)
- }
- }(logger)
+ //defer func(logger *zap.Logger) {
+ // err := logger.Sync()
+ // if err != nil && !errors.Is(err, syscall.ENOTTY) {
+ // zap.S().Errorf("日志同步失败 %+v", err)
+ // }
+ //}(logger)
// 记录一条日志作为示例
logger.Debug("这是一个调试级别的日志")
diff --git a/go-iot-mq/main.go b/go-iot-mq/main.go
index b12bd2cba1a099c3a183e27724d5d1b96966f470..1d4512f833851c76caa8702105fc9bc5ff399db6 100644
--- a/go-iot-mq/main.go
+++ b/go-iot-mq/main.go
@@ -5,7 +5,6 @@ import (
"flag"
"fmt"
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
- "github.com/influxdata/influxdb-client-go/v2/api"
"github.com/prometheus/client_golang/prometheus/promhttp"
amqp "github.com/rabbitmq/amqp091-go"
"go.mongodb.org/mongo-driver/mongo"
@@ -21,12 +20,11 @@ import (
)
var globalConfig ServerConfig
-var writeAPI api.WriteAPI
func main() {
var configPath string
- flag.StringVar(&configPath, "config", "app-node1.yml", "Path to the config file")
+ flag.StringVar(&configPath, "config", "app-local-pre_handler.yml", "Path to the config file")
flag.Parse()
yfile, err := os.ReadFile(configPath)
@@ -42,7 +40,6 @@ func main() {
InitGlobalRedisClient(globalConfig.RedisConfig)
InitInfluxDbClient(globalConfig.InfluxConfig)
- writeAPI = GlobalInfluxDbClient.WriteAPI(globalConfig.InfluxConfig.Org, globalConfig.InfluxConfig.Bucket)
//InitRabbitCon(globalConfig.MQConfig)
err = ConnectToRMQ()
if err != nil {
@@ -50,6 +47,7 @@ func main() {
}
zap.S().Infof("消息队列类型 %s", globalConfig.NodeInfo.Type)
+ CreateRabbitQueue("calc_queue")
CreateRabbitQueue("waring_handler")
CreateRabbitQueue("waring_notice")
CreateRabbitQueue("transmit_handler")
@@ -71,40 +69,73 @@ func main() {
cus.Handle(deliveries, HandlerDataStorage, 1, "pre_handler", "")
}
if globalConfig.NodeInfo.Type == "waring_handler" {
- waring_handler, err := cus.AnnounceQueue("waring_handler", "")
+ waringHandler, err := cus.AnnounceQueue("waring_handler", "")
if err != nil {
log.Fatalf("Failed to connect to RabbitMQ: %s", err)
}
- cus.Handle(waring_handler, HandlerWaring, 1, "waring_handler", "")
+ cus.Handle(waringHandler, HandlerWaring, 1, "waring_handler", "")
}
if globalConfig.NodeInfo.Type == "calc_queue" {
- calc_queue, err := cus.AnnounceQueue("calc_queue", "")
+ calcQueue, err := cus.AnnounceQueue("calc_queue", "")
if err != nil {
log.Fatalf("Failed to connect to RabbitMQ: %s", err)
}
- cus.Handle(calc_queue, HandlerCalc, 1, "calc_queue", "")
+ cus.Handle(calcQueue, HandlerCalc, 1, "calc_queue", "")
}
if globalConfig.NodeInfo.Type == "waring_delay_handler" {
- waring_delay_handler, err := cus.AnnounceQueue("waring_delay_handler", "")
+ waringDelayHandler, err := cus.AnnounceQueue("waring_delay_handler", "")
if err != nil {
log.Fatalf("Failed to connect to RabbitMQ: %s", err)
}
- cus.Handle(waring_delay_handler, HandlerWaringDelay, 1, "waring_delay_handler", "")
+ cus.Handle(waringDelayHandler, HandlerWaringDelay, 1, "waring_delay_handler", "")
}
if globalConfig.NodeInfo.Type == "transmit_handler" {
- transmit_handler, err := cus.AnnounceQueue("transmit_handler", "")
+ transmitHandler, err := cus.AnnounceQueue("transmit_handler", "")
if err != nil {
log.Fatalf("Failed to connect to RabbitMQ: %s", err)
}
- cus.Handle(transmit_handler, HandlerTransmit, 1, "transmit_handler", "")
+ cus.Handle(transmitHandler, HandlerTransmit, 1, "transmit_handler", "")
}
if globalConfig.NodeInfo.Type == "waring_notice" {
- waring_notice, err := cus.AnnounceQueue("waring_notice", "")
+ waringNotice, err := cus.AnnounceQueue("waring_notice", "")
if err != nil {
log.Fatalf("Failed to connect to RabbitMQ: %s", err)
}
- cus.Handle(waring_notice, HandlerNotice, 1, "waring_notice", "")
+ cus.Handle(waringNotice, HandlerNotice, 1, "waring_notice", "")
+ }
+
+
+
+ // 协议层处理
+ if globalConfig.NodeInfo.Type == "pre_tcp_handler" {
+ preTcpHandler, err := cus.AnnounceQueue("pre_tcp_handler", "")
+ if err != nil {
+ log.Fatalf("Failed to connect to RabbitMQ: %s", err)
+ }
+ cus.Handle(preTcpHandler, HandlerTcpDataStorage, 1, "pre_tcp_handler", "")
+ }
+
+ if globalConfig.NodeInfo.Type == "pre_http_handler" {
+ preHttpHandler, err := cus.AnnounceQueue("pre_http_handler", "")
+ if err != nil {
+ log.Fatalf("Failed to connect to RabbitMQ: %s", err)
+ }
+ cus.Handle(preHttpHandler, HandlerHttpDataStorage, 1, "pre_http_handler", "")
+ }
+ if globalConfig.NodeInfo.Type == "pre_ws_handler" {
+ preWsHandler, err := cus.AnnounceQueue("pre_ws_handler", "")
+ if err != nil {
+ log.Fatalf("Failed to connect to RabbitMQ: %s", err)
+ }
+ cus.Handle(preWsHandler, HandlerWsDataStorage, 1, "pre_ws_handler", "")
+ }
+ if globalConfig.NodeInfo.Type == "pre_coap_handler" {
+ preCCoapHandler, err := cus.AnnounceQueue("pre_coap_handler", "")
+ if err != nil {
+ log.Fatalf("Failed to connect to RabbitMQ: %s", err)
+ }
+ cus.Handle(preCCoapHandler, HandlerCoapDataStorage, 1, "pre_coap_handler", "")
}
}
diff --git a/go-iot-mq/mq.go b/go-iot-mq/mq.go
index ff263fd08ded147f6975d26298fabdffde25b34a..044579cc12d6efeec84c3ea829c0ebc493da297a 100644
--- a/go-iot-mq/mq.go
+++ b/go-iot-mq/mq.go
@@ -18,7 +18,7 @@ var conn *amqp.Connection
var chann *amqp.Channel
func ConnectToRMQ() (err error) {
- conn, err = amqp.Dial(rmqCredentials)
+ conn, err = amqp.Dial(genUrl(globalConfig.MQConfig))
if err != nil {
return errors.New("Error de conexion: " + err.Error())
}
diff --git a/go-iot-mq/structs.go b/go-iot-mq/structs.go
index 656ac7b8a1a62b02d6c2ad699431a6524e826434..636d43095539e221df487d8414156479e938d287 100644
--- a/go-iot-mq/structs.go
+++ b/go-iot-mq/structs.go
@@ -14,7 +14,7 @@ type MQTTMessage struct {
type DataRowList struct {
Time int64 `json:"time"` // 秒级时间戳
- DeviceUid string `json:"device_uid"` // 是MqttClient的ID
+ DeviceUid string `json:"device_uid"` // 能够产生网络通讯的唯一编码
IdentificationCode string `json:"identification_code"` // 设备标识码
DataRows []DataRow `json:"data"`
Nc string `json:"nc"`
@@ -79,6 +79,12 @@ type MongoConfig struct {
}
type Signal struct {
+ Protocol string `json:"protocol"`
+
+ IdentificationCode string `json:"identification_code"` // 设备标识码
+
+ DeviceUid int `json:"device_uid"` // MQTT客户端表的外键ID
+
MqttClientId int `json:"mqtt_client_id"` // MQTT客户端表的外键ID
Name string `json:"name"` // 信号的名称,用于标识不同的信号
Type string `json:"type"` // 信号的数据类型,如整数、字符串等
@@ -99,7 +105,12 @@ type SignalWaringConfig struct {
type SignalDelayWaringParam struct {
MqttClientName string `gorm:"-" json:"mqtt_client_name"` // MQTT客户端的名称,不存储在数据库中
- MqttClientId int `json:"mqtt_client_id"` // MQTT客户端表的外键ID
+ Protocol string `json:"protocol"`
+
+ IdentificationCode string `json:"identification_code"` // 设备标识码
+
+ DeviceUid int `json:"device_uid"` // MQTT客户端表的外键ID
+
Name string `json:"name"` // 参数名称
SignalName string `gorm:"signal_name" json:"signal_name" structs:"signal_name"` // 信号表 name
SignalId int `gorm:"signal_id" json:"signal_id" structs:"signal_id"` // 信号表的外键ID
@@ -125,7 +136,9 @@ type CalcCache struct {
}
type CalcParamCache struct {
- MqttClientId int `json:"mqtt_client_id"` // MQTT客户端表的外键ID
+ Protocol string `json:"protocol"`
+ IdentificationCode string `json:"identification_code"` // 设备标识码
+ DeviceUid int `json:"device_uid"` // MQTT客户端表的外键ID
Name string `json:"name"` // 参数名称
SignalName string `gorm:"signal_name" json:"signal_name" structs:"signal_name"` // 信号表 name
SignalId int `json:"signal_id" structs:"signal_id"` // 信号表的外键ID
diff --git a/go-iot-mq/z_test.go b/go-iot-mq/z_test.go
index 86db4c4b8cfc39cef965300d8672a936b21be9ce..ec8b208931f7fef9b337fa56988c58d3dade9f7d 100644
--- a/go-iot-mq/z_test.go
+++ b/go-iot-mq/z_test.go
@@ -2,19 +2,77 @@ package main
import (
"context"
+ "encoding/json"
+ "fmt"
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/bson/primitive"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+ "log"
+ "net/url"
"testing"
)
+func TestInfluxdbQueryM(t *testing.T) {
+ var config = InfluxConfig{
+ Host: "127.0.0.1",
+ Port: 8086,
+ Token: "mytoken",
+ Org: "myorg",
+ Bucket: "mybucket",
+ }
+ InitInfluxDbClient(config)
+ query := `from(bucket: "mybucket")
+ |> range(start: -1h, stop: now())
+ |> filter(fn: (r) => r._measurement =~ /^d/)
+ |> keep(columns: ["_measurement"])
+ |> group()
+ |> distinct(column: "_measurement")
+ |> limit(n: 200)
+ |> sort()`
+
+ queryAPI := GlobalInfluxDbClient.QueryAPI("myorg")
+
+ // 执行查询
+ result, err := queryAPI.Query(context.Background(), query)
+ if err != nil {
+ fmt.Printf("Error: %v\n", err)
+ return
+ }
+
+ // 处理查询结果
+ for result.Next() {
+ record := result.Record()
+ value := record.Value()
+ fmt.Printf("Result: %v\n", value)
+ }
+
+}
func TestA(t *testing.T) {
var config = RedisConfig{
Host: "127.0.0.1",
Port: 6379,
- Db: 0,
+ Db: 10,
Password: "eYVX7EwVmmxKPCDmwMtyKVge8oLd2t81",
}
InitGlobalRedisClient(config)
+ globalRedisClient.Set(context.Background(), "a", 1, 0)
+ jsonData, _ := json.Marshal(config)
+
+ type Auth struct {
+ Username string `json:"username"`
+ Password string `json:"password"`
+ DeviceId string `json:"device_id"`
+ }
+ auth := Auth{
+ Username: "admin",
+ Password: "admin",
+ DeviceId: "1234567890",
+ }
+ jsonData, _ = json.Marshal(auth)
+
+ globalRedisClient.HSet(context.Background(), "auth:coap", "1234567890", jsonData)
- globalRedisClient.ZRemRangeByScore(context.Background(), "aaa", "-inf", "0")
}
func TestMqCustomer(t *testing.T) {
@@ -46,3 +104,75 @@ func TestMqCustomer(t *testing.T) {
}
}()
}
+
+
+func TestMongo(t *testing.T){
+
+ connStr := fmt.Sprintf("mongodb://%s:%s@%s:%d", url.QueryEscape("admin"),
+ url.QueryEscape("admin"), "127.0.0.1", 27017)
+ client, err := mongo.Connect(context.TODO(), options.Client().ApplyURI(connStr))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // 检查连接
+ err = client.Ping(context.TODO(), nil)
+
+ db := client.Database("iot")
+
+ prefix := ""
+
+ // 构建正则表达式,匹配以prefix开头的集合名称
+ regex := primitive.Regex{Pattern: "^" + prefix, Options: "i"} // 'i' 表示不区分大小写
+
+ // 构建查询条件
+ filter := bson.M{"name": regex}
+ // 检查集合是否存在
+ collectionNames, err := db.ListCollectionNames(context.TODO(),filter)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ collectionExists := false
+ for _, name := range collectionNames {
+ if name == "你的集合名" {
+ collectionExists = true
+ break
+ }
+ log.Println(name)
+ }
+ log.Println(collectionExists)
+
+ db.CreateCollection(context.TODO(), "aaaaaaaaa")
+}
+
+func TestInfluxDbCreateBucket(t *testing.T){
+ var config = InfluxConfig{
+ Host: "127.0.0.1",
+ Port: 8086,
+ Token: "mytoken",
+ Org: "myorg",
+ Bucket: "mybucket",
+ }
+ InitInfluxDbClient(config)
+ name, err := GlobalInfluxDbClient.OrganizationsAPI().FindOrganizationByName(context.Background(), config.Org)
+ if err != nil {
+ log.Fatal(err)
+ }
+ log.Println(name)
+ id, err := GlobalInfluxDbClient.BucketsAPI().FindBucketsByOrgID(context.Background(), *name.Id)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // 假设id是一个存储桶ID的切片
+ for _, bucketID := range *id {
+ log.Println(bucketID.Name)
+ }
+
+ GlobalInfluxDbClient.BucketsAPI().CreateBucketWithName(context.Background(), name, "cli_create")
+
+
+
+
+}
\ No newline at end of file
diff --git a/go-iot-plat-load/.editorconfig b/go-iot-plat-load/.editorconfig
new file mode 100644
index 0000000000000000000000000000000000000000..8927e2cad765ebe9b220ea7303aa7e81eaf08e05
--- /dev/null
+++ b/go-iot-plat-load/.editorconfig
@@ -0,0 +1,12 @@
+# https://EditorConfig.org
+
+# top-most EditorConfig file
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 2
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = false
\ No newline at end of file
diff --git a/go-iot-plat-load/.gitignore b/go-iot-plat-load/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..33741a87b6523bb9bfa2ee22794f9c84641c6010
--- /dev/null
+++ b/go-iot-plat-load/.gitignore
@@ -0,0 +1,24 @@
+# build output
+dist/
+.output/
+
+# dependencies
+node_modules/
+
+# logs
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+
+
+# environment variables
+.env
+.env.production
+
+# macOS-specific files
+.DS_Store
+
+pnpm-lock.yaml
+
+.astro
\ No newline at end of file
diff --git a/go-iot-plat-load/.npmrc b/go-iot-plat-load/.npmrc
new file mode 100644
index 0000000000000000000000000000000000000000..999db39309edbb715286f3b0faf08db0dd44e23d
--- /dev/null
+++ b/go-iot-plat-load/.npmrc
@@ -0,0 +1,2 @@
+# Expose Astro dependencies for `pnpm` users
+shamefully-hoist=true
\ No newline at end of file
diff --git a/go-iot-plat-load/.prettierignore b/go-iot-plat-load/.prettierignore
new file mode 100644
index 0000000000000000000000000000000000000000..76b517ce3f47e1bd3f71634aaa4040798251131f
--- /dev/null
+++ b/go-iot-plat-load/.prettierignore
@@ -0,0 +1,4 @@
+dist
+node_modules
+.github
+.changeset
\ No newline at end of file
diff --git a/go-iot-plat-load/.prettierrc.cjs b/go-iot-plat-load/.prettierrc.cjs
new file mode 100644
index 0000000000000000000000000000000000000000..752ef12a6b415e8b67c61f1475e8151c48ce4074
--- /dev/null
+++ b/go-iot-plat-load/.prettierrc.cjs
@@ -0,0 +1,13 @@
+/** @type {import('prettier').Config} */
+module.exports = {
+ printWidth: 120,
+ semi: true,
+ singleQuote: true,
+ tabWidth: 2,
+ trailingComma: 'es5',
+ useTabs: false,
+
+ plugins: [require.resolve('prettier-plugin-astro')],
+
+ overrides: [{ files: '*.astro', options: { parser: 'astro' } }],
+};
diff --git a/go-iot-plat-load/.stackblitzrc b/go-iot-plat-load/.stackblitzrc
new file mode 100644
index 0000000000000000000000000000000000000000..43798ecff844b770ec4b81ab39953e9bbb5e15f2
--- /dev/null
+++ b/go-iot-plat-load/.stackblitzrc
@@ -0,0 +1,6 @@
+{
+ "startCommand": "npm start",
+ "env": {
+ "ENABLE_CJS_IMPORTS": true
+ }
+}
\ No newline at end of file
diff --git a/go-iot-plat-load/.vscode/astrowind/config-schema.json b/go-iot-plat-load/.vscode/astrowind/config-schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..3297fa368e39cc46ca2ab98305b18410d608f876
--- /dev/null
+++ b/go-iot-plat-load/.vscode/astrowind/config-schema.json
@@ -0,0 +1,275 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "properties": {
+ "site": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "site": {
+ "type": "string"
+ },
+ "base": {
+ "type": "string"
+ },
+ "trailingSlash": {
+ "type": "boolean"
+ },
+ "googleSiteVerificationId": {
+ "type": "string"
+ }
+ },
+ "required": ["name", "site", "base", "trailingSlash"],
+ "additionalProperties": false
+ },
+ "metadata": {
+ "type": "object",
+ "properties": {
+ "title": {
+ "type": "object",
+ "properties": {
+ "default": {
+ "type": "string"
+ },
+ "template": {
+ "type": "string"
+ }
+ },
+ "required": ["default", "template"]
+ },
+ "description": {
+ "type": "string"
+ },
+ "robots": {
+ "type": "object",
+ "properties": {
+ "index": {
+ "type": "boolean"
+ },
+ "follow": {
+ "type": "boolean"
+ }
+ },
+ "required": ["index", "follow"]
+ },
+ "openGraph": {
+ "type": "object",
+ "properties": {
+ "site_name": {
+ "type": "string"
+ },
+ "images": {
+ "type": "array",
+ "items": [
+ {
+ "type": "object",
+ "properties": {
+ "url": {
+ "type": "string"
+ },
+ "width": {
+ "type": "integer"
+ },
+ "height": {
+ "type": "integer"
+ }
+ },
+ "required": ["url", "width", "height"]
+ }
+ ]
+ },
+ "type": {
+ "type": "string"
+ }
+ },
+ "required": ["site_name", "images", "type"]
+ },
+ "twitter": {
+ "type": "object",
+ "properties": {
+ "handle": {
+ "type": "string"
+ },
+ "site": {
+ "type": "string"
+ },
+ "cardType": {
+ "type": "string"
+ }
+ },
+ "required": ["handle", "site", "cardType"]
+ }
+ },
+ "required": ["title", "description", "robots", "openGraph", "twitter"]
+ },
+ "i18n": {
+ "type": "object",
+ "properties": {
+ "language": {
+ "type": "string"
+ },
+ "textDirection": {
+ "type": "string"
+ }
+ },
+ "required": ["language", "textDirection"]
+ },
+ "apps": {
+ "type": "object",
+ "properties": {
+ "blog": {
+ "type": "object",
+ "properties": {
+ "isEnabled": {
+ "type": "boolean"
+ },
+ "postsPerPage": {
+ "type": "integer"
+ },
+ "isRelatedPostsEnabled": {
+ "type": "boolean"
+ },
+ "relatedPostsCount": {
+ "type": "integer"
+ },
+ "post": {
+ "type": "object",
+ "properties": {
+ "isEnabled": {
+ "type": "boolean"
+ },
+ "permalink": {
+ "type": "string"
+ },
+ "robots": {
+ "type": "object",
+ "properties": {
+ "index": {
+ "type": "boolean"
+ },
+ "follow": {
+ "type": "boolean"
+ }
+ },
+ "required": ["index"]
+ }
+ },
+ "required": ["isEnabled", "permalink", "robots"]
+ },
+ "list": {
+ "type": "object",
+ "properties": {
+ "isEnabled": {
+ "type": "boolean"
+ },
+ "pathname": {
+ "type": "string"
+ },
+ "robots": {
+ "type": "object",
+ "properties": {
+ "index": {
+ "type": "boolean"
+ },
+ "follow": {
+ "type": "boolean"
+ }
+ },
+ "required": ["index"]
+ }
+ },
+ "required": ["isEnabled", "pathname", "robots"]
+ },
+ "category": {
+ "type": "object",
+ "properties": {
+ "isEnabled": {
+ "type": "boolean"
+ },
+ "pathname": {
+ "type": "string"
+ },
+ "robots": {
+ "type": "object",
+ "properties": {
+ "index": {
+ "type": "boolean"
+ },
+ "follow": {
+ "type": "boolean"
+ }
+ },
+ "required": ["index"]
+ }
+ },
+ "required": ["isEnabled", "pathname", "robots"]
+ },
+ "tag": {
+ "type": "object",
+ "properties": {
+ "isEnabled": {
+ "type": "boolean"
+ },
+ "pathname": {
+ "type": "string"
+ },
+ "robots": {
+ "type": "object",
+ "properties": {
+ "index": {
+ "type": "boolean"
+ },
+ "follow": {
+ "type": "boolean"
+ }
+ },
+ "required": ["index"]
+ }
+ },
+ "required": ["isEnabled", "pathname", "robots"]
+ }
+ },
+ "required": ["isEnabled", "postsPerPage", "post", "list", "category", "tag"]
+ }
+ },
+ "required": ["blog"]
+ },
+ "analytics": {
+ "type": "object",
+ "properties": {
+ "vendors": {
+ "type": "object",
+ "properties": {
+ "googleAnalytics": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": ["string", "null"]
+ },
+ "partytown": {
+ "type": "boolean",
+ "default": true
+ }
+ },
+ "required": ["id"]
+ }
+ },
+ "required": ["googleAnalytics"]
+ }
+ },
+ "required": ["vendors"]
+ },
+ "ui": {
+ "type": "object",
+ "properties": {
+ "theme": {
+ "type": "string"
+ }
+ },
+ "required": ["theme"]
+ }
+ },
+ "required": ["site", "metadata", "i18n", "apps", "analytics", "ui"]
+}
diff --git a/go-iot-plat-load/.vscode/extensions.json b/go-iot-plat-load/.vscode/extensions.json
new file mode 100644
index 0000000000000000000000000000000000000000..ec1bfc003ed7b215ba30f0d9c4c0c14ba1205f4c
--- /dev/null
+++ b/go-iot-plat-load/.vscode/extensions.json
@@ -0,0 +1,10 @@
+{
+ "recommendations": [
+ "astro-build.astro-vscode",
+ "bradlc.vscode-tailwindcss",
+ "dbaeumer.vscode-eslint",
+ "esbenp.prettier-vscode",
+ "unifiedjs.vscode-mdx"
+ ],
+ "unwantedRecommendations": []
+}
diff --git a/go-iot-plat-load/.vscode/launch.json b/go-iot-plat-load/.vscode/launch.json
new file mode 100644
index 0000000000000000000000000000000000000000..d6422097621fd7c1b1ccc6daa670c46aed7ef5b7
--- /dev/null
+++ b/go-iot-plat-load/.vscode/launch.json
@@ -0,0 +1,11 @@
+{
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "command": "./node_modules/.bin/astro dev",
+ "name": "Development server",
+ "request": "launch",
+ "type": "node-terminal"
+ }
+ ]
+}
diff --git a/go-iot-plat-load/.vscode/settings.json b/go-iot-plat-load/.vscode/settings.json
new file mode 100644
index 0000000000000000000000000000000000000000..c4007732046c023699d7ab06109bc437bdf301c3
--- /dev/null
+++ b/go-iot-plat-load/.vscode/settings.json
@@ -0,0 +1,15 @@
+{
+ "css.customData": ["./vscode.tailwind.json"],
+ "eslint.validate": ["javascript", "javascriptreact", "astro", "typescript", "typescriptreact"],
+ "files.associations": {
+ "*.mdx": "markdown"
+ },
+ "prettier.documentSelectors": ["**/*.astro"],
+ "[astro]": {
+ "editor.defaultFormatter": "astro-build.astro-vscode"
+ },
+ "yaml.schemas": {
+ "./.vscode/astrowind/config-schema.json": "/src/config.yaml"
+ },
+ "eslint.useFlatConfig": true
+}
diff --git a/go-iot-plat-load/LICENSE.md b/go-iot-plat-load/LICENSE.md
new file mode 100644
index 0000000000000000000000000000000000000000..fa4db58b62e619a734827206a5d4129bd15095a5
--- /dev/null
+++ b/go-iot-plat-load/LICENSE.md
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 onWidget
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/go-iot-plat-load/README.md b/go-iot-plat-load/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..275e21044c502fc7121bd66bcadeebde7ba09326
--- /dev/null
+++ b/go-iot-plat-load/README.md
@@ -0,0 +1,295 @@
+# 🚀 AstroWind
+
+
+
+🌟 _Most *starred* & *forked* Astro theme in 2022 & 2023_. 🌟
+
+**AstroWind** is a free and open-source template to make your website using **[Astro 4.0](https://astro.build/) + [Tailwind CSS](https://tailwindcss.com/)**. Ready to start a new project and designed taking into account web best practices.
+
+- ✅ **Production-ready** scores in **PageSpeed Insights** reports.
+- ✅ Integration with **Tailwind CSS** supporting **Dark mode** and **_RTL_**.
+- ✅ **Fast and SEO friendly blog** with automatic **RSS feed**, **MDX** support, **Categories & Tags**, **Social Share**, ...
+- ✅ **Image Optimization** (using new **Astro Assets** and **Unpic** for Universal image CDN).
+- ✅ Generation of **project sitemap** based on your routes.
+- ✅ **Open Graph tags** for social media sharing.
+- ✅ **Analytics** built-in Google Analytics, and Splitbee integration.
+
+
+
+
+
+[](https://onwidget.com)
+[](https://github.com/onwidget/astrowind/blob/main/LICENSE.md)
+[](https://github.com/onwidget)
+[](https://github.com/onwidget/astrowind#contributing)
+[](https://snyk.io/test/github/onwidget/astrowind)
+[](https://github.com/onwidget/astrowind)
+[](https://github.com/onwidget/astrowind)
+
+
+
+
+Table of Contents
+
+- [Demo](#demo)
+- [Upcoming: AstroWind 2.0 – We Need Your Vision!](#-upcoming-astrowind-20--we-need-your-vision)
+- [Getting started](#getting-started)
+ - [Project structure](#project-structure)
+ - [Commands](#commands)
+ - [Configuration](#configuration)
+ - [Deploy](#deploy)
+- [Frequently Asked Questions](#frequently-asked-questions)
+- [Related Projects](#related-projects)
+- [Contributing](#contributing)
+- [Acknowledgements](#acknowledgements)
+- [License](#license)
+
+
+
+
+
+## Demo
+
+📌 [https://astrowind.vercel.app/](https://astrowind.vercel.app/)
+
+
+
+## 🔔 Upcoming: AstroWind 2.0 – We Need Your Vision!
+
+We're embarking on an exciting journey with **AstroWind 2.0**, and we want you to be a part of it! We're currently taking the first steps in developing this new version and your insights are invaluable. Join the discussion and share your feedback, ideas, and suggestions to help shape the future of **AstroWind**. Let's make **AstroWind 2.0** even better, together!
+
+[Share Your Feedback in Our Discussion!](https://github.com/onwidget/astrowind/discussions/392)
+
+
+
+## Getting started
+
+**AstroWind** tries to give you quick access to creating a website using [Astro 4.0](https://astro.build/) + [Tailwind CSS](https://tailwindcss.com/). It's a free theme which focuses on simplicity, good practices and high performance.
+
+Very little vanilla javascript is used only to provide basic functionality so that each developer decides which framework (React, Vue, Svelte, Solid JS...) to use and how to approach their goals.
+
+In this version the template supports all the options in the `output` configuration, `static`, `hybrid` and `server`, but the blog only works with `prerender = true`. We are working on the next version and aim to make it fully compatible with SSR.
+
+### Project structure
+
+Inside **AstroWind** template, you'll see the following folders and files:
+
+```
+/
+├── public/
+│ ├── _headers
+│ └── robots.txt
+├── src/
+│ ├── assets/
+│ │ ├── favicons/
+│ │ ├── images/
+│ │ └── styles/
+│ │ └── tailwind.css
+│ ├── components/
+│ │ ├── blog/
+│ │ ├── common/
+│ │ ├── ui/
+│ │ ├── widgets/
+│ │ │ ├── Header.astro
+│ │ │ └── ...
+│ │ ├── CustomStyles.astro
+│ │ ├── Favicons.astro
+│ │ └── Logo.astro
+│ ├── content/
+│ │ ├── post/
+│ │ │ ├── post-slug-1.md
+│ │ │ ├── post-slug-2.mdx
+│ │ │ └── ...
+│ │ └── config.ts
+│ ├── layouts/
+│ │ ├── Layout.astro
+│ │ ├── MarkdownLayout.astro
+│ │ └── PageLayout.astro
+│ ├── pages/
+│ │ ├── [...blog]/
+│ │ │ ├── [category]/
+│ │ │ ├── [tag]/
+│ │ │ ├── [...page].astro
+│ │ │ └── index.astro
+│ │ ├── index.astro
+│ │ ├── 404.astro
+│ │ ├── rss.xml.ts
+│ │ └── ...
+│ ├── utils/
+│ ├── config.yaml
+│ └── navigation.js
+├── package.json
+├── astro.config.mjs
+└── ...
+```
+
+Astro looks for `.astro` or `.md` files in the `src/pages/` directory. Each page is exposed as a route based on its file name.
+
+There's nothing special about `src/components/`, but that's where we like to put any Astro/React/Vue/Svelte/Preact components.
+
+Any static assets, like images, can be placed in the `public/` directory if they do not require any transformation or in the `assets/` directory if they are imported directly.
+
+[](https://githubbox.com/onwidget/astrowind/tree/main) [](https://gitpod.io/?on=gitpod#https://github.com/onwidget/astrowind) [](https://stackblitz.com/github/onwidget/astrowind)
+
+> 🧑‍🚀 **Seasoned astronaut?** Delete this file `README.md`. Update `src/config.yaml` and contents. Have fun!
+
+
+
+### Commands
+
+All commands are run from the root of the project, from a terminal:
+
+| Command | Action |
+| :-------------------- | :------------------------------------------------- |
+| `npm install` | Installs dependencies |
+| `npm run dev` | Starts local dev server at `localhost:3000` |
+| `npm run build` | Build your production site to `./dist/` |
+| `npm run preview` | Preview your build locally, before deploying |
+| `npm run format` | Format code with Prettier |
+| `npm run lint:eslint` | Run ESLint |
+| `npm run astro ...` | Run CLI commands like `astro add`, `astro preview` |
+
+
+
+### Configuration
+
+Basic configuration file: `./src/config.yaml`
+
+```yaml
+site:
+ name: 'Example'
+ site: 'https://example.com'
+ base: '/' # Change this if you need to deploy to Github Pages, for example
+ trailingSlash: false # Generate permalinks with or without "/" at the end
+
+ googleSiteVerificationId: false # Or some value,
+
+# Default SEO metadata
+metadata:
+ title:
+ default: 'Example'
+ template: '%s — Example'
+ description: 'This is the default meta description of Example website'
+ robots:
+ index: true
+ follow: true
+ openGraph:
+ site_name: 'Example'
+ images:
+ - url: '~/assets/images/default.png'
+ width: 1200
+ height: 628
+ type: website
+ twitter:
+ handle: '@twitter_user'
+ site: '@twitter_user'
+ cardType: summary_large_image
+
+i18n:
+ language: en
+ textDirection: ltr
+
+apps:
+ blog:
+ isEnabled: true # If the blog will be enabled
+ postsPerPage: 6 # Number of posts per page
+
+ post:
+ isEnabled: true
+ permalink: '/blog/%slug%' # Variables: %slug%, %year%, %month%, %day%, %hour%, %minute%, %second%, %category%
+ robots:
+ index: true
+
+ list:
+ isEnabled: true
+ pathname: 'blog' # Blog main path, you can change this to "articles" (/articles)
+ robots:
+ index: true
+
+ category:
+ isEnabled: true
+ pathname: 'category' # Category main path /category/some-category, you can change this to "group" (/group/some-category)
+ robots:
+ index: true
+
+ tag:
+ isEnabled: true
+ pathname: 'tag' # Tag main path /tag/some-tag, you can change this to "topics" (/topics/some-category)
+ robots:
+ index: false
+
+ isRelatedPostsEnabled: true # If a widget with related posts is to be displayed below each post
+ relatedPostsCount: 4 # Number of related posts to display
+
+analytics:
+ vendors:
+ googleAnalytics:
+ id: null # or "G-XXXXXXXXXX"
+
+ui:
+ theme: 'system' # Values: "system" | "light" | "dark" | "light:only" | "dark:only"
+```
+
+
+
+#### Customize Design
+
+To customize Font families, Colors or more Elements refer to the following files:
+
+- `src/components/CustomStyles.astro`
+- `src/assets/styles/tailwind.css`
+
+### Deploy
+
+#### Deploy to production (manual)
+
+You can create an optimized production build with:
+
+```shell
+npm run build
+```
+
+Now, your website is ready to be deployed. All generated files are located at
+`dist` folder, which you can deploy the folder to any hosting service you
+prefer.
+
+#### Deploy to Netlify
+
+Clone this repository on your own GitHub account and deploy it to Netlify:
+
+[](https://app.netlify.com/start/deploy?repository=https://github.com/onwidget/astrowind)
+
+#### Deploy to Vercel
+
+Clone this repository on your own GitHub account and deploy to Vercel:
+
+[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fonwidget%2Fastrowind)
+
+
+
+## Frequently Asked Questions
+
+- Why?
+-
+-
+
+
+
+## Related projects
+
+- [TailNext](https://tailnext.vercel.app/) - Free template using Next.js 14 and Tailwind CSS with the new App Router.
+- [Qwind](https://qwind.pages.dev/) - Free template to make your website using Qwik + Tailwind CSS.
+
+## Contributing
+
+If you have any ideas, suggestions or find any bugs, feel free to open a discussion, an issue or create a pull request.
+That would be very useful for all of us and we would be happy to listen and take action.
+
+## Acknowledgements
+
+Initially created by [onWidget](https://onwidget.com) and maintained by a community of [contributors](https://github.com/onwidget/astrowind/graphs/contributors).
+
+## License
+
+**AstroWind** is licensed under the MIT license — see the [LICENSE](./LICENSE.md) file for details.
diff --git a/go-iot-plat-load/astro.config.mjs b/go-iot-plat-load/astro.config.mjs
new file mode 100644
index 0000000000000000000000000000000000000000..09453a48881481343785973c37b1873ef89bdbb6
--- /dev/null
+++ b/go-iot-plat-load/astro.config.mjs
@@ -0,0 +1,96 @@
+import path from 'path';
+import { fileURLToPath } from 'url';
+
+import { defineConfig, squooshImageService } from 'astro/config';
+
+import sitemap from '@astrojs/sitemap';
+import tailwind from '@astrojs/tailwind';
+import mdx from '@astrojs/mdx';
+import partytown from '@astrojs/partytown';
+import icon from 'astro-icon';
+import compress from 'astro-compress';
+import astrowind from './vendor/integration';
+import remarkMath from 'remark-math';
+import rehypeKatex from 'rehype-katex';
+import {
+ readingTimeRemarkPlugin,
+ responsiveTablesRehypePlugin,
+ lazyImagesRehypePlugin,
+} from './src/utils/frontmatter.mjs';
+
+
+import remarkMermaid from 'remark-mermaidjs'
+const __dirname = path.dirname(fileURLToPath(import.meta.url));
+
+const hasExternalScripts = false;
+const whenExternalScripts = (items = []) =>
+ hasExternalScripts ? (Array.isArray(items) ? items.map((item) => item()) : [items()]) : [];
+
+export default defineConfig({
+ output: 'static',
+ integrations: [
+ tailwind({
+ applyBaseStyles: false,
+ }),
+
+ sitemap(),
+ mdx(),
+ icon({
+ include: {
+ tabler: ['*'],
+ 'flat-color-icons': [
+ 'template',
+ 'gallery',
+ 'approval',
+ 'document',
+ 'advertising',
+ 'currency-exchange',
+ 'voice-presentation',
+ 'business-contact',
+ 'database',
+ ],
+ },
+ }),
+
+ ...whenExternalScripts(() =>
+ partytown({
+ config: { forward: ['dataLayer.push'] },
+ })
+ ),
+
+ compress({
+ CSS: true,
+ HTML: {
+ 'html-minifier-terser': {
+ removeAttributeQuotes: false,
+ },
+ },
+ Image: false,
+ JavaScript: true,
+ SVG: false,
+ Logger: 1,
+ }),
+
+ astrowind({
+ config: './src/config.yaml',
+ }),
+ ],
+
+ image: {
+ service: squooshImageService(),
+ domains: ['cdn.pixabay.com'],
+ },
+
+ markdown: {
+ remarkPlugins: [readingTimeRemarkPlugin,remarkMath,remarkMermaid],
+ rehypePlugins: [responsiveTablesRehypePlugin, lazyImagesRehypePlugin,rehypeKatex],
+ },
+
+ vite: {
+ resolve: {
+ alias: {
+ '~': path.resolve(__dirname, './src'),
+ },
+ },
+ },
+});
diff --git a/go-iot-plat-load/eslint.config.js b/go-iot-plat-load/eslint.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..3961a847e19612ca8cfd934d8700ad0a1db0fbeb
--- /dev/null
+++ b/go-iot-plat-load/eslint.config.js
@@ -0,0 +1,59 @@
+import astroEslintParser from 'astro-eslint-parser';
+import eslintPluginAstro from 'eslint-plugin-astro';
+import globals from 'globals';
+import js from '@eslint/js';
+import tseslint from 'typescript-eslint';
+import typescriptParser from '@typescript-eslint/parser';
+
+export default [
+ js.configs.recommended,
+ ...eslintPluginAstro.configs['flat/recommended'],
+ ...tseslint.configs.recommended,
+ {
+ languageOptions: {
+ globals: {
+ ...globals.browser,
+ ...globals.node,
+ },
+ },
+ },
+ {
+ files: ['**/*.astro'],
+ languageOptions: {
+ parser: astroEslintParser,
+ parserOptions: {
+ parser: '@typescript-eslint/parser',
+ extraFileExtensions: ['.astro'],
+ },
+ },
+ },
+ {
+ files: ['**/*.{js,jsx,astro}'],
+ rules: {
+ 'no-mixed-spaces-and-tabs': ['error', 'smart-tabs'],
+ },
+ },
+ {
+ // Define the configuration for `
+
+
+
+
+
+