speichern is a bash script for speed testing InfluxDB. It measures how InfluxDB performs when inserting and querying data and reports the corresponding test results.
1. Download the speichern.sh script and place it on your local machine.
2. Modify the configuration at the top of the script to match your test requirements.
3. Run the script to obtain the test results.
The configuration sits at the very top of the script and consists of the following three variables:
1. Number of data points, default 10000
DATAPOINTS=10000
2. Duration of the insert test, default 10 seconds
INSERT_DURATION_SEC=10
3. Duration of the query test, default 10 seconds
QUERY_DURATION_SEC=10
Adjust these variables to suit your own needs.
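For example, a heavier run might set the variables at the top of speichern.sh like this (the values below are purely illustrative, not recommendations):

DATAPOINTS=50000
INSERT_DURATION_SEC=30
QUERY_DURATION_SEC=30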
The command to run the script is:
bash speichern.sh
The script prints the results as JSON-formatted data covering both the insert test and the query test.
The results report InfluxDB's throughput and latency for inserting and querying data.
This information helps evaluate how InfluxDB will perform in real-world application scenarios.
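The JSON output has the following shape (the placeholders describe each field; actual values depend on your environment):

{
  "insertion_throughput": "<data points per second>",
  "insertion_latency": "<milliseconds per data point>",
  "query_throughput": "<query results per second>",
  "query_latency": "<milliseconds per query result>"
}

The full speichern.sh script is shown below.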
#!/bin/bash
# Number of data points written per batch during the insert test
DATAPOINTS=10000
# Duration of the insert test, in seconds
INSERT_DURATION_SEC=10
# Duration of the query test, in seconds
QUERY_DURATION_SEC=10
# Function that POSTs a batch of line-protocol data to InfluxDB and prints the elapsed time in nanoseconds
function time_curl {
    local start end response
    start=$(date +%s%N)
    response=$(curl -XPOST "http://localhost:8086/write?db=test" --silent --data-binary "$1")
    end=$(date +%s%N)
    # Report write errors on stderr so they do not pollute the timing value printed on stdout
    echo "$response" | grep "error" >/dev/null && echo "Error: $response" >&2
    echo $((end - start))
}
# Function that repeatedly inserts batches of DATAPOINTS points into InfluxDB
# for INSERT_DURATION_SEC seconds, measuring write throughput and latency
function run_insertion_test {
    echo "running $DATAPOINTS point insert test for $INSERT_DURATION_SEC seconds"
    local data_points="" i
    for i in $(seq 1 "$DATAPOINTS"); do
        data_points+="cpu_load_short,host=server01,region=us-west value=${i}.0"$'\n'
    done
    local total_points=0 total_ns=0
    local stop_time=$(( $(date +%s) + INSERT_DURATION_SEC ))
    while [ "$(date +%s)" -lt "$stop_time" ]; do
        total_ns=$(( total_ns + $(time_curl "$data_points") ))
        total_points=$(( total_points + DATAPOINTS ))
    done
    elapsed_milliseconds=$(( total_ns / 1000000 ))
    insertion_throughput=$(echo "scale=2; $total_points * 1000 / $elapsed_milliseconds" | bc -l)
    insertion_latency=$(echo "scale=2; $elapsed_milliseconds / $total_points" | bc -l)
    echo "insertion throughput: $insertion_throughput data points/second"
    echo "insertion latency: $insertion_latency ms/data point"
}
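# For reference, each batch built by run_insertion_test is plain InfluxDB line
# protocol, one point per line, for example:
#   cpu_load_short,host=server01,region=us-west value=1.0
#   cpu_load_short,host=server01,region=us-west value=2.0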
# Function that repeatedly queries InfluxDB for QUERY_DURATION_SEC seconds,
# measuring read throughput and latency
function run_query_test {
    echo "running query test for $QUERY_DURATION_SEC seconds"
    local stop_time=$(( $(date +%s) + QUERY_DURATION_SEC ))
    local query_results=0 query response
    local start=$(date +%s%N)
    while [ "$(date +%s)" -lt "$stop_time" ]; do
        # Aggregate over the points written by the insert test (they carry server-assigned, current timestamps)
        query="SELECT mean(value) FROM cpu_load_short WHERE time > now() - 1h AND host='server01' AND region='us-west'"
        response=$(curl -XPOST "http://localhost:8086/query?db=test" --silent --data-urlencode "q=$query")
        # Each successful query returns one series named cpu_load_short
        query_results=$(( query_results + $(echo "$response" | grep -c '"name":"cpu_load_short"') ))
    done
    local end=$(date +%s%N)
    elapsed_milliseconds=$(( (end - start) / 1000000 ))
    query_throughput=$(echo "scale=2; $query_results * 1000 / $elapsed_milliseconds" | bc -l)
    query_latency=$(echo "scale=2; $elapsed_milliseconds / $query_results" | bc -l)
    echo "query throughput: $query_throughput query results/second"
    echo "query latency: $query_latency ms/query result"
}
# Run the insert and query benchmarks
run_insertion_test
run_query_test
# Return results in JSON format
echo "{
\"insertion_throughput\": \"$insertion_throughput\",
\"insertion_latency\": \"$insertion_latency\",
\"query_throughput\": \"$query_throughput\",
\"query_latency\": \"$query_latency\"
}" | jq
That concludes the introduction to the speichern script and its code.