{"status":"success","data":{"groups":[{"name":"vm-health","rules":[{"state":"inactive","name":"TooManyRestarts","query":"changes(process_start_time_seconds{job=~\"victoriametrics|vmselect|vminsert|vmstorage|vmagent|vmalert\"}[15m]) \u003e 2","duration":0,"labels":{"severity":"critical"},"annotations":{"description":"Job {{ $labels.job }} (instance {{ $labels.instance }}) has restarted more than twice in the last 15 minutes. It might be crashlooping.","summary":"{{ $labels.job }} too many restarts (instance {{ $labels.instance }})"},"lastError":"","evaluationTime":0.001242485,"lastEvaluation":"2026-04-29T15:44:05.192939531Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"6237301384188023580","group_id":"1594445953246388460","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"ServiceDown","query":"up{job=~\"victoriametrics|vmselect|vminsert|vmstorage|vmagent|vmalert\"} == 0","duration":120,"labels":{"severity":"critical"},"annotations":{"description":"{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 2 minutes.","summary":"Service {{ $labels.job }} is down on {{ $labels.instance }}"},"lastError":"","evaluationTime":0.000956235,"lastEvaluation":"2026-04-29T15:44:05.194190686Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"4655180280729811658","group_id":"1594445953246388460","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"ProcessNearFDLimits","query":"(process_max_fds - process_open_fds) \u003c 100","duration":300,"labels":{"severity":"critical"},"annotations":{"description":"Exhausting OS file descriptors limit can cause severe degradation of the process. Consider to increase the limit as fast as possible.","summary":"Number of free file descriptors is less than 100 for \"{{ $labels.job }}\"(\"{{ $labels.instance }}\") for the last 5m"},"lastError":"","evaluationTime":0.000952154,"lastEvaluation":"2026-04-29T15:44:05.195148771Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"15828888860588158234","group_id":"1594445953246388460","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"TooHighMemoryUsage","query":"(process_resident_memory_anon_bytes / vm_available_memory_bytes) \u003e 0.9","duration":300,"labels":{"severity":"critical"},"annotations":{"description":"Too high memory usage may result into multiple issues such as OOMs or degraded performance. Consider to either increase available memory or decrease the load on the process.","summary":"It is more than 90% of memory used by \"{{ $labels.job }}\"(\"{{ $labels.instance }}\") during the last 5m"},"lastError":"","evaluationTime":0.000908484,"lastEvaluation":"2026-04-29T15:44:05.196107085Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"11362580426449474781","group_id":"1594445953246388460","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"TooHighCPUUsage","query":"rate(process_cpu_seconds_total[5m]) / process_cpu_cores_available \u003e 0.9","duration":300,"labels":{"severity":"critical"},"annotations":{"description":"Too high CPU usage may be a sign of insufficient resources and make process unstable. 
Consider to either increase available CPU resources or decrease the load on the process.","summary":"More than 90% of CPU is used by \"{{ $labels.job }}\"(\"{{ $labels.instance }}\") during the last 5m"},"lastError":"","evaluationTime":0.000719543,"lastEvaluation":"2026-04-29T15:44:05.197017869Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"17388543372823140628","group_id":"1594445953246388460","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"TooManyLogs","query":"sum(increase(vm_log_messages_total{level=\"error\"}[5m])) by (job, instance) \u003e 0","duration":900,"labels":{"severity":"warning"},"annotations":{"description":"Logging rate for job \"{{ $labels.job }}\" ({{ $labels.instance }}) is {{ $value }} for last 15m.\n Worth to check logs for specific error messages.","summary":"Too many logs printed for job \"{{ $labels.job }}\" ({{ $labels.instance }})"},"lastError":"","evaluationTime":0.000838343,"lastEvaluation":"2026-04-29T15:44:05.197742213Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"10353770298826902271","group_id":"1594445953246388460","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]}],"interval":60,"lastEvaluation":"2026-04-29T15:44:05.192936381Z","type":"prometheus","id":"1594445953246388460","file":"/etc/alerts/alerts-health.yml","concurrency":1},{"name":"vmagent","rules":[{"state":"inactive","name":"PersistentQueueIsDroppingData","query":"sum(increase(vm_persistentqueue_bytes_dropped_total[5m])) by (job, instance) \u003e 0","duration":600,"labels":{"severity":"critical"},"annotations":{"dashboard":"http://localhost:3000/d/G7Z9GzMGz?viewPanel=49\u0026var-instance={{ $labels.instance }}","description":"Vmagent dropped {{ $value | humanize1024 }} from persistent queue on instance {{ $labels.instance }} for the last 10m.","summary":"Instance {{ $labels.instance }} is dropping data from persistent queue"},"lastError":"","evaluationTime":0.001329426,"lastEvaluation":"2026-04-29T15:44:18.883643731Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"14941372497586835904","group_id":"11604620448585840090","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"RejectedRemoteWriteDataBlocksAreDropped","query":"sum(increase(vmagent_remotewrite_packets_dropped_total[5m])) by (job, instance) \u003e 0","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/G7Z9GzMGz?viewPanel=79\u0026var-instance={{ $labels.instance }}","summary":"Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} drops the rejected by remote-write server data blocks. 
Check the logs to find the reason for rejects."},"lastError":"","evaluationTime":0.001324746,"lastEvaluation":"2026-04-29T15:44:18.883642351Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"14080245430458445615","group_id":"11604620448585840090","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"firing","name":"TooManyScrapeErrors","query":"sum(increase(vm_promscrape_scrapes_failed_total[5m])) by (job, instance) \u003e 0","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/G7Z9GzMGz?viewPanel=31\u0026var-instance={{ $labels.instance }}","summary":"Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} fails to scrape targets for last 15m"},"lastError":"","evaluationTime":0.000841234,"lastEvaluation":"2026-04-29T15:44:18.884979357Z","alerts":[{"state":"firing","name":"TooManyScrapeErrors","value":"170","labels":{"alertgroup":"vmagent","alertname":"TooManyScrapeErrors","instance":"vmagent:8429","job":"vmagent","severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/G7Z9GzMGz?viewPanel=31\u0026var-instance=vmagent:8429","summary":"Job \"vmagent\" on instance vmagent:8429 fails to scrape targets for last 15m"},"activeAt":"2025-08-28T20:17:48.873200558Z","id":"14035331287324447540","rule_id":"6093091293462635297","group_id":"11604620448585840090","expression":"sum(increase(vm_promscrape_scrapes_failed_total[5m])) by (job, instance) \u003e 0","source":"http://127.0.0.1:3000/explore?orgId=1\u0026left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\":%22sum%28increase%28vm_promscrape_scrapes_failed_total%5B5m%5D%29%29+by+%28job%2C+instance%29+%3E+0%22 },{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]","restored":false}],"health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":1,"id":"6093091293462635297","group_id":"11604620448585840090","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"TooManyWriteErrors","query":"(sum(increase(vm_ingestserver_request_errors_total[5m])) by (job, instance)\n+\nsum(increase(vmagent_http_request_errors_total[5m])) by (job, instance)) \u003e 0\n","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/G7Z9GzMGz?viewPanel=77\u0026var-instance={{ $labels.instance }}","summary":"Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} responds with errors to write requests for last 15m."},"lastError":"","evaluationTime":0.001192296,"lastEvaluation":"2026-04-29T15:44:18.884989457Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"9408732227524010864","group_id":"11604620448585840090","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"TooManyRemoteWriteErrors","query":"sum(rate(vmagent_remotewrite_retries_count_total[5m])) by(job, instance, url) \u003e 0","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/G7Z9GzMGz?viewPanel=61\u0026var-instance={{ $labels.instance }}","description":"Vmagent fails to push data via remote write protocol to destination \"{{ $labels.url }}\"\n Ensure that destination is up and reachable.","summary":"Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} fails to push to remote 
storage"},"lastError":"","evaluationTime":0.000600323,"lastEvaluation":"2026-04-29T15:44:18.886189403Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"14174193033363729612","group_id":"11604620448585840090","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"RemoteWriteConnectionIsSaturated","query":"sum(rate(vmagent_remotewrite_send_duration_seconds_total[5m])) by(job, instance, url) \n\u003e 0.9 * max(vmagent_remotewrite_queues) by(job, instance, url)\n","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/G7Z9GzMGz?viewPanel=84\u0026var-instance={{ $labels.instance }}","description":"The remote write connection between vmagent \"{{ $labels.job }}\" (instance {{ $labels.instance }}) and destination \"{{ $labels.url }}\" is saturated by more than 90% and vmagent won't be able to keep up.\n This usually means that `-remoteWrite.queues` command-line flag must be increased in order to increase the number of connections per each remote storage.","summary":"Remote write connection from \"{{ $labels.job }}\" (instance {{ $labels.instance }}) to {{ $labels.url }} is saturated"},"lastError":"","evaluationTime":0.000673723,"lastEvaluation":"2026-04-29T15:44:18.886466424Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"12829031165463057726","group_id":"11604620448585840090","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"PersistentQueueForWritesIsSaturated","query":"rate(vm_persistentqueue_write_duration_seconds_total[5m]) \u003e 0.9","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/G7Z9GzMGz?viewPanel=98\u0026var-instance={{ $labels.instance }}","description":"Persistent queue writes for vmagent \"{{ $labels.job }}\" (instance {{ $labels.instance }}) are saturated by more than 90% and vmagent won't be able to keep up with flushing data on disk. In this case, consider to decrease load on the vmagent or improve the disk throughput.","summary":"Persistent queue writes for instance {{ $labels.instance }} are saturated"},"lastError":"","evaluationTime":0.000559352,"lastEvaluation":"2026-04-29T15:44:18.886795826Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"3724483375939275623","group_id":"11604620448585840090","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"PersistentQueueForReadsIsSaturated","query":"rate(vm_persistentqueue_read_duration_seconds_total[5m]) \u003e 0.9","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/G7Z9GzMGz?viewPanel=99\u0026var-instance={{ $labels.instance }}","description":"Persistent queue reads for vmagent \"{{ $labels.job }}\" (instance {{ $labels.instance }}) are saturated by more than 90% and vmagent won't be able to keep up with reading data from the disk. 
In this case, consider to decrease load on the vmagent or improve the disk throughput.","summary":"Persistent queue reads for instance {{ $labels.instance }} are saturated"},"lastError":"","evaluationTime":0.000542733,"lastEvaluation":"2026-04-29T15:44:18.887147487Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"9593711483023016771","group_id":"11604620448585840090","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"SeriesLimitHourReached","query":"(vmagent_hourly_series_limit_current_series / vmagent_hourly_series_limit_max_series) \u003e 0.9","duration":0,"labels":{"severity":"critical"},"annotations":{"dashboard":"http://localhost:3000/d/G7Z9GzMGz?viewPanel=88\u0026var-instance={{ $labels.instance }}","description":"Max series limit set via -remoteWrite.maxHourlySeries flag is close to reaching the max value. Then samples for new time series will be dropped instead of sending them to remote storage systems.","summary":"Instance {{ $labels.instance }} reached 90% of the limit"},"lastError":"","evaluationTime":0.000399682,"lastEvaluation":"2026-04-29T15:44:18.887362458Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"9905990287815795810","group_id":"11604620448585840090","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"SeriesLimitDayReached","query":"(vmagent_daily_series_limit_current_series / vmagent_daily_series_limit_max_series) \u003e 0.9","duration":0,"labels":{"severity":"critical"},"annotations":{"dashboard":"http://localhost:3000/d/G7Z9GzMGz?viewPanel=90\u0026var-instance={{ $labels.instance }}","description":"Max series limit set via -remoteWrite.maxDailySeries flag is close to reaching the max value. 
Then samples for new time series will be dropped instead of sending them to remote storage systems.","summary":"Instance {{ $labels.instance }} reached 90% of the limit"},"lastError":"","evaluationTime":0.000419611,"lastEvaluation":"2026-04-29T15:44:18.88769661Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"5813001511186364654","group_id":"11604620448585840090","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]}],"interval":30,"lastEvaluation":"2026-04-29T15:44:18.883612191Z","type":"prometheus","id":"11604620448585840090","file":"/etc/alerts/alerts-vmagent.yml","concurrency":2},{"name":"vmsingle","rules":[{"state":"inactive","name":"DiskRunsOutOfSpaceIn3Days","query":"vm_free_disk_space_bytes / ignoring(path)\n(\n   (\n    rate(vm_rows_added_to_storage_total[1d]) -\n    ignoring(type) rate(vm_deduplicated_samples_total{type=\"merge\"}[1d])\n   )\n  * scalar(\n    sum(vm_data_size_bytes{type!=\"indexdb\"}) /\n    sum(vm_rows{type!=\"indexdb\"})\n   )\n) \u003c 3 * 24 * 3600 \u003e 0\n","duration":1800,"labels":{"severity":"critical"},"annotations":{"dashboard":"http://localhost:3000/d/wNf0q_kZk?viewPanel=73\u0026var-instance={{ $labels.instance }}","description":"Taking into account current ingestion rate, free disk space will be enough only for {{ $value | humanizeDuration }} on instance {{ $labels.instance }}.\n Consider to limit the ingestion rate, decrease retention or scale the disk space if possible.","summary":"Instance {{ $labels.instance }} will run out of disk space soon"},"lastError":"","evaluationTime":0.002395421,"lastEvaluation":"2026-04-29T15:44:30.366221448Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"2003036198281861855","group_id":"216423836913459264","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"DiskRunsOutOfSpace","query":"sum(vm_data_size_bytes) by(instance) /\n(\n sum(vm_free_disk_space_bytes) by(instance) +\n sum(vm_data_size_bytes) by(instance)\n) \u003e 0.8\n","duration":1800,"labels":{"severity":"critical"},"annotations":{"dashboard":"http://localhost:3000/d/wNf0q_kZk?viewPanel=53\u0026var-instance={{ $labels.instance }}","description":"Disk utilisation on instance {{ $labels.instance }} is more than 80%.\n Having less than 20% of free disk space could cripple merges processes and overall performance. Consider to limit the ingestion rate, decrease retention or scale the disk space if possible.","summary":"Instance {{ $labels.instance }} will run out of disk space soon"},"lastError":"","evaluationTime":0.001801839,"lastEvaluation":"2026-04-29T15:44:30.366217878Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"18044055263708102221","group_id":"216423836913459264","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"RequestErrorsToAPI","query":"increase(vm_http_request_errors_total[5m]) \u003e 0","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/wNf0q_kZk?viewPanel=35\u0026var-instance={{ $labels.instance }}","description":"Requests to path {{ $labels.path }} are receiving errors. 
Please verify if clients are sending correct requests.","summary":"Too many errors served for path {{ $labels.path }} (instance {{ $labels.instance }})"},"lastError":"","evaluationTime":0.001249565,"lastEvaluation":"2026-04-29T15:44:30.368043597Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"9216859099280381622","group_id":"216423836913459264","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"ConcurrentFlushesHitTheLimit","query":"avg_over_time(vm_concurrent_addrows_current[1m]) \u003e= vm_concurrent_addrows_capacity","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/wNf0q_kZk?viewPanel=59\u0026var-instance={{ $labels.instance }}","description":"The limit of concurrent flushes on instance {{ $labels.instance }} is equal to number of CPUs.\n When VictoriaMetrics constantly hits the limit it means that storage is overloaded and requires more CPU.","summary":"VictoriaMetrics on instance {{ $labels.instance }} is constantly hitting concurrent flushes limit"},"lastError":"","evaluationTime":0.000674434,"lastEvaluation":"2026-04-29T15:44:30.368633339Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"12888099121128797919","group_id":"216423836913459264","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"RowsRejectedOnIngestion","query":"sum(rate(vm_rows_ignored_total[5m])) by (instance, reason) \u003e 0","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/wNf0q_kZk?viewPanel=58\u0026var-instance={{ $labels.instance }}","description":"VM is rejecting to ingest rows on \"{{ $labels.instance }}\" due to the following reason: \"{{ $labels.reason }}\"","summary":"Some rows are rejected on \"{{ $labels.instance }}\" on ingestion attempt"},"lastError":"","evaluationTime":0.000709713,"lastEvaluation":"2026-04-29T15:44:30.369301023Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"3652446059142022094","group_id":"216423836913459264","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"TooHighChurnRate","query":"(\n   sum(rate(vm_new_timeseries_created_total[5m])) by(instance)\n   /\n   sum(rate(vm_rows_inserted_total[5m])) by (instance)\n ) \u003e 0.1\n","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/wNf0q_kZk?viewPanel=66\u0026var-instance={{ $labels.instance }}","description":"VM constantly creates new time series on \"{{ $labels.instance }}\".\n This effect is known as Churn Rate.\n High Churn Rate tightly connected with database performance and may result in unexpected OOM's or slow queries.","summary":"Churn rate is more than 10% on \"{{ $labels.instance }}\" for the last 15m"},"lastError":"","evaluationTime":0.001022634,"lastEvaluation":"2026-04-29T15:44:30.369313753Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"16597393320100697084","group_id":"216423836913459264","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"TooHighChurnRate24h","query":"sum(increase(vm_new_timeseries_created_total[24h])) by(instance)\n\u003e\n(sum(vm_cache_entries{type=\"storage/hour_metric_ids\"}) by(instance) * 
3)\n","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/wNf0q_kZk?viewPanel=66\u0026var-instance={{ $labels.instance }}","description":"The number of created new time series over last 24h is 3x times higher than current number of active series on \"{{ $labels.instance }}\".\n This effect is known as Churn Rate.\n High Churn Rate tightly connected with database performance and may result in unexpected OOM's or slow queries.","summary":"Too high number of new series on \"{{ $labels.instance }}\" created over last 24h"},"lastError":"","evaluationTime":0.000916264,"lastEvaluation":"2026-04-29T15:44:30.370019696Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"12103807088721046337","group_id":"216423836913459264","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"TooHighSlowInsertsRate","query":"(\n   sum(rate(vm_slow_row_inserts_total[5m])) by(instance)\n   /\n   sum(rate(vm_rows_inserted_total[5m])) by (instance)\n ) \u003e 0.05\n","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/wNf0q_kZk?viewPanel=68\u0026var-instance={{ $labels.instance }}","description":"High rate of slow inserts on \"{{ $labels.instance }}\" may be a sign of resource exhaustion for the current load. It is likely more RAM is needed for optimal handling of the current number of active time series.","summary":"Percentage of slow inserts is more than 5% on \"{{ $labels.instance }}\" for the last 15m"},"lastError":"","evaluationTime":0.001283426,"lastEvaluation":"2026-04-29T15:44:30.370344497Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"12076262473324980890","group_id":"216423836913459264","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]},{"state":"inactive","name":"LabelsLimitExceededOnIngestion","query":"sum(increase(vm_metrics_with_dropped_labels_total[5m])) by (instance) \u003e 0","duration":900,"labels":{"severity":"warning"},"annotations":{"dashboard":"http://localhost:3000/d/wNf0q_kZk?viewPanel=74\u0026var-instance={{ $labels.instance }}","description":"VictoriaMetrics limits the number of labels per each metric with `-maxLabelsPerTimeseries` command-line flag.\n This prevents from ingesting metrics with too many labels. Please verify that `-maxLabelsPerTimeseries` is configured correctly or that clients which send these metrics aren't misbehaving.","summary":"Metrics ingested in ({{ $labels.instance }}) are exceeding labels limit"},"lastError":"","evaluationTime":0.000547422,"lastEvaluation":"2026-04-29T15:44:30.3709435Z","health":"ok","type":"alerting","datasourceType":"prometheus","lastSamples":0,"id":"12806002462829935187","group_id":"216423836913459264","updates":[{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]}],"interval":30,"lastEvaluation":"2026-04-29T15:44:30.366186428Z","type":"prometheus","id":"216423836913459264","file":"/etc/alerts/alerts.yml","concurrency":2}]}}