Mirror of https://gitlab.archlinux.org/archlinux/aurweb.git (synced 2025-02-03 10:43:03 +01:00)
feat: add Prometheus metrics for Redis cache
Add a Prometheus counter to monitor cache hits/misses for search queries.

Signed-off-by: moson-mo <mo-son@mailbox.org>
parent 3acfb08a0f
commit 814ccf6b04
2 changed files with 51 additions and 2 deletions
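For context, the change follows the standard prometheus_client pattern: a Counter with a "cache" label is registered once at module level and incremented on every request. Below is a minimal, self-contained sketch of that pattern; the metric name and label come from the diff, while the standalone exposition server and its port are assumptions for the demo only, not part of this commit.

# Illustrative sketch, not part of the commit: run a throwaway exposition
# server to see how the labeled counter is rendered.
from prometheus_client import Counter, start_http_server

SEARCH_REQUESTS = Counter(
    "search_requests", "Number of search requests by cache hit/miss", ["cache"]
)

if __name__ == "__main__":
    start_http_server(8000)  # assumption: any free port works for the demo
    SEARCH_REQUESTS.labels(cache="miss").inc()  # count one cache miss
    SEARCH_REQUESTS.labels(cache="hit").inc(2)  # count two cache hits
    # http://localhost:8000/metrics now reports:
    #   search_requests_total{cache="miss"} 1.0
    #   search_requests_total{cache="hit"} 2.0
    input("Serving /metrics; press Enter to quit.")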
aurweb/cache.py (11 additions, 2 deletions)

@@ -1,5 +1,6 @@
 import pickle
 
+from prometheus_client import Counter
 from sqlalchemy import orm
 
 from aurweb import config
@@ -7,6 +8,11 @@ from aurweb.aur_redis import redis_connection
 
 _redis = redis_connection()
 
+# Prometheus metrics
+SEARCH_REQUESTS = Counter(
+    "search_requests", "Number of search requests by cache hit/miss", ["cache"]
+)
+
 
 async def db_count_cache(key: str, query: orm.Query, expire: int = None) -> int:
     """Store and retrieve a query.count() via redis cache.
@@ -24,7 +30,7 @@ async def db_count_cache(key: str, query: orm.Query, expire: int = None) -> int:
     return int(result)
 
 
-async def db_query_cache(key: str, query: orm.Query, expire: int = None):
+async def db_query_cache(key: str, query: orm.Query, expire: int = None) -> list:
     """Store and retrieve query results via redis cache.
 
     :param key: Redis key
@@ -34,10 +40,13 @@ async def db_query_cache(key: str, query: orm.Query, expire: int = None):
     """
    result = _redis.get(key)
     if result is None:
+        SEARCH_REQUESTS.labels(cache="miss").inc()
         if _redis.dbsize() > config.getint("cache", "max_search_entries", 50000):
             return query.all()
-        _redis.set(key, (result := pickle.dumps(query.all())), ex=expire)
+        _redis.set(key, (result := pickle.dumps(query.all())))
         if expire:
             _redis.expire(key, expire)
+    else:
+        SEARCH_REQUESTS.labels(cache="hit").inc()
 
     return pickle.loads(result)
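One naming detail worth noting before the test below: in the text exposition format, prometheus_client renders a Counter declared as "search_requests" with a "_total" suffix, so scrapes (and the assertions in the new test) see "search_requests_total". A quick check of that behavior, a sketch assuming nothing beyond prometheus_client itself:

# Sketch, not part of the commit: demonstrate the _total suffix using a
# private registry so the process-wide default REGISTRY stays untouched.
from prometheus_client import CollectorRegistry, Counter, generate_latest

registry = CollectorRegistry()
demo = Counter(
    "search_requests",
    "Number of search requests by cache hit/miss",
    ["cache"],
    registry=registry,
)
demo.labels(cache="miss").inc()
print(generate_latest(registry).decode())
# Among the HELP/TYPE lines, the output contains:
#   search_requests_total{cache="miss"} 1.0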
test/test_metrics.py (new file, 40 additions)

@@ -0,0 +1,40 @@
+import pytest
+from prometheus_client import REGISTRY, generate_latest
+
+from aurweb import db
+from aurweb.cache import db_query_cache
+from aurweb.models.account_type import USER_ID
+from aurweb.models.user import User
+
+
+@pytest.fixture(autouse=True)
+def setup(db_test):
+    return
+
+
+@pytest.fixture
+def user() -> User:
+    with db.begin():
+        user = db.create(
+            User,
+            Username="test",
+            Email="test@example.org",
+            RealName="Test User",
+            Passwd="testPassword",
+            AccountTypeID=USER_ID,
+        )
+    yield user
+
+
+@pytest.mark.asyncio
+async def test_search_cache_metrics(user: User):
+    # Fire off 3 identical queries for caching
+    for _ in range(3):
+        await db_query_cache("key", db.query(User))
+
+    # Get metrics
+    metrics = str(generate_latest(REGISTRY))
+
+    # We should have 1 miss and 2 hits
+    assert 'search_requests_total{cache="miss"} 1.0' in metrics
+    assert 'search_requests_total{cache="hit"} 2.0' in metrics
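If substring matching on the rendered payload ever turns brittle, prometheus_client can also report a single sample directly; an alternative assertion style (a sketch, not part of this commit) would be:

# Sketch: read sample values straight from the default registry instead of
# substring-matching the generate_latest() text.
from prometheus_client import REGISTRY

assert REGISTRY.get_sample_value("search_requests_total", {"cache": "miss"}) == 1.0
assert REGISTRY.get_sample_value("search_requests_total", {"cache": "hit"}) == 2.0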