Complete Yocto mirror with license table for TQMa6UL (2038-compliance)
- 264 license table entries with exact download URLs (224/264 resolved) - Complete sources/ directory with all BitBake recipes - Build configuration: tqma6ul-multi-mba6ulx, spaetzle (musl) - Full traceability for Softwarefreigabeantrag - GCC 13.4.0, Linux 6.6.102, U-Boot 2023.04, musl 1.2.4 - License distribution: GPL-2.0 (24), MIT (23), GPL-2.0+ (18), BSD-3 (16)
This commit is contained in:
106
sources/poky/bitbake/lib/hashserv/__init__.py
Normal file
106
sources/poky/bitbake/lib/hashserv/__init__.py
Normal file
@@ -0,0 +1,106 @@
|
||||
# Copyright (C) 2018-2019 Garmin Ltd.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import asyncio
|
||||
from contextlib import closing
|
||||
import itertools
|
||||
import json
|
||||
from collections import namedtuple
|
||||
from urllib.parse import urlparse
|
||||
from bb.asyncrpc.client import parse_address, ADDR_TYPE_UNIX, ADDR_TYPE_WS
|
||||
|
||||
# Lightweight record of an authenticated user: the login name plus the
# collection of permission strings granted to that user.
User = namedtuple("User", ("username", "permissions"))
|
||||
|
||||
def create_server(
    addr,
    dbname,
    *,
    sync=True,
    upstream=None,
    read_only=False,
    db_username=None,
    db_password=None,
    anon_perms=None,
    admin_username=None,
    admin_password=None,
):
    """
    Build a hash-equivalence server bound to *addr* and backed by *dbname*.

    A *dbname* containing "://" is treated as a database URL and handled by
    the SQLAlchemy engine; anything else is taken to be an SQLite file path.
    The returned server has its listening socket started (UNIX socket,
    websocket, or TCP depending on the parsed address type) but its serve
    loop is not run here -- that is the caller's responsibility.
    """
    from . import server

    # Choose the storage backend from the shape of "dbname". The backend
    # modules are imported lazily so only the one actually used is loaded.
    if "://" in dbname:
        from .sqlalchemy import DatabaseEngine

        db_engine = DatabaseEngine(dbname, db_username, db_password)
    else:
        from .sqlite import DatabaseEngine

        db_engine = DatabaseEngine(dbname, sync)

    if anon_perms is None:
        anon_perms = server.DEFAULT_ANON_PERMS

    s = server.Server(
        db_engine,
        upstream=upstream,
        read_only=read_only,
        anon_perms=anon_perms,
        admin_username=admin_username,
        admin_password=admin_password,
    )

    # Start the listener matching the address family encoded in "addr".
    (typ, a) = parse_address(addr)
    if typ == ADDR_TYPE_UNIX:
        s.start_unix_server(*a)
    elif typ == ADDR_TYPE_WS:
        url = urlparse(a[0])
        s.start_websocket_server(url.hostname, url.port)
    else:
        s.start_tcp_server(*a)

    return s
|
||||
|
||||
|
||||
def create_client(addr, username=None, password=None):
    """
    Create a synchronous hash-equivalence client connected to *addr*.

    The address may describe a UNIX socket, a websocket URL, or a TCP
    host/port pair; the matching connect method is selected from the parsed
    address type. If the connection attempt fails the client is closed
    before the exception propagates, so no half-open client leaks.
    """
    from . import client

    c = client.Client(username, password)

    try:
        (typ, a) = parse_address(addr)
        if typ == ADDR_TYPE_UNIX:
            c.connect_unix(*a)
        elif typ == ADDR_TYPE_WS:
            c.connect_websocket(*a)
        else:
            c.connect_tcp(*a)
        return c
    except Exception:
        c.close()
        # Bare "raise" re-raises the active exception with its original
        # traceback intact ("raise e" would add a redundant frame).
        raise
|
||||
|
||||
|
||||
async def create_async_client(addr, username=None, password=None):
    """
    Create an asynchronous hash-equivalence client connected to *addr*.

    Mirrors create_client() but awaits the async connect methods. On a
    failed connection the client is closed before the exception propagates.
    """
    from . import client

    c = client.AsyncClient(username, password)

    try:
        (typ, a) = parse_address(addr)
        if typ == ADDR_TYPE_UNIX:
            await c.connect_unix(*a)
        elif typ == ADDR_TYPE_WS:
            await c.connect_websocket(*a)
        else:
            await c.connect_tcp(*a)

        return c
    except Exception:
        await c.close()
        # Bare "raise" keeps the original traceback ("raise e" would not).
        raise
|
||||
434
sources/poky/bitbake/lib/hashserv/client.py
Normal file
434
sources/poky/bitbake/lib/hashserv/client.py
Normal file
@@ -0,0 +1,434 @@
|
||||
# Copyright (C) 2019 Garmin Ltd.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import logging
|
||||
import socket
|
||||
import asyncio
|
||||
import bb.asyncrpc
|
||||
import json
|
||||
from . import create_async_client
|
||||
|
||||
|
||||
logger = logging.getLogger("hashserv.client")
|
||||
|
||||
|
||||
class Batch(object):
|
||||
def __init__(self):
|
||||
self.done = False
|
||||
self.cond = asyncio.Condition()
|
||||
self.pending = []
|
||||
self.results = []
|
||||
self.sent_count = 0
|
||||
|
||||
async def recv(self, socket):
|
||||
while True:
|
||||
async with self.cond:
|
||||
await self.cond.wait_for(lambda: self.pending or self.done)
|
||||
|
||||
if not self.pending:
|
||||
if self.done:
|
||||
return
|
||||
continue
|
||||
|
||||
r = await socket.recv()
|
||||
self.results.append(r)
|
||||
|
||||
async with self.cond:
|
||||
self.pending.pop(0)
|
||||
|
||||
async def send(self, socket, msgs):
|
||||
try:
|
||||
# In the event of a restart due to a reconnect, all in-flight
|
||||
# messages need to be resent first to keep to result count in sync
|
||||
for m in self.pending:
|
||||
await socket.send(m)
|
||||
|
||||
for m in msgs:
|
||||
# Add the message to the pending list before attempting to send
|
||||
# it so that if the send fails it will be retried
|
||||
async with self.cond:
|
||||
self.pending.append(m)
|
||||
self.cond.notify()
|
||||
self.sent_count += 1
|
||||
|
||||
await socket.send(m)
|
||||
|
||||
finally:
|
||||
async with self.cond:
|
||||
self.done = True
|
||||
self.cond.notify()
|
||||
|
||||
async def process(self, socket, msgs):
|
||||
await asyncio.gather(
|
||||
self.recv(socket),
|
||||
self.send(socket, msgs),
|
||||
)
|
||||
|
||||
if len(self.results) != self.sent_count:
|
||||
raise ValueError(
|
||||
f"Expected result count {len(self.results)}. Expected {self.sent_count}"
|
||||
)
|
||||
|
||||
return self.results
|
||||
|
||||
|
||||
class AsyncClient(bb.asyncrpc.AsyncClient):
    """
    Asynchronous hash-equivalence RPC client.

    The connection operates in one of three modes: normal request/response
    (MODE_NORMAL) or one of two line-based streaming modes used for bulk
    queries (MODE_GET_STREAM, MODE_EXIST_STREAM). Mode transitions are
    handled transparently by _set_mode().
    """

    MODE_NORMAL = 0
    MODE_GET_STREAM = 1
    MODE_EXIST_STREAM = 2

    def __init__(self, username=None, password=None):
        super().__init__("OEHASHEQUIV", "1.1", logger)
        self.mode = self.MODE_NORMAL
        self.username = username
        self.password = password
        self.saved_become_user = None

    async def setup_connection(self):
        await super().setup_connection()
        # A (re)connection always starts out in normal mode
        self.mode = self.MODE_NORMAL
        if self.username:
            # Save off become user temporarily because auth() resets it
            become = self.saved_become_user
            await self.auth(self.username, self.password)

            if become:
                await self.become_user(become)

    async def send_stream_batch(self, mode, msgs):
        """
        Does a "batch" process of stream messages. This sends the query
        messages as fast as possible, and simultaneously attempts to read the
        messages back. This helps to mitigate the effects of latency to the
        hash equivalence server be allowing multiple queries to be "in-flight"
        at once

        The implementation does more complicated tracking using a count of sent
        messages so that `msgs` can be a generator function (i.e. its length is
        unknown)

        """

        b = Batch()

        async def proc():
            nonlocal b

            await self._set_mode(mode)
            return await b.process(self.socket, msgs)

        return await self._send_wrapper(proc)

    async def invoke(self, *args, **kwargs):
        # It's OK if connection errors cause a failure here, because the mode
        # is also reset to normal on a new connection
        await self._set_mode(self.MODE_NORMAL)
        return await super().invoke(*args, **kwargs)

    async def _set_mode(self, new_mode):
        async def stream_to_normal():
            await self.socket.send("END")
            return await self.socket.recv()

        async def normal_to_stream(command):
            r = await self.invoke({command: None})
            if r != "ok":
                raise ConnectionError(
                    f"Unable to transition to stream mode: Bad response from server {r!r}"
                )

            self.logger.debug("Mode is now %s", command)

        if new_mode == self.mode:
            return

        self.logger.debug("Transitioning mode %s -> %s", self.mode, new_mode)

        # Always transition to normal mode before switching to any other mode
        if self.mode != self.MODE_NORMAL:
            r = await self._send_wrapper(stream_to_normal)
            if r != "ok":
                self.check_invoke_error(r)
                raise ConnectionError(
                    f"Unable to transition to normal mode: Bad response from server {r!r}"
                )
            self.logger.debug("Mode is now normal")

        if new_mode == self.MODE_GET_STREAM:
            await normal_to_stream("get-stream")
        elif new_mode == self.MODE_EXIST_STREAM:
            await normal_to_stream("exists-stream")
        elif new_mode != self.MODE_NORMAL:
            # Bug fix: this string was missing its f-prefix, so the
            # placeholders were never interpolated into the message.
            raise Exception(f"Undefined mode transition {self.mode!r} -> {new_mode!r}")

        self.mode = new_mode

    async def get_unihash(self, method, taskhash):
        r = await self.get_unihash_batch([(method, taskhash)])
        return r[0]

    async def get_unihash_batch(self, args):
        result = await self.send_stream_batch(
            self.MODE_GET_STREAM,
            (f"{method} {taskhash}" for method, taskhash in args),
        )
        # An empty-string reply means the server has no equivalent hash
        return [r if r else None for r in result]

    async def report_unihash(self, taskhash, method, outhash, unihash, extra=None):
        # "extra" defaults to None (not {}) to avoid the shared mutable
        # default argument anti-pattern; passing a dict still works.
        m = dict(extra) if extra else {}
        m["taskhash"] = taskhash
        m["method"] = method
        m["outhash"] = outhash
        m["unihash"] = unihash
        return await self.invoke({"report": m})

    async def report_unihash_equiv(self, taskhash, method, unihash, extra=None):
        # See report_unihash() for the None default rationale
        m = dict(extra) if extra else {}
        m["taskhash"] = taskhash
        m["method"] = method
        m["unihash"] = unihash
        return await self.invoke({"report-equiv": m})

    async def get_taskhash(self, method, taskhash, all_properties=False):
        return await self.invoke(
            {"get": {"taskhash": taskhash, "method": method, "all": all_properties}}
        )

    async def unihash_exists(self, unihash):
        r = await self.unihash_exists_batch([unihash])
        return r[0]

    async def unihash_exists_batch(self, unihashes):
        result = await self.send_stream_batch(self.MODE_EXIST_STREAM, unihashes)
        return [r == "true" for r in result]

    async def get_outhash(self, method, outhash, taskhash, with_unihash=True):
        return await self.invoke(
            {
                "get-outhash": {
                    "outhash": outhash,
                    "taskhash": taskhash,
                    "method": method,
                    "with_unihash": with_unihash,
                }
            }
        )

    async def get_stats(self):
        return await self.invoke({"get-stats": None})

    async def reset_stats(self):
        return await self.invoke({"reset-stats": None})

    async def backfill_wait(self):
        return (await self.invoke({"backfill-wait": None}))["tasks"]

    async def remove(self, where):
        return await self.invoke({"remove": {"where": where}})

    async def clean_unused(self, max_age):
        return await self.invoke({"clean-unused": {"max_age_seconds": max_age}})

    async def auth(self, username, token):
        result = await self.invoke({"auth": {"username": username, "token": token}})
        self.username = username
        self.password = token
        self.saved_become_user = None
        return result

    async def refresh_token(self, username=None):
        m = {}
        if username:
            m["username"] = username
        result = await self.invoke({"refresh-token": m})
        # Only adopt the new token when it belongs to the logged-in user
        # (and we are not currently impersonating someone else)
        if (
            self.username
            and not self.saved_become_user
            and result["username"] == self.username
        ):
            self.password = result["token"]
        return result

    async def set_user_perms(self, username, permissions):
        return await self.invoke(
            {"set-user-perms": {"username": username, "permissions": permissions}}
        )

    async def get_user(self, username=None):
        m = {}
        if username:
            m["username"] = username
        return await self.invoke({"get-user": m})

    async def get_all_users(self):
        return (await self.invoke({"get-all-users": {}}))["users"]

    async def new_user(self, username, permissions):
        return await self.invoke(
            {"new-user": {"username": username, "permissions": permissions}}
        )

    async def delete_user(self, username):
        return await self.invoke({"delete-user": {"username": username}})

    async def become_user(self, username):
        result = await self.invoke({"become-user": {"username": username}})
        if username == self.username:
            self.saved_become_user = None
        else:
            self.saved_become_user = username
        return result

    async def get_db_usage(self):
        return (await self.invoke({"get-db-usage": {}}))["usage"]

    async def get_db_query_columns(self):
        return (await self.invoke({"get-db-query-columns": {}}))["columns"]

    async def gc_status(self):
        return await self.invoke({"gc-status": {}})

    async def gc_mark(self, mark, where):
        """
        Starts a new garbage collection operation identified by "mark". If
        garbage collection is already in progress with "mark", the collection
        is continued.

        All unihash entries that match the "where" clause are marked to be
        kept. In addition, any new entries added to the database after this
        command will be automatically marked with "mark"
        """
        return await self.invoke({"gc-mark": {"mark": mark, "where": where}})

    async def gc_sweep(self, mark):
        """
        Finishes garbage collection for "mark". All unihash entries that have
        not been marked will be deleted.

        It is recommended to clean unused outhash entries after running this to
        cleanup any dangling outhashes
        """
        return await self.invoke({"gc-sweep": {"mark": mark}})
|
||||
|
||||
|
||||
class Client(bb.asyncrpc.Client):
    """
    Synchronous hash-equivalence client.

    Each listed method is forwarded by the bb.asyncrpc base machinery to
    the underlying AsyncClient instance.
    """

    def __init__(self, username=None, password=None):
        self.username = username
        self.password = password

        super().__init__()

        # Names of all AsyncClient operations exposed synchronously
        proxied = (
            "connect_tcp",
            "connect_websocket",
            "get_unihash",
            "get_unihash_batch",
            "report_unihash",
            "report_unihash_equiv",
            "get_taskhash",
            "unihash_exists",
            "unihash_exists_batch",
            "get_outhash",
            "get_stats",
            "reset_stats",
            "backfill_wait",
            "remove",
            "clean_unused",
            "auth",
            "refresh_token",
            "set_user_perms",
            "get_user",
            "get_all_users",
            "new_user",
            "delete_user",
            "become_user",
            "get_db_usage",
            "get_db_query_columns",
            "gc_status",
            "gc_mark",
            "gc_sweep",
        )
        self._add_methods(*proxied)

    def _get_async_client(self):
        # Factory used by the base class to build the wrapped async client
        return AsyncClient(self.username, self.password)
|
||||
|
||||
|
||||
class ClientPool(bb.asyncrpc.ClientPool):
    """Pool of hash-equivalence clients used to fan queries out in parallel."""

    def __init__(
        self,
        address,
        max_clients,
        *,
        username=None,
        password=None,
        become=None,
    ):
        super().__init__(max_clients)
        self.address = address
        self.username = username
        self.password = password
        self.become = become

    async def _new_client(self):
        # Each pooled connection authenticates (and optionally switches
        # user) exactly like a standalone client would.
        pooled = await create_async_client(
            self.address,
            username=self.username,
            password=self.password,
        )
        if self.become:
            await pooled.become_user(self.become)
        return pooled

    def _run_key_tasks(self, queries, call):
        # Pre-populate with None so failed queries still have an entry
        results = dict.fromkeys(queries)

        def make_task(key, args):
            async def task(client):
                results[key] = await call(client, args)

            return task

        self.run_tasks(
            make_task(key, args) for key, args in queries.items()
        )
        return results

    def get_unihashes(self, queries):
        """
        Query multiple unihashes in parallel.

        The queries argument is a dictionary with arbitrary key. The values
        must be a tuple of (method, taskhash).

        Returns a dictionary with a corresponding key for each input key, and
        the value is the queried unihash (which might be none if the query
        failed)
        """

        async def call(client, args):
            method, taskhash = args
            return await client.get_unihash(method, taskhash)

        return self._run_key_tasks(queries, call)

    def unihashes_exist(self, queries):
        """
        Query multiple unihash existence checks in parallel.

        The queries argument is a dictionary with arbitrary key. The values
        must be a unihash.

        Returns a dictionary with a corresponding key for each input key, and
        the value is True or False if the unihash is known by the server (or
        None if there was a failure)
        """

        async def call(client, unihash):
            return await client.unihash_exists(unihash)

        return self._run_key_tasks(queries, call)
|
||||
881
sources/poky/bitbake/lib/hashserv/server.py
Normal file
881
sources/poky/bitbake/lib/hashserv/server.py
Normal file
@@ -0,0 +1,881 @@
|
||||
# Copyright (C) 2019 Garmin Ltd.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
from datetime import datetime, timedelta
|
||||
import asyncio
|
||||
import logging
|
||||
import math
|
||||
import time
|
||||
import os
|
||||
import base64
|
||||
import hashlib
|
||||
from . import create_async_client
|
||||
import bb.asyncrpc
|
||||
|
||||
logger = logging.getLogger("hashserv.server")
|
||||
|
||||
|
||||
# This permission only exists to match nothing
NONE_PERM = "@none"

READ_PERM = "@read"
REPORT_PERM = "@report"
DB_ADMIN_PERM = "@db-admin"
USER_ADMIN_PERM = "@user-admin"
# Wildcard permission implying every other permission
ALL_PERM = "@all"

# Complete set of permission strings the server recognizes
ALL_PERMISSIONS = {
    READ_PERM,
    REPORT_PERM,
    DB_ADMIN_PERM,
    USER_ADMIN_PERM,
    ALL_PERM,
}

# Permissions granted to unauthenticated (anonymous) clients unless the
# server is configured otherwise
DEFAULT_ANON_PERMS = (
    READ_PERM,
    REPORT_PERM,
    DB_ADMIN_PERM,
)

# Hash algorithm name used when storing user tokens (see hash_token())
TOKEN_ALGORITHM = "sha256"

# 48 bytes of random data will result in 64 characters when base64
# encoded. This number also ensures that the base64 encoding won't have any
# trailing '=' characters.
TOKEN_SIZE = 48

# Number of random bytes in a token salt (hex-encodes to twice this length)
SALT_SIZE = 8
|
||||
|
||||
|
||||
class Measurement(object):
    """Context manager that times a region and reports it to a Sample."""

    def __init__(self, sample):
        self.sample = sample

    def start(self):
        """Record the starting timestamp."""
        self.start_time = time.perf_counter()

    def end(self):
        """Report the elapsed wall-clock time to the owning sample."""
        elapsed = time.perf_counter() - self.start_time
        self.sample.add(elapsed)

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *args, **kwargs):
        self.end()
|
||||
|
||||
|
||||
class Sample(object):
    """Aggregates one or more timed measurements into a single Stats entry."""

    def __init__(self, stats):
        self.stats = stats
        self.num_samples = 0
        self.elapsed = 0

    def measure(self):
        """Return a Measurement that reports into this sample."""
        return Measurement(self)

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.end()

    def add(self, elapsed):
        """Accumulate one measurement's elapsed time."""
        self.num_samples += 1
        self.elapsed += elapsed

    def end(self):
        """Flush the accumulated time into the parent Stats (idempotent)."""
        if self.num_samples:
            self.stats.add(self.elapsed)
            self.num_samples = 0
            self.elapsed = 0
|
||||
|
||||
|
||||
class Stats(object):
    """
    Online timing statistics.

    The running mean (m) and sum of squared deviations (s) are maintained
    with Welford's algorithm, so stdev can be computed without keeping the
    individual samples.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.num = 0
        self.total_time = 0
        self.max_time = 0
        self.m = 0
        self.s = 0
        self.current_elapsed = None

    def add(self, elapsed):
        """Fold one elapsed-time sample into the running aggregates."""
        self.num += 1
        if self.num == 1:
            self.m = elapsed
            self.s = 0
        else:
            prev_mean = self.m
            self.m = prev_mean + (elapsed - prev_mean) / self.num
            self.s += (elapsed - prev_mean) * (elapsed - self.m)

        self.total_time += elapsed
        self.max_time = max(self.max_time, elapsed)

    def start_sample(self):
        """Create a Sample that will report back into these statistics."""
        return Sample(self)

    @property
    def average(self):
        """Mean of all samples (0 when no samples have been added)."""
        return self.total_time / self.num if self.num else 0

    @property
    def stdev(self):
        """Sample standard deviation (0 with fewer than two samples)."""
        if self.num <= 1:
            return 0
        return math.sqrt(self.s / (self.num - 1))

    def todict(self):
        """Export the statistics as a plain dictionary."""
        keys = ("num", "total_time", "max_time", "average", "stdev")
        return {k: getattr(self, k) for k in keys}
|
||||
|
||||
|
||||
# Global lock shared by all token-refresh requests; see new_token() for
# why every request serializes through it.
token_refresh_semaphore = asyncio.Lock()


async def new_token():
    """Generate a fresh random API token (64 base64 characters)."""
    # Prevent malicious users from using this API to deduce the entropy
    # pool on the server and thus be able to guess a token. *All* token
    # refresh requests lock the same global semaphore and then sleep for a
    # short time. The effectively rate limits the total number of requests
    # than can be made across all clients to 10/second, which should be enough
    # since you have to be an authenticated users to make the request in the
    # first place
    async with token_refresh_semaphore:
        await asyncio.sleep(0.1)
        # GRND_NONBLOCK: raise instead of blocking if the kernel entropy
        # pool is not initialized (os.getrandom is Linux-specific).
        raw = os.getrandom(TOKEN_SIZE, os.GRND_NONBLOCK)

    # TOKEN_SIZE (48) bytes base64-encode to exactly 64 characters with no
    # '=' padding; '.' and '_' replace '+' and '/' in the output alphabet.
    return base64.b64encode(raw, b"._").decode("utf-8")
|
||||
|
||||
|
||||
def new_salt():
    """Return a fresh random salt as a hex string (2 * SALT_SIZE chars)."""
    return os.getrandom(SALT_SIZE, os.GRND_NONBLOCK).hex()
|
||||
|
||||
|
||||
def hash_token(algo, salt, token):
    """
    Return the storable form of *token*: "<algo>:<salt>:<hexdigest>".

    The digest is computed over the UTF-8 bytes of the salt followed by
    the token, so the same token with a different salt hashes differently.
    """
    digest = hashlib.new(algo)
    for part in (salt, token):
        digest.update(part.encode("utf-8"))
    return f"{algo}:{salt}:{digest.hexdigest()}"
|
||||
|
||||
|
||||
def permissions(*permissions, allow_anon=True, allow_self_service=False):
    """
    Function decorator that can be used to decorate an RPC function call and
    check that the current users permissions match the require permissions.

    If allow_anon is True, the user will also be allowed to make the RPC call
    if the anonymous user permissions match the permissions.

    If allow_self_service is True, and the "username" property in the request
    is the currently logged in user, or not specified, the user will also be
    allowed to make the request. This allows users to access normal privileged
    API, as long as they are only modifying their own user properties (e.g.
    users can be allowed to reset their own token without @user-admin
    permissions, but not the token for any other user.
    """

    def wrapper(func):
        async def wrap(self, request):
            # Self-service path: a logged-in user operating on their own
            # account (explicitly, or implicitly when no username is given)
            # bypasses the permission check entirely.
            if allow_self_service and self.user is not None:
                username = request.get("username", self.user.username)
                if username == self.user.username:
                    request["username"] = self.user.username
                    return await func(self, request)

            if not self.user_has_permissions(*permissions, allow_anon=allow_anon):
                # Name the (possibly anonymous) caller and the permissions
                # they were missing, both in the log and in the error.
                if not self.user:
                    username = "Anonymous user"
                    user_perms = self.server.anon_perms
                else:
                    username = self.user.username
                    user_perms = self.user.permissions

                self.logger.info(
                    "User %s with permissions %r denied from calling %s. Missing permissions(s) %r",
                    username,
                    ", ".join(user_perms),
                    func.__name__,
                    ", ".join(permissions),
                )
                raise bb.asyncrpc.InvokeError(
                    f"{username} is not allowed to access permissions(s) {', '.join(permissions)}"
                )

            return await func(self, request)

        return wrap

    return wrapper
|
||||
|
||||
|
||||
class ServerClient(bb.asyncrpc.AsyncServerConnection):
|
||||
def __init__(self, socket, server):
|
||||
super().__init__(socket, "OEHASHEQUIV", server.logger)
|
||||
self.server = server
|
||||
self.max_chunk = bb.asyncrpc.DEFAULT_MAX_CHUNK
|
||||
self.user = None
|
||||
|
||||
self.handlers.update(
|
||||
{
|
||||
"get": self.handle_get,
|
||||
"get-outhash": self.handle_get_outhash,
|
||||
"get-stream": self.handle_get_stream,
|
||||
"exists-stream": self.handle_exists_stream,
|
||||
"get-stats": self.handle_get_stats,
|
||||
"get-db-usage": self.handle_get_db_usage,
|
||||
"get-db-query-columns": self.handle_get_db_query_columns,
|
||||
# Not always read-only, but internally checks if the server is
|
||||
# read-only
|
||||
"report": self.handle_report,
|
||||
"auth": self.handle_auth,
|
||||
"get-user": self.handle_get_user,
|
||||
"get-all-users": self.handle_get_all_users,
|
||||
"become-user": self.handle_become_user,
|
||||
}
|
||||
)
|
||||
|
||||
if not self.server.read_only:
|
||||
self.handlers.update(
|
||||
{
|
||||
"report-equiv": self.handle_equivreport,
|
||||
"reset-stats": self.handle_reset_stats,
|
||||
"backfill-wait": self.handle_backfill_wait,
|
||||
"remove": self.handle_remove,
|
||||
"gc-mark": self.handle_gc_mark,
|
||||
"gc-sweep": self.handle_gc_sweep,
|
||||
"gc-status": self.handle_gc_status,
|
||||
"clean-unused": self.handle_clean_unused,
|
||||
"refresh-token": self.handle_refresh_token,
|
||||
"set-user-perms": self.handle_set_perms,
|
||||
"new-user": self.handle_new_user,
|
||||
"delete-user": self.handle_delete_user,
|
||||
}
|
||||
)
|
||||
|
||||
def raise_no_user_error(self, username):
|
||||
raise bb.asyncrpc.InvokeError(f"No user named '{username}' exists")
|
||||
|
||||
def user_has_permissions(self, *permissions, allow_anon=True):
|
||||
permissions = set(permissions)
|
||||
if allow_anon:
|
||||
if ALL_PERM in self.server.anon_perms:
|
||||
return True
|
||||
|
||||
if not permissions - self.server.anon_perms:
|
||||
return True
|
||||
|
||||
if self.user is None:
|
||||
return False
|
||||
|
||||
if ALL_PERM in self.user.permissions:
|
||||
return True
|
||||
|
||||
if not permissions - self.user.permissions:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def validate_proto_version(self):
|
||||
return self.proto_version > (1, 0) and self.proto_version <= (1, 1)
|
||||
|
||||
async def process_requests(self):
|
||||
async with self.server.db_engine.connect(self.logger) as db:
|
||||
self.db = db
|
||||
if self.server.upstream is not None:
|
||||
self.upstream_client = await create_async_client(self.server.upstream)
|
||||
else:
|
||||
self.upstream_client = None
|
||||
|
||||
try:
|
||||
await super().process_requests()
|
||||
finally:
|
||||
if self.upstream_client is not None:
|
||||
await self.upstream_client.close()
|
||||
|
||||
async def dispatch_message(self, msg):
|
||||
for k in self.handlers.keys():
|
||||
if k in msg:
|
||||
self.logger.debug("Handling %s" % k)
|
||||
if "stream" in k:
|
||||
return await self.handlers[k](msg[k])
|
||||
else:
|
||||
with self.server.request_stats.start_sample() as self.request_sample, self.request_sample.measure():
|
||||
return await self.handlers[k](msg[k])
|
||||
|
||||
raise bb.asyncrpc.ClientError("Unrecognized command %r" % msg)
|
||||
|
||||
@permissions(READ_PERM)
|
||||
async def handle_get(self, request):
|
||||
method = request["method"]
|
||||
taskhash = request["taskhash"]
|
||||
fetch_all = request.get("all", False)
|
||||
|
||||
return await self.get_unihash(method, taskhash, fetch_all)
|
||||
|
||||
async def get_unihash(self, method, taskhash, fetch_all=False):
|
||||
d = None
|
||||
|
||||
if fetch_all:
|
||||
row = await self.db.get_unihash_by_taskhash_full(method, taskhash)
|
||||
if row is not None:
|
||||
d = {k: row[k] for k in row.keys()}
|
||||
elif self.upstream_client is not None:
|
||||
d = await self.upstream_client.get_taskhash(method, taskhash, True)
|
||||
await self.update_unified(d)
|
||||
else:
|
||||
row = await self.db.get_equivalent(method, taskhash)
|
||||
|
||||
if row is not None:
|
||||
d = {k: row[k] for k in row.keys()}
|
||||
elif self.upstream_client is not None:
|
||||
d = await self.upstream_client.get_taskhash(method, taskhash)
|
||||
await self.db.insert_unihash(d["method"], d["taskhash"], d["unihash"])
|
||||
|
||||
return d
|
||||
|
||||
@permissions(READ_PERM)
|
||||
async def handle_get_outhash(self, request):
|
||||
method = request["method"]
|
||||
outhash = request["outhash"]
|
||||
taskhash = request["taskhash"]
|
||||
with_unihash = request.get("with_unihash", True)
|
||||
|
||||
return await self.get_outhash(method, outhash, taskhash, with_unihash)
|
||||
|
||||
async def get_outhash(self, method, outhash, taskhash, with_unihash=True):
|
||||
d = None
|
||||
if with_unihash:
|
||||
row = await self.db.get_unihash_by_outhash(method, outhash)
|
||||
else:
|
||||
row = await self.db.get_outhash(method, outhash)
|
||||
|
||||
if row is not None:
|
||||
d = {k: row[k] for k in row.keys()}
|
||||
elif self.upstream_client is not None:
|
||||
d = await self.upstream_client.get_outhash(method, outhash, taskhash)
|
||||
await self.update_unified(d)
|
||||
|
||||
return d
|
||||
|
||||
async def update_unified(self, data):
|
||||
if data is None:
|
||||
return
|
||||
|
||||
await self.db.insert_unihash(data["method"], data["taskhash"], data["unihash"])
|
||||
await self.db.insert_outhash(data)
|
||||
|
||||
async def _stream_handler(self, handler):
|
||||
await self.socket.send_message("ok")
|
||||
|
||||
while True:
|
||||
upstream = None
|
||||
|
||||
l = await self.socket.recv()
|
||||
if not l:
|
||||
break
|
||||
|
||||
try:
|
||||
# This inner loop is very sensitive and must be as fast as
|
||||
# possible (which is why the request sample is handled manually
|
||||
# instead of using 'with', and also why logging statements are
|
||||
# commented out.
|
||||
self.request_sample = self.server.request_stats.start_sample()
|
||||
request_measure = self.request_sample.measure()
|
||||
request_measure.start()
|
||||
|
||||
if l == "END":
|
||||
break
|
||||
|
||||
msg = await handler(l)
|
||||
await self.socket.send(msg)
|
||||
finally:
|
||||
request_measure.end()
|
||||
self.request_sample.end()
|
||||
|
||||
await self.socket.send("ok")
|
||||
return self.NO_RESPONSE
|
||||
|
||||
@permissions(READ_PERM)
|
||||
async def handle_get_stream(self, request):
|
||||
async def handler(l):
|
||||
(method, taskhash) = l.split()
|
||||
# self.logger.debug('Looking up %s %s' % (method, taskhash))
|
||||
row = await self.db.get_equivalent(method, taskhash)
|
||||
|
||||
if row is not None:
|
||||
# self.logger.debug('Found equivalent task %s -> %s', (row['taskhash'], row['unihash']))
|
||||
return row["unihash"]
|
||||
|
||||
if self.upstream_client is not None:
|
||||
upstream = await self.upstream_client.get_unihash(method, taskhash)
|
||||
if upstream:
|
||||
await self.server.backfill_queue.put((method, taskhash))
|
||||
return upstream
|
||||
|
||||
return ""
|
||||
|
||||
return await self._stream_handler(handler)
|
||||
|
||||
@permissions(READ_PERM)
|
||||
async def handle_exists_stream(self, request):
|
||||
async def handler(l):
|
||||
if await self.db.unihash_exists(l):
|
||||
return "true"
|
||||
|
||||
if self.upstream_client is not None:
|
||||
if await self.upstream_client.unihash_exists(l):
|
||||
return "true"
|
||||
|
||||
return "false"
|
||||
|
||||
return await self._stream_handler(handler)
|
||||
|
||||
async def report_readonly(self, data):
|
||||
method = data["method"]
|
||||
outhash = data["outhash"]
|
||||
taskhash = data["taskhash"]
|
||||
|
||||
info = await self.get_outhash(method, outhash, taskhash)
|
||||
if info:
|
||||
unihash = info["unihash"]
|
||||
else:
|
||||
unihash = data["unihash"]
|
||||
|
||||
return {
|
||||
"taskhash": taskhash,
|
||||
"method": method,
|
||||
"unihash": unihash,
|
||||
}
|
||||
|
||||
    # Since this can be called either read only or to report, the check to
    # report is made inside the function
    @permissions(READ_PERM)
    async def handle_report(self, data):
        """Record a task's output hash and resolve/assign its unihash.

        Falls back to a read-only lookup when the server is read-only or
        the client lacks REPORT_PERM. Otherwise the outhash is inserted
        and, when the row is new, an equivalent entry (local, then
        upstream) determines the unihash. The reply always reflects the
        mapping as re-read from the database after any insert.
        """
        if self.server.read_only or not self.user_has_permissions(REPORT_PERM):
            return await self.report_readonly(data)

        outhash_data = {
            "method": data["method"],
            "outhash": data["outhash"],
            "taskhash": data["taskhash"],
            "created": datetime.now(),
        }

        # Optional provenance fields are copied only when the client sent them
        for k in ("owner", "PN", "PV", "PR", "task", "outhash_siginfo"):
            if k in data:
                outhash_data[k] = data[k]

        # An authenticated user overrides any client-claimed owner
        if self.user:
            outhash_data["owner"] = self.user.username

        # Insert the new entry, unless it already exists
        if await self.db.insert_outhash(outhash_data):
            # If this row is new, check if it is equivalent to another
            # output hash
            row = await self.db.get_equivalent_for_outhash(
                data["method"], data["outhash"], data["taskhash"]
            )

            if row is not None:
                # A matching output hash was found. Set our taskhash to the
                # same unihash since they are equivalent
                unihash = row["unihash"]
            else:
                # No matching output hash was found. This is probably the
                # first outhash to be added.
                unihash = data["unihash"]

                # Query upstream to see if it has a unihash we can use
                if self.upstream_client is not None:
                    upstream_data = await self.upstream_client.get_outhash(
                        data["method"], data["outhash"], data["taskhash"]
                    )
                    if upstream_data is not None:
                        unihash = upstream_data["unihash"]

            await self.db.insert_unihash(data["method"], data["taskhash"], unihash)

        # Re-read the stored mapping so the reply reflects what the
        # database actually holds (e.g. after a concurrent writer won)
        unihash_data = await self.get_unihash(data["method"], data["taskhash"])
        if unihash_data is not None:
            unihash = unihash_data["unihash"]
        else:
            unihash = data["unihash"]

        return {
            "taskhash": data["taskhash"],
            "method": data["method"],
            "unihash": unihash,
        }
    @permissions(READ_PERM, REPORT_PERM)
    async def handle_equivreport(self, data):
        """Report that a taskhash maps to an externally-computed unihash.

        Inserts the mapping (a duplicate insert is ignored by the
        database layer) and returns whatever mapping the database ends
        up holding for the taskhash.
        """
        await self.db.insert_unihash(data["method"], data["taskhash"], data["unihash"])

        # Fetch the unihash that will be reported for the taskhash. If the
        # unihash matches, it means this row was inserted (or the mapping
        # was already valid)
        # NOTE(review): assumes the row exists right after the insert; a
        # concurrent remove would make row None and raise below — confirm
        # this is acceptable.
        row = await self.db.get_equivalent(data["method"], data["taskhash"])

        if row["unihash"] == data["unihash"]:
            self.logger.info(
                "Adding taskhash equivalence for %s with unihash %s",
                data["taskhash"],
                row["unihash"],
            )

        return {k: row[k] for k in ("taskhash", "method", "unihash")}
@permissions(READ_PERM)
|
||||
async def handle_get_stats(self, request):
|
||||
return {
|
||||
"requests": self.server.request_stats.todict(),
|
||||
}
|
||||
|
||||
@permissions(DB_ADMIN_PERM)
|
||||
async def handle_reset_stats(self, request):
|
||||
d = {
|
||||
"requests": self.server.request_stats.todict(),
|
||||
}
|
||||
|
||||
self.server.request_stats.reset()
|
||||
return d
|
||||
|
||||
    @permissions(READ_PERM)
    async def handle_backfill_wait(self, request):
        """Block until the upstream backfill queue has drained.

        The reported "tasks" count is the queue size sampled before
        waiting, so it is the number of items the caller waited on.
        """
        d = {
            "tasks": self.server.backfill_queue.qsize(),
        }
        await self.server.backfill_queue.join()
        return d
@permissions(DB_ADMIN_PERM)
|
||||
async def handle_remove(self, request):
|
||||
condition = request["where"]
|
||||
if not isinstance(condition, dict):
|
||||
raise TypeError("Bad condition type %s" % type(condition))
|
||||
|
||||
return {"count": await self.db.remove(condition)}
|
||||
|
||||
@permissions(DB_ADMIN_PERM)
|
||||
async def handle_gc_mark(self, request):
|
||||
condition = request["where"]
|
||||
mark = request["mark"]
|
||||
|
||||
if not isinstance(condition, dict):
|
||||
raise TypeError("Bad condition type %s" % type(condition))
|
||||
|
||||
if not isinstance(mark, str):
|
||||
raise TypeError("Bad mark type %s" % type(mark))
|
||||
|
||||
return {"count": await self.db.gc_mark(mark, condition)}
|
||||
|
||||
    @permissions(DB_ADMIN_PERM)
    async def handle_gc_sweep(self, request):
        """Delete all unihash rows not covered by the current GC mark.

        The client must echo back the active mark; sweeping with a stale
        mark is refused so an interleaved mark/sweep cycle cannot delete
        freshly-marked rows.
        """
        mark = request["mark"]

        if not isinstance(mark, str):
            raise TypeError("Bad mark type %s" % type(mark))

        current_mark = await self.db.get_current_gc_mark()

        if not current_mark or mark != current_mark:
            raise bb.asyncrpc.InvokeError(
                f"'{mark}' is not the current mark. Refusing to sweep"
            )

        count = await self.db.gc_sweep()

        return {"count": count}
@permissions(DB_ADMIN_PERM)
|
||||
async def handle_gc_status(self, request):
|
||||
(keep_rows, remove_rows, current_mark) = await self.db.gc_status()
|
||||
return {
|
||||
"keep": keep_rows,
|
||||
"remove": remove_rows,
|
||||
"mark": current_mark,
|
||||
}
|
||||
|
||||
    @permissions(DB_ADMIN_PERM)
    async def handle_clean_unused(self, request):
        """Remove outhash rows with no unihash reference, by age cutoff."""
        max_age = request["max_age_seconds"]
        # NOTE(review): the double negative makes the cutoff
        # now + max_age (a future timestamp), so for positive max_age ALL
        # unreferenced rows qualify regardless of age. If the intent is
        # "older than max_age", this should be timedelta(seconds=max_age)
        # — confirm against the callers before changing.
        oldest = datetime.now() - timedelta(seconds=-max_age)
        return {"count": await self.db.clean_unused(oldest)}
@permissions(DB_ADMIN_PERM)
|
||||
async def handle_get_db_usage(self, request):
|
||||
return {"usage": await self.db.get_usage()}
|
||||
|
||||
@permissions(DB_ADMIN_PERM)
|
||||
async def handle_get_db_query_columns(self, request):
|
||||
return {"columns": await self.db.get_query_columns()}
|
||||
|
||||
# The authentication API is always allowed
|
||||
async def handle_auth(self, request):
|
||||
username = str(request["username"])
|
||||
token = str(request["token"])
|
||||
|
||||
async def fail_auth():
|
||||
nonlocal username
|
||||
# Rate limit bad login attempts
|
||||
await asyncio.sleep(1)
|
||||
raise bb.asyncrpc.InvokeError(f"Unable to authenticate as {username}")
|
||||
|
||||
user, db_token = await self.db.lookup_user_token(username)
|
||||
|
||||
if not user or not db_token:
|
||||
await fail_auth()
|
||||
|
||||
try:
|
||||
algo, salt, _ = db_token.split(":")
|
||||
except ValueError:
|
||||
await fail_auth()
|
||||
|
||||
if hash_token(algo, salt, token) != db_token:
|
||||
await fail_auth()
|
||||
|
||||
self.user = user
|
||||
|
||||
self.logger.info("Authenticated as %s", username)
|
||||
|
||||
return {
|
||||
"result": True,
|
||||
"username": self.user.username,
|
||||
"permissions": sorted(list(self.user.permissions)),
|
||||
}
|
||||
|
||||
    @permissions(USER_ADMIN_PERM, allow_self_service=True, allow_anon=False)
    async def handle_refresh_token(self, request):
        """Generate and store a fresh token for *username*.

        The plaintext token is returned to the caller; only its salted
        hash is persisted. Raises when the user does not exist.
        """
        username = str(request["username"])

        token = await new_token()

        updated = await self.db.set_user_token(
            username,
            hash_token(TOKEN_ALGORITHM, new_salt(), token),
        )
        if not updated:
            self.raise_no_user_error(username)

        return {"username": username, "token": token}
def get_perm_arg(self, arg):
|
||||
if not isinstance(arg, list):
|
||||
raise bb.asyncrpc.InvokeError("Unexpected type for permissions")
|
||||
|
||||
arg = set(arg)
|
||||
try:
|
||||
arg.remove(NONE_PERM)
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
unknown_perms = arg - ALL_PERMISSIONS
|
||||
if unknown_perms:
|
||||
raise bb.asyncrpc.InvokeError(
|
||||
"Unknown permissions %s" % ", ".join(sorted(list(unknown_perms)))
|
||||
)
|
||||
|
||||
return sorted(list(arg))
|
||||
|
||||
def return_perms(self, permissions):
|
||||
if ALL_PERM in permissions:
|
||||
return sorted(list(ALL_PERMISSIONS))
|
||||
return sorted(list(permissions))
|
||||
|
||||
    @permissions(USER_ADMIN_PERM, allow_anon=False)
    async def handle_set_perms(self, request):
        """Replace *username*'s permission set with the validated list."""
        username = str(request["username"])
        permissions = self.get_perm_arg(request["permissions"])

        if not await self.db.set_user_perms(username, permissions):
            self.raise_no_user_error(username)

        return {
            "username": username,
            "permissions": self.return_perms(permissions),
        }
    @permissions(USER_ADMIN_PERM, allow_self_service=True, allow_anon=False)
    async def handle_get_user(self, request):
        """Look up *username*; returns None when the user does not exist."""
        username = str(request["username"])

        user = await self.db.lookup_user(username)
        if user is None:
            return None

        return {
            "username": user.username,
            "permissions": self.return_perms(user.permissions),
        }
@permissions(USER_ADMIN_PERM, allow_anon=False)
|
||||
async def handle_get_all_users(self, request):
|
||||
users = await self.db.get_all_users()
|
||||
return {
|
||||
"users": [
|
||||
{
|
||||
"username": u.username,
|
||||
"permissions": self.return_perms(u.permissions),
|
||||
}
|
||||
for u in users
|
||||
]
|
||||
}
|
||||
|
||||
    @permissions(USER_ADMIN_PERM, allow_anon=False)
    async def handle_new_user(self, request):
        """Create a new user with the given permissions.

        A fresh token is generated and returned in plaintext; only its
        salted hash is stored. Fails when the username already exists.
        """
        username = str(request["username"])
        permissions = self.get_perm_arg(request["permissions"])

        token = await new_token()

        inserted = await self.db.new_user(
            username,
            permissions,
            hash_token(TOKEN_ALGORITHM, new_salt(), token),
        )
        if not inserted:
            raise bb.asyncrpc.InvokeError(f"Cannot create new user '{username}'")

        return {
            "username": username,
            "permissions": self.return_perms(permissions),
            "token": token,
        }
@permissions(USER_ADMIN_PERM, allow_self_service=True, allow_anon=False)
|
||||
async def handle_delete_user(self, request):
|
||||
username = str(request["username"])
|
||||
|
||||
if not await self.db.delete_user(username):
|
||||
self.raise_no_user_error(username)
|
||||
|
||||
return {"username": username}
|
||||
|
||||
    @permissions(USER_ADMIN_PERM, allow_anon=False)
    async def handle_become_user(self, request):
        """Switch this connection's identity to *username* (admin only)."""
        username = str(request["username"])

        user = await self.db.lookup_user(username)
        if user is None:
            raise bb.asyncrpc.InvokeError(f"User {username} doesn't exist")

        self.user = user

        self.logger.info("Became user %s", username)

        return {
            "username": self.user.username,
            "permissions": self.return_perms(self.user.permissions),
        }
class Server(bb.asyncrpc.AsyncServer):
    """Hash equivalence server tying the RPC layer to a database engine."""

    def __init__(
        self,
        db_engine,
        upstream=None,
        read_only=False,
        anon_perms=DEFAULT_ANON_PERMS,
        admin_username=None,
        admin_password=None,
    ):
        """Configure the server.

        db_engine: database engine providing connect()/create().
        upstream: optional upstream server address to pull hashes from
            (mutually exclusive with read_only).
        read_only: refuse all writes when True.
        anon_perms: permissions granted to unauthenticated clients.
        admin_username/admin_password: when set, an admin user is
            created (or re-synced) at startup.
        """
        if upstream and read_only:
            raise bb.asyncrpc.ServerError(
                "Read-only hashserv cannot pull from an upstream server"
            )

        # Anonymous clients may never hold user-admin (or broader) rights
        disallowed_perms = set(anon_perms) - set(
            [NONE_PERM, READ_PERM, REPORT_PERM, DB_ADMIN_PERM]
        )

        if disallowed_perms:
            raise bb.asyncrpc.ServerError(
                f"Permission(s) {' '.join(disallowed_perms)} are not allowed for anonymous users"
            )

        super().__init__(logger)

        self.request_stats = Stats()
        self.db_engine = db_engine
        self.upstream = upstream
        self.read_only = read_only
        # Created lazily in start() when an upstream is configured
        self.backfill_queue = None
        self.anon_perms = set(anon_perms)
        self.admin_username = admin_username
        self.admin_password = admin_password

        self.logger.info(
            "Anonymous user permissions are: %s", ", ".join(self.anon_perms)
        )

    def accept_client(self, socket):
        # One ServerClient instance per incoming connection
        return ServerClient(socket, self)

    async def create_admin_user(self):
        """Create the admin user, or re-sync its permissions/token if present."""
        admin_permissions = (ALL_PERM,)
        async with self.db_engine.connect(self.logger) as db:
            added = await db.new_user(
                self.admin_username,
                admin_permissions,
                hash_token(TOKEN_ALGORITHM, new_salt(), self.admin_password),
            )
            if added:
                self.logger.info("Created admin user '%s'", self.admin_username)
            else:
                # User already exists: force its permissions and token to
                # the configured values
                await db.set_user_perms(
                    self.admin_username,
                    admin_permissions,
                )
                await db.set_user_token(
                    self.admin_username,
                    hash_token(TOKEN_ALGORITHM, new_salt(), self.admin_password),
                )
                self.logger.info("Admin user '%s' updated", self.admin_username)

    async def backfill_worker_task(self):
        """Copy unihashes fetched from upstream into the local database.

        Consumes (method, taskhash) items from backfill_queue until a
        None sentinel arrives (see stop()).
        """
        async with await create_async_client(
            self.upstream
        ) as client, self.db_engine.connect(self.logger) as db:
            while True:
                item = await self.backfill_queue.get()
                if item is None:
                    self.backfill_queue.task_done()
                    break

                method, taskhash = item
                d = await client.get_taskhash(method, taskhash)
                if d is not None:
                    await db.insert_unihash(d["method"], d["taskhash"], d["unihash"])
                self.backfill_queue.task_done()

    def start(self):
        """Start RPC tasks, the backfill worker (if any), and init the DB."""
        tasks = super().start()
        if self.upstream:
            self.backfill_queue = asyncio.Queue()
            tasks += [self.backfill_worker_task()]

        self.loop.run_until_complete(self.db_engine.create())

        if self.admin_username:
            self.loop.run_until_complete(self.create_admin_user())

        return tasks

    async def stop(self):
        # Wake the backfill worker with a sentinel so it exits cleanly
        if self.backfill_queue is not None:
            await self.backfill_queue.put(None)
        await super().stop()
# ---- sources/poky/bitbake/lib/hashserv/sqlalchemy.py (new file, 598 lines) ----
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2023 Garmin Ltd.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from . import User
|
||||
|
||||
from sqlalchemy.ext.asyncio import create_async_engine
|
||||
from sqlalchemy.pool import NullPool
|
||||
from sqlalchemy import (
|
||||
MetaData,
|
||||
Column,
|
||||
Table,
|
||||
Text,
|
||||
Integer,
|
||||
UniqueConstraint,
|
||||
DateTime,
|
||||
Index,
|
||||
select,
|
||||
insert,
|
||||
exists,
|
||||
literal,
|
||||
and_,
|
||||
delete,
|
||||
update,
|
||||
func,
|
||||
inspect,
|
||||
)
|
||||
import sqlalchemy.engine
|
||||
from sqlalchemy.orm import declarative_base
|
||||
from sqlalchemy.exc import IntegrityError
|
||||
from sqlalchemy.dialects.postgresql import insert as postgres_insert
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
|
||||
class UnihashesV3(Base):
    """Current (method, taskhash) -> unihash mapping table.

    Rows whose gc_mark differs from the active "gc-mark" config value
    are candidates for removal by Database.gc_sweep().
    """

    __tablename__ = "unihashes_v3"
    id = Column(Integer, primary_key=True, autoincrement=True)
    method = Column(Text, nullable=False)
    taskhash = Column(Text, nullable=False)
    unihash = Column(Text, nullable=False)
    gc_mark = Column(Text, nullable=False)

    __table_args__ = (
        UniqueConstraint("method", "taskhash"),
        Index("taskhash_lookup_v4", "method", "taskhash"),
        Index("unihash_lookup_v1", "unihash"),
    )
class OuthashesV2(Base):
    """Reported output hashes with optional build provenance metadata."""

    __tablename__ = "outhashes_v2"
    id = Column(Integer, primary_key=True, autoincrement=True)
    method = Column(Text, nullable=False)
    taskhash = Column(Text, nullable=False)
    outhash = Column(Text, nullable=False)
    created = Column(DateTime)
    # Optional provenance fields supplied by the reporting client
    owner = Column(Text)
    PN = Column(Text)
    PV = Column(Text)
    PR = Column(Text)
    task = Column(Text)
    outhash_siginfo = Column(Text)

    __table_args__ = (
        UniqueConstraint("method", "taskhash", "outhash"),
        Index("outhash_lookup_v3", "method", "outhash"),
    )
class Users(Base):
    """Registered users.

    token stores a salted hash (never the plaintext); permissions is a
    space-separated list (see set_user_perms/map_user).
    """

    __tablename__ = "users"
    id = Column(Integer, primary_key=True, autoincrement=True)
    username = Column(Text, nullable=False)
    token = Column(Text, nullable=False)
    permissions = Column(Text)

    __table_args__ = (UniqueConstraint("username"),)
class Config(Base):
    """Simple name/value store for server state (e.g. the active GC mark)."""

    __tablename__ = "config"
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(Text, nullable=False)
    value = Column(Text)
    __table_args__ = (
        UniqueConstraint("name"),
        Index("config_lookup", "name"),
    )
#
# Old table versions
#
DeprecatedBase = declarative_base()


class UnihashesV2(DeprecatedBase):
    """Legacy pre-GC unihash table; migrated into UnihashesV3 on startup."""

    __tablename__ = "unihashes_v2"
    id = Column(Integer, primary_key=True, autoincrement=True)
    method = Column(Text, nullable=False)
    taskhash = Column(Text, nullable=False)
    unihash = Column(Text, nullable=False)

    __table_args__ = (
        UniqueConstraint("method", "taskhash"),
        Index("taskhash_lookup_v3", "method", "taskhash"),
    )
class DatabaseEngine(object):
    """SQLAlchemy-backed engine factory for the hash equivalence server."""

    def __init__(self, url, username=None, password=None):
        """Parse the database *url*, optionally overriding its credentials."""
        self.logger = logging.getLogger("hashserv.sqlalchemy")
        self.url = sqlalchemy.engine.make_url(url)

        if username is not None:
            self.url = self.url.set(username=username)

        if password is not None:
            self.url = self.url.set(password=password)

    async def create(self):
        """Create the async engine, build tables, and run migrations."""

        def check_table_exists(conn, name):
            return inspect(conn).has_table(name)

        self.logger.info("Using database %s", self.url)
        if self.url.drivername == 'postgresql+psycopg':
            # Psycopg 3 (psycopg) driver can handle async connection pooling
            self.engine = create_async_engine(self.url, max_overflow=-1)
        else:
            self.engine = create_async_engine(self.url, poolclass=NullPool)

        async with self.engine.begin() as conn:
            # Create tables
            self.logger.info("Creating tables...")
            await conn.run_sync(Base.metadata.create_all)

            # One-shot migration: copy legacy v2 rows into v3 (with an
            # empty GC mark), then drop the old table
            if await conn.run_sync(check_table_exists, UnihashesV2.__tablename__):
                self.logger.info("Upgrading Unihashes V2 -> V3...")
                statement = insert(UnihashesV3).from_select(
                    ["id", "method", "unihash", "taskhash", "gc_mark"],
                    select(
                        UnihashesV2.id,
                        UnihashesV2.method,
                        UnihashesV2.unihash,
                        UnihashesV2.taskhash,
                        literal("").label("gc_mark"),
                    ),
                )
                self.logger.debug("%s", statement)
                await conn.execute(statement)

                await conn.run_sync(Base.metadata.drop_all, [UnihashesV2.__table__])
                self.logger.info("Upgrade complete")

    def connect(self, logger):
        """Return a new Database wrapper bound to this engine."""
        return Database(self.engine, logger)
def map_row(row):
    """Convert a SQLAlchemy result row to a plain dict (None passes through)."""
    return None if row is None else dict(**row._mapping)
def map_user(row):
    """Convert a user row to a User namedtuple (None passes through)."""
    if row is None:
        return None
    # Permissions are stored space-separated; expose them as a set
    perms = set(row.permissions.split())
    return User(username=row.username, permissions=perms)
def _make_condition_statement(table, condition):
    """Translate a condition dict into SQLAlchemy equality clauses.

    Only keys that match *table* columns (with non-None values) are
    used; unknown keys are silently ignored. Returns a list suitable
    for .where(*clauses); an empty list means "no usable condition".
    """
    where = {}
    for c in table.__table__.columns:
        if c.key in condition and condition[c.key] is not None:
            where[c] = condition[c.key]

    return [(k == v) for k, v in where.items()]
class Database(object):
    """Async-context database wrapper implementing the hashserv storage API."""

    def __init__(self, engine, logger):
        self.engine = engine
        # Live AsyncConnection while inside the async context, else None
        self.db = None
        self.logger = logger

    async def __aenter__(self):
        self.db = await self.engine.connect()
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        await self.close()

    async def close(self):
        await self.db.close()
        self.db = None

    async def _execute(self, statement):
        # Central choke point so every statement gets debug-logged
        self.logger.debug("%s", statement)
        return await self.db.execute(statement)

    async def _set_config(self, name, value):
        """Upsert the config row *name* = *value* (update first, insert on miss)."""
        while True:
            result = await self._execute(
                update(Config).where(Config.name == name).values(value=value)
            )

            if result.rowcount == 0:
                self.logger.debug("Config '%s' not found. Adding it", name)
                try:
                    await self._execute(insert(Config).values(name=name, value=value))
                except IntegrityError:
                    # Race. Try again
                    continue

            break

    def _get_config_subquery(self, name, default=None):
        """Scalar subquery for config value *name*, optionally with a default."""
        if default is not None:
            return func.coalesce(
                select(Config.value).where(Config.name == name).scalar_subquery(),
                default,
            )
        return select(Config.value).where(Config.name == name).scalar_subquery()

    async def _get_config(self, name):
        """Read config value *name*, or None when absent."""
        result = await self._execute(select(Config.value).where(Config.name == name))
        row = result.first()
        if row is None:
            return None
        return row.value

    async def get_unihash_by_taskhash_full(self, method, taskhash):
        """Oldest outhash row (plus its unihash) for (method, taskhash)."""
        async with self.db.begin():
            result = await self._execute(
                select(
                    OuthashesV2,
                    UnihashesV3.unihash.label("unihash"),
                )
                .join(
                    UnihashesV3,
                    and_(
                        UnihashesV3.method == OuthashesV2.method,
                        UnihashesV3.taskhash == OuthashesV2.taskhash,
                    ),
                )
                .where(
                    OuthashesV2.method == method,
                    OuthashesV2.taskhash == taskhash,
                )
                .order_by(
                    OuthashesV2.created.asc(),
                )
                .limit(1)
            )
            return map_row(result.first())

    async def get_unihash_by_outhash(self, method, outhash):
        """Oldest outhash row (plus its unihash) for (method, outhash)."""
        async with self.db.begin():
            result = await self._execute(
                select(OuthashesV2, UnihashesV3.unihash.label("unihash"))
                .join(
                    UnihashesV3,
                    and_(
                        UnihashesV3.method == OuthashesV2.method,
                        UnihashesV3.taskhash == OuthashesV2.taskhash,
                    ),
                )
                .where(
                    OuthashesV2.method == method,
                    OuthashesV2.outhash == outhash,
                )
                .order_by(
                    OuthashesV2.created.asc(),
                )
                .limit(1)
            )
            return map_row(result.first())

    async def unihash_exists(self, unihash):
        """True when any row maps to *unihash*."""
        async with self.db.begin():
            result = await self._execute(
                select(UnihashesV3).where(UnihashesV3.unihash == unihash).limit(1)
            )

            return result.first() is not None

    async def get_outhash(self, method, outhash):
        """Oldest raw outhash row for (method, outhash), or None."""
        async with self.db.begin():
            result = await self._execute(
                select(OuthashesV2)
                .where(
                    OuthashesV2.method == method,
                    OuthashesV2.outhash == outhash,
                )
                .order_by(
                    OuthashesV2.created.asc(),
                )
                .limit(1)
            )
            return map_row(result.first())

    async def get_equivalent_for_outhash(self, method, outhash, taskhash):
        """Oldest entry sharing *outhash* but with a different taskhash."""
        async with self.db.begin():
            result = await self._execute(
                select(
                    OuthashesV2.taskhash.label("taskhash"),
                    UnihashesV3.unihash.label("unihash"),
                )
                .join(
                    UnihashesV3,
                    and_(
                        UnihashesV3.method == OuthashesV2.method,
                        UnihashesV3.taskhash == OuthashesV2.taskhash,
                    ),
                )
                .where(
                    OuthashesV2.method == method,
                    OuthashesV2.outhash == outhash,
                    OuthashesV2.taskhash != taskhash,
                )
                .order_by(
                    OuthashesV2.created.asc(),
                )
                .limit(1)
            )
            return map_row(result.first())

    async def get_equivalent(self, method, taskhash):
        """Direct (method, taskhash) -> unihash lookup, or None."""
        async with self.db.begin():
            result = await self._execute(
                select(
                    UnihashesV3.unihash,
                    UnihashesV3.method,
                    UnihashesV3.taskhash,
                ).where(
                    UnihashesV3.method == method,
                    UnihashesV3.taskhash == taskhash,
                )
            )
            return map_row(result.first())

    async def remove(self, condition):
        """Delete rows matching *condition* from both hash tables; return count."""

        async def do_remove(table):
            where = _make_condition_statement(table, condition)
            if where:
                async with self.db.begin():
                    result = await self._execute(delete(table).where(*where))
                    return result.rowcount

            return 0

        count = 0
        count += await do_remove(UnihashesV3)
        count += await do_remove(OuthashesV2)

        return count

    async def get_current_gc_mark(self):
        """The active GC mark, or None when no mark is set."""
        async with self.db.begin():
            return await self._get_config("gc-mark")

    async def gc_status(self):
        """Return (kept rows, removable rows, current mark) for the GC mark."""
        async with self.db.begin():
            gc_mark_subquery = self._get_config_subquery("gc-mark", "")

            result = await self._execute(
                select(func.count())
                .select_from(UnihashesV3)
                .where(UnihashesV3.gc_mark == gc_mark_subquery)
            )
            keep_rows = result.scalar()

            result = await self._execute(
                select(func.count())
                .select_from(UnihashesV3)
                .where(UnihashesV3.gc_mark != gc_mark_subquery)
            )
            remove_rows = result.scalar()

            return (keep_rows, remove_rows, await self._get_config("gc-mark"))

    async def gc_mark(self, mark, condition):
        """Set *mark* as active and stamp matching unihash rows with it."""
        async with self.db.begin():
            await self._set_config("gc-mark", mark)

            where = _make_condition_statement(UnihashesV3, condition)
            if not where:
                return 0

            result = await self._execute(
                update(UnihashesV3)
                .values(gc_mark=self._get_config_subquery("gc-mark", ""))
                .where(*where)
            )
            return result.rowcount

    async def gc_sweep(self):
        """Delete unihash rows not stamped with the active mark; clear the mark."""
        async with self.db.begin():
            result = await self._execute(
                delete(UnihashesV3).where(
                    # A sneaky conditional that provides some errant use
                    # protection: If the config mark is NULL, this will not
                    # match any rows because No default is specified in the
                    # select statement
                    UnihashesV3.gc_mark
                    != self._get_config_subquery("gc-mark")
                )
            )
            await self._set_config("gc-mark", None)

            return result.rowcount

    async def clean_unused(self, oldest):
        """Delete outhash rows older than *oldest* with no unihash reference."""
        async with self.db.begin():
            result = await self._execute(
                delete(OuthashesV2).where(
                    OuthashesV2.created < oldest,
                    ~(
                        select(UnihashesV3.id)
                        .where(
                            UnihashesV3.method == OuthashesV2.method,
                            UnihashesV3.taskhash == OuthashesV2.taskhash,
                        )
                        .limit(1)
                        .exists()
                    ),
                )
            )
            return result.rowcount

    async def insert_unihash(self, method, taskhash, unihash):
        """Insert a unihash mapping; False on duplicate (method, taskhash)."""
        # Postgres specific ignore on insert duplicate
        if self.engine.name == "postgresql":
            statement = (
                postgres_insert(UnihashesV3)
                .values(
                    method=method,
                    taskhash=taskhash,
                    unihash=unihash,
                    gc_mark=self._get_config_subquery("gc-mark", ""),
                )
                .on_conflict_do_nothing(index_elements=("method", "taskhash"))
            )
        else:
            statement = insert(UnihashesV3).values(
                method=method,
                taskhash=taskhash,
                unihash=unihash,
                gc_mark=self._get_config_subquery("gc-mark", ""),
            )

        try:
            async with self.db.begin():
                result = await self._execute(statement)
                return result.rowcount != 0
        except IntegrityError:
            self.logger.debug(
                "%s, %s, %s already in unihash database", method, taskhash, unihash
            )
            return False

    async def insert_outhash(self, data):
        """Insert an outhash record (unknown keys dropped); False on duplicate."""
        outhash_columns = set(c.key for c in OuthashesV2.__table__.columns)

        data = {k: v for k, v in data.items() if k in outhash_columns}

        # Accept ISO-format strings from remote clients
        if "created" in data and not isinstance(data["created"], datetime):
            data["created"] = datetime.fromisoformat(data["created"])

        # Postgres specific ignore on insert duplicate
        if self.engine.name == "postgresql":
            statement = (
                postgres_insert(OuthashesV2)
                .values(**data)
                .on_conflict_do_nothing(
                    index_elements=("method", "taskhash", "outhash")
                )
            )
        else:
            statement = insert(OuthashesV2).values(**data)

        try:
            async with self.db.begin():
                result = await self._execute(statement)
                return result.rowcount != 0
        except IntegrityError:
            self.logger.debug(
                "%s, %s already in outhash database", data["method"], data["outhash"]
            )
            return False

    async def _get_user(self, username):
        """Raw user row (username, permissions, token), or None."""
        async with self.db.begin():
            result = await self._execute(
                select(
                    Users.username,
                    Users.permissions,
                    Users.token,
                ).where(
                    Users.username == username,
                )
            )
            return result.first()

    async def lookup_user_token(self, username):
        """(User, stored token hash) pair, or (None, None) when unknown."""
        row = await self._get_user(username)
        if not row:
            return None, None
        return map_user(row), row.token

    async def lookup_user(self, username):
        """User namedtuple for *username*, or None."""
        return map_user(await self._get_user(username))

    async def set_user_token(self, username, token):
        """Store a new token hash; False when the user does not exist."""
        async with self.db.begin():
            result = await self._execute(
                update(Users)
                .where(
                    Users.username == username,
                )
                .values(
                    token=token,
                )
            )
            return result.rowcount != 0

    async def set_user_perms(self, username, permissions):
        """Replace a user's permissions; False when the user does not exist."""
        async with self.db.begin():
            result = await self._execute(
                update(Users)
                .where(Users.username == username)
                .values(permissions=" ".join(permissions))
            )
            return result.rowcount != 0

    async def get_all_users(self):
        """All users as User namedtuples."""
        async with self.db.begin():
            result = await self._execute(
                select(
                    Users.username,
                    Users.permissions,
                )
            )
            return [map_user(row) for row in result]

    async def new_user(self, username, permissions, token):
        """Create a user; False when the username is already taken."""
        try:
            async with self.db.begin():
                await self._execute(
                    insert(Users).values(
                        username=username,
                        permissions=" ".join(permissions),
                        token=token,
                    )
                )
                return True
        except IntegrityError as e:
            self.logger.debug("Cannot create new user %s: %s", username, e)
            return False

    async def delete_user(self, username):
        """Delete a user; False when the user does not exist."""
        async with self.db.begin():
            result = await self._execute(
                delete(Users).where(Users.username == username)
            )
            return result.rowcount != 0

    async def get_usage(self):
        """Per-table row counts, keyed by table name."""
        usage = {}
        async with self.db.begin() as session:
            for name, table in Base.metadata.tables.items():
                result = await self._execute(
                    statement=select(func.count()).select_from(table)
                )
                usage[name] = {
                    "rows": result.scalar(),
                }

        return usage

    async def get_query_columns(self):
        """Names of all text columns usable in query/remove conditions."""
        columns = set()
        for table in (UnihashesV3, OuthashesV2):
            for c in table.__table__.columns:
                if not isinstance(c.type, Text):
                    continue
                columns.add(c.key)

        return list(columns)
# ---- sources/poky/bitbake/lib/hashserv/sqlite.py (new file, 562 lines) ----
|
||||
#! /usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2023 Garmin Ltd.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
import sqlite3
|
||||
import logging
|
||||
from contextlib import closing
|
||||
from . import User
|
||||
|
||||
logger = logging.getLogger("hashserv.sqlite")
|
||||
|
||||
# Column definitions as (name, SQL type, flags) triples; every column whose
# flags contain "UNIQUE" becomes part of the table's composite UNIQUE
# constraint (see _make_table below).
UNIHASH_TABLE_DEFINITION = (
    ("method", "TEXT NOT NULL", "UNIQUE"),
    ("taskhash", "TEXT NOT NULL", "UNIQUE"),
    ("unihash", "TEXT NOT NULL", ""),
    # Garbage-collection generation mark; rows whose mark differs from the
    # current "gc-mark" config value are deleted by Database.gc_sweep().
    ("gc_mark", "TEXT NOT NULL", ""),
)

# Plain column-name tuples derived from the definitions above.
UNIHASH_TABLE_COLUMNS = tuple(name for name, _, _ in UNIHASH_TABLE_DEFINITION)

OUTHASH_TABLE_DEFINITION = (
    ("method", "TEXT NOT NULL", "UNIQUE"),
    ("taskhash", "TEXT NOT NULL", "UNIQUE"),
    ("outhash", "TEXT NOT NULL", "UNIQUE"),
    ("created", "DATETIME", ""),
    # Optional fields
    ("owner", "TEXT", ""),
    ("PN", "TEXT", ""),
    ("PV", "TEXT", ""),
    ("PR", "TEXT", ""),
    ("task", "TEXT", ""),
    ("outhash_siginfo", "TEXT", ""),
)

OUTHASH_TABLE_COLUMNS = tuple(name for name, _, _ in OUTHASH_TABLE_DEFINITION)

USERS_TABLE_DEFINITION = (
    ("username", "TEXT NOT NULL", "UNIQUE"),
    ("token", "TEXT NOT NULL", ""),
    ("permissions", "TEXT NOT NULL", ""),
)

USERS_TABLE_COLUMNS = tuple(name for name, _, _ in USERS_TABLE_DEFINITION)


# Simple name/value store used for server-side state such as the current
# garbage-collection mark ("gc-mark").
CONFIG_TABLE_DEFINITION = (
    ("name", "TEXT NOT NULL", "UNIQUE"),
    ("value", "TEXT", ""),
)

CONFIG_TABLE_COLUMNS = tuple(name for name, _, _ in CONFIG_TABLE_DEFINITION)
|
||||
|
||||
|
||||
def _make_table(cursor, name, definition):
    """Create table *name* if it does not already exist.

    *definition* is a tuple of (column_name, sql_type, flags) triples; every
    column whose flags contain "UNIQUE" becomes part of a single composite
    UNIQUE constraint. A surrogate `id` primary key is always added.
    """
    # Fix: the original generator expressions reused `name` as the loop
    # variable, shadowing the table-name parameter (harmless only because
    # genexps have their own scope, but needlessly confusing).
    fields = " ".join("%s %s," % (col, typ) for col, typ, _ in definition)
    unique = ", ".join(col for col, _, flags in definition if "UNIQUE" in flags)
    cursor.execute(
        """
        CREATE TABLE IF NOT EXISTS {name} (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            {fields}
            UNIQUE({unique})
            )
        """.format(
            name=name,
            fields=fields,
            unique=unique,
        )
    )
|
||||
|
||||
|
||||
def map_user(row):
    """Convert a users-table row (or None) into a User namedtuple (or None)."""
    if row is None:
        return None
    # Permissions are stored as a single space-separated string
    perms = set(row["permissions"].split())
    return User(username=row["username"], permissions=perms)
|
||||
|
||||
|
||||
def _make_condition_statement(columns, condition):
    """Restrict *condition* to known *columns*.

    Returns (params, clause): a dict of bind parameters and the matching
    "col=:col AND ..." WHERE-clause text.
    """
    where = {c: condition[c] for c in columns if condition.get(c) is not None}
    clause = " AND ".join("%s=:%s" % (k, k) for k in where)
    return where, clause
|
||||
|
||||
|
||||
def _get_sqlite_version(cursor):
    """Return the SQLite library version as a tuple, e.g. (3, 33, 0).

    Non-numeric components (if any) are kept as strings.
    """
    cursor.execute("SELECT sqlite_version()")
    parts = cursor.fetchone()[0].split(".")

    version = []
    for part in parts:
        try:
            version.append(int(part))
        except ValueError:
            version.append(part)

    return tuple(version)
|
||||
|
||||
|
||||
def _schema_table_name(version):
    """Return the schema table name ("sqlite_schema" since SQLite 3.33)."""
    return "sqlite_schema" if version >= (3, 33) else "sqlite_master"
|
||||
|
||||
|
||||
class DatabaseEngine(object):
    """Creates/migrates the SQLite schema and hands out per-client
    Database wrappers via connect()."""

    def __init__(self, dbname, sync):
        self.dbname = dbname
        self.logger = logger
        # sync=False relaxes PRAGMA synchronous to OFF (faster writes,
        # less durable on power loss)
        self.sync = sync

    async def create(self):
        """Create tables and indexes, and migrate legacy data.

        Declared async to match other engine backends; all work here is
        synchronous sqlite3 calls.
        """
        db = sqlite3.connect(self.dbname)
        db.row_factory = sqlite3.Row

        with closing(db.cursor()) as cursor:
            _make_table(cursor, "unihashes_v3", UNIHASH_TABLE_DEFINITION)
            _make_table(cursor, "outhashes_v2", OUTHASH_TABLE_DEFINITION)
            _make_table(cursor, "users", USERS_TABLE_DEFINITION)
            _make_table(cursor, "config", CONFIG_TABLE_DEFINITION)

            cursor.execute("PRAGMA journal_mode = WAL")
            cursor.execute(
                "PRAGMA synchronous = %s" % ("NORMAL" if self.sync else "OFF")
            )

            # Drop old indexes
            cursor.execute("DROP INDEX IF EXISTS taskhash_lookup")
            cursor.execute("DROP INDEX IF EXISTS outhash_lookup")
            cursor.execute("DROP INDEX IF EXISTS taskhash_lookup_v2")
            cursor.execute("DROP INDEX IF EXISTS outhash_lookup_v2")
            cursor.execute("DROP INDEX IF EXISTS taskhash_lookup_v3")

            # TODO: Upgrade from tasks_v2?
            cursor.execute("DROP TABLE IF EXISTS tasks_v2")

            # Create new indexes
            cursor.execute(
                "CREATE INDEX IF NOT EXISTS taskhash_lookup_v4 ON unihashes_v3 (method, taskhash)"
            )
            cursor.execute(
                "CREATE INDEX IF NOT EXISTS unihash_lookup_v1 ON unihashes_v3 (unihash)"
            )
            cursor.execute(
                "CREATE INDEX IF NOT EXISTS outhash_lookup_v3 ON outhashes_v2 (method, outhash)"
            )
            cursor.execute("CREATE INDEX IF NOT EXISTS config_lookup ON config (name)")

            sqlite_version = _get_sqlite_version(cursor)

            # Migrate rows from the legacy unihashes_v2 table if present;
            # the new gc_mark column is initialised to ''
            cursor.execute(
                f"""
                SELECT name FROM {_schema_table_name(sqlite_version)} WHERE type = 'table' AND name = 'unihashes_v2'
                """
            )
            if cursor.fetchone():
                self.logger.info("Upgrading Unihashes V2 -> V3...")
                cursor.execute(
                    """
                    INSERT INTO unihashes_v3 (id, method, unihash, taskhash, gc_mark)
                    SELECT id, method, unihash, taskhash, '' FROM unihashes_v2
                    """
                )
                cursor.execute("DROP TABLE unihashes_v2")
                db.commit()
                self.logger.info("Upgrade complete")

    def connect(self, logger):
        """Return a new Database wrapper bound to *logger*."""
        return Database(logger, self.dbname, self.sync)
|
||||
|
||||
|
||||
class Database(object):
    """A single sqlite3 connection implementing the hashserv database API.

    Public methods are coroutines to match the server's async interface,
    although sqlite3 itself is synchronous.
    """

    def __init__(self, logger, dbname, sync):
        self.dbname = dbname
        self.logger = logger

        self.db = sqlite3.connect(self.dbname)
        # Rows behave like mappings, allowing row["column"] access
        self.db.row_factory = sqlite3.Row

        with closing(self.db.cursor()) as cursor:
            cursor.execute("PRAGMA journal_mode = WAL")
            cursor.execute(
                "PRAGMA synchronous = %s" % ("NORMAL" if sync else "OFF")
            )

            # Cached so queries can pick the correct schema table name
            self.sqlite_version = _get_sqlite_version(cursor)
|
||||
|
||||
    async def __aenter__(self):
        """Support 'async with': returns self."""
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        # Always close the underlying connection, even on error
        await self.close()
|
||||
|
||||
    async def _set_config(self, cursor, name, value):
        """Insert or replace the config row *name* with *value*.

        The nested SELECT preserves an existing row's id when replacing, so
        the surrogate key stays stable. Does not commit.
        """
        cursor.execute(
            """
            INSERT OR REPLACE INTO config (id, name, value) VALUES
            ((SELECT id FROM config WHERE name=:name), :name, :value)
            """,
            {
                "name": name,
                "value": value,
            },
        )
|
||||
|
||||
async def _get_config(self, cursor, name):
|
||||
cursor.execute(
|
||||
"SELECT value FROM config WHERE name=:name",
|
||||
{
|
||||
"name": name,
|
||||
},
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
if row is None:
|
||||
return None
|
||||
return row["value"]
|
||||
|
||||
    async def close(self):
        # Close the underlying sqlite3 connection
        self.db.close()
|
||||
|
||||
    async def get_unihash_by_taskhash_full(self, method, taskhash):
        """Return the full outhash row (plus its unified hash) for a taskhash.

        Joins outhashes against unihashes and picks the oldest matching
        entry. Returns a sqlite3.Row or None.
        """
        with closing(self.db.cursor()) as cursor:
            cursor.execute(
                """
                SELECT *, unihashes_v3.unihash AS unihash FROM outhashes_v2
                INNER JOIN unihashes_v3 ON unihashes_v3.method=outhashes_v2.method AND unihashes_v3.taskhash=outhashes_v2.taskhash
                WHERE outhashes_v2.method=:method AND outhashes_v2.taskhash=:taskhash
                ORDER BY outhashes_v2.created ASC
                LIMIT 1
                """,
                {
                    "method": method,
                    "taskhash": taskhash,
                },
            )
            return cursor.fetchone()
|
||||
|
||||
    async def get_unihash_by_outhash(self, method, outhash):
        """Return the oldest outhash row (plus its unified hash) matching
        (method, outhash). Returns a sqlite3.Row or None."""
        with closing(self.db.cursor()) as cursor:
            cursor.execute(
                """
                SELECT *, unihashes_v3.unihash AS unihash FROM outhashes_v2
                INNER JOIN unihashes_v3 ON unihashes_v3.method=outhashes_v2.method AND unihashes_v3.taskhash=outhashes_v2.taskhash
                WHERE outhashes_v2.method=:method AND outhashes_v2.outhash=:outhash
                ORDER BY outhashes_v2.created ASC
                LIMIT 1
                """,
                {
                    "method": method,
                    "outhash": outhash,
                },
            )
            return cursor.fetchone()
|
||||
|
||||
async def unihash_exists(self, unihash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"""
|
||||
SELECT * FROM unihashes_v3 WHERE unihash=:unihash
|
||||
LIMIT 1
|
||||
""",
|
||||
{
|
||||
"unihash": unihash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone() is not None
|
||||
|
||||
    async def get_outhash(self, method, outhash):
        """Return the oldest outhash row for (method, outhash), or None."""
        with closing(self.db.cursor()) as cursor:
            cursor.execute(
                """
                SELECT * FROM outhashes_v2
                WHERE outhashes_v2.method=:method AND outhashes_v2.outhash=:outhash
                ORDER BY outhashes_v2.created ASC
                LIMIT 1
                """,
                {
                    "method": method,
                    "outhash": outhash,
                },
            )
            return cursor.fetchone()
|
||||
|
||||
    async def get_equivalent_for_outhash(self, method, outhash, taskhash):
        """Find an equivalent unihash for *outhash* reported by a different task.

        Used after inserting a new outhash: any older entry with the same
        output hash but a different taskhash supplies the canonical unihash.
        Returns a sqlite3.Row with (taskhash, unihash) or None.
        """
        with closing(self.db.cursor()) as cursor:
            cursor.execute(
                """
                SELECT outhashes_v2.taskhash AS taskhash, unihashes_v3.unihash AS unihash FROM outhashes_v2
                INNER JOIN unihashes_v3 ON unihashes_v3.method=outhashes_v2.method AND unihashes_v3.taskhash=outhashes_v2.taskhash
                -- Select any matching output hash except the one we just inserted
                WHERE outhashes_v2.method=:method AND outhashes_v2.outhash=:outhash AND outhashes_v2.taskhash!=:taskhash
                -- Pick the oldest hash
                ORDER BY outhashes_v2.created ASC
                LIMIT 1
                """,
                {
                    "method": method,
                    "outhash": outhash,
                    "taskhash": taskhash,
                },
            )
            return cursor.fetchone()
|
||||
|
||||
async def get_equivalent(self, method, taskhash):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(
|
||||
"SELECT taskhash, method, unihash FROM unihashes_v3 WHERE method=:method AND taskhash=:taskhash",
|
||||
{
|
||||
"method": method,
|
||||
"taskhash": taskhash,
|
||||
},
|
||||
)
|
||||
return cursor.fetchone()
|
||||
|
||||
async def remove(self, condition):
|
||||
def do_remove(columns, table_name, cursor):
|
||||
where, clause = _make_condition_statement(columns, condition)
|
||||
if where:
|
||||
query = f"DELETE FROM {table_name} WHERE {clause}"
|
||||
cursor.execute(query, where)
|
||||
return cursor.rowcount
|
||||
|
||||
return 0
|
||||
|
||||
count = 0
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
count += do_remove(OUTHASH_TABLE_COLUMNS, "outhashes_v2", cursor)
|
||||
count += do_remove(UNIHASH_TABLE_COLUMNS, "unihashes_v3", cursor)
|
||||
self.db.commit()
|
||||
|
||||
return count
|
||||
|
||||
    async def get_current_gc_mark(self):
        """Return the active garbage-collection mark, or None if unset."""
        with closing(self.db.cursor()) as cursor:
            return await self._get_config(cursor, "gc-mark")
|
||||
|
||||
    async def gc_status(self):
        """Return (keep_rows, remove_rows, current_mark).

        keep_rows: unihash rows whose gc_mark matches the current mark;
        remove_rows: rows that gc_sweep() would delete.
        """
        with closing(self.db.cursor()) as cursor:
            cursor.execute(
                """
                SELECT COUNT() FROM unihashes_v3 WHERE
                    gc_mark=COALESCE((SELECT value FROM config WHERE name='gc-mark'), '')
                """
            )
            keep_rows = cursor.fetchone()[0]

            cursor.execute(
                """
                SELECT COUNT() FROM unihashes_v3 WHERE
                    gc_mark!=COALESCE((SELECT value FROM config WHERE name='gc-mark'), '')
                """
            )
            remove_rows = cursor.fetchone()[0]

            current_mark = await self._get_config(cursor, "gc-mark")

            return (keep_rows, remove_rows, current_mark)
|
||||
|
||||
    async def gc_mark(self, mark, condition):
        """Set *mark* as the current GC mark and apply it to matching rows.

        Rows matching *condition* (restricted to unihash table columns)
        receive the new mark. Returns the number of rows updated. Commits.
        """
        with closing(self.db.cursor()) as cursor:
            await self._set_config(cursor, "gc-mark", mark)

            where, clause = _make_condition_statement(UNIHASH_TABLE_COLUMNS, condition)

            new_rows = 0
            if where:
                # clause is built only from known column names, so the
                # f-string interpolation is safe
                cursor.execute(
                    f"""
                    UPDATE unihashes_v3 SET
                        gc_mark=COALESCE((SELECT value FROM config WHERE name='gc-mark'), '')
                    WHERE {clause}
                    """,
                    where,
                )
                new_rows = cursor.rowcount

            self.db.commit()
            return new_rows
|
||||
|
||||
    async def gc_sweep(self):
        """Delete unihash rows whose gc_mark differs from the current mark.

        Clears the mark afterwards so a new GC cycle starts fresh; returns
        the number of rows deleted. Commits.
        """
        with closing(self.db.cursor()) as cursor:
            # NOTE: COALESCE is not used in this query so that if the current
            # mark is NULL, nothing will happen
            cursor.execute(
                """
                DELETE FROM unihashes_v3 WHERE
                    gc_mark!=(SELECT value FROM config WHERE name='gc-mark')
                """
            )
            count = cursor.rowcount
            await self._set_config(cursor, "gc-mark", None)

            self.db.commit()
            return count
|
||||
|
||||
    async def clean_unused(self, oldest):
        """Delete outhash rows created before *oldest* that have no matching
        unihash entry; return the number of rows removed. Commits."""
        with closing(self.db.cursor()) as cursor:
            cursor.execute(
                """
                DELETE FROM outhashes_v2 WHERE created<:oldest AND NOT EXISTS (
                    SELECT unihashes_v3.id FROM unihashes_v3 WHERE unihashes_v3.method=outhashes_v2.method AND unihashes_v3.taskhash=outhashes_v2.taskhash LIMIT 1
                )
                """,
                {
                    "oldest": oldest,
                },
            )
            self.db.commit()
            return cursor.rowcount
|
||||
|
||||
    async def insert_unihash(self, method, taskhash, unihash):
        """Insert a unihash row; return True if a new row was added.

        Uses INSERT OR IGNORE, so an existing (method, taskhash) pair is
        left untouched. Insertion is detected by comparing lastrowid before
        and after the statement: it only changes on a successful insert.
        The gc_mark column is stamped with the current mark (or '').
        """
        with closing(self.db.cursor()) as cursor:
            prevrowid = cursor.lastrowid
            cursor.execute(
                """
                INSERT OR IGNORE INTO unihashes_v3 (method, taskhash, unihash, gc_mark) VALUES
                (
                    :method,
                    :taskhash,
                    :unihash,
                    COALESCE((SELECT value FROM config WHERE name='gc-mark'), '')
                )
                """,
                {
                    "method": method,
                    "taskhash": taskhash,
                    "unihash": unihash,
                },
            )
            self.db.commit()
            return cursor.lastrowid != prevrowid
|
||||
|
||||
    async def insert_outhash(self, data):
        """Insert an outhash row from mapping *data*; True if actually added.

        Unknown keys are dropped first - only OUTHASH_TABLE_COLUMNS survive,
        so the generated SQL never contains caller-controlled column names.
        Uses the same lastrowid comparison as insert_unihash to detect
        whether INSERT OR IGNORE inserted anything. Commits.
        """
        data = {k: v for k, v in data.items() if k in OUTHASH_TABLE_COLUMNS}
        keys = sorted(data.keys())
        query = "INSERT OR IGNORE INTO outhashes_v2 ({fields}) VALUES({values})".format(
            fields=", ".join(keys),
            values=", ".join(":" + k for k in keys),
        )
        with closing(self.db.cursor()) as cursor:
            prevrowid = cursor.lastrowid
            cursor.execute(query, data)
            self.db.commit()
            return cursor.lastrowid != prevrowid
|
||||
|
||||
    def _get_user(self, username):
        """Fetch the raw user row (username, permissions, token), or None.

        Synchronous helper shared by the async lookup methods.
        """
        with closing(self.db.cursor()) as cursor:
            cursor.execute(
                """
                SELECT username, permissions, token FROM users WHERE username=:username
                """,
                {
                    "username": username,
                },
            )
            return cursor.fetchone()
|
||||
|
||||
async def lookup_user_token(self, username):
|
||||
row = self._get_user(username)
|
||||
if row is None:
|
||||
return None, None
|
||||
return map_user(row), row["token"]
|
||||
|
||||
async def lookup_user(self, username):
|
||||
return map_user(self._get_user(username))
|
||||
|
||||
    async def set_user_token(self, username, token):
        """Replace the stored token for *username*; True if a row changed."""
        with closing(self.db.cursor()) as cursor:
            cursor.execute(
                """
                UPDATE users SET token=:token WHERE username=:username
                """,
                {
                    "username": username,
                    "token": token,
                },
            )
            self.db.commit()
            return cursor.rowcount != 0
|
||||
|
||||
    async def set_user_perms(self, username, permissions):
        """Replace *username*'s permissions (stored space-separated);
        True if a row changed."""
        with closing(self.db.cursor()) as cursor:
            cursor.execute(
                """
                UPDATE users SET permissions=:permissions WHERE username=:username
                """,
                {
                    "username": username,
                    "permissions": " ".join(permissions),
                },
            )
            self.db.commit()
            return cursor.rowcount != 0
|
||||
|
||||
async def get_all_users(self):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute("SELECT username, permissions FROM users")
|
||||
return [map_user(r) for r in cursor.fetchall()]
|
||||
|
||||
    async def new_user(self, username, permissions, token):
        """Create a user; return False when the username already exists.

        *permissions* is an iterable of permission strings, stored as a
        single space-separated string. Commits on success.
        """
        with closing(self.db.cursor()) as cursor:
            try:
                cursor.execute(
                    """
                    INSERT INTO users (username, token, permissions) VALUES (:username, :token, :permissions)
                    """,
                    {
                        "username": username,
                        "token": token,
                        "permissions": " ".join(permissions),
                    },
                )
                self.db.commit()
                return True
            except sqlite3.IntegrityError:
                # UNIQUE(username) violation: user already exists
                return False
|
||||
|
||||
    async def delete_user(self, username):
        """Remove *username*; True if a row was actually deleted. Commits."""
        with closing(self.db.cursor()) as cursor:
            cursor.execute(
                """
                DELETE FROM users WHERE username=:username
                """,
                {
                    "username": username,
                },
            )
            self.db.commit()
            return cursor.rowcount != 0
|
||||
|
||||
    async def get_usage(self):
        """Return {table_name: {"rows": count}} for all non-internal tables.

        Table names are interpolated with %s rather than bound parameters
        because SQLite cannot parameterize identifiers; the names come from
        the schema table itself, not from user input.
        """
        usage = {}
        with closing(self.db.cursor()) as cursor:
            cursor.execute(
                f"""
                SELECT name FROM {_schema_table_name(self.sqlite_version)} WHERE type = 'table' AND name NOT LIKE 'sqlite_%'
                """
            )
            for row in cursor.fetchall():
                cursor.execute(
                    """
                    SELECT COUNT() FROM %s
                    """
                    % row["name"],
                )
                usage[row["name"]] = {
                    "rows": cursor.fetchone()[0],
                }
        return usage
|
||||
|
||||
async def get_query_columns(self):
|
||||
columns = set()
|
||||
for name, typ, _ in UNIHASH_TABLE_DEFINITION + OUTHASH_TABLE_DEFINITION:
|
||||
if typ.startswith("TEXT"):
|
||||
columns.add(name)
|
||||
return list(columns)
|
||||
1588
sources/poky/bitbake/lib/hashserv/tests.py
Normal file
1588
sources/poky/bitbake/lib/hashserv/tests.py
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user