From af01caaafae05c7547a6a21c2067769f51dbbe10 Mon Sep 17 00:00:00 2001 From: Lubos Mjachky Date: Fri, 19 May 2023 12:30:33 +0200 Subject: [PATCH] Add pull-through caching closes #507 --- CHANGES/507.feature | 3 + docs/tech-preview.rst | 3 +- docs/workflows/host.rst | 42 +++ pulp_container/app/cache.py | 17 +- pulp_container/app/content.py | 10 +- pulp_container/app/downloaders.py | 23 +- .../0037_create_pull_through_cache_models.py | 56 ++++ pulp_container/app/models.py | 70 ++++ pulp_container/app/registry.py | 304 ++++++++++++++++-- pulp_container/app/registry_api.py | 192 +++++++++-- pulp_container/app/serializers.py | 89 ++++- pulp_container/app/tasks/__init__.py | 1 + .../app/tasks/download_image_data.py | 74 +++++ pulp_container/app/tasks/sync_stages.py | 95 +----- pulp_container/app/utils.py | 90 +++++- pulp_container/app/viewsets.py | 177 ++++++++++ .../functional/api/test_pull_through_cache.py | 144 +++++++++ pulp_container/tests/functional/conftest.py | 14 + requirements.txt | 2 +- 19 files changed, 1263 insertions(+), 143 deletions(-) create mode 100644 CHANGES/507.feature create mode 100644 pulp_container/app/migrations/0037_create_pull_through_cache_models.py create mode 100644 pulp_container/app/tasks/download_image_data.py create mode 100644 pulp_container/tests/functional/api/test_pull_through_cache.py diff --git a/CHANGES/507.feature b/CHANGES/507.feature new file mode 100644 index 000000000..037ccbb01 --- /dev/null +++ b/CHANGES/507.feature @@ -0,0 +1,3 @@ +Added support for pull-through caching. Users can now configure a dedicated distribution and remote +linked to an external registry without the need to create and mirror repositories in advance. Pulp +downloads missing content automatically if requested and acts as a caching proxy. diff --git a/docs/tech-preview.rst b/docs/tech-preview.rst index f25f86048..44abca580 100644 --- a/docs/tech-preview.rst +++ b/docs/tech-preview.rst @@ -3,5 +3,6 @@ Tech previews The following features are currently being released as part of a tech preview: -* Build an OCI image from a Containerfile +* Building an OCI image from a Containerfile. * Support for hosting Flatpak content in OCI format. +* Pull-through caching (i.e., proxy cache) for upstream registries. diff --git a/docs/workflows/host.rst b/docs/workflows/host.rst index 867a874ed..4dbdbc358 100644 --- a/docs/workflows/host.rst +++ b/docs/workflows/host.rst @@ -117,3 +117,45 @@ Docker Output:: In general, the automatic conversion cannot be performed when the content is not available in the storage. Therefore, it may be successful only if the content was previously synced with the ``immediate`` policy. + + +Pull-Through Caching +-------------------- + +.. warning:: + This feature is provided as a tech preview and could change in backwards incompatible + ways in the future. + +The Pull-Through Caching feature offers an alternative way to host content by leveraging a **remote +registry** as the source of truth. This eliminates the need for in-advance repository +synchronization because Pulp acts as a **caching proxy** and stores images, after they have been +pulled by an end client, in a local repository. 
+
+Configuring the caching::
+
+    # initialize a pull-through remote (the concept of upstream-name is not applicable here)
+    REMOTE_HREF=$(http ${BASE_ADDR}/pulp/api/v3/remotes/container/pull-through/ name=docker-cache url=https://registry-1.docker.io | jq -r ".pulp_href")
+
+    # create a pull-through distribution linked to the initialized remote
+    http ${BASE_ADDR}/pulp/api/v3/distributions/container/pull-through/ remote=${REMOTE_HREF} name=docker-cache base_path=docker-cache
+
+Pulling content::
+
+    podman pull localhost:24817/docker-cache/library/busybox
+
+In the example above, the image "busybox" is pulled from *DockerHub* through the "docker-cache"
+distribution, which acts as a transparent caching layer.
+
+By incorporating the Pull-Through Caching feature into standard workflows, users **do not need**
+to pre-configure a new repository and sync it to facilitate the retrieval of the actual content.
+This speeds up the whole process of shipping containers, from the early management stages to
+distribution. Similarly to on-demand syncing, the feature also **reduces external network
+dependencies** and ensures a more reliable container deployment system in production environments.
+
+.. note::
+    During the pull-through operation, Pulp creates a local repository that maintains a single
+    version for pulled images. For instance, when pulling an image like "debian:10", a local
+    repository named "debian" with the tag "10" is created. Subsequent pulls, such as "debian:11",
+    generate a new repository version that incorporates both the "10" and "11" tags, automatically
+    removing the previous version. Repositories and their content remain manageable through
+    standard Pulp API endpoints. The repositories are read-only and public by default.
diff --git a/pulp_container/app/cache.py b/pulp_container/app/cache.py
index 16ee76c44..38ed477e4 100644
--- a/pulp_container/app/cache.py
+++ b/pulp_container/app/cache.py
@@ -1,8 +1,9 @@
 from django.core.exceptions import ObjectDoesNotExist
+from django.db.models import F, Value
 
 from pulpcore.plugin.cache import CacheKeys, AsyncContentCache, SyncContentCache
 
-from pulp_container.app.models import ContainerDistribution
+from pulp_container.app.models import ContainerDistribution, ContainerPullThroughDistribution
 from pulp_container.app.exceptions import RepositoryNotFound
 
 ACCEPT_HEADER_KEY = "accept_header"
@@ -69,11 +70,17 @@ def find_base_path_cached(request, cached):
         return path
     else:
         try:
-            distro = ContainerDistribution.objects.select_related(
-                "repository", "repository_version"
-            ).get(base_path=path)
+            distro = ContainerDistribution.objects.get(base_path=path)
         except ObjectDoesNotExist:
-            raise RepositoryNotFound(name=path)
+            distro = (
+                ContainerPullThroughDistribution.objects.annotate(path=Value(path))
+                .filter(path__startswith=F("base_path"))
+                .order_by("-base_path")
+                .first()
+            )
+            if not distro:
+                raise RepositoryNotFound(name=path)
+
         return distro.base_path
 
diff --git a/pulp_container/app/content.py b/pulp_container/app/content.py
index fae311e4e..fcdc7786f 100644
--- a/pulp_container/app/content.py
+++ b/pulp_container/app/content.py
@@ -6,9 +6,11 @@
 registry = Registry()
 
 app.add_routes(
-    [web.get(r"/pulp/container/{path:.+}/blobs/sha256:{digest:.+}", registry.get_by_digest)]
-)
-app.add_routes(
-    [web.get(r"/pulp/container/{path:.+}/manifests/sha256:{digest:.+}", registry.get_by_digest)]
+    [
+        web.get(
+            r"/pulp/container/{path:.+}/{content:(blobs|manifests)}/sha256:{digest:.+}",
+            registry.get_by_digest,
+        )
+    ]
 )
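+# tag requests (e.g., "/manifests/latest") carry a tag name instead of a digest and are
+# therefore matched by the separate route below, served by registry.get_tag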
app.add_routes([web.get(r"/pulp/container/{path:.+}/manifests/{tag_name}", registry.get_tag)]) diff --git a/pulp_container/app/downloaders.py b/pulp_container/app/downloaders.py index 4db7ae44d..bcde18e5e 100644 --- a/pulp_container/app/downloaders.py +++ b/pulp_container/app/downloaders.py @@ -5,6 +5,7 @@ import re from aiohttp.client_exceptions import ClientResponseError +from collections import namedtuple from logging import getLogger from multidict import MultiDict from urllib import parse @@ -15,6 +16,11 @@ log = getLogger(__name__) +HeadResult = namedtuple( + "HeadResult", + ["status_code", "path", "artifact_attributes", "url", "headers"], +) + class RegistryAuthHttpDownloader(HttpDownloader): """ @@ -31,6 +37,7 @@ def __init__(self, *args, **kwargs): Initialize the downloader. """ self.remote = kwargs.pop("remote") + super().__init__(*args, **kwargs) async def _run(self, handle_401=True, extra_data=None): @@ -95,7 +102,12 @@ async def _run(self, handle_401=True, extra_data=None): return await self._run(handle_401=False, extra_data=extra_data) else: raise - to_return = await self._handle_response(response) + + if http_method == "head": + to_return = await self._handle_head_response(response) + else: + to_return = await self._handle_response(response) + await response.release() self.response_headers = response.headers @@ -173,6 +185,15 @@ def auth_header(token, basic_auth): return {"Authorization": basic_auth} return {} + async def _handle_head_response(self, response): + return HeadResult( + status_code=response.status, + path=None, + artifact_attributes=None, + url=self.url, + headers=response.headers, + ) + class NoAuthSignatureDownloader(HttpDownloader): """A downloader class suited for signature downloads.""" diff --git a/pulp_container/app/migrations/0037_create_pull_through_cache_models.py b/pulp_container/app/migrations/0037_create_pull_through_cache_models.py new file mode 100644 index 000000000..2ed4ff8e1 --- /dev/null +++ b/pulp_container/app/migrations/0037_create_pull_through_cache_models.py @@ -0,0 +1,56 @@ +# Generated by Django 4.2.8 on 2023-12-12 21:15 + +from django.db import migrations, models +import django.db.models.deletion +import pulpcore.app.models.access_policy + + +class Migration(migrations.Migration): + + dependencies = [ + ('core', '0116_alter_remoteartifact_md5_alter_remoteartifact_sha1_and_more'), + ('container', '0036_containerpushrepository_pending_blobs_manifests'), + ] + + operations = [ + migrations.CreateModel( + name='ContainerPullThroughRemote', + fields=[ + ('remote_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='core.remote')), + ], + options={ + 'permissions': [('manage_roles_containerpullthroughremote', 'Can manage role assignments on pull-through container remote')], + 'default_related_name': '%(app_label)s_%(model_name)s', + }, + bases=('core.remote', pulpcore.app.models.access_policy.AutoAddObjPermsMixin), + ), + migrations.AddField( + model_name='containerrepository', + name='pending_blobs', + field=models.ManyToManyField(to='container.blob'), + ), + migrations.AddField( + model_name='containerrepository', + name='pending_manifests', + field=models.ManyToManyField(to='container.manifest'), + ), + migrations.CreateModel( + name='ContainerPullThroughDistribution', + fields=[ + ('distribution_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, 
to='core.distribution')),
+                ('private', models.BooleanField(default=False, help_text='Restrict pull access to explicitly authorized users. Related distributions inherit this value. Defaults to unrestricted pull access.')),
+                ('description', models.TextField(null=True)),
+                ('namespace', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='container_pull_through_distributions', to='container.containernamespace')),
+            ],
+            options={
+                'permissions': [('manage_roles_containerpullthroughdistribution', 'Can manage role assignments on pull-through cache distribution')],
+                'default_related_name': '%(app_label)s_%(model_name)s',
+            },
+            bases=('core.distribution', pulpcore.app.models.access_policy.AutoAddObjPermsMixin),
+        ),
+        migrations.AddField(
+            model_name='containerdistribution',
+            name='pull_through_distribution',
+            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='distributions', to='container.containerpullthroughdistribution'),
+        ),
+    ]
diff --git a/pulp_container/app/models.py b/pulp_container/app/models.py
index beef5f984..e21ede94c 100644
--- a/pulp_container/app/models.py
+++ b/pulp_container/app/models.py
@@ -413,6 +413,25 @@ class Meta:
         ]
 
 
+class ContainerPullThroughRemote(Remote, AutoAddObjPermsMixin):
+    """
+    A remote for pull-through caching, omitting the requirement for the upstream name.
+
+    This remote is used for instantiating new regular container remotes with the upstream name.
+    Configuring credentials and everything related to container workflows can therefore be done
+    from within a single instance of this remote.
+    """
+
+    class Meta:
+        default_related_name = "%(app_label)s_%(model_name)s"
+        permissions = [
+            (
+                "manage_roles_containerpullthroughremote",
+                "Can manage role assignments on pull-through container remote",
+            ),
+        ]
+
+
 class ManifestSigningService(SigningService):
     """
     Signing service used for creating container signatures.
@@ -486,6 +505,8 @@ class ContainerRepository(
     manifest_signing_service = models.ForeignKey(
         ManifestSigningService, on_delete=models.SET_NULL, null=True
     )
+    pending_blobs = models.ManyToManyField(Blob)
+    pending_manifests = models.ManyToManyField(Manifest)
 
     class Meta:
         default_related_name = "%(app_label)s_%(model_name)s"
@@ -509,6 +530,15 @@ def finalize_new_version(self, new_version):
         """
         remove_duplicates(new_version)
         validate_repo_version(new_version)
+        self.remove_pending_content(new_version)
+
+    def remove_pending_content(self, repository_version):
+        """Remove pending blobs and manifests when committing the content to the repository."""
+        added_content = repository_version.added(
+            base_version=repository_version.base_version
+        ).values_list("pk")
+        self.pending_blobs.remove(*Blob.objects.filter(pk__in=added_content))
+        self.pending_manifests.remove(*Manifest.objects.filter(pk__in=added_content))
 
 
 class ContainerPushRepository(Repository, AutoAddObjPermsMixin):
@@ -565,6 +595,39 @@ def remove_pending_content(self, repository_version):
         self.pending_manifests.remove(*Manifest.objects.filter(pk__in=added_content))
 
 
+class ContainerPullThroughDistribution(Distribution, AutoAddObjPermsMixin):
+    """
+    A distribution for pull-through caching, referencing normal distributions.
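+
+    Distributions created on demand during pull-through operations are linked back to this
+    object via the ``pull_through_distribution`` field on ``ContainerDistribution``.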
+ """ + + TYPE = "pull-through" + + namespace = models.ForeignKey( + ContainerNamespace, + on_delete=models.CASCADE, + related_name="container_pull_through_distributions", + null=True, + ) + private = models.BooleanField( + default=False, + help_text=_( + "Restrict pull access to explicitly authorized users. " + "Related distributions inherit this value. " + "Defaults to unrestricted pull access." + ), + ) + description = models.TextField(null=True) + + class Meta: + default_related_name = "%(app_label)s_%(model_name)s" + permissions = [ + ( + "manage_roles_containerpullthroughdistribution", + "Can manage role assignments on pull-through cache distribution", + ), + ] + + class ContainerDistribution(Distribution, AutoAddObjPermsMixin): """ A container distribution defines how a repository version is distributed by Pulp's webserver. @@ -595,6 +658,13 @@ class ContainerDistribution(Distribution, AutoAddObjPermsMixin): ) description = models.TextField(null=True) + pull_through_distribution = models.ForeignKey( + ContainerPullThroughDistribution, + related_name="distributions", + on_delete=models.CASCADE, + null=True, + ) + def get_repository_version(self): """ Returns the repository version that is supposed to be served by this ContainerDistribution. diff --git a/pulp_container/app/registry.py b/pulp_container/app/registry.py index de9376755..c93723ec2 100644 --- a/pulp_container/app/registry.py +++ b/pulp_container/app/registry.py @@ -1,30 +1,42 @@ +import json import logging import os from asgiref.sync import sync_to_async +from contextlib import suppress +from urllib.parse import urljoin + from aiohttp import web +from aiohttp.client_exceptions import ClientResponseError +from aiohttp.web_exceptions import HTTPTooManyRequests +from django_guid import set_guid +from django_guid.utils import generate_guid from django.conf import settings from django.core.exceptions import ObjectDoesNotExist +from django.db import IntegrityError from multidict import MultiDict from pulpcore.plugin.content import Handler, PathNotResolved -from pulpcore.plugin.models import Content, ContentArtifact +from pulpcore.plugin.models import RemoteArtifact, Content, ContentArtifact from pulpcore.plugin.content import ArtifactResponse +from pulpcore.plugin.tasking import dispatch from pulp_container.app.cache import RegistryContentCache -from pulp_container.app.models import ContainerDistribution, Tag, Blob +from pulp_container.app.models import ContainerDistribution, Tag, Blob, Manifest, BlobManifest from pulp_container.app.schema_convert import Schema2toSchema1ConverterWrapper -from pulp_container.app.utils import get_accepted_media_types -from pulp_container.constants import BLOB_CONTENT_TYPE, EMPTY_BLOB, MEDIA_TYPE +from pulp_container.app.tasks import download_image_data +from pulp_container.app.utils import ( + calculate_digest, + get_accepted_media_types, + determine_media_type, + save_artifact, +) +from pulp_container.constants import BLOB_CONTENT_TYPE, EMPTY_BLOB, MEDIA_TYPE, V2_ACCEPT_HEADERS log = logging.getLogger(__name__) -v2_headers = MultiDict() -v2_headers["Docker-Distribution-API-Version"] = "registry/2.0" - - class Registry(Handler): """ A set of handlers for the Container Registry v2 API. 
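+
+    When requested content is missing locally and the matched distribution belongs to a
+    pull-through distribution, the handlers below fall back to the configured remote.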
@@ -110,14 +122,64 @@ async def get_tag(self, request): repository_version = await sync_to_async(distribution.get_repository_version)() if not repository_version: raise PathNotResolved(tag_name) - accepted_media_types = get_accepted_media_types(request.headers) + distribution = await distribution.acast() try: tag = await Tag.objects.select_related("tagged_manifest").aget( pk__in=await sync_to_async(repository_version.get_content)(), name=tag_name ) except ObjectDoesNotExist: - raise PathNotResolved(tag_name) + if distribution.remote_id and distribution.pull_through_distribution_id: + pull_downloader = await PullThroughDownloader.create( + distribution, repository_version, path, tag_name + ) + raw_manifest, digest, media_type = await pull_downloader.download_manifest( + run_pipeline=True + ) + headers = { + "Content-Type": media_type, + "Docker-Content-Digest": digest, + "Docker-Distribution-API-Version": "registry/2.0", + } + return web.Response(text=raw_manifest, headers=headers) + else: + raise PathNotResolved(tag_name) + + # check if the content is pulled via the pull-through caching distribution; + # if yes, update the respective manifest from the remote when its digest changed + if distribution.remote_id and distribution.pull_through_distribution_id: + remote = await distribution.remote.acast() + relative_url = "/v2/{name}/manifests/{tag}".format( + name=remote.namespaced_upstream_name, tag=tag_name + ) + tag_url = urljoin(remote.url, relative_url) + downloader = remote.get_downloader(url=tag_url) + try: + response = await downloader.run( + extra_data={"headers": V2_ACCEPT_HEADERS, "http_method": "head"} + ) + except ClientResponseError: + # the manifest is not available on the remote anymore + # but the old one is still stored in the database + pass + else: + digest = response.headers.get("docker-content-digest") + if tag.tagged_manifest.digest != digest: + pull_downloader = await PullThroughDownloader.create( + distribution, repository_version, path, tag_name + ) + pull_downloader.downloader = downloader + raw_manifest, digest, media_type = await pull_downloader.download_manifest( + run_pipeline=True + ) + headers = { + "Content-Type": media_type, + "Docker-Content-Digest": digest, + "Docker-Distribution-API-Version": "registry/2.0", + } + return web.Response(text=raw_manifest, headers=headers) + + accepted_media_types = get_accepted_media_types(request.headers) # we do not convert OCI to docker oci_mediatypes = [MEDIA_TYPE.MANIFEST_OCI, MEDIA_TYPE.INDEX_OCI] @@ -219,7 +281,6 @@ async def get_by_digest(self, request): """ Return a response to the "GET" action. 
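+
+        If the digest is not known locally and the distribution originates from a pull-through
+        distribution, the manifest or blob is fetched from the remote source instead.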
""" - path = request.match_info["path"] digest = "sha256:{digest}".format(digest=request.match_info["digest"]) distribution = await sync_to_async(self._match_distribution)(path, add_trailing_slash=False) @@ -229,16 +290,14 @@ async def get_by_digest(self, request): raise PathNotResolved(path) if digest == EMPTY_BLOB: return await Registry._empty_blob() - try: - content = await sync_to_async(repository_version.get_content)() - repository = await sync_to_async(repository_version.repository.cast)() - if repository.PUSH_ENABLED: - pending_blobs = repository.pending_blobs.values_list("pk") - pending_manifests = repository.pending_manifests.values_list("pk") - pending_content = pending_blobs.union(pending_manifests) - content |= Content.objects.filter(pk__in=pending_content) + repository = await repository_version.repository.acast() + pending_blobs = repository.pending_blobs.values_list("pk") + pending_manifests = repository.pending_manifests.values_list("pk") + pending_content = pending_blobs.union(pending_manifests) + content = repository_version.content | Content.objects.filter(pk__in=pending_content) + try: ca = await ContentArtifact.objects.select_related("artifact", "content").aget( content__in=content, relative_path=digest ) @@ -252,7 +311,33 @@ async def get_by_digest(self, request): "Docker-Content-Digest": ca_content.digest, } except ObjectDoesNotExist: - raise PathNotResolved(path) + distribution = await distribution.acast() + if distribution.remote_id and distribution.pull_through_distribution_id: + pull_downloader = await PullThroughDownloader.create( + distribution, repository_version, path, digest + ) + + # "/pulp/container/{path:.+}/{content:(blobs|manifests)}/sha256:{digest:.+}" + content_type = request.match_info["content"] + if content_type == "manifests": + raw_manifest, digest, media_type = await pull_downloader.download_manifest() + headers = { + "Content-Type": media_type, + "Docker-Content-Digest": digest, + "Docker-Distribution-API-Version": "registry/2.0", + } + return web.Response(text=raw_manifest, headers=headers) + elif content_type == "blobs": + # there might be a case where the client has all the manifest data in place + # and tries to download only missing blobs; because of that, only the reference + # to a remote blob is returned (i.e., RemoteArtifact) + blob = await pull_downloader.init_remote_blob() + ca = await blob.contentartifact_set.afirst() + return await self._stream_content_artifact(request, web.StreamResponse(), ca) + else: + raise RuntimeError("Only blobs or manifests are supported by the parser.") + else: + raise PathNotResolved(path) else: artifact = ca.artifact if artifact: @@ -275,3 +360,182 @@ async def _empty_blob(): "Docker-Distribution-API-Version": "registry/2.0", } return web.Response(body=body, headers=response_headers) + + +class PullThroughDownloader: + def __init__(self, distribution, remote, repository, repository_version, path, identifier): + self.distribution = distribution + self.remote = remote + self.repository = repository + self.repository_version = repository_version + self.path = path + self.identifier = identifier + self.downloader = None + + @classmethod + async def create(cls, distribution, repository_version, path, identifier): + remote = await distribution.remote.acast() + repository = await repository_version.repository.acast() + return cls(distribution, remote, repository, repository_version, path, identifier) + + async def init_remote_blob(self): + return await self.save_blob(self.identifier, None) + + async def 
download_manifest(self, run_pipeline=False): + response = await self.run_manifest_downloader() + + with open(response.path) as f: + raw_data = f.read() + + response.artifact_attributes["file"] = response.path + saved_artifact = await save_artifact(response.artifact_attributes) + + if run_pipeline: + await self.run_pipeline(saved_artifact) + + try: + manifest_data = json.loads(raw_data) + except json.decoder.JSONDecodeError: + raise PathNotResolved(self.identifier) + media_type = determine_media_type(manifest_data, response) + if media_type in (MEDIA_TYPE.MANIFEST_V1_SIGNED, MEDIA_TYPE.MANIFEST_V1): + digest = calculate_digest(raw_data) + else: + digest = f"sha256:{response.artifact_attributes['sha256']}" + + if media_type not in (MEDIA_TYPE.MANIFEST_LIST, MEDIA_TYPE.INDEX_OCI): + # add the manifest and blobs to the repository to be able to stream it + # in the next round when a client approaches the registry + await self.init_pending_content(digest, manifest_data, media_type, saved_artifact) + + return raw_data, digest, media_type + + async def run_manifest_downloader(self): + if self.downloader is None: + relative_url = "/v2/{name}/manifests/{identifier}".format( + name=self.remote.namespaced_upstream_name, identifier=self.identifier + ) + url = urljoin(self.remote.url, relative_url) + self.downloader = self.remote.get_downloader(url=url) + + try: + response = await self.downloader.run(extra_data={"headers": V2_ACCEPT_HEADERS}) + except ClientResponseError as response_error: + if response_error.status == 429: + # the client could request the manifest outside the docker hub pull limit; + # it is necessary to pass this information back to the client + raise HTTPTooManyRequests() + else: + # TODO: do not mask out relevant errors, like HTTP 502 + raise PathNotResolved(self.path) + + return response + + async def run_pipeline(self, saved_artifact): + set_guid(generate_guid()) + await sync_to_async(dispatch)( + download_image_data, + exclusive_resources=[self.repository_version.repository], + kwargs={ + "repository_pk": self.repository_version.repository.pk, + "remote_pk": self.remote.pk, + "manifest_artifact_pk": saved_artifact.pk, + "tag_name": self.identifier, + }, + ) + + async def init_pending_content(self, digest, manifest_data, media_type, artifact): + if config := manifest_data.get("config", None): + config_digest = config["digest"] + config_blob = await self.save_config_blob(config_digest) + await sync_to_async(self.repository.pending_blobs.add)(config_blob) + else: + config_blob = None + + manifest = Manifest( + digest=digest, + schema_version=2 + if manifest_data["mediaType"] in (MEDIA_TYPE.MANIFEST_V2, MEDIA_TYPE.MANIFEST_OCI) + else 1, + media_type=media_type, + config_blob=config_blob, + ) + try: + await manifest.asave() + except IntegrityError: + manifest = await Manifest.objects.aget(digest=manifest.digest) + await sync_to_async(manifest.touch)() + await sync_to_async(self.repository.pending_manifests.add)(manifest) + + for layer in manifest_data["layers"]: + blob = await self.save_blob(layer["digest"], manifest) + await sync_to_async(self.repository.pending_blobs.add)(blob) + + content_artifact = ContentArtifact( + artifact=artifact, content=manifest, relative_path=manifest.digest + ) + with suppress(IntegrityError): + await content_artifact.asave() + + async def save_blob(self, digest, manifest): + blob = Blob(digest=digest) + try: + await blob.asave() + except IntegrityError: + blob = await Blob.objects.aget(digest=digest) + await sync_to_async(blob.touch)() + + bm_rel = 
BlobManifest(manifest=manifest, manifest_blob=blob) + with suppress(IntegrityError): + await bm_rel.asave() + + ca = ContentArtifact( + content=blob, + artifact=None, + relative_path=digest, + ) + with suppress(IntegrityError): + await ca.asave() + + relative_url = "/v2/{name}/blobs/{digest}".format( + name=self.remote.namespaced_upstream_name, digest=digest + ) + blob_url = urljoin(self.remote.url, relative_url) + ra = RemoteArtifact( + url=blob_url, + sha256=digest[len("sha256:") :], + content_artifact=ca, + remote=self.remote, + ) + with suppress(IntegrityError): + await ra.asave() + + return blob + + async def save_config_blob(self, config_digest): + blob_relative_url = "/v2/{name}/blobs/{digest}".format( + name=self.remote.namespaced_upstream_name, digest=config_digest + ) + blob_url = urljoin(self.remote.url, blob_relative_url) + downloader = self.remote.get_downloader(url=blob_url) + response = await downloader.run() + + response.artifact_attributes["file"] = response.path + saved_artifact = await save_artifact(response.artifact_attributes) + + config_blob = Blob(digest=config_digest) + try: + await config_blob.asave() + except IntegrityError: + config_blob = await Blob.objects.aget(digest=config_digest) + await sync_to_async(config_blob.touch)() + + content_artifact = ContentArtifact( + content=config_blob, + artifact=saved_artifact, + relative_path=config_digest, + ) + with suppress(IntegrityError): + await content_artifact.asave() + + return config_blob diff --git a/pulp_container/app/registry_api.py b/pulp_container/app/registry_api.py index 70bd80abc..e334bd85c 100644 --- a/pulp_container/app/registry_api.py +++ b/pulp_container/app/registry_api.py @@ -11,13 +11,15 @@ import hashlib import re +from aiohttp.client_exceptions import ClientResponseError from itertools import chain -from urllib.parse import urlparse, urlunparse, parse_qs, urlencode +from urllib.parse import urljoin, urlparse, urlunparse, parse_qs, urlencode from tempfile import NamedTemporaryFile from django.core.files.storage import default_storage as storage from django.core.files.base import ContentFile, File from django.db import IntegrityError, transaction +from django.db.models import F, Value from django.shortcuts import get_object_or_404 from django.conf import settings @@ -84,10 +86,21 @@ SIGNATURE_HEADER, SIGNATURE_PAYLOAD_MAX_SIZE, SIGNATURE_TYPE, + V2_ACCEPT_HEADERS, ) log = logging.getLogger(__name__) +IGNORED_PULL_THROUGH_REMOTE_ATTRIBUTES = [ + "remote_ptr_id", + "pulp_type", + "pulp_last_updated", + "pulp_created", + "pulp_id", + "url", + "name", +] + class ContentRenderer(BaseRenderer): """ @@ -233,7 +246,7 @@ def default_response_headers(self): def get_exception_handler_context(self): """ - Adjust the reder context for exceptions. + Adjust the render context for exceptions. 
""" context = super().get_exception_handler_context() if context["request"]: @@ -271,7 +284,8 @@ def get_drv_pull(self, path): try: distribution = models.ContainerDistribution.objects.get(base_path=path) except models.ContainerDistribution.DoesNotExist: - raise RepositoryNotFound(name=path) + # get a pull-through cache distribution whose base_path is a substring of the path + return self.get_pull_through_drv(path) if distribution.repository: repository_version = distribution.repository.latest_version() elif distribution.repository_version: @@ -280,6 +294,48 @@ def get_drv_pull(self, path): raise RepositoryNotFound(name=path) return distribution, distribution.repository, repository_version + def get_pull_through_drv(self, path): + pull_through_cache_distribution = ( + models.ContainerPullThroughDistribution.objects.annotate(path=Value(path)) + .filter(path__startswith=F("base_path")) + .order_by("-base_path") + .first() + ) + if not pull_through_cache_distribution: + raise RepositoryNotFound(name=path) + + try: + with transaction.atomic(): + repository, _ = models.ContainerRepository.objects.get_or_create( + name=path, retain_repo_versions=1 + ) + + remote_data = _get_pull_through_remote_data(pull_through_cache_distribution) + upstream_name = path.split(pull_through_cache_distribution.base_path, maxsplit=1)[1] + remote, _ = models.ContainerRemote.objects.get_or_create( + name=path, + upstream_name=upstream_name.strip("/"), + url=pull_through_cache_distribution.remote.url, + **remote_data, + ) + + # TODO: Propagate the user's permissions and private flag from the pull-through + # distribution to this distribution + distribution, _ = models.ContainerDistribution.objects.get_or_create( + name=path, + base_path=path, + remote=remote, + repository=repository, + ) + except IntegrityError: + # some entities needed to be created, but their keys already exist in the database + # (e.g., a repository with the same name as the constructed path) + raise RepositoryNotFound(name=path) + else: + pull_through_cache_distribution.distributions.add(distribution) + + return distribution, repository, repository.latest_version() + def get_dr_push(self, request, path, create=False): """ Get distribution and repository for push access. @@ -328,6 +384,15 @@ def create_dr(self, path, request): return distribution, repository +def _get_pull_through_remote_data(root_cache_distribution): + remote_data = models.ContainerPullThroughRemote.objects.filter( + pk=root_cache_distribution.remote_id + ).values()[0] + for attr in IGNORED_PULL_THROUGH_REMOTE_ATTRIBUTES: + remote_data.pop(attr, None) + return remote_data + + class BearerTokenView(APIView): """ Hand out anonymous or authenticated bearer tokens. 
@@ -906,13 +971,10 @@ def handle_safe_method(self, request, path, pk): if pk == EMPTY_BLOB: return redirects.redirect_to_content_app("blobs", pk) repository = repository.cast() - if repository.PUSH_ENABLED: - try: - blob = repository.pending_blobs.get(digest=pk) - blob.touch() - except models.Blob.DoesNotExist: - raise BlobNotFound(digest=pk) - else: + try: + blob = repository.pending_blobs.get(digest=pk) + blob.touch() + except models.Blob.DoesNotExist: raise BlobNotFound(digest=pk) return redirects.issue_blob_redirect(blob) @@ -951,25 +1013,107 @@ def handle_safe_method(self, request, path, pk): try: tag = models.Tag.objects.get(name=pk, pk__in=repository_version.content) except models.Tag.DoesNotExist: - raise ManifestNotFound(reference=pk) + distribution = distribution.cast() + if distribution.remote and distribution.pull_through_distribution_id: + remote = distribution.remote.cast() + # issue a head request first to ensure that the content exists on the remote + # source; we want to prevent immediate "not found" error responses from + # content-app: 302 (api-app) -> 404 (content-app) + manifest = self.fetch_manifest(remote, pk) + if manifest is None: + return redirects.redirect_to_content_app("manifests", pk) + + tag = models.Tag(name=pk, tagged_manifest=manifest) + try: + tag.save() + except IntegrityError: + tag = models.Tag.objects.get(name=tag.name, tagged_manifest=manifest) + tag.touch() + + add_content_units = self.get_content_units_to_add(manifest, tag) + + dispatch( + add_and_remove, + exclusive_resources=[repository], + kwargs={ + "repository_pk": str(repository.pk), + "add_content_units": add_content_units, + "remove_content_units": [], + }, + immediate=True, + deferred=True, + ) + + return redirects.redirect_to_content_app("manifests", tag.name) + else: + raise ManifestNotFound(reference=pk) return redirects.issue_tag_redirect(tag) else: try: manifest = models.Manifest.objects.get(digest=pk, pk__in=repository_version.content) - except models.Manifest.DoesNotExit: - if repository.PUSH_ENABLED: - # the manifest might be a part of listed manifests currently being uploaded - try: - manifest = repository.pending_manifests.get(digest=pk) - manifest.touch() - except models.Manifest.DoesNotExist: - raise ManifestNotFound(reference=pk) + except models.Manifest.DoesNotExist: + repository = repository.cast() + # the manifest might be a part of listed manifests currently being uploaded + # or saved during the pull-through caching + try: + manifest = repository.pending_manifests.get(digest=pk) + manifest.touch() + except models.Manifest.DoesNotExist: + pass + + distribution = distribution.cast() + if distribution.remote and distribution.pull_through_distribution_id: + remote = distribution.remote.cast() + self.fetch_manifest(remote, pk) + return redirects.redirect_to_content_app("manifests", pk) else: - ManifestNotFound(reference=pk) + raise ManifestNotFound(reference=pk) return redirects.issue_manifest_redirect(manifest) + def get_content_units_to_add(self, manifest, tag): + add_content_units = [str(tag.pk), str(manifest.pk)] + if manifest.media_type in ( + models.MEDIA_TYPE.MANIFEST_LIST, + models.MEDIA_TYPE.INDEX_OCI, + ): + for listed_manifest in manifest.listed_manifests: + add_content_units.append(str(listed_manifest.pk)) + add_content_units.append(str(listed_manifest.config_blob_id)) + add_content_units.extend(listed_manifest.blobs.values_list("pk", flat=True)) + elif manifest.media_type in ( + models.MEDIA_TYPE.MANIFEST_V2, + models.MEDIA_TYPE.MANIFEST_OCI, + ): + 
add_content_units.append(str(manifest.config_blob_id)) + add_content_units.extend(manifest.blobs.values_list("pk", flat=True)) + else: + add_content_units.extend(manifest.blobs.values_list("pk", flat=True)) + return add_content_units + + def fetch_manifest(self, remote, pk): + relative_url = "/v2/{name}/manifests/{pk}".format( + name=remote.namespaced_upstream_name, pk=pk + ) + tag_url = urljoin(remote.url, relative_url) + downloader = remote.get_downloader(url=tag_url) + try: + response = downloader.fetch( + extra_data={"headers": V2_ACCEPT_HEADERS, "http_method": "head"} + ) + except ClientResponseError as response_error: + if response_error.status == 429: + # the client could request the manifest outside the docker hub pull limit; + # it is necessary to pass this information back to the client + raise Throttled() + else: + # TODO: do not mask out relevant errors, like HTTP 502 + raise ManifestNotFound(reference=pk) + else: + digest = response.headers.get("docker-content-digest") + return models.Manifest.objects.filter(digest=digest).first() + def put(self, request, path, pk=None): """ Responds with the actual manifest @@ -1212,7 +1356,13 @@ def get(self, request, path, pk): try: manifest = models.Manifest.objects.get(digest=pk, pk__in=repository_version.content) except models.Manifest.DoesNotExist: - raise ManifestNotFound(reference=pk) + try: + # the manifest was initialized as a pending content unit + # or has not been assigned to any repository yet + manifest = models.Manifest.objects.get(digest=pk) + manifest.touch() + except models.Manifest.DoesNotExist: + raise ManifestNotFound(reference=pk) signatures = models.ManifestSignature.objects.filter( signed_manifest=manifest, pk__in=repository_version.content diff --git a/pulp_container/app/serializers.py b/pulp_container/app/serializers.py index dd4de64d8..0407bc3e7 100644 --- a/pulp_container/app/serializers.py +++ b/pulp_container/app/serializers.py @@ -277,6 +277,18 @@ class Meta: model = models.ContainerRemote +class ContainerPullThroughRemoteSerializer(RemoteSerializer): + """ + A serializer for a remote used in the pull-through distribution. + """ + + policy = serializers.ChoiceField(choices=[Remote.ON_DEMAND], default=Remote.ON_DEMAND) + + class Meta: + fields = RemoteSerializer.Meta.fields + model = models.ContainerPullThroughRemote + + class ContainerDistributionSerializer(DistributionSerializer, GetOrCreateSerializerMixin): """ A serializer for ContainerDistribution. @@ -309,10 +321,16 @@ class ContainerDistributionSerializer(DistributionSerializer, GetOrCreateSeriali repository_version = RepositoryVersionRelatedField( required=False, help_text=_("RepositoryVersion to be served"), allow_null=True ) + remote = DetailRelatedField( + required=False, + help_text=_("Remote that can be used to fetch content when using pull-through caching."), + view_name_pattern=r"remotes(-.*/.*)?-detail", + read_only=True, + ) def validate(self, data): """ - Validate the ContainterDistribution. + Validate the ContainerDistribution. Make sure there is an instance of ContentRedirectContentGuard always present in validated data. @@ -360,6 +378,69 @@ class Meta: fields = tuple(set(DistributionSerializer.Meta.fields) - {"base_url"}) + ( "repository_version", "registry_path", + "remote", + "namespace", + "private", + "description", + ) + + +class ContainerPullThroughDistributionSerializer(DistributionSerializer): + """ + A serializer for a specialized pull-through distribution referencing sub-distributions. 
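+
+    The "distributions" field is populated automatically; every repository pulled through
+    the cache gets a regular distribution that is then linked to this resource.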
+ """ + + remote = DetailRelatedField( + help_text=_("Remote that can be used to fetch content when using pull-through caching."), + view_name_pattern=r"remotes(-.*/.*)-detail", + queryset=models.ContainerPullThroughRemote.objects.all(), + ) + namespace = RelatedField( + required=False, + read_only=True, + view_name="pulp_container/namespaces-detail", + help_text=_("Namespace this distribution belongs to."), + ) + content_guard = DetailRelatedField( + required=False, + help_text=_("An optional content-guard. If none is specified, a default one will be used."), + view_name=r"contentguards-container/content-redirect-detail", + queryset=ContentRedirectContentGuard.objects.all(), + allow_null=False, + ) + distributions = DetailRelatedField( + many=True, + help_text="Distributions created after pulling content through cache", + view_name="distributions-detail", + queryset=models.ContainerDistribution.objects.all(), + required=False, + ) + description = serializers.CharField( + help_text=_("An optional description."), required=False, allow_null=True + ) + + def validate(self, data): + validated_data = super().validate(data) + + if "content_guard" not in validated_data: + validated_data["content_guard"] = ContentRedirectContentGuardSerializer.get_or_create( + {"name": "content redirect"} + ) + + base_path = validated_data.get("base_path") + if base_path: + namespace_name = base_path.split("/")[0] + validated_data["namespace"] = ContainerNamespaceSerializer.get_or_create( + {"name": namespace_name} + ) + + return validated_data + + class Meta: + model = models.ContainerPullThroughDistribution + fields = tuple(set(DistributionSerializer.Meta.fields) - {"base_url"}) + ( + "remote", + "distributions", "namespace", "private", "description", @@ -712,6 +793,12 @@ class ContainerRepositorySyncURLSerializer(RepositorySyncURLSerializer): Serializer for Container Sync. """ + remote = DetailRelatedField( + required=False, + view_name_pattern=r"remotes(-.*/.*)-detail", + queryset=models.ContainerRemote.objects.all(), + help_text=_("A remote to sync from. 
This will override a remote set on the repository."),
+    )
     signed_only = serializers.BooleanField(
         required=False,
         default=False,
diff --git a/pulp_container/app/tasks/__init__.py b/pulp_container/app/tasks/__init__.py
index 6e4392924..09f335a3d 100644
--- a/pulp_container/app/tasks/__init__.py
+++ b/pulp_container/app/tasks/__init__.py
@@ -1,3 +1,4 @@
+from .download_image_data import download_image_data  # noqa
 from .builder import build_image_from_containerfile  # noqa
 from .recursive_add import recursive_add_content  # noqa
 from .recursive_remove import recursive_remove_content  # noqa
diff --git a/pulp_container/app/tasks/download_image_data.py b/pulp_container/app/tasks/download_image_data.py
new file mode 100644
index 000000000..6c62c5501
--- /dev/null
+++ b/pulp_container/app/tasks/download_image_data.py
@@ -0,0 +1,74 @@
+import json
+import logging
+
+from pulpcore.plugin.models import Artifact
+from pulpcore.plugin.stages import DeclarativeContent
+
+from pulp_container.app.models import ContainerRemote, ContainerRepository, Tag
+from pulp_container.app.utils import determine_media_type_from_json
+from pulp_container.constants import MEDIA_TYPE
+
+from .synchronize import ContainerDeclarativeVersion
+from .sync_stages import ContainerFirstStage
+
+log = logging.getLogger(__name__)
+
+
+def download_image_data(repository_pk, remote_pk, manifest_artifact_pk, tag_name):
+    repository = ContainerRepository.objects.get(pk=repository_pk)
+    remote = ContainerRemote.objects.get(pk=remote_pk)
+    manifest_artifact = Artifact.objects.get(pk=manifest_artifact_pk)
+    log.info("Pulling cache: repository={r} remote={p}".format(r=repository.name, p=remote.name))
+    first_stage = ContainerPullThroughFirstStage(remote, manifest_artifact, tag_name)
+    dv = ContainerDeclarativeVersion(first_stage, repository)
+    return dv.create()
+
+
+class ContainerPullThroughFirstStage(ContainerFirstStage):
+    """The stage that prepares the pipeline for downloading a single tag and its related data."""
+
+    def __init__(self, remote, manifest_artifact, tag_name):
+        """Initialize the stage with the artifact defined in content-app."""
+        super().__init__(remote, signed_only=False)
+        self.tag_name = tag_name
+        self.manifest_artifact = manifest_artifact
+
+    async def run(self):
+        """Run the stage and create declarative content for one tag, its manifest, and blobs.
+
+        This method is a trimmed-down version of ``ContainerFirstStage.run`` that syncs just
+ """ + tag_dc = DeclarativeContent(Tag(name=self.tag_name)) + self.tag_dcs.append(tag_dc) + + raw_data = self.manifest_artifact.file.read() + content_data = json.loads(raw_data) + self.manifest_artifact.file.close() + + media_type = determine_media_type_from_json(content_data) + if media_type in (MEDIA_TYPE.MANIFEST_LIST, MEDIA_TYPE.INDEX_OCI): + list_dc = self.create_tagged_manifest_list( + self.tag_name, self.manifest_artifact, content_data, media_type + ) + for manifest_data in content_data.get("manifests"): + listed_manifest = await self.create_listed_manifest(manifest_data) + list_dc.extra_data["listed_manifests"].append(listed_manifest) + else: + tag_dc.extra_data["tagged_manifest_dc"] = list_dc + for listed_manifest in list_dc.extra_data["listed_manifests"]: + await self.handle_blobs( + listed_manifest["manifest_dc"], listed_manifest["content_data"] + ) + self.manifest_dcs.append(listed_manifest["manifest_dc"]) + self.manifest_list_dcs.append(list_dc) + else: + # Simple tagged manifest + man_dc = self.create_tagged_manifest( + self.tag_name, self.manifest_artifact, content_data, raw_data, media_type + ) + tag_dc.extra_data["tagged_manifest_dc"] = man_dc + await self.handle_blobs(man_dc, content_data) + self.manifest_dcs.append(man_dc) + + await self.resolve_flush() diff --git a/pulp_container/app/tasks/sync_stages.py b/pulp_container/app/tasks/sync_stages.py index 2dbefa876..100ab961f 100644 --- a/pulp_container/app/tasks/sync_stages.py +++ b/pulp_container/app/tasks/sync_stages.py @@ -9,17 +9,16 @@ from urllib.parse import urljoin, urlparse, urlunparse from asgiref.sync import sync_to_async -from django.db import IntegrityError from pulpcore.plugin.models import Artifact, ProgressReport, Remote from pulpcore.plugin.stages import DeclarativeArtifact, DeclarativeContent, Stage, ContentSaver from pulp_container.constants import ( - V2_ACCEPT_HEADERS, MEDIA_TYPE, SIGNATURE_API_EXTENSION_VERSION, SIGNATURE_HEADER, SIGNATURE_SOURCE, SIGNATURE_TYPE, + V2_ACCEPT_HEADERS, ) from pulp_container.app.models import ( Blob, @@ -30,26 +29,17 @@ Tag, ) from pulp_container.app.utils import ( + save_artifact, extract_data_from_signature, urlpath_sanitize, determine_media_type, validate_manifest, + calculate_digest, ) log = logging.getLogger(__name__) -async def _save_artifact(artifact_attributes): - saved_artifact = Artifact(**artifact_attributes) - try: - await saved_artifact.asave() - except IntegrityError: - del artifact_attributes["file"] - saved_artifact = await Artifact.objects.aget(**artifact_attributes) - await sync_to_async(saved_artifact.touch)() - return saved_artifact - - class ContainerFirstStage(Stage): """ The first stage of a pulp_container sync pipeline. 
@@ -83,7 +73,7 @@ async def _download_and_save_artifact_data(self, manifest_url): raw_data = content_file.read() response.artifact_attributes["file"] = response.path - saved_artifact = await _save_artifact(response.artifact_attributes) + saved_artifact = await save_artifact(response.artifact_attributes) content_data = json.loads(raw_data) return saved_artifact, content_data, raw_data, response @@ -156,7 +146,7 @@ async def run(self): for artifact in asyncio.as_completed(to_download_artifact): saved_artifact, content_data, raw_data, response = await artifact - digest = response.artifact_attributes["sha256"] + digest = saved_artifact.sha256 # Look for cosign signatures # cosign signature has a tag convention 'sha256-1234.sig' @@ -382,7 +372,7 @@ def create_tagged_manifest_list(self, tag_name, saved_artifact, manifest_list_da tag_name (str): A name of a tag saved_artifact (pulpcore.plugin.models.Artifact): A saved manifest's Artifact manifest_list_data (dict): Data about a ManifestList - media_type (str): The type of a manifest + media_type (str): The type of manifest """ digest = f"sha256:{saved_artifact.sha256}" @@ -411,7 +401,7 @@ def create_tagged_manifest(self, tag_name, saved_artifact, manifest_data, raw_da if media_type in (MEDIA_TYPE.MANIFEST_V2, MEDIA_TYPE.MANIFEST_OCI): digest = f"sha256:{saved_artifact.sha256}" else: - digest = self._calculate_digest(raw_data) + digest = calculate_digest(raw_data) manifest = Manifest( digest=digest, schema_version=manifest_data["schemaVersion"], media_type=media_type @@ -649,77 +639,6 @@ def _include_layer(self, layer): return False return True - def _calculate_digest(self, manifest): - """ - Calculate the requested digest of the ImageManifest, given in JSON. - - Args: - manifest (str): The raw JSON representation of the Manifest. - - Returns: - str: The digest of the given ImageManifest - - """ - decoded_manifest = json.loads(manifest) - if "signatures" in decoded_manifest: - # This manifest contains signatures. Unfortunately, the Docker manifest digest - # is calculated on the unsigned version of the Manifest so we need to remove the - # signatures. To do this, we will look at the 'protected' key within the first - # signature. This key indexes a (malformed) base64 encoded JSON dictionary that - # tells us how many bytes of the manifest we need to keep before the signature - # appears in the original JSON and what the original ending to the manifest was after - # the signature block. We will strip out the bytes after this cutoff point, add back the - # original ending, and then calculate the sha256 sum of the transformed JSON to get the - # digest. - protected = decoded_manifest["signatures"][0]["protected"] - # Add back the missing padding to the protected block so that it is valid base64. - protected = self._pad_unpadded_b64(protected) - # Now let's decode the base64 and load it as a dictionary so we can get the length - protected = base64.b64decode(protected) - protected = json.loads(protected) - # This is the length of the signed portion of the Manifest, except for a trailing - # newline and closing curly brace. - signed_length = protected["formatLength"] - # The formatTail key indexes a base64 encoded string that represents the end of the - # original Manifest before signatures. We will need to add this string back to the - # trimmed Manifest to get the correct digest. We'll do this as a one liner since it is - # a very similar process to what we've just done above to get the protected block - # decoded. 
- signed_tail = base64.b64decode(self._pad_unpadded_b64(protected["formatTail"])) - # Now we can reconstruct the original Manifest that the digest should be based on. - manifest = manifest[:signed_length] + signed_tail - - return "sha256:{digest}".format(digest=hashlib.sha256(manifest).hexdigest()) - - def _pad_unpadded_b64(self, unpadded_b64): - """ - Fix bad padding. - - Docker has not included the required padding at the end of the base64 encoded - 'protected' block, or in some encased base64 within it. This function adds the correct - number of ='s signs to the unpadded base64 text so that it can be decoded with Python's - base64 library. - - Args: - unpadded_b64 (str): The unpadded base64 text. - - Returns: - str: The same base64 text with the appropriate number of ='s symbols. - - """ - # The Pulp team has not observed any newlines or spaces within the base64 from Docker, but - # Docker's own code does this same operation so it seemed prudent to include it here. - # See lines 167 to 168 here: - # https://github.com/docker/libtrust/blob/9cbd2a1374f46905c68a4eb3694a130610adc62a/util.go - unpadded_b64 = unpadded_b64.replace("\n", "").replace(" ", "") - # It is illegal base64 for the remainder to be 1 when the length of the block is - # divided by 4. - if len(unpadded_b64) % 4 == 1: - raise ValueError("Invalid base64: {t}".format(t=unpadded_b64)) - # Add back the missing padding characters, based on the length of the encoded string - paddings = {0: "", 2: "==", 3: "="} - return unpadded_b64 + paddings[len(unpadded_b64) % 4] - class ContainerContentSaver(ContentSaver): """Container specific content saver stage to add content associations.""" diff --git a/pulp_container/app/utils.py b/pulp_container/app/utils.py index fe6c6e71a..3e7f64206 100644 --- a/pulp_container/app/utils.py +++ b/pulp_container/app/utils.py @@ -1,3 +1,5 @@ +import base64 +import hashlib import re import subprocess import gnupg @@ -5,10 +7,12 @@ import logging import time +from asgiref.sync import sync_to_async from jsonschema import Draft7Validator, validate, ValidationError +from django.db import IntegrityError from rest_framework.exceptions import Throttled -from pulpcore.plugin.models import Task +from pulpcore.plugin.models import Artifact, Task from pulp_container.constants import ALLOWED_ARTIFACT_TYPES, MANIFEST_MEDIA_TYPES, MEDIA_TYPE from pulp_container.app.exceptions import ManifestInvalid @@ -213,3 +217,87 @@ def validate_manifest(content_data, media_type, digest): raise ManifestInvalid( reason=f'{".".join(map(str, error.path))}: {error.message}', digest=digest ) + + +def calculate_digest(manifest): + """ + Calculate the requested digest of the ImageManifest, given in JSON. + + Args: + manifest (str): The raw JSON representation of the Manifest. + + Returns: + str: The digest of the given ImageManifest + + """ + decoded_manifest = json.loads(manifest) + if "signatures" in decoded_manifest: + # This manifest contains signatures. Unfortunately, the Docker manifest digest + # is calculated on the unsigned version of the Manifest so we need to remove the + # signatures. To do this, we will look at the 'protected' key within the first + # signature. This key indexes a (malformed) base64 encoded JSON dictionary that + # tells us how many bytes of the manifest we need to keep before the signature + # appears in the original JSON and what the original ending to the manifest was after + # the signature block. 
We will strip out the bytes after this cutoff point, add back the + # original ending, and then calculate the sha256 sum of the transformed JSON to get the + # digest. + protected = decoded_manifest["signatures"][0]["protected"] + # Add back the missing padding to the protected block so that it is valid base64. + protected = pad_unpadded_b64(protected) + # Now let's decode the base64 and load it as a dictionary so we can get the length + protected = base64.b64decode(protected) + protected = json.loads(protected) + # This is the length of the signed portion of the Manifest, except for a trailing + # newline and closing curly brace. + signed_length = protected["formatLength"] + # The formatTail key indexes a base64 encoded string that represents the end of the + # original Manifest before signatures. We will need to add this string back to the + # trimmed Manifest to get the correct digest. We'll do this as a one liner since it is + # a very similar process to what we've just done above to get the protected block + # decoded. + signed_tail = base64.b64decode(pad_unpadded_b64(protected["formatTail"])) + # Now we can reconstruct the original Manifest that the digest should be based on. + manifest = manifest[:signed_length] + signed_tail + + return "sha256:{digest}".format(digest=hashlib.sha256(manifest).hexdigest()) + + +def pad_unpadded_b64(unpadded_b64): + """ + Fix bad padding. + + Docker has not included the required padding at the end of the base64 encoded + 'protected' block, or in some encased base64 within it. This function adds the correct + number of ='s signs to the unpadded base64 text so that it can be decoded with Python's + base64 library. + + Args: + unpadded_b64 (str): The unpadded base64 text. + + Returns: + str: The same base64 text with the appropriate number of ='s symbols. + + """ + # The Pulp team has not observed any newlines or spaces within the base64 from Docker, but + # Docker's own code does this same operation so it seemed prudent to include it here. + # See lines 167 to 168 here: + # https://github.com/docker/libtrust/blob/9cbd2a1374f46905c68a4eb3694a130610adc62a/util.go + unpadded_b64 = unpadded_b64.replace("\n", "").replace(" ", "") + # It is illegal base64 for the remainder to be 1 when the length of the block is + # divided by 4. + if len(unpadded_b64) % 4 == 1: + raise ValueError("Invalid base64: {t}".format(t=unpadded_b64)) + # Add back the missing padding characters, based on the length of the encoded string + paddings = {0: "", 2: "==", 3: "="} + return unpadded_b64 + paddings[len(unpadded_b64) % 4] + + +async def save_artifact(artifact_attributes): + saved_artifact = Artifact(**artifact_attributes) + try: + await saved_artifact.asave() + except IntegrityError: + del artifact_attributes["file"] + saved_artifact = await Artifact.objects.aget(**artifact_attributes) + await sync_to_async(saved_artifact.touch)() + return saved_artifact diff --git a/pulp_container/app/viewsets.py b/pulp_container/app/viewsets.py index 4a4501a1f..535e147eb 100644 --- a/pulp_container/app/viewsets.py +++ b/pulp_container/app/viewsets.py @@ -429,6 +429,86 @@ class ContainerRemoteViewSet(RemoteViewSet, RolesMixin): } +class ContainerPullThroughRemoteViewSet(RemoteViewSet, RolesMixin): + """ + A Container Remote referencing a remote registry used as a source for the pull-through caching. 
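+
+    The serializer restricts the remote's policy to "on_demand", meaning that cached content
+    is downloaded lazily, only when clients request it.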
+ """ + + endpoint_name = "pull-through" + queryset = models.ContainerPullThroughRemote.objects.all() + serializer_class = serializers.ContainerPullThroughRemoteSerializer + queryset_filtering_required_permission = "container.view_containerpullthroughremote" + + DEFAULT_ACCESS_POLICY = { + "statements": [ + { + "action": ["list", "my_permissions"], + "principal": "authenticated", + "effect": "allow", + }, + { + "action": ["create"], + "principal": "authenticated", + "effect": "allow", + "condition": "has_model_perms:container.add_containerpullthroughremote", + }, + { + "action": ["retrieve"], + "principal": "authenticated", + "effect": "allow", + "condition": "has_model_or_obj_perms:container.view_containerpullthroughremote", + }, + { + "action": ["update", "partial_update"], + "principal": "authenticated", + "effect": "allow", + "condition": [ + "has_model_or_obj_perms:container.change_containerpullthroughremote", + "has_model_or_obj_perms:container.view_containerpullthroughremote", + ], + }, + { + "action": ["destroy"], + "principal": "authenticated", + "effect": "allow", + "condition": [ + "has_model_or_obj_perms:container.delete_containerpullthroughremote", + "has_model_or_obj_perms:container.view_containerpullthroughremote", + ], + }, + { + "action": ["list_roles", "add_role", "remove_role"], + "principal": "authenticated", + "effect": "allow", + "condition": [ + "has_model_or_obj_perms:container.manage_roles_containerpullthroughremote" + ], + }, + ], + "creation_hooks": [ + { + "function": "add_roles_for_object_creator", + "parameters": {"roles": "container.containerpullthroughremote_owner"}, + }, + ], + "queryset_scoping": {"function": "scope_queryset"}, + } + LOCKED_ROLES = { + "container.containerpullthroughremote_creator": [ + "container.add_containerpullthroughremote", + ], + "container.containerpullthroughremote_owner": [ + "container.view_containerpullthroughremote", + "container.change_containerpullthroughremote", + "container.delete_containerpullthroughremote", + "container.manage_roles_containerpullthroughremote", + ], + "container.containerpullthroughremote_viewer": [ + "container.view_containerpullthroughremote", + ], + } + + class TagOperationsMixin: """ A mixin that adds functionality for creating and deleting tags. @@ -1302,6 +1382,103 @@ def destroy(self, request, pk, **kwargs): return OperationPostponedResponse(async_result, request) +class ContainerPullThroughDistributionViewSet(DistributionViewSet, RolesMixin): + """ + A special pull-through Container Distribution that will reference distributions serving content. 
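+
+    Sub-distributions are attached to this resource automatically as clients pull images that
+    were not cached before (see get_pull_through_drv in registry_api.py).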
+
+
 class TagOperationsMixin:
     """
     A mixin that adds functionality for creating and deleting tags.
@@ -1302,6 +1382,103 @@ def destroy(self, request, pk, **kwargs):
         return OperationPostponedResponse(async_result, request)
 
 
+class ContainerPullThroughDistributionViewSet(DistributionViewSet, RolesMixin):
+    """
+    A special pull-through Container Distribution that references the distributions serving content.
+    """
+
+    endpoint_name = "pull-through"
+    queryset = models.ContainerPullThroughDistribution.objects.all()
+    serializer_class = serializers.ContainerPullThroughDistributionSerializer
+
+    DEFAULT_ACCESS_POLICY = {
+        "statements": [
+            {
+                "action": ["list", "my_permissions"],
+                "principal": "authenticated",
+                "effect": "allow",
+            },
+            {
+                "action": ["create"],
+                "principal": "authenticated",
+                "effect": "allow",
+                "condition": "has_namespace_model_perms",
+            },
+            {
+                "action": ["create"],
+                "principal": "authenticated",
+                "effect": "allow",
+                "condition": "has_namespace_perms:container.add_containerpullthroughdistribution",
+            },
+            {
+                "action": ["create"],
+                "principal": "authenticated",
+                "effect": "allow",
+                "condition": "namespace_is_username",
+            },
+            {
+                "action": ["retrieve"],
+                "principal": "authenticated",
+                "effect": "allow",
+                "condition_expression": [
+                    "has_namespace_or_obj_perms:container.view_containerpullthroughdistribution",
+                ],
+            },
+            {
+                "action": ["update", "partial_update"],
+                "principal": "authenticated",
+                "effect": "allow",
+                "condition": [
+                    "has_namespace_or_obj_perms:container.change_containerpullthroughdistribution",
+                    "has_namespace_or_obj_perms:container.view_containerpullthroughdistribution",
+                ],
+            },
+            {
+                "action": ["destroy"],
+                "principal": "authenticated",
+                "effect": "allow",
+                "condition": [
+                    "has_namespace_or_obj_perms:container.delete_containerpullthroughdistribution",
+                    "has_namespace_or_obj_perms:container.view_containerpullthroughdistribution",
+                ],
+            },
+            {
+                "action": ["list_roles", "add_role", "remove_role"],
+                "principal": "authenticated",
+                "effect": "allow",
+                "condition": [
+                    "has_model_or_obj_perms:container.manage_roles_containerpullthroughdistribution"
+                ],
+            },
+        ],
+        "creation_hooks": [
+            {
+                "function": "add_roles_for_object_creator",
+                "parameters": {
+                    "roles": "container.containerpullthroughdistribution_owner",
+                },
+            },
+        ],
+    }
+    LOCKED_ROLES = {
+        "container.containerpullthroughdistribution_creator": [
+            "container.add_containerpullthroughdistribution"
+        ],
+        "container.containerpullthroughdistribution_owner": [
+            "container.view_containerpullthroughdistribution",
+            "container.delete_containerpullthroughdistribution",
+            "container.change_containerpullthroughdistribution",
+            "container.manage_roles_containerpullthroughdistribution",
+        ],
+        "container.containerpullthroughdistribution_collaborator": [
+            "container.view_containerpullthroughdistribution",
+        ],
+        "container.containerpullthroughdistribution_consumer": [
+            "container.view_containerpullthroughdistribution",
+        ],
+    }
+
+
 class ContainerNamespaceViewSet(
     NamedModelViewSet,
     mixins.CreateModelMixin,
diff --git a/pulp_container/tests/functional/api/test_pull_through_cache.py b/pulp_container/tests/functional/api/test_pull_through_cache.py
new file mode 100644
index 000000000..6a623d72d
--- /dev/null
+++ b/pulp_container/tests/functional/api/test_pull_through_cache.py
@@ -0,0 +1,144 @@
+import subprocess
+import time
+
+import pytest
+
+from uuid import uuid4
+
+from pulp_container.tests.functional.constants import (
+    REGISTRY_V2,
+    REGISTRY_V2_FEED_URL,
+    PULP_HELLO_WORLD_REPO,
+    PULP_FIXTURE_1,
+)
{"name": str(uuid4()), "base_path": str(uuid4()), "remote": remote.pulp_href}, + ) + return distribution + + +@pytest.fixture +def pull_and_verify( + add_to_cleanup, + container_pull_through_distribution_api, + container_distribution_api, + container_repository_api, + container_remote_api, + container_tag_api, + registry_client, + local_registry, +): + def _pull_and_verify(images, pull_through_distribution): + tags_to_verify = [] + for version, image_path in enumerate(images, start=1): + remote_image_path = f"{REGISTRY_V2}/{image_path}" + local_image_path = f"{pull_through_distribution.base_path}/{image_path}" + + # 1. pull remote content through the pull-through distribution + local_registry.pull(local_image_path) + local_image = local_registry.inspect(local_image_path) + + # when the client pulls the image, a repository, distribution, and remote is created in + # the background; therefore, scheduling the cleanup for these entities is necessary + path, tag = local_image_path.split(":") + tags_to_verify.append(tag) + repository = container_repository_api.list(name=path).results[0] + add_to_cleanup(container_repository_api, repository.pulp_href) + remote = container_remote_api.list(name=path).results[0] + add_to_cleanup(container_remote_api, remote.pulp_href) + distribution = container_distribution_api.list(name=path).results[0] + add_to_cleanup(container_distribution_api, distribution.pulp_href) + + pull_through_distribution = container_pull_through_distribution_api.list( + name=pull_through_distribution.name + ).results[0] + assert [distribution.pulp_href] == pull_through_distribution.distributions + + # 2. verify if the pulled content is the same as on the remote + registry_client.pull(remote_image_path) + remote_image = registry_client.inspect(remote_image_path) + assert local_image[0]["Id"] == remote_image[0]["Id"] + + # 3. check if the repository version has changed + for _ in range(5): + repository = container_repository_api.list(name=path).results[0] + if f"{repository.pulp_href}versions/{version}/" == repository.latest_version_href: + break + + # there might be still the saving process running in the background + time.sleep(1) + else: + assert False, "The repository was not updated with the cached content." + + # 4. test if pulling the same content twice does not raise any error + local_registry.pull(local_image_path) + + # 5. 
assert the cached tags + tags = container_tag_api.list(repository_version=repository.latest_version_href).results + assert sorted(tags_to_verify) == sorted([tag.name for tag in tags]) + + return _pull_and_verify + + +def test_manifest_list_pull(delete_orphans_pre, pull_through_distribution, pull_and_verify): + images = [f"{PULP_HELLO_WORLD_REPO}:latest", f"{PULP_HELLO_WORLD_REPO}:linux"] + pull_and_verify(images, pull_through_distribution) + + +def test_manifest_pull(delete_orphans_pre, pull_through_distribution, pull_and_verify): + images = [f"{PULP_FIXTURE_1}:manifest_a", f"{PULP_FIXTURE_1}:manifest_b"] + pull_and_verify(images, pull_through_distribution) + + +def test_conflicting_names_and_paths( + container_remote_api, + container_remote_factory, + container_repository_api, + container_repository_factory, + container_distribution_api, + pull_through_distribution, + gen_object_with_cleanup, + local_registry, + monitor_task, +): + local_image_path = f"{pull_through_distribution.base_path}/{str(uuid4())}" + + remote = container_remote_factory(name=local_image_path) + # a remote with the same name but a different URL already exists + with pytest.raises(subprocess.CalledProcessError): + local_registry.pull(local_image_path) + monitor_task(container_remote_api.delete(remote.pulp_href).task) + + assert 0 == len(container_repository_api.list(name=local_image_path).results) + assert 0 == len(container_distribution_api.list(name=local_image_path).results) + + repository = container_repository_factory(name=local_image_path) + # a repository with the same name but a different retain configuration already exists + with pytest.raises(subprocess.CalledProcessError): + local_registry.pull(local_image_path) + monitor_task(container_repository_api.delete(repository.pulp_href).task) + + assert 0 == len(container_remote_api.list(name=local_image_path).results) + assert 0 == len(container_distribution_api.list(name=local_image_path).results) + + data = {"name": local_image_path, "base_path": local_image_path} + distribution = gen_object_with_cleanup(container_distribution_api, data) + # a distribution with the same name but different foreign keys already exists + with pytest.raises(subprocess.CalledProcessError): + local_registry.pull(local_image_path) + monitor_task(container_distribution_api.delete(distribution.pulp_href).task) + + assert 0 == len(container_repository_api.list(name=local_image_path).results) + assert 0 == len(container_remote_api.list(name=local_image_path).results) diff --git a/pulp_container/tests/functional/conftest.py b/pulp_container/tests/functional/conftest.py index be9b13fd7..457e443f7 100644 --- a/pulp_container/tests/functional/conftest.py +++ b/pulp_container/tests/functional/conftest.py @@ -13,11 +13,13 @@ ApiClient, PulpContainerNamespacesApi, RemotesContainerApi, + RemotesPullThroughApi, RepositoriesContainerApi, RepositoriesContainerPushApi, RepositoriesContainerVersionsApi, RepositoriesContainerPushVersionsApi, DistributionsContainerApi, + DistributionsPullThroughApi, ContentTagsApi, ContentManifestsApi, ContentBlobsApi, @@ -317,6 +319,12 @@ def container_remote_api(container_client): return RemotesContainerApi(container_client) +@pytest.fixture(scope="session") +def container_pull_through_remote_api(container_client): + """Pull through cache container remote API fixture.""" + return RemotesPullThroughApi(container_client) + + @pytest.fixture(scope="session") def container_repository_api(container_client): """Container repository API fixture.""" @@ -347,6 +355,12 @@ def 
@@ -347,6 +355,12 @@ def container_distribution_api(container_client):
     return DistributionsContainerApi(container_client)
 
 
+@pytest.fixture(scope="session")
+def container_pull_through_distribution_api(container_client):
+    """Pull through cache distribution API fixture."""
+    return DistributionsPullThroughApi(container_client)
+
+
 @pytest.fixture(scope="session")
 def container_tag_api(container_client):
     """Container tag API fixture."""
diff --git a/requirements.txt b/requirements.txt
index 4e8130902..e78158fea 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 ecdsa>=0.14,<=0.18.0
 jsonschema>=4.4,<4.21
-pulpcore>=3.40.3,<3.55
+pulpcore>=3.43.0,<3.55
 pyjwkest>=1.4,<=1.4.2
 pyjwt[crypto]>=2.4,<2.9
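With all of the pieces in place, the feature can be exercised end to end. The sketch below uses
the generated Python bindings imported in conftest.py above; the host, credentials, and upstream
URL are placeholders, and error handling and task polling are elided::

    from pulpcore.client.pulp_container import (
        ApiClient,
        Configuration,
        DistributionsPullThroughApi,
        RemotesPullThroughApi,
    )

    config = Configuration(host="http://localhost:24817", username="admin", password="password")
    client = ApiClient(config)

    remote = RemotesPullThroughApi(client).create(
        {"name": "docker-cache", "url": "https://registry-1.docker.io"}
    )
    # distribution creation is asynchronous; the returned task should be awaited
    task_href = DistributionsPullThroughApi(client).create(
        {"name": "docker-cache", "base_path": "docker-cache", "remote": remote.pulp_href}
    ).task
    # once the task finishes, clients can pull through the cache, e.g.:
    #   podman pull localhost:24817/docker-cache/library/busybox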