Commit c7af751c authored by Jake Buchholz Göktürk's avatar Jake Buchholz Göktürk
Browse files

There is too much in this branch.

I am committing this as things currently are -- with a judicious peppering of "TODO" notes -- so that it can be split into smaller, more manageable commits.
parent cd8f374f
......@@ -208,10 +208,6 @@ may not) be further partitioned, based on other factors.
The image's primary login user, set to `alpine`.
### `local_format` string
The local VM's disk image format, set to `qcow2`.
### `repos` map
Defines the contents of the image's `/etc/apk/repositories` file. The map's
......
......@@ -29,6 +29,12 @@ variable "qemu" {
### Local Data
locals {
# TODO: also include "release"?
# what the post-processor will do, if necessary
actions = [
"build", "upload", "import", "publish"
]
debug_arg = var.DEBUG == 0 ? "" : "--debug"
broker_arg = var.USE_BROKER == 0 ? "" : "--use-broker"
......@@ -102,8 +108,8 @@ build {
# results
output_directory = "work/images/${B.value.cloud}/${B.value.image_key}"
disk_size = B.value.size
format = B.value.local_format
vm_name = "image.${B.value.local_format}"
format = "qcow2"
vm_name = "image.qcow2"
}
}
......@@ -181,13 +187,13 @@ build {
# import and/or publish cloud images
dynamic "post-processor" {
for_each = { for b, c in local.configs:
b => c if contains(c.actions, "import") || contains(c.actions, "publish")
b => c if length(setintersection(c.actions, local.actions)) > 0
}
iterator = B
labels = ["shell-local"]
content {
only = [ "qemu.${B.key}", "null.${B.key}" ]
inline = [ for action in ["import", "publish"]:
inline = [ for action in local.actions:
"./cloud_helper.py ${action} ${local.debug_arg} ${local.broker_arg} ${B.key}" if contains(B.value.actions, action)
]
}
......
......@@ -68,6 +68,7 @@ class Alpine():
rel = self.versions[ver]['release']
return f"{self.cdn_url}/v{ver}/releases/{arch}/alpine-virt-{rel}-{arch}.iso"
# TODO: maybe we can also get links to version/release announcements somewhere around here
def version_info(self, ver=None):
ver = self._ver(ver)
if ver not in self.versions:
......
......@@ -48,7 +48,8 @@ from image_configs import ImageConfigManager
### Constants & Variables
STEPS = ['configs', 'state', 'rollback', 'local', 'import', 'publish']
# TODO: add 'release' as final step
STEPS = ['configs', 'state', 'rollback', 'local', 'upload', 'import', 'publish']
LOGFORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
WORK_CLEAN = {'bin', 'include', 'lib', 'pyvenv.cfg', '__pycache__'}
WORK_OVERLAYS = ['configs', 'scripts']
......@@ -224,7 +225,7 @@ parser.add_argument(
default=[], help='only variants with dimension key(s)')
parser.add_argument(
'--revise', action='store_true',
help='remove existing local/imported image, or bump revision and rebuild'
help='remove existing local/imported image, or bump revision and rebuild '
'if published')
parser.add_argument(
'--use-broker', action='store_true',
......@@ -252,7 +253,9 @@ console.setFormatter(logfmt)
log.addHandler(console)
log.debug(args)
# TODO: rollback requires --revise
if args.step == 'rollback':
log.warning('"rollback" step enables --revise option')
args.revise = True
# set up credential provider, if we're going to use it
if args.use_broker:
......
......@@ -38,7 +38,7 @@ from image_configs import ImageConfigManager
### Constants & Variables
ACTIONS = ['import', 'publish']
ACTIONS = ['build', 'upload', 'import', 'publish']
LOGFORMAT = '%(name)s - %(levelname)s - %(message)s'
......@@ -78,13 +78,26 @@ yaml.explicit_start = True
for image_key in args.image_keys:
image_config = configs.get(image_key)
if args.action == 'import':
clouds.convert_image(image_config)
if args.action == 'build':
image_config.convert_image()
elif args.action == 'upload':
image_config.upload_image()
elif args.action == 'import':
clouds.import_image(image_config)
#clouds.upload_image(image_config)
elif args.action == 'publish':
# TODO: we probably need to do this for all the metadata writing too
os.makedirs(image_config.local_dir, exist_ok=True)
# TODO: make artifacts part of metadata?
artifacts = clouds.publish_image(image_config)
yaml.dump(artifacts, image_config.artifacts_yaml)
#clouds.release_image(image_config) # sha256, sign, metadata, put in place for downloading
# clouds.publish_image(image_config) <-- publish sets artifacts property
elif args.action == 'release':
pass
# TODO: image_config.release_image() - configurable steps to take on remote host
# save per-image metadata, maybe upload it too
image_config.save_metadata(upload=(False if args.action =='build' else True))
\ No newline at end of file
# vim: ts=4 et:
# TODO: we may as well stub this out for everyone
from . import aws, nocloud # , oci, gcp, azure
ADAPTERS = {}
......@@ -25,6 +26,7 @@ def set_credential_provider(debug=False):
### forward to the correct adapter
# TODO: this needs a better name
def latest_build_image(config):
return ADAPTERS[config.cloud].latest_build_image(
config.project,
......@@ -40,12 +42,8 @@ def import_image(config):
return ADAPTERS[config.cloud].import_image(config)
def remove_image(config, image_id):
return ADAPTERS[config.cloud].remove_image(image_id)
def upload_image(config):
return ADAPTERS[config.cloud].upload_image(config)
def delete_image(config, image_id):
return ADAPTERS[config.cloud].delete_image(image_id)
def publish_image(config):
......
......@@ -7,7 +7,7 @@ import os
import time
from datetime import datetime
from subprocess import Popen, PIPE, run
from subprocess import run
from .interfaces.adapter import CloudAdapterInterface
from image_configs import Tags, DictObj
......@@ -95,6 +95,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
tags = Tags(from_list=i.tags)
return DictObj({k: tags.get(k, None) for k in self.IMAGE_INFO})
# TODO: this needs a better name
# get the latest imported image for a given build name
def latest_build_image(self, project, image_key):
images = self._get_images_with_tags(
......@@ -225,10 +226,12 @@ class AWSCloudAdapter(CloudAdapterInterface):
snapshot.delete()
raise
# TODO: set ic.<tag> attributes from tags
# TODO: this isn't ever really used?
return self._image_info(image)
# remove an (unpublished) image
def remove_image(self, image_id):
def delete_image(self, image_id):
log = logging.getLogger('build')
ec2r = self.session().resource('ec2')
image = ec2r.Image(image_id)
......@@ -384,6 +387,7 @@ class AWSCloudAdapter(CloudAdapterInterface):
time.sleep(copy_wait)
copy_wait = 30
# TODO? ic.artifacts = artifacts
return artifacts
......
# vim: ts=4 et:
import logging
from subprocess import Popen, PIPE
class CloudAdapterInterface:
CONVERT_CMD = {
'qcow2': ['ln', '-f'],
'vhd': ['qemu-img', 'convert', '-f', 'qcow2', '-O', 'vpc', '-o', 'force_size=on'],
}
def __init__(self, cloud, cred_provider=None):
self._sdk = None
......@@ -40,44 +32,23 @@ class CloudAdapterInterface:
def client(self, client, region=None):
raise NotImplementedError
# TODO: this needs a new name
# get information about the latest released image
def latest_build_image(self, project, image_key):
raise NotImplementedError
# convert local QCOW2 to format appropriate for a cloud
def convert_image(self, ic):
    """Convert the locally-built QCOW2 image to the cloud's required format.

    The command is looked up in CONVERT_CMD by ic.image_format; for 'qcow2'
    the "conversion" is just a hard link, otherwise qemu-img is invoked.
    Raises RuntimeError if the conversion command exits non-zero.
    """
    log = logging.getLogger('import')
    local_path = ic.local_path      # source: the built QCOW2 image
    image_path = ic.image_path      # destination: cloud-format image file
    # BUG FIX: previously logged the destination path twice
    log.info('Converting %s to %s', local_path, image_path)
    # BUG FIX: stderr must be piped, otherwise communicate() returns None
    # for err and the STDERR log line below is useless
    p = Popen(
        self.CONVERT_CMD[ic.image_format] + [local_path, image_path],
        stdout=PIPE, stderr=PIPE, stdin=PIPE, encoding='utf8')
    out, err = p.communicate()
    if p.returncode:
        log.error('Unable to convert %s to %s format (%s)', local_path, image_path, p.returncode)
        log.error('EXIT: %d', p.returncode)
        log.error('STDOUT:\n%s', out)
        log.error('STDERR:\n%s', err)
        raise RuntimeError
# TODO: The following things don't need to be implemented (see NoCloud) unless we
# actually do import/publish images for those cloud providers (like we do for AWS).
# In the meantime, these stubs should be functionally NOOPs
# import local image to cloud provider
def import_image(self, config):
raise NotImplementedError
# remove unpublished image from cloud provider
def remove_image(self, config, image_id):
raise NotImplementedError
# upload cloud image for testing, if upload_path
def upload_image(self, config):
# delete/deregister unpublished image from cloud provider
def delete_image(self, config, image_id): # TODO: might we have image id in config?
raise NotImplementedError
# TODO: implement here
# publish image to cloud provider regions
def publish_image(self, config):
raise NotImplementedError
# generate image checksum, save metadata, sign image, make downloadable, if download_path
def release_image(self, config):
raise NotImplementedError
# TODO: implement here!
\ No newline at end of file
raise NotImplementedError
\ No newline at end of file
......@@ -22,7 +22,7 @@ class NoCloudAdapter(CloudAdapterInterface):
# get the latest imported image for a given build name
def latest_build_image(self, project, image_key):
    # TODO: get info from latest download_path published image (if exists)
    # for now there is no stored state to consult, so report no prior image
    return None
# import an image
......@@ -34,14 +34,13 @@ class NoCloudAdapter(CloudAdapterInterface):
# 'import_id': '?',
})
# remove an (unpublished) image
def remove_image(self, image_id):
# TODO: remove image from temporary location
# there's no cloud provider to delete/deregister the image
def delete_image(self, image_id):
pass
# publish an image
def publish_image(self, ic):
    # TODO: what exactly should be returned?  nocloud isn't launchable in a
    # cloud provider, so presumably there is no region->image-id map to
    # report -- confirm what downstream consumers of artifacts expect here
    return {
        'generic?': 'url?'
    }
......
......@@ -22,6 +22,7 @@ Default {
"information about administrating Alpine systems.\n"\
"See <https://wiki.alpinelinux.org/>."
# TODO: fix how we find/build these links
version_notes = "Release Notes:\n"\
"* <https://alpinelinux.org/posts/Alpine-{version}.0/released.html>"
release_notes = "* <https://alpinelinux.org/posts/Alpine-{release}/released.html>"
......@@ -34,14 +35,15 @@ Default {
size = 1G
login = alpine
local_format = qcow2
image_format = qcow2
# these paths are subject to change, as image downloads are developed
upload_path = "ssh://dev.alpinelinux.org/~tomalok/public_html/alpine-cloud-images"
download_path = "https://dl-cdn.alpinelinux.org/alpine"
#download_path = "https://dev.alpinelinux.org/~tomalok/alpine-cloud-images" # development
remote_path = "{v_version}/cloud/{cloud}"
storage_url = "ssh://tomalok@dev.alpinelinux.org/public_html/alpine-cloud-images/{v_version}/cloud/{cloud}"
# TODO: replace upload_path with storage_url
upload_path = "ssh://tomalok@dev.alpinelinux.org/public_html/alpine-cloud-images/{v_version}/cloud/{cloud}"
# TODO: download_url instead
#download_path = "https://dl-cdn.alpinelinux.org/alpine/{v_version}/cloud/{cloud}"
download_path = "https://dev.alpinelinux.org/~tomalok/alpine-cloud-images/{v_version}/cloud/{cloud}" # development
# image access
access.PUBLIC = true
......@@ -74,6 +76,7 @@ Dimensions {
cloud {
aws { include required("cloud/aws.conf") }
nocloud { include required("cloud/nocloud.conf") }
# TODO: add azure, gcp, oci
}
}
......
#!/usr/bin/env python3
# vim: ts=4 et:
# TODO: perhaps integrate into "./build release"
# Ensure we're using the Python virtual env with our installed dependencies
import os
import sys
......
#!/usr/bin/env python3
# vim: ts=4 et:
# Ensure we're using the Python virtual env with our installed dependencies
import os
import sys
import textwrap
NOTE = textwrap.dedent("""
NOTE: This is an old script, replaced by 'gen_mksite_releases.py' after
https://gitlab.alpinelinux.org/alpine/infra/alpine-mksite/-/merge_requests/52
is merged.
This script's output is compatible with the retired alpine-ec2-ami repo's
releases/alpine.yaml, in order to bridge the gap until
https://alpinelinux.org/cloud dynamically calls a published-image metadata
service. This script should only be run after the main 'build' script has
been used successfully to publish ALL images, and the STDOUT should be
committed to the https://gitlab.alpinelinux.org/alpine/infra/alpine-mksite
repo as 'cloud/releases-in.yaml'.
""")
sys.pycache_prefix = 'work/__pycache__'
if not os.path.exists('work'):
print('FATAL: Work directory does not exist.', file=sys.stderr)
print(NOTE, file=sys.stderr)
exit(1)
# Re-execute using the right virtual environment, if necessary.
venv_args = [os.path.join('work', 'bin', 'python3')] + sys.argv
if os.path.join(os.getcwd(), venv_args[0]) != sys.executable:
print("Re-executing with work environment's Python...\n", file=sys.stderr)
os.execv(venv_args[0], venv_args)
# We're now in the right Python environment
import argparse
import logging
from collections import defaultdict
from ruamel.yaml import YAML
import clouds
from image_configs import ImageConfigManager
### Constants & Variables
LOGFORMAT = '%(name)s - %(levelname)s - %(message)s'
### Functions
# allows us to set values deep within an object that might not be fully defined
def dictfactory():
    """Return a defaultdict whose missing keys autovivify as nested dictfactories.

    Allows setting values deep within an object that might not be fully
    defined yet, e.g. d['a']['b']['c'] = 1 without intermediate setup.
    """
    return defaultdict(dictfactory)
# undo dictfactory() objects to normal objects
def undictfactory(o):
    """Recursively convert dictfactory() defaultdicts back into plain dicts.

    Non-defaultdict values are returned unchanged.
    """
    if not isinstance(o, defaultdict):
        return o
    return {key: undictfactory(value) for key, value in o.items()}
### Command Line & Logging
parser = argparse.ArgumentParser(description=NOTE)
parser.add_argument(
    '--use-broker', action='store_true',
    help='use the identity broker to get credentials')
parser.add_argument('--debug', action='store_true', help='enable debug output')
args = parser.parse_args()
# log to stderr so STDOUT carries only the generated YAML
log = logging.getLogger('gen_releases')
log.setLevel(logging.DEBUG if args.debug else logging.INFO)
console = logging.StreamHandler(sys.stderr)
console.setFormatter(logging.Formatter(LOGFORMAT))
log.addHandler(console)
log.debug(args)
# set up credential provider, if we're going to use it
if args.use_broker:
    clouds.set_credential_provider()
# load build configs
configs = ImageConfigManager(
    conf_path='work/configs/images.conf',
    yaml_path='work/images.yaml',
    log='gen_releases'
)
# make sure images.yaml is up-to-date with reality
configs.refresh_state('final')
yaml = YAML()
# build a nested release -> image_key -> image_name mapping via dictfactory,
# then flatten it to plain dicts for YAML output
releases = dictfactory()
for i_key, i_cfg in configs.get().items():
    # NOTE(review): only 'tiny' bootstrap variants are emitted -- presumably
    # these are the canonical published images; confirm against configs
    if i_cfg.bootstrap != 'tiny':
        continue
    release = i_cfg.version if i_cfg.version == 'edge' else i_cfg.release
    releases[release][i_key][i_cfg.tags.name] = dict(i_cfg.tags) | {
        'creation_date': i_cfg.published,
        'artifacts': i_cfg.artifacts,
    }
yaml.dump(undictfactory(releases), sys.stdout)
# vim: ts=4 et:
import hashlib
import itertools
import logging
import mergedeep
import os
import pyhocon
import shutil
......@@ -10,6 +12,8 @@ from copy import deepcopy
from datetime import datetime
from pathlib import Path
from ruamel.yaml import YAML
from subprocess import Popen, PIPE
from urllib.parse import urlparse
import clouds
......@@ -48,7 +52,9 @@ class ImageConfigManager():
def _load_yaml(self):
self.log.info('Loading existing %s', self.yaml_path)
for key, config in self.yaml.load(self.yaml_path).items():
self._configs[key] = ImageConfig(key, config)
self._configs[key] = ImageConfig(key, config, log=self.log, yaml=self.yaml)
# TODO: also pull in additional per-image metadata from the build process?
# save resolved configs to YAML
def _save_yaml(self):
......@@ -90,7 +96,9 @@ class ImageConfigManager():
{
'image_key': image_key,
'release': release
} | dim_map
} | dim_map,
log=self.log,
yaml=self.yaml
)
# merge in the Default config
......@@ -177,8 +185,18 @@ class ImageConfigManager():
class ImageConfig():
def __init__(self, config_key, obj={}):
CONVERT_CMD = {
'qcow2': ['ln', '-f'],
'vhd': ['qemu-img', 'convert', '-f', 'qcow2', '-O', 'vpc', '-o', 'force_size=on'],
}
# these tags may-or-may-not exist at various times
OPTIONAL_TAGS = [
'built', 'uploaded', 'imported', 'import_id', 'import_region', 'published', 'released'
]
def __init__(self, config_key, obj={}, log=None, yaml=None):
self._log = log
self._yaml = yaml
self.config_key = str(config_key)
tags = obj.pop('tags', None)
self.__dict__ |= self._deep_dict(obj)
......@@ -186,6 +204,18 @@ class ImageConfig():
if tags:
self.tags = tags
@classmethod
def to_yaml(cls, representer, node):
    """Serialize node as a YAML '!ImageConfig' mapping.

    Attributes whose names start with '_' are runtime-only and are
    omitted from the serialized form.
    """
    public = {
        name: getattr(node, name)
        for name in node.__dict__
        if not name.startswith('_')
    }
    return representer.represent_mapping('!ImageConfig', public)
@property
def v_version(self):
return 'edge' if self.version == 'edge' else 'v' + self.version
......@@ -196,12 +226,9 @@ class ImageConfig():
@property
def local_path(self):
return self.local_dir / ('image.' + self.local_format)
@property
def published_yaml(self):
return self.local_dir / 'published.yaml'
return self.local_dir / 'image.qcow2'
# TODO? make this metadata_yaml instead, if it contains tags & artifacts?
@property
def artifacts_yaml(self):
return self.local_dir / 'artifacts.yaml'
......@@ -222,13 +249,41 @@ class ImageConfig():
def image_path(self):
return self.local_dir / self.image_file
@property
def image_metadata_file(self):
return '.'.join([self.image_name, 'yaml'])
@property
def image_metadata_path(self):
return self.local_dir / self.image_metadata_file
# TODO: flesh this out to replace upload_url
@property
def storage(self):
if not self._storage:
s = DictObj({})
if self.storage_url:
s.url = urlparse(self.upload_url)
else:
# ...
pass
return self._storage
@property
def upload_url(self):
return '/'.join([self.upload_path, self.remote_path, self.image_file]).format(v_version=self.v_version, **self.__dict__)
if not self.upload_path:
return None
return '/'.join([self.upload_path, self.image_file]).format(v_version=self.v_version, **self.__dict__)
@property
def download_url(self):
return '/'.join([self.download_path, self.remote_path, self.image_file]).format(v_version=self.v_version, **self.__dict__)
if not self.download_path:
return None
return '/'.join([self.download_path, self.image_file]).format(v_version=self.v_version, **self.__dict__)
# TODO? region_url instead?
def region_url(self, region, image_id):
......@@ -255,9 +310,10 @@ class ImageConfig():
'version': self.version
}
# stuff that might not be there yet
for k in ['imported', 'import_id', 'import_region', 'published']:
for k in self.OPTIONAL_TAGS:
if self.__dict__.get(k, None):
t[k] = self.__dict__[k]
return Tags(t)
# recursively convert a ConfigTree object to a dict object
......@@ -396,17 +452,23 @@ class ImageConfig():
)))
def refresh_state(self, step, revise=False):
log = logging.getLogger('build')
log = self._log
actions = {}
revision = 0
# TODO: local metadata -> stored metadata -> renamed clouds.latest_build_image
remote_image = clouds.latest_build_image(self)
log.debug('\n%s', remote_image)
step_state = step == 'state'
# TODO: this needs to be sorted out for upload and release targets
# enable actions based on the specified step
if step in ['local', 'import', 'publish', 'state']:
if step in ['local', 'upload', 'import', 'publish', 'state']:
actions['build'] = True
if self.upload_path and step in ['upload', 'import', 'publish', 'state']:
actions['upload'] = True
if step in ['import', 'publish', 'state']:
actions['import'] = True
......@@ -423,6 +485,8 @@ class ImageConfig():