[systemd-devel] [PATCH 5/5] import/pull-dkr: V2 Image specification + manifest support
Reverend Homer
mk.43.ecko at gmail.com
Thu May 7 08:58:32 PDT 2015
07.05.2015 18:47, Pavel Odvody wrote:
> Signed-off-by: Pavel Odvody <podvody at redhat.com>
> ---
> src/import/pull-dkr.c | 531 ++++++++++++++++++++++++++++++++++++++++++++------
> src/import/pull-dkr.h | 48 ++++-
> 2 files changed, 518 insertions(+), 61 deletions(-)
>
> diff --git a/src/import/pull-dkr.c b/src/import/pull-dkr.c
> index 0eefec5..408b795 100644
> --- a/src/import/pull-dkr.c
> +++ b/src/import/pull-dkr.c
> @@ -61,9 +61,10 @@ struct DkrPull {
> PullJob *layer_job;
>
> char *name;
> - char *tag;
> + char *reference;
> char *id;
>
> + char *response_digest;
> char *response_token;
> char **response_registries;
>
> @@ -87,7 +88,11 @@ struct DkrPull {
> #define PROTOCOL_PREFIX "https://"
>
> #define HEADER_TOKEN "X-Do" /* the HTTP header for the auth token */ "cker-Token:"
> -#define HEADER_REGISTRY "X-Do" /*the HTTP header for the registry */ "cker-Endpoints:"
> +#define HEADER_REGISTRY "X-Do" /* the HTTP header for the registry */ "cker-Endpoints:"
> +#define HEADER_DIGEST "Do" /* the HTTP header for the manifest digest */ "cker-Content-Digest:"
> +#define HEADER_USER_AGENT_V2 "User-Agent: do" /* otherwise we get load-balanced(!) to a V1 registry */ "cker/1.6.0"
> +#define HEADER_BEARER_REALM "https://auth.doc" /* URL which we query for a bearer token */ "ker.io/token"
> +#define HEADER_BEARER_SERVICE "registry.doc" /* the service we query the token for */ "ker.io"
>
> #define LAYERS_MAX 2048
>
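A note for anyone puzzled by the split literals above: C concatenates
adjacent string literals after comments are stripped, so each of these
macros still expands to a single header string. A minimal, self-contained
illustration (the macro value is taken from the patch):

    #include <assert.h>
    #include <string.h>

    #define HEADER_DIGEST "Do" /* dropped by the preprocessor */ "cker-Content-Digest:"

    int main(void) {
            /* adjacent literals fuse into one string */
            assert(strcmp(HEADER_DIGEST, "Docker-Content-Digest:") == 0);
            return 0;
    }
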
> @@ -117,7 +122,7 @@ DkrPull* dkr_pull_unref(DkrPull *i) {
> }
>
> free(i->name);
> - free(i->tag);
> + free(i->reference);
> free(i->id);
> free(i->response_token);
> free(i->response_registries);
> @@ -416,10 +421,25 @@ static int dkr_pull_add_token(DkrPull *i, PullJob *j) {
> return 0;
> }
>
> +static int dkr_pull_add_bearer_token(DkrPull *i, PullJob *j) {
> + const char *t = NULL;
> +
> + assert(i);
> + assert(j);
> +
> + if (i->response_token)
> + t = strjoina("Authorization: Bearer ", i->response_token);
> +
> + j->request_header = curl_slist_new(HEADER_USER_AGENT_V2, "Accept: application/json", t, NULL);
> + if (!j->request_header)
> + return -ENOMEM;
> +
> + return 0;
> +}
> +
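For anyone reading this without the tree at hand: curl_slist_new() looks
like the in-tree varargs helper; with stock libcurl the same header list
would be built roughly like this (a sketch only -- error handling omitted,
"<token>" is a placeholder):

    #include <curl/curl.h>

    /* rough stock-libcurl equivalent of the list built above */
    static struct curl_slist *v2_headers(void) {
            struct curl_slist *h = NULL;
            h = curl_slist_append(h, "User-Agent: docker/1.6.0");
            h = curl_slist_append(h, "Accept: application/json");
            h = curl_slist_append(h, "Authorization: Bearer <token>");
            return h; /* free with curl_slist_free_all() when done */
    }
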
> static bool dkr_pull_is_done(DkrPull *i) {
> assert(i);
> assert(i->images_job);
> -
> if (i->images_job->state != PULL_JOB_DONE)
> return false;
>
> @@ -429,7 +449,7 @@ static bool dkr_pull_is_done(DkrPull *i) {
> if (!i->ancestry_job || i->ancestry_job->state != PULL_JOB_DONE)
> return false;
>
> - if (!i->json_job || i->json_job->state != PULL_JOB_DONE)
> + if (i->json_job && i->json_job->state != PULL_JOB_DONE)
> return false;
>
> if (i->layer_job && i->layer_job->state != PULL_JOB_DONE)
> @@ -441,8 +461,9 @@ static bool dkr_pull_is_done(DkrPull *i) {
> return true;
> }
>
> -static int dkr_pull_make_local_copy(DkrPull *i) {
> +static int dkr_pull_make_local_copy(DkrPull *i, enum PullStrategy strategy) {
> int r;
> + _cleanup_free_ char *p = NULL;
>
> assert(i);
>
> @@ -455,10 +476,30 @@ static int dkr_pull_make_local_copy(DkrPull *i) {
> return log_oom();
> }
>
> - r = pull_make_local_copy(i->final_path, i->image_root, i->local, i->force_local);
> + if (strategy == PULL_V2) {
> + r = path_get_parent(i->image_root, &p);
> + if (r < 0)
> + return r;
> + }
> +
> + r = pull_make_local_copy(i->final_path, p ?: i->image_root, i->local, i->force_local);
> if (r < 0)
> return r;
>
> + if (strategy == PULL_V2) {
> + char **k = NULL;
> + STRV_FOREACH(k, i->ancestry) {
> + char *d = strjoina(i->image_root, "/.dkr-", *k, NULL);
> + r = btrfs_subvol_remove(d, false);
> + if (r < 0)
> + return r;
> + }
> +
> + r = rmdir(i->image_root);
> + if (r < 0)
> + return r;
> + }
> +
> return 0;
> }
>
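If I follow the new PULL_V2 branch correctly: by the time we get here,
i->image_root has already been repointed at the per-image staging
subvolume (e.g. /var/lib/machines/.dkr-<image-id> -- path hypothetical),
so path_get_parent() recovers the real image directory for the local
copy, and afterwards the intermediate .dkr-<layer> subvolumes plus the
staging directory itself are removed.
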
> @@ -516,6 +557,68 @@ static void dkr_pull_job_on_progress(PullJob *j) {
> DKR_DOWNLOADING);
> }
>
> +static void dkr_pull_job_on_finished_v2(PullJob *j);
> +
> +static int dkr_pull_pull_layer_v2(DkrPull *i) {
> + _cleanup_free_ char *path = NULL;
> + const char *url, *layer = NULL;
> + int r;
> +
> + assert(i);
> + assert(!i->layer_job);
> + assert(!i->temp_path);
> + assert(!i->final_path);
> +
> + for (;;) {
> + layer = dkr_pull_current_layer(i);
> + if (!layer)
> + return 0; /* no more layers */
> +
> + path = strjoin(i->image_root, "/.dkr-", layer, NULL);
> + if (!path)
> + return log_oom();
> +
> + if (laccess(path, F_OK) < 0) {
> + if (errno == ENOENT)
> + break;
> +
> + return log_error_errno(errno, "Failed to check for container: %m");
> + }
> +
> + log_info("Layer %s already exists, skipping.", layer);
> +
> + i->current_ancestry++;
> +
> + free(path);
> + path = NULL;
> + }
> +
> + log_info("Pulling layer %s...", layer);
> +
> + i->final_path = path;
> + path = NULL;
> +
> + url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v2/", i->name, "/blobs/", layer);
> + r = pull_job_new(&i->layer_job, url, i->glue, i);
> + if (r < 0)
> + return log_error_errno(r, "Failed to allocate layer job: %m");
> +
> + r = dkr_pull_add_bearer_token(i, i->layer_job);
> + if (r < 0)
> + return log_oom();
> +
> + i->layer_job->on_finished = dkr_pull_job_on_finished_v2;
> + i->layer_job->on_open_disk = dkr_pull_job_on_open_disk;
> + i->layer_job->on_progress = dkr_pull_job_on_progress;
> + i->layer_job->grow_machine_directory = i->grow_machine_directory;
> +
> + r = pull_job_begin(i->layer_job);
> + if (r < 0)
> + return log_error_errno(r, "Failed to start layer job: %m");
> +
> + return 0;
> +}
> +
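To make the blob fetch concrete: since the ancestry entries keep the full
"sha256:<hex>" blobSum, the URL built above ends up looking like

    https://<registry>/v2/<name>/blobs/sha256:<digest>

e.g. https://registry-1.docker.io/v2/library/fedora/blobs/sha256:<digest>
(registry host, image name and digest are hypothetical), authenticated
with the Bearer header added by dkr_pull_add_bearer_token().
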
> static int dkr_pull_pull_layer(DkrPull *i) {
> _cleanup_free_ char *path = NULL;
> const char *url, *layer = NULL;
> @@ -576,6 +679,355 @@ static int dkr_pull_pull_layer(DkrPull *i) {
> return 0;
> }
>
> +static int dkr_pull_job_on_header(PullJob *j, const char *header, size_t sz) {
> + _cleanup_free_ char *registry = NULL;
> + char *token, *digest;
> + DkrPull *i;
> + int r;
> +
> + assert(j);
> + assert(j->userdata);
> +
> + i = j->userdata;
> + r = curl_header_strdup(header, sz, HEADER_TOKEN, &token);
> + if (r < 0)
> + return log_oom();
> + if (r > 0) {
> + free(i->response_token);
> + i->response_token = token;
> + return 0;
> + }
> +
> + r = curl_header_strdup(header, sz, HEADER_DIGEST, &digest);
> + if (r < 0)
> + return log_oom();
> + if (r > 0) {
> + free(i->response_digest);
> + i->response_digest = digest;
> + return 0;
> + }
> +
> + r = curl_header_strdup(header, sz, HEADER_REGISTRY, &registry);
> + if (r < 0)
> + return log_oom();
> + if (r > 0) {
> + char **l, **k;
> +
> + l = strv_split(registry, ",");
> + if (!l)
> + return log_oom();
> +
> + STRV_FOREACH(k, l) {
> + if (!hostname_is_valid(*k)) {
> + log_error("Registry hostname is not valid.");
> + strv_free(l);
> + return -EBADMSG;
> + }
> + }
> +
> + strv_free(i->response_registries);
> + i->response_registries = l;
> + }
> +
> + return 0;
> +}
> +
> +static void dkr_pull_job_on_finished_v2(PullJob *j) {
> + DkrPull *i;
> + int r;
> +
> + assert(j);
> + assert(j->userdata);
> +
> + i = j->userdata;
> + if (j->error != 0) {
> + if (j == i->images_job)
> + log_error_errno(j->error, "Failed to retrieve images list. (Wrong index URL?)");
> + else if (j == i->ancestry_job)
> + log_error_errno(j->error, "Failed to retrieve manifest.");
> + else if (j == i->json_job)
> + log_error_errno(j->error, "Failed to retrieve json data.");
> + else
> + log_error_errno(j->error, "Failed to retrieve layer data.");
> +
> + r = j->error;
> + goto finish;
> + }
> +
> + if (i->images_job == j) {
> + const char *url;
> +
> + assert(!i->tags_job);
> + assert(!i->ancestry_job);
> + assert(!i->json_job);
> + assert(!i->layer_job);
> +
> + if (strv_isempty(i->response_registries)) {
> + r = -EBADMSG;
> + log_error("Didn't get registry information.");
> + goto finish;
> + }
> +
> + log_info("Index lookup succeeded, directed to registry %s.", i->response_registries[0]);
> + dkr_pull_report_progress(i, DKR_RESOLVING);
> +
> + url = strjoina(HEADER_BEARER_REALM, "?scope=repository:", i->name, ":pull&service=", HEADER_BEARER_SERVICE);
> + r = pull_job_new(&i->tags_job, url, i->glue, i);
> + if (r < 0) {
> + log_error_errno(r, "Failed to allocate tags job: %m");
> + goto finish;
> + }
> +
> + i->tags_job->on_finished = dkr_pull_job_on_finished_v2;
> + i->tags_job->on_progress = dkr_pull_job_on_progress;
> +
> + r = pull_job_begin(i->tags_job);
> + if (r < 0) {
> + log_error_errno(r, "Failed to start tags job: %m");
> + goto finish;
> + }
> +
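To make the token handshake concrete: with a hypothetical image name
"library/fedora", the tags_job above ends up requesting

    https://auth.docker.io/token?scope=repository:library/fedora:pull&service=registry.docker.io

and the code below expects a small JSON reply of the form
{"token": "<JWT>"}, which it extracts and turns into the
Authorization: Bearer header.
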
> + } else if (i->tags_job == j) {
> + const char *url;
> + _cleanup_free_ const char *buf;
> + _cleanup_jsonunref_ json_variant *doc = NULL;
> + json_variant *e = NULL;
> +
> + assert(!i->ancestry_job);
> + assert(!i->json_job);
> + assert(!i->layer_job);
> +
> + buf = strndup((const char *)j->payload, j->payload_size);
> + if (!buf) {
> + r = -ENOMEM;
> + log_oom();
> + goto finish;
> + }
> +
> + r = json_parse(buf, &doc);
> + if (r < 0) {
> + log_error("Unable to parse bearer token\n%s", j->payload);
> + goto finish;
> + }
> +
> + e = json_variant_value(doc, "token");
> + if (!e || e->type != JSON_VARIANT_STRING) {
> + r = -EBADMSG;
> + log_error("Invalid JSON format for Bearer token");
> + goto finish;
> + }
> +
> + r = free_and_strdup(&i->response_token, json_variant_string(e));
> + if (r < 0) {
> + log_oom();
> + goto finish;
> + }
> +
> + url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v2/", i->name, "/manifests/", i->reference);
> + r = pull_job_new(&i->ancestry_job, url, i->glue, i);
> + if (r < 0) {
> + log_error_errno(r, "Failed to allocate ancestry job: %m");
> + goto finish;
> + }
> +
> + r = dkr_pull_add_bearer_token(i, i->ancestry_job);
> + if (r != 0) {
> + log_oom();
> + goto finish;
> + }
> +
> + i->ancestry_job->on_finished = dkr_pull_job_on_finished_v2;
> + i->ancestry_job->on_progress = dkr_pull_job_on_progress;
> + i->ancestry_job->on_header = dkr_pull_job_on_header;
> +
> + r = pull_job_begin(i->ancestry_job);
> + if (r < 0) {
> + log_error_errno(r, "Failed to start ancestry job: %m");
> + goto finish;
> + }
> +
> + } else if (i->ancestry_job == j) {
> +
> + _cleanup_jsonunref_ json_variant *doc = NULL;
> + _cleanup_jsonunref_ json_variant *compat = NULL;
> + json_variant *e = NULL;
> + _cleanup_strv_free_ char **ancestry = NULL;
> + size_t allocated = 0, size = 0;
> + char *path = NULL, **k = NULL;
> +
> + assert(!i->layer_job);
> +
> + r = json_parse((const char *)j->payload, &doc);
> + if (r < 0) {
> + log_error("Invalid JSON Manifest");
> + goto finish;
> + }
> +
> + e = json_variant_value(doc, "fsLayers");
> + if (!e || e->type != JSON_VARIANT_ARRAY) {
> + r = -EBADMSG;
> + goto finish;
> + }
> +
> + log_info("JSON manifest with schema v%"PRIi64" for %s parsed!",
> + json_variant_integer(json_variant_value(doc, "schemaVersion")),
> + json_variant_string(json_variant_value(doc, "name")));
> +
> + for (unsigned z = 0; z < e->size; z++) {
> + json_variant *f = json_variant_element(e, z), *g = NULL;
> + const char *layer, *hash, *value;
> + if (f->type != JSON_VARIANT_OBJECT) {
> + r = -EBADMSG;
> + goto finish;
> + }
> +
> + g = json_variant_value(f, "blobSum");
> +
> + layer = json_variant_string(g);
> + hash = strchr(layer, ':');
> + if (!hash) {
> + r = -EBADMSG;
> + goto finish;
> + }
> +
> + value = strdupa(hash + 1);
> + hash = strndupa(layer, hash - layer);
> +
> + if (!streq(hash, "sha256") || !in_charset(value, "1234567890abcdef")) {
> + r = -EBADMSG;
> + goto finish;
> +
> + }
> +
> + if (!GREEDY_REALLOC(ancestry, allocated, size + 2)) {
> + r = -ENOMEM;
> + log_oom();
> + goto finish;
> + }
> +
> + ancestry[size] = strdup(layer);
> + if (!ancestry[size]) {
> + r = -ENOMEM;
> + log_oom();
> + goto finish;
> + }
> +
> + ancestry[size+1] = NULL;
> + size += 1;
> + }
> +
> + e = json_variant_value(doc, "history");
> + if (!e || e->type != JSON_VARIANT_ARRAY) {
> + r = -EBADMSG;
> + goto finish;
> + }
> +
> + e = json_variant_element(e, 0);
> + e = json_variant_value(e, "v1Compatibility");
> + r = json_parse(json_variant_string(e), &compat);
> + if (r < 0) {
> + log_error("Invalid v1Compatibility JSON");
> + goto finish;
> + }
> +
> + e = json_variant_value(compat, "id");
> +
> + strv_free(i->ancestry);
> + i->ancestry = strv_reverse(strv_uniq(ancestry));
> + i->n_ancestry = strv_length(i->ancestry);
> + i->current_ancestry = 0;
> + i->id = strdup(i->ancestry[i->n_ancestry - 1]);
> + if (!i->id) {
> + r = -ENOMEM;
> + log_oom();
> + goto finish;
> + }
> + path = strjoin(i->image_root, "/.dkr-", json_variant_string(e), NULL);
> + if (!path) {
> + r = -ENOMEM;
> + log_oom();
> + goto finish;
> + }
> + free(i->image_root);
> + i->image_root = path;
> + ancestry = NULL;
> +
> + log_info("Required layers:\n");
> + STRV_FOREACH(k, i->ancestry)
> + log_info("\t%s", *k);
> + log_info("\nProvenance:\n\tImageID: %s\n\tDigest: %s", json_variant_string(e), i->response_digest);
> +
> + dkr_pull_report_progress(i, DKR_DOWNLOADING);
> +
> + r = dkr_pull_pull_layer_v2(i);
> + if (r < 0)
> + goto finish;
> +
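For readers who haven't seen a v2 schema 1 manifest: the fields this
branch consumes look roughly like this (names, digests and the id are
placeholders):

    {
        "schemaVersion": 1,
        "name": "library/fedora",
        "fsLayers": [
            { "blobSum": "sha256:<digest-of-topmost-layer>" },
            { "blobSum": "sha256:<digest-of-base-layer>" }
        ],
        "history": [
            { "v1Compatibility": "{\"id\": \"<image-id>\", ...}" }
        ]
    }

fsLayers lists the topmost layer first, which is presumably why the list
gets reversed with strv_reverse() before the layers are applied.
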
> + } else if (i->layer_job == j) {
> + assert(i->temp_path);
> + assert(i->final_path);
> +
> + j->disk_fd = safe_close(j->disk_fd);
> +
> + if (i->tar_pid > 0) {
> + r = wait_for_terminate_and_warn("tar", i->tar_pid, true);
> + i->tar_pid = 0;
> + if (r < 0)
> + goto finish;
> + }
> +
> + r = aufs_resolve(i->temp_path);
> + if (r < 0) {
> + log_error_errno(r, "Failed to resolve aufs whiteouts: %m");
> + goto finish;
> + }
> +
> + r = btrfs_subvol_set_read_only(i->temp_path, true);
> + if (r < 0) {
> + log_error_errno(r, "Failed to mark snapshot read-only: %m");
> + goto finish;
> + }
> +
> + if (rename(i->temp_path, i->final_path) < 0) {
> + log_error_errno(errno, "Failed to rename snapshot: %m");
> + goto finish;
> + }
> +
> + log_info("Completed writing to layer %s.", i->final_path);
> +
> + i->layer_job = pull_job_unref(i->layer_job);
> + free(i->temp_path);
> + i->temp_path = NULL;
> + free(i->final_path);
> + i->final_path = NULL;
> +
> + i->current_ancestry ++;
> + r = dkr_pull_pull_layer_v2(i);
> + if (r < 0)
> + goto finish;
> +
> + } else if (i->json_job != j)
> + assert_not_reached("Got finished event for unknown curl object");
> +
> + if (!dkr_pull_is_done(i))
> + return;
> +
> + dkr_pull_report_progress(i, DKR_COPYING);
> +
> + r = dkr_pull_make_local_copy(i, PULL_V2);
> + if (r < 0)
> + goto finish;
> +
> + r = 0;
> +
> +finish:
> + if (i->on_finished)
> + i->on_finished(i, r, i->userdata);
> + else
> + sd_event_exit(i->event, r);
> +
> +}
> +
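So, if I read the new state machine right, a V2 pull chains four jobs:
images_job (index lookup, X-Docker-Endpoints) -> tags_job (bearer-token
request against the auth realm) -> ancestry_job (manifest fetch, plus the
Docker-Content-Digest header) -> one layer_job per missing fsLayers
entry, and finally dkr_pull_make_local_copy(i, PULL_V2) assembles the
image.
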
> static void dkr_pull_job_on_finished(PullJob *j) {
> DkrPull *i;
> int r;
> @@ -617,7 +1069,7 @@ static void dkr_pull_job_on_finished(PullJob *j) {
> log_info("Index lookup succeeded, directed to registry %s.", i->response_registries[0]);
> dkr_pull_report_progress(i, DKR_RESOLVING);
>
> - url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/repositories/", i->name, "/tags/", i->tag);
> + url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/repositories/", i->name, "/tags/", i->reference);
> r = pull_job_new(&i->tags_job, url, i->glue, i);
> if (r < 0) {
> log_error_errno(r, "Failed to allocate tags job: %m");
> @@ -789,7 +1241,7 @@ static void dkr_pull_job_on_finished(PullJob *j) {
>
> dkr_pull_report_progress(i, DKR_COPYING);
>
> - r = dkr_pull_make_local_copy(i);
> + r = dkr_pull_make_local_copy(i, PULL_V1);
> if (r < 0)
> goto finish;
>
> @@ -802,52 +1254,7 @@ finish:
> sd_event_exit(i->event, r);
> }
>
> -static int dkr_pull_job_on_header(PullJob *j, const char *header, size_t sz) {
> - _cleanup_free_ char *registry = NULL;
> - char *token;
> - DkrPull *i;
> - int r;
> -
> - assert(j);
> - assert(j->userdata);
> -
> - i = j->userdata;
> -
> - r = curl_header_strdup(header, sz, HEADER_TOKEN, &token);
> - if (r < 0)
> - return log_oom();
> - if (r > 0) {
> - free(i->response_token);
> - i->response_token = token;
> - return 0;
> - }
> -
> - r = curl_header_strdup(header, sz, HEADER_REGISTRY, &registry);
> - if (r < 0)
> - return log_oom();
> - if (r > 0) {
> - char **l, **k;
> -
> - l = strv_split(registry, ",");
> - if (!l)
> - return log_oom();
> -
> - STRV_FOREACH(k, l) {
> - if (!hostname_is_valid(*k)) {
> - log_error("Registry hostname is not valid.");
> - strv_free(l);
> - return -EBADMSG;
> - }
> - }
> -
> - strv_free(i->response_registries);
> - i->response_registries = l;
> - }
> -
> - return 0;
> -}
> -
> -int dkr_pull_start(DkrPull *i, const char *name, const char *tag, const char *local, bool force_local) {
> +int dkr_pull_start(DkrPull *i, const char *name, const char *reference, const char *local, bool force_local, enum PullStrategy strategy) {
> const char *url;
> int r;
>
> @@ -856,7 +1263,7 @@ int dkr_pull_start(DkrPull *i, const char *name, const char *tag, const char *lo
> if (!dkr_name_is_valid(name))
> return -EINVAL;
>
> - if (tag && !dkr_tag_is_valid(tag))
> + if (reference && !dkr_ref_is_valid(reference))
> return -EINVAL;
>
> if (local && !machine_name_is_valid(local))
> @@ -865,8 +1272,8 @@ int dkr_pull_start(DkrPull *i, const char *name, const char *tag, const char *lo
> if (i->images_job)
> return -EBUSY;
>
> - if (!tag)
> - tag = "latest";
> + if (!reference)
> + reference = "latest";
>
> r = free_and_strdup(&i->local, local);
> if (r < 0)
> @@ -876,7 +1283,7 @@ int dkr_pull_start(DkrPull *i, const char *name, const char *tag, const char *lo
> r = free_and_strdup(&i->name, name);
> if (r < 0)
> return r;
> - r = free_and_strdup(&i->tag, tag);
> + r = free_and_strdup(&i->reference, reference);
> if (r < 0)
> return r;
>
> @@ -890,7 +1297,11 @@ int dkr_pull_start(DkrPull *i, const char *name, const char *tag, const char *lo
> if (r < 0)
> return r;
>
> - i->images_job->on_finished = dkr_pull_job_on_finished;
> + if (strategy == PULL_V1)
> + i->images_job->on_finished = dkr_pull_job_on_finished;
> + else
> + i->images_job->on_finished = dkr_pull_job_on_finished_v2;
> +
> i->images_job->on_header = dkr_pull_job_on_header;
> i->images_job->on_progress = dkr_pull_job_on_progress;
>
> diff --git a/src/import/pull-dkr.h b/src/import/pull-dkr.h
> index 4c4b10c..eb44051 100644
> --- a/src/import/pull-dkr.h
> +++ b/src/import/pull-dkr.h
> @@ -24,6 +24,52 @@
> #include "sd-event.h"
> #include "util.h"
>
> +enum PullStrategy { PULL_V1, PULL_V2 };
> +
> +typedef struct DkrSignature {
> + char *curve;
> +
> + char *key_id;
> + char *key_type;
> +
> + char *x;
> + char *y;
> +
> + char *algorithm;
> +
> + char *signature;
> + char *protected;
> +} DkrSignature;
> +
> +//typedef struct DkrHistory {
> +// char *image_id;
> +// char *parent_id;
> +//} DkrHistory;
Are you sure this commented-out block needs to stay in the patch?
> +
> +typedef struct DkrManifest {
> + char *name;
> + char *tag;
> + char *architecture;
> +
> + unsigned schema_version;
> +
> + char **fs_layers;
> +
> + //DkrHistory *history;
> +
> + DkrSignature *signature;
> +} DkrManifest;
> +
> +int dkr_signature_new(DkrSignature **signature, const char* raw_bytes);
> +DkrSignature* dkr_signature_unref(DkrSignature *);
> +
> +DEFINE_TRIVIAL_CLEANUP_FUNC(DkrSignature*, dkr_signature_unref);
> +
> +int dkr_manifest_new(DkrManifest **manifest, const char* raw_bytes);
> +DkrManifest* dkr_manifest_unref(DkrManifest *);
> +
> +DEFINE_TRIVIAL_CLEANUP_FUNC(DkrManifest*, dkr_manifest_unref);
> +
> typedef struct DkrPull DkrPull;
>
> typedef void (*DkrPullFinished)(DkrPull *pull, int error, void *userdata);
> @@ -33,4 +79,4 @@ DkrPull* dkr_pull_unref(DkrPull *pull);
>
> DEFINE_TRIVIAL_CLEANUP_FUNC(DkrPull*, dkr_pull_unref);
>
> -int dkr_pull_start(DkrPull *pull, const char *name, const char *tag, const char *local, bool force_local);
> +int dkr_pull_start(DkrPull *pull, const char *name, const char *tag, const char *local, bool force_local, enum PullStrategy strategy);
>
>
--
Regards,
R.H.