[systemd-commits] 5 commits - src/import src/shared src/test
Lennart Poettering
lennart at kemper.freedesktop.org
Tue May 19 09:24:02 PDT 2015
src/import/pull-dkr.c | 542 ++++++++++++++++++++++++++++++++++++++++++-----
src/import/pull-dkr.h | 3
src/import/pull.c | 28 +-
src/shared/import-util.c | 21 +
src/shared/import-util.h | 2
src/shared/json.c | 432 ++++++++++++++++++++++++++++++++++++-
src/shared/json.h | 37 +++
src/test/test-json.c | 97 ++++++++
8 files changed, 1087 insertions(+), 75 deletions(-)
New commits:
commit 2c4fb0eab8fd05d6375fa3b78b5e2ee5eb93bf09
Author: Pavel Odvody <podvody at redhat.com>
Date: Tue May 19 16:30:33 2015 +0200
import/pull-dkr: V2 image specification and manifest support
The maximum number of layers changed to 127, as in Dkr.
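For orientation, the v2 code path added below talks to three registry endpoints: the
token service, the manifest endpoint and the blob endpoint. A minimal sketch of the
URL shapes the patch assembles with strjoina(); the host names and image name here
are example values, not taken from the patch:

    /* Illustrative only: prints the v2 endpoint URL shapes used below.
     * "docker.io" stands in for the address derived from the index URL and
     * "registry-1.docker.io" for the registry host returned by the index. */
    #include <stdio.h>

    int main(void) {
            const char *index = "docker.io";
            const char *registry = "registry-1.docker.io";
            const char *name = "library/fedora";
            const char *reference = "latest";                 /* tag or sha256 digest */
            const char *layer = "sha256:<64 hex characters>"; /* blobSum from the manifest */

            printf("token:    https://auth.%s/v2/token/?scope=repository:%s:pull&service=registry.%s\n",
                   index, name, index);
            printf("manifest: https://%s/v2/%s/manifests/%s\n", registry, name, reference);
            printf("blob:     https://%s/v2/%s/blobs/%s\n", registry, name, layer);
            return 0;
    }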
diff --git a/src/import/pull-dkr.c b/src/import/pull-dkr.c
index f89eb88..38c8db9 100644
--- a/src/import/pull-dkr.c
+++ b/src/import/pull-dkr.c
@@ -52,6 +52,9 @@ struct DkrPull {
sd_event *event;
CurlGlue *glue;
+ char *index_protocol;
+ char *index_address;
+
char *index_url;
char *image_root;
@@ -62,9 +65,10 @@ struct DkrPull {
PullJob *layer_job;
char *name;
- char *tag;
+ char *reference;
char *id;
+ char *response_digest;
char *response_token;
char **response_registries;
@@ -88,9 +92,9 @@ struct DkrPull {
#define PROTOCOL_PREFIX "https://"
#define HEADER_TOKEN "X-Do" /* the HTTP header for the auth token */ "cker-Token:"
-#define HEADER_REGISTRY "X-Do" /*the HTTP header for the registry */ "cker-Endpoints:"
-
-#define LAYERS_MAX 2048
+#define HEADER_REGISTRY "X-Do" /* the HTTP header for the registry */ "cker-Endpoints:"
+#define HEADER_DIGEST "Do" /* the HTTP header for the manifest digest */ "cker-Content-Digest:"
+#define LAYERS_MAX 127
static void dkr_pull_job_on_finished(PullJob *j);
@@ -118,12 +122,13 @@ DkrPull* dkr_pull_unref(DkrPull *i) {
}
free(i->name);
- free(i->tag);
+ free(i->reference);
free(i->id);
free(i->response_token);
- free(i->response_registries);
strv_free(i->ancestry);
free(i->final_path);
+ free(i->index_address);
+ free(i->index_protocol);
free(i->index_url);
free(i->image_root);
free(i->local);
@@ -417,10 +422,27 @@ static int dkr_pull_add_token(DkrPull *i, PullJob *j) {
return 0;
}
+static int dkr_pull_add_bearer_token(DkrPull *i, PullJob *j) {
+ const char *t = NULL;
+
+ assert(i);
+ assert(j);
+
+ if (i->response_token)
+ t = strjoina("Authorization: Bearer ", i->response_token);
+ else
+ return -EINVAL;
+
+ j->request_header = curl_slist_new("Accept: application/json", t, NULL);
+ if (!j->request_header)
+ return -ENOMEM;
+
+ return 0;
+}
+
static bool dkr_pull_is_done(DkrPull *i) {
assert(i);
assert(i->images_job);
-
if (i->images_job->state != PULL_JOB_DONE)
return false;
@@ -430,7 +452,7 @@ static bool dkr_pull_is_done(DkrPull *i) {
if (!i->ancestry_job || i->ancestry_job->state != PULL_JOB_DONE)
return false;
- if (!i->json_job || i->json_job->state != PULL_JOB_DONE)
+ if (i->json_job && i->json_job->state != PULL_JOB_DONE)
return false;
if (i->layer_job && i->layer_job->state != PULL_JOB_DONE)
@@ -442,8 +464,9 @@ static bool dkr_pull_is_done(DkrPull *i) {
return true;
}
-static int dkr_pull_make_local_copy(DkrPull *i) {
+static int dkr_pull_make_local_copy(DkrPull *i, DkrPullVersion version) {
int r;
+ _cleanup_free_ char *p = NULL;
assert(i);
@@ -456,10 +479,30 @@ static int dkr_pull_make_local_copy(DkrPull *i) {
return log_oom();
}
- r = pull_make_local_copy(i->final_path, i->image_root, i->local, i->force_local);
+ if (version == DKR_PULL_V2) {
+ r = path_get_parent(i->image_root, &p);
+ if (r < 0)
+ return r;
+ }
+
+ r = pull_make_local_copy(i->final_path, p ?: i->image_root, i->local, i->force_local);
if (r < 0)
return r;
+ if (version == DKR_PULL_V2) {
+ char **k = NULL;
+ STRV_FOREACH(k, i->ancestry) {
+ _cleanup_free_ char *d = strjoin(i->image_root, "/.dkr-", *k, NULL);
+ r = btrfs_subvol_remove(d, false);
+ if (r < 0)
+ return r;
+ }
+
+ r = rmdir(i->image_root);
+ if (r < 0)
+ return r;
+ }
+
return 0;
}
@@ -517,6 +560,68 @@ static void dkr_pull_job_on_progress(PullJob *j) {
DKR_DOWNLOADING);
}
+static void dkr_pull_job_on_finished_v2(PullJob *j);
+
+static int dkr_pull_pull_layer_v2(DkrPull *i) {
+ _cleanup_free_ char *path = NULL;
+ const char *url, *layer = NULL;
+ int r;
+
+ assert(i);
+ assert(!i->layer_job);
+ assert(!i->temp_path);
+ assert(!i->final_path);
+
+ for (;;) {
+ layer = dkr_pull_current_layer(i);
+ if (!layer)
+ return 0; /* no more layers */
+
+ path = strjoin(i->image_root, "/.dkr-", layer, NULL);
+ if (!path)
+ return log_oom();
+
+ if (laccess(path, F_OK) < 0) {
+ if (errno == ENOENT)
+ break;
+
+ return log_error_errno(errno, "Failed to check for container: %m");
+ }
+
+ log_info("Layer %s already exists, skipping.", layer);
+
+ i->current_ancestry++;
+
+ free(path);
+ path = NULL;
+ }
+
+ log_info("Pulling layer %s...", layer);
+
+ i->final_path = path;
+ path = NULL;
+
+ url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v2/", i->name, "/blobs/", layer);
+ r = pull_job_new(&i->layer_job, url, i->glue, i);
+ if (r < 0)
+ return log_error_errno(r, "Failed to allocate layer job: %m");
+
+ r = dkr_pull_add_bearer_token(i, i->layer_job);
+ if (r < 0)
+ return log_oom();
+
+ i->layer_job->on_finished = dkr_pull_job_on_finished_v2;
+ i->layer_job->on_open_disk = dkr_pull_job_on_open_disk;
+ i->layer_job->on_progress = dkr_pull_job_on_progress;
+ i->layer_job->grow_machine_directory = i->grow_machine_directory;
+
+ r = pull_job_begin(i->layer_job);
+ if (r < 0)
+ return log_error_errno(r, "Failed to start layer job: %m");
+
+ return 0;
+}
+
static int dkr_pull_pull_layer(DkrPull *i) {
_cleanup_free_ char *path = NULL;
const char *url, *layer = NULL;
@@ -577,6 +682,343 @@ static int dkr_pull_pull_layer(DkrPull *i) {
return 0;
}
+static int dkr_pull_job_on_header(PullJob *j, const char *header, size_t sz) {
+ _cleanup_free_ char *registry = NULL;
+ char *token, *digest;
+ DkrPull *i;
+ int r;
+
+ assert(j);
+ assert(j->userdata);
+
+ i = j->userdata;
+ r = curl_header_strdup(header, sz, HEADER_TOKEN, &token);
+ if (r < 0)
+ return log_oom();
+ if (r > 0) {
+ free(i->response_token);
+ i->response_token = token;
+ return 0;
+ }
+
+ r = curl_header_strdup(header, sz, HEADER_DIGEST, &digest);
+ if (r < 0)
+ return log_oom();
+ if (r > 0) {
+ free(i->response_digest);
+ i->response_digest = digest;
+ return 0;
+ }
+
+ r = curl_header_strdup(header, sz, HEADER_REGISTRY, &registry);
+ if (r < 0)
+ return log_oom();
+ if (r > 0) {
+ char **l, **k;
+
+ l = strv_split(registry, ",");
+ if (!l)
+ return log_oom();
+
+ STRV_FOREACH(k, l) {
+ if (!hostname_is_valid(*k)) {
+ log_error("Registry hostname is not valid.");
+ strv_free(l);
+ return -EBADMSG;
+ }
+ }
+
+ strv_free(i->response_registries);
+ i->response_registries = l;
+ }
+
+ return 0;
+}
+
+static void dkr_pull_job_on_finished_v2(PullJob *j) {
+ DkrPull *i;
+ int r;
+
+ assert(j);
+ assert(j->userdata);
+
+ i = j->userdata;
+ if (j->error != 0) {
+ if (j == i->images_job)
+ log_error_errno(j->error, "Failed to retrieve images list. (Wrong index URL?)");
+ else if (j == i->ancestry_job)
+ log_error_errno(j->error, "Failed to retrieve manifest.");
+ else if (j == i->json_job)
+ log_error_errno(j->error, "Failed to retrieve json data.");
+ else
+ log_error_errno(j->error, "Failed to retrieve layer data.");
+
+ r = j->error;
+ goto finish;
+ }
+
+ if (i->images_job == j) {
+ const char *url;
+
+ assert(!i->tags_job);
+ assert(!i->ancestry_job);
+ assert(!i->json_job);
+ assert(!i->layer_job);
+
+ if (strv_isempty(i->response_registries)) {
+ r = -EBADMSG;
+ log_error("Didn't get registry information.");
+ goto finish;
+ }
+
+ log_info("Index lookup succeeded, directed to registry %s.", i->response_registries[0]);
+ dkr_pull_report_progress(i, DKR_RESOLVING);
+
+ url = strjoina(i->index_protocol, "auth.", i->index_address, "/v2/token/?scope=repository:",
+ i->name, ":pull&service=registry.", i->index_address);
+ r = pull_job_new(&i->tags_job, url, i->glue, i);
+ if (r < 0) {
+ log_error_errno(r, "Failed to allocate tags job: %m");
+ goto finish;
+ }
+
+ i->tags_job->on_finished = dkr_pull_job_on_finished_v2;
+ i->tags_job->on_progress = dkr_pull_job_on_progress;
+
+ r = pull_job_begin(i->tags_job);
+ if (r < 0) {
+ log_error_errno(r, "Failed to start tags job: %m");
+ goto finish;
+ }
+
+ } else if (i->tags_job == j) {
+ const char *url;
+ _cleanup_free_ const char *buf;
+ _cleanup_jsonunref_ JsonVariant *doc = NULL;
+ JsonVariant *e = NULL;
+
+ assert(!i->ancestry_job);
+ assert(!i->json_job);
+ assert(!i->layer_job);
+
+ buf = strndup((const char *)j->payload, j->payload_size);
+ if (!buf) {
+ r = -ENOMEM;
+ log_oom();
+ goto finish;
+ }
+
+ r = json_parse(buf, &doc);
+ if (r < 0) {
+ log_error("Unable to parse bearer token\n%s", j->payload);
+ goto finish;
+ }
+
+ e = json_variant_value(doc, "token");
+ if (!e || e->type != JSON_VARIANT_STRING) {
+ r = -EBADMSG;
+ log_error("Invalid JSON format for Bearer token");
+ goto finish;
+ }
+
+ r = free_and_strdup(&i->response_token, json_variant_string(e));
+ if (r < 0) {
+ log_oom();
+ goto finish;
+ }
+
+ url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v2/", i->name, "/manifests/", i->reference);
+ r = pull_job_new(&i->ancestry_job, url, i->glue, i);
+ if (r < 0) {
+ log_error_errno(r, "Failed to allocate ancestry job: %m");
+ goto finish;
+ }
+
+ r = dkr_pull_add_bearer_token(i, i->ancestry_job);
+ if (r < 0)
+ goto finish;
+
+ i->ancestry_job->on_finished = dkr_pull_job_on_finished_v2;
+ i->ancestry_job->on_progress = dkr_pull_job_on_progress;
+ i->ancestry_job->on_header = dkr_pull_job_on_header;
+
+
+ r = pull_job_begin(i->ancestry_job);
+ if (r < 0) {
+ log_error_errno(r, "Failed to start ancestry job: %m");
+ goto finish;
+ }
+
+ } else if (i->ancestry_job == j) {
+
+ _cleanup_jsonunref_ JsonVariant *doc = NULL;
+ _cleanup_jsonunref_ JsonVariant *compat = NULL;
+ JsonVariant *e = NULL;
+ _cleanup_strv_free_ char **ancestry = NULL;
+ size_t allocated = 0, size = 0;
+ char *path = NULL, **k = NULL;
+
+ r = json_parse((const char *)j->payload, &doc);
+ if (r < 0) {
+ log_error("Invalid JSON Manifest");
+ goto finish;
+ }
+
+ e = json_variant_value(doc, "fsLayers");
+ if (!e || e->type != JSON_VARIANT_ARRAY) {
+ r = -EBADMSG;
+ goto finish;
+ }
+
+ log_info("JSON manifest with schema v%"PRIi64" for %s parsed!",
+ json_variant_integer(json_variant_value(doc, "schemaVersion")),
+ json_variant_string(json_variant_value(doc, "name")));
+
+ for (unsigned z = 0; z < e->size; z++) {
+ JsonVariant *f = json_variant_element(e, z), *g = NULL;
+ const char *layer;
+ if (f->type != JSON_VARIANT_OBJECT) {
+ r = -EBADMSG;
+ goto finish;
+ }
+
+ g = json_variant_value(f, "blobSum");
+
+ layer = json_variant_string(g);
+ if (!dkr_digest_is_valid(layer)) {
+ r = -EBADMSG;
+ goto finish;
+ }
+
+ if (!GREEDY_REALLOC(ancestry, allocated, size + 2)) {
+ r = -ENOMEM;
+ log_oom();
+ goto finish;
+ }
+
+ ancestry[size] = strdup(layer);
+ if (!ancestry[size]) {
+ r = -ENOMEM;
+ log_oom();
+ goto finish;
+ }
+
+ ancestry[size+1] = NULL;
+ size += 1;
+ }
+
+ e = json_variant_value(doc, "history");
+ if (!e || e->type != JSON_VARIANT_ARRAY) {
+ r = -EBADMSG;
+ goto finish;
+ }
+
+ e = json_variant_element(e, 0);
+ e = json_variant_value(e, "v1Compatibility");
+ r = json_parse(json_variant_string(e), &compat);
+ if (r < 0) {
+ log_error("Invalid v1Compatibility JSON");
+ goto finish;
+ }
+
+ e = json_variant_value(compat, "id");
+
+ strv_free(i->ancestry);
+ i->ancestry = strv_reverse(strv_uniq(ancestry));
+ i->n_ancestry = strv_length(i->ancestry);
+ i->current_ancestry = 0;
+ i->id = strdup(i->ancestry[i->n_ancestry - 1]);
+ if (!i->id) {
+ r = -ENOMEM;
+ log_oom();
+ goto finish;
+ }
+ path = strjoin(i->image_root, "/.dkr-", json_variant_string(e), NULL);
+ if (!path) {
+ r = -ENOMEM;
+ log_oom();
+ goto finish;
+ }
+ free(i->image_root);
+ i->image_root = path;
+ ancestry = NULL;
+
+ log_info("Required layers:\n");
+ STRV_FOREACH(k, i->ancestry)
+ log_info("\t%s", *k);
+ log_info("\nProvenance:\n\tImageID: %s\n\tDigest: %s", json_variant_string(e), i->response_digest);
+
+ dkr_pull_report_progress(i, DKR_DOWNLOADING);
+
+ r = dkr_pull_pull_layer_v2(i);
+ if (r < 0)
+ goto finish;
+
+ } else if (i->layer_job == j) {
+ assert(i->temp_path);
+ assert(i->final_path);
+
+ j->disk_fd = safe_close(j->disk_fd);
+
+ if (i->tar_pid > 0) {
+ r = wait_for_terminate_and_warn("tar", i->tar_pid, true);
+ i->tar_pid = 0;
+ if (r < 0)
+ goto finish;
+ }
+
+ r = aufs_resolve(i->temp_path);
+ if (r < 0) {
+ log_error_errno(r, "Failed to resolve aufs whiteouts: %m");
+ goto finish;
+ }
+
+ r = btrfs_subvol_set_read_only(i->temp_path, true);
+ if (r < 0) {
+ log_error_errno(r, "Failed to mark snapshot read-only: %m");
+ goto finish;
+ }
+
+ if (rename(i->temp_path, i->final_path) < 0) {
+ log_error_errno(errno, "Failed to rename snaphsot: %m");
+ goto finish;
+ }
+
+ log_info("Completed writing to layer %s.", i->final_path);
+
+ i->layer_job = pull_job_unref(i->layer_job);
+ free(i->temp_path);
+ i->temp_path = NULL;
+ free(i->final_path);
+ i->final_path = NULL;
+
+ i->current_ancestry ++;
+ r = dkr_pull_pull_layer_v2(i);
+ if (r < 0)
+ goto finish;
+
+ } else if (i->json_job != j)
+ assert_not_reached("Got finished event for unknown curl object");
+
+ if (!dkr_pull_is_done(i))
+ return;
+
+ dkr_pull_report_progress(i, DKR_COPYING);
+
+ r = dkr_pull_make_local_copy(i, DKR_PULL_V2);
+ if (r < 0)
+ goto finish;
+
+ r = 0;
+
+finish:
+ if (i->on_finished)
+ i->on_finished(i, r, i->userdata);
+ else
+ sd_event_exit(i->event, r);
+
+}
+
static void dkr_pull_job_on_finished(PullJob *j) {
DkrPull *i;
int r;
@@ -618,7 +1060,7 @@ static void dkr_pull_job_on_finished(PullJob *j) {
log_info("Index lookup succeeded, directed to registry %s.", i->response_registries[0]);
dkr_pull_report_progress(i, DKR_RESOLVING);
- url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/repositories/", i->name, "/tags/", i->tag);
+ url = strjoina(PROTOCOL_PREFIX, i->response_registries[0], "/v1/repositories/", i->name, "/tags/", i->reference);
r = pull_job_new(&i->tags_job, url, i->glue, i);
if (r < 0) {
log_error_errno(r, "Failed to allocate tags job: %m");
@@ -790,12 +1232,11 @@ static void dkr_pull_job_on_finished(PullJob *j) {
dkr_pull_report_progress(i, DKR_COPYING);
- r = dkr_pull_make_local_copy(i);
+ r = dkr_pull_make_local_copy(i, DKR_PULL_V1);
if (r < 0)
goto finish;
r = 0;
-
finish:
if (i->on_finished)
i->on_finished(i, r, i->userdata);
@@ -803,52 +1244,37 @@ finish:
sd_event_exit(i->event, r);
}
-static int dkr_pull_job_on_header(PullJob *j, const char *header, size_t sz) {
- _cleanup_free_ char *registry = NULL;
- char *token;
- DkrPull *i;
- int r;
-
- assert(j);
- assert(j->userdata);
+static int get_protocol_address(char **protocol, char **address, const char *url) {
+ const char *sep, *dot;
+ char *a, *p;
- i = j->userdata;
+ sep = strstr(url, "://");
+ if (!sep)
+ return -EINVAL;
- r = curl_header_strdup(header, sz, HEADER_TOKEN, &token);
- if (r < 0)
- return log_oom();
- if (r > 0) {
- free(i->response_token);
- i->response_token = token;
- return 0;
- }
+ dot = strrchr(url, '.');
+ if (!dot)
+ return -EINVAL;
+ dot--;
- r = curl_header_strdup(header, sz, HEADER_REGISTRY, &registry);
- if (r < 0)
+ p = strndup(url, (sep - url) + 3);
+ if (!p)
return log_oom();
- if (r > 0) {
- char **l, **k;
- l = strv_split(registry, ",");
- if (!l)
- return log_oom();
+ while (dot > (sep + 3) && *dot != '.')
+ dot--;
- STRV_FOREACH(k, l) {
- if (!hostname_is_valid(*k)) {
- log_error("Registry hostname is not valid.");
- strv_free(l);
- return -EBADMSG;
- }
- }
+ a = strdup(dot + 1);
+ if (!a)
+ return log_oom();
- strv_free(i->response_registries);
- i->response_registries = l;
- }
+ *address = a;
+ *protocol = p;
return 0;
}
-int dkr_pull_start(DkrPull *i, const char *name, const char *tag, const char *local, bool force_local) {
+int dkr_pull_start(DkrPull *i, const char *name, const char *reference, const char *local, bool force_local, DkrPullVersion version) {
const char *url;
int r;
@@ -857,7 +1283,7 @@ int dkr_pull_start(DkrPull *i, const char *name, const char *tag, const char *lo
if (!dkr_name_is_valid(name))
return -EINVAL;
- if (tag && !dkr_tag_is_valid(tag))
+ if (reference && !dkr_ref_is_valid(reference))
return -EINVAL;
if (local && !machine_name_is_valid(local))
@@ -866,8 +1292,14 @@ int dkr_pull_start(DkrPull *i, const char *name, const char *tag, const char *lo
if (i->images_job)
return -EBUSY;
- if (!tag)
- tag = "latest";
+ if (!reference)
+ reference = "latest";
+
+ free(i->index_protocol);
+ free(i->index_address);
+ r = get_protocol_address(&i->index_protocol, &i->index_address, i->index_url);
+ if (r < 0)
+ return r;
r = free_and_strdup(&i->local, local);
if (r < 0)
@@ -877,7 +1309,7 @@ int dkr_pull_start(DkrPull *i, const char *name, const char *tag, const char *lo
r = free_and_strdup(&i->name, name);
if (r < 0)
return r;
- r = free_and_strdup(&i->tag, tag);
+ r = free_and_strdup(&i->reference, reference);
if (r < 0)
return r;
@@ -891,7 +1323,11 @@ int dkr_pull_start(DkrPull *i, const char *name, const char *tag, const char *lo
if (r < 0)
return r;
- i->images_job->on_finished = dkr_pull_job_on_finished;
+ if (version == DKR_PULL_V1)
+ i->images_job->on_finished = dkr_pull_job_on_finished;
+ else
+ i->images_job->on_finished = dkr_pull_job_on_finished_v2;
+
i->images_job->on_header = dkr_pull_job_on_header;
i->images_job->on_progress = dkr_pull_job_on_progress;
diff --git a/src/import/pull-dkr.h b/src/import/pull-dkr.h
index 4c4b10c..33d18cb 100644
--- a/src/import/pull-dkr.h
+++ b/src/import/pull-dkr.h
@@ -24,6 +24,7 @@
#include "sd-event.h"
#include "util.h"
+typedef enum { DKR_PULL_V1, DKR_PULL_V2 } DkrPullVersion;
typedef struct DkrPull DkrPull;
typedef void (*DkrPullFinished)(DkrPull *pull, int error, void *userdata);
@@ -33,4 +34,4 @@ DkrPull* dkr_pull_unref(DkrPull *pull);
DEFINE_TRIVIAL_CLEANUP_FUNC(DkrPull*, dkr_pull_unref);
-int dkr_pull_start(DkrPull *pull, const char *name, const char *tag, const char *local, bool force_local);
+int dkr_pull_start(DkrPull *pull, const char *name, const char *tag, const char *local, bool force_local, DkrPullVersion version);
commit 7037d506b3c19b617531a6295c38896a03d17c1e
Author: Pavel Odvody <podvody at redhat.com>
Date: Tue May 19 16:30:04 2015 +0200
import/pull: Tag replaced with reference
Default pull version set to V2
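For illustration only, a standalone sketch of the two reference forms the command now
accepts, NAME:TAG and NAME@sha256:DIGEST. This is not the pull.c code path shown
below, just a demonstration of the split; the image names are made-up examples:

    /* Sketch, not the pull.c code path below: split an image argument into
     * name and reference, where the reference is either a tag or a digest
     * (a real digest is "sha256:" followed by 64 hex characters). */
    #include <stdio.h>
    #include <string.h>

    int main(void) {
            const char *args[] = { "library/fedora:21", "library/fedora@sha256:abcd" };

            for (unsigned i = 0; i < 2; i++) {
                    const char *arg = args[i];
                    const char *sep = strchr(arg, '@');    /* digest form takes priority */

                    if (!sep)
                            sep = strchr(arg, ':');        /* otherwise tag form */

                    if (sep)
                            printf("name=%.*s reference=%s\n", (int) (sep - arg), arg, sep + 1);
                    else
                            printf("name=%s reference=latest\n", arg);
            }
            return 0;
    }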
diff --git a/src/import/pull.c b/src/import/pull.c
index ef7b035..0f2ad92 100644
--- a/src/import/pull.c
+++ b/src/import/pull.c
@@ -227,7 +227,7 @@ static void on_dkr_finished(DkrPull *pull, int error, void *userdata) {
static int pull_dkr(int argc, char *argv[], void *userdata) {
_cleanup_(dkr_pull_unrefp) DkrPull *pull = NULL;
_cleanup_event_unref_ sd_event *event = NULL;
- const char *name, *tag, *local;
+ const char *name, *reference, *local, *digest;
int r;
if (!arg_dkr_index_url) {
@@ -240,13 +240,19 @@ static int pull_dkr(int argc, char *argv[], void *userdata) {
return -EINVAL;
}
- tag = strchr(argv[1], ':');
- if (tag) {
- name = strndupa(argv[1], tag - argv[1]);
- tag++;
+ digest = strchr(argv[1], '@');
+ if (digest) {
+ reference = digest + 1;
+ name = strndupa(argv[1], digest - argv[1]);
+ }
+
+ reference = strchr(argv[1], ':');
+ if (reference) {
+ name = strndupa(argv[1], reference - argv[1]);
+ reference++;
} else {
name = argv[1];
- tag = "latest";
+ reference = "latest";
}
if (!dkr_name_is_valid(name)) {
@@ -254,8 +260,8 @@ static int pull_dkr(int argc, char *argv[], void *userdata) {
return -EINVAL;
}
- if (!dkr_tag_is_valid(tag)) {
- log_error("Tag name '%s' is not valid.", tag);
+ if (!dkr_ref_is_valid(reference)) {
+ log_error("Tag name '%s' is not valid.", reference);
return -EINVAL;
}
@@ -288,9 +294,9 @@ static int pull_dkr(int argc, char *argv[], void *userdata) {
}
}
- log_info("Pulling '%s' with tag '%s', saving as '%s'.", name, tag, local);
+ log_info("Pulling '%s' with reference '%s', saving as '%s'.", name, reference, local);
} else
- log_info("Pulling '%s' with tag '%s'.", name, tag);
+ log_info("Pulling '%s' with reference '%s'.", name, reference);
r = sd_event_default(&event);
if (r < 0)
@@ -304,7 +310,7 @@ static int pull_dkr(int argc, char *argv[], void *userdata) {
if (r < 0)
return log_error_errno(r, "Failed to allocate puller: %m");
- r = dkr_pull_start(pull, name, tag, local, arg_force);
+ r = dkr_pull_start(pull, name, reference, local, arg_force, DKR_PULL_V2);
if (r < 0)
return log_error_errno(r, "Failed to pull image: %m");
commit d4fc45afa9074e34e61fd47b59ad4d1a05d0ee82
Author: Pavel Odvody <podvody at redhat.com>
Date: Tue May 19 16:29:53 2015 +0200
test/test-json: Tests for the tokenizer bugfix and the DOM parser
The DOM parser tests are accompanied by structure and element analysis
diff --git a/src/test/test-json.c b/src/test/test-json.c
index 24dc700..c4b4a22 100644
--- a/src/test/test-json.c
+++ b/src/test/test-json.c
@@ -72,6 +72,98 @@ static void test_one(const char *data, ...) {
va_end(ap);
}
+typedef void (*Test)(JsonVariant *);
+
+static void test_file(const char *data, Test test) {
+ JsonVariant *v = NULL;
+ int r = json_parse(data, &v);
+
+ assert_se(r == 0);
+ assert_se(v != NULL);
+ assert_se(v->type == JSON_VARIANT_OBJECT);
+
+ if (test)
+ test(v);
+
+ json_variant_unref(v);
+}
+
+static void test_1(JsonVariant *v) {
+ JsonVariant *p, *q;
+ unsigned i;
+
+ /* 3 keys + 3 values */
+ assert_se(v->size == 6);
+
+ /* has k */
+ p = json_variant_value(v, "k");
+ assert_se(p && p->type == JSON_VARIANT_STRING);
+
+ /* k equals v */
+ assert_se(streq(json_variant_string(p), "v"));
+
+ /* has foo */
+ p = json_variant_value(v, "foo");
+ assert_se(p && p->type == JSON_VARIANT_ARRAY && p->size == 3);
+
+ /* check foo[0] = 1, foo[1] = 2, foo[2] = 3 */
+ for (i = 0; i < 3; ++i) {
+ q = json_variant_element(p, i);
+ assert_se(q && q->type == JSON_VARIANT_INTEGER && json_variant_integer(q) == (i+1));
+ }
+
+ /* has bar */
+ p = json_variant_value(v, "bar");
+ assert_se(p && p->type == JSON_VARIANT_OBJECT && p->size == 2);
+
+ /* zap is null */
+ q = json_variant_value(p, "zap");
+ assert_se(q && q->type == JSON_VARIANT_NULL);
+}
+
+static void test_2(JsonVariant *v) {
+ JsonVariant *p, *q;
+
+ /* 2 keys + 2 values */
+ assert_se(v->size == 4);
+
+ /* has mutant */
+ p = json_variant_value(v, "mutant");
+ assert_se(p && p->type == JSON_VARIANT_ARRAY && p->size == 4);
+
+ /* mutant[0] == 1 */
+ q = json_variant_element(p, 0);
+ assert_se(q && q->type == JSON_VARIANT_INTEGER && json_variant_integer(q) == 1);
+
+ /* mutant[1] == null */
+ q = json_variant_element(p, 1);
+ assert_se(q && q->type == JSON_VARIANT_NULL);
+
+ /* mutant[2] == "1" */
+ q = json_variant_element(p, 2);
+ assert_se(q && q->type == JSON_VARIANT_STRING && streq(json_variant_string(q), "1"));
+
+ /* mutant[3] == JSON_VARIANT_OBJECT */
+ q = json_variant_element(p, 3);
+ assert_se(q && q->type == JSON_VARIANT_OBJECT && q->size == 2);
+
+ /* has 1 */
+ p = json_variant_value(q, "1");
+ assert_se(p && p->type == JSON_VARIANT_ARRAY && p->size == 2);
+
+ /* "1"[0] == 1 */
+ q = json_variant_element(p, 0);
+ assert_se(q && q->type == JSON_VARIANT_INTEGER && json_variant_integer(q) == 1);
+
+ /* "1"[1] == "1" */
+ q = json_variant_element(p, 1);
+ assert_se(q && q->type == JSON_VARIANT_STRING && streq(json_variant_string(q), "1"));
+
+ /* has blah */
+ p = json_variant_value(v, "blah");
+ assert_se(p && p->type == JSON_VARIANT_REAL && fabs(json_variant_real(p) - 1.27) < 0.001);
+}
+
int main(int argc, char *argv[]) {
test_one("x", -EINVAL);
@@ -102,5 +194,10 @@ int main(int argc, char *argv[]) {
test_one("\"\\udc00\\udc00\"", -EINVAL);
test_one("\"\\ud801\\udc37\"", JSON_STRING, "\xf0\x90\x90\xb7", JSON_END);
+ test_one("[1, 2]", JSON_ARRAY_OPEN, JSON_INTEGER, 1, JSON_COMMA, JSON_INTEGER, 2, JSON_ARRAY_CLOSE, JSON_END);
+
+ test_file("{\"k\": \"v\", \"foo\": [1, 2, 3], \"bar\": {\"zap\": null}}", test_1);
+ test_file("{\"mutant\": [1, null, \"1\", {\"1\": [1, \"1\"]}], \"blah\": 1.27}", test_2);
+
return 0;
}
commit ed967b12be53889b522f02f915336d37d3ff1551
Author: Pavel Odvody <podvody at redhat.com>
Date: Tue May 19 16:29:42 2015 +0200
shared/json: Added DOM-like JSON parser
This makes working with documents that have a complex structure
easier and more reliable, as the parser is not susceptible to
element re-ordering.
It also fixes a bug where the tokenizer would choke after reading
a number.
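The resulting DOM interface can be used roughly like the tests in src/test/test-json.c
above; a minimal sketch against the declarations in json.h, with error handling
reduced to asserts and an arbitrary example document:

    /* Sketch of the new DOM API, mirroring src/test/test-json.c above:
     * parse a document, then look values up by key and by array index. */
    #include <assert.h>
    #include <stdio.h>

    #include "json.h"

    int main(void) {
            JsonVariant *v = NULL, *foo;

            assert(json_parse("{\"k\": \"v\", \"foo\": [1, 2, 3]}", &v) == 0);

            printf("k = %s\n", json_variant_string(json_variant_value(v, "k")));

            foo = json_variant_value(v, "foo");
            for (unsigned i = 0; i < foo->size; i++)
                    printf("foo[%u] = %jd\n", i, json_variant_integer(json_variant_element(foo, i)));

            json_variant_unref(v);
            return 0;
    }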
diff --git a/src/shared/json.c b/src/shared/json.c
index 45c8ece..f6c44f4 100644
--- a/src/shared/json.c
+++ b/src/shared/json.c
@@ -21,17 +21,171 @@
#include <sys/types.h>
#include <math.h>
-
#include "macro.h"
-#include "util.h"
#include "utf8.h"
#include "json.h"
-enum {
- STATE_NULL,
- STATE_VALUE,
- STATE_VALUE_POST,
-};
+int json_variant_new(JsonVariant **ret, JsonVariantType type) {
+ JsonVariant *v;
+ v = new0(JsonVariant, 1);
+ if (!v)
+ return -ENOMEM;
+ v->type = type;
+ *ret = v;
+ return 0;
+}
+
+static int json_variant_deep_copy(JsonVariant *ret, JsonVariant *variant) {
+ assert(ret);
+ assert(variant);
+
+ ret->type = variant->type;
+ ret->size = variant->size;
+
+ if (variant->type == JSON_VARIANT_STRING) {
+ ret->string = memdup(variant->string, variant->size+1);
+ if (!ret->string)
+ return -ENOMEM;
+ } else if (variant->type == JSON_VARIANT_ARRAY || variant->type == JSON_VARIANT_OBJECT) {
+ ret->objects = new0(JsonVariant, variant->size);
+ if (!ret->objects)
+ return -ENOMEM;
+
+ for (unsigned i = 0; i < variant->size; ++i) {
+ int r;
+ r = json_variant_deep_copy(&ret->objects[i], &variant->objects[i]);
+ if (r < 0)
+ return r;
+ }
+ }
+ else
+ ret->value = variant->value;
+
+ return 0;
+}
+
+static JsonVariant *json_object_unref(JsonVariant *variant);
+
+static JsonVariant *json_variant_unref_inner(JsonVariant *variant) {
+ if (!variant)
+ return NULL;
+
+ if (variant->type == JSON_VARIANT_ARRAY || variant->type == JSON_VARIANT_OBJECT)
+ return json_object_unref(variant);
+
+ else if (variant->type == JSON_VARIANT_STRING)
+ free(variant->string);
+
+ return NULL;
+}
+
+static JsonVariant *json_raw_unref(JsonVariant *variant, size_t size) {
+ if (!variant)
+ return NULL;
+
+ for (size_t i = 0; i < size; ++i)
+ json_variant_unref_inner(&variant[i]);
+
+ free(variant);
+ return NULL;
+}
+
+static JsonVariant *json_object_unref(JsonVariant *variant) {
+ assert(variant);
+ if (!variant->objects)
+ return NULL;
+
+ for (unsigned i = 0; i < variant->size; ++i)
+ json_variant_unref_inner(&variant->objects[i]);
+
+ free(variant->objects);
+ return NULL;
+}
+
+static JsonVariant **json_variant_array_unref(JsonVariant **variant) {
+ size_t i = 0;
+ JsonVariant *p = NULL;
+
+ if (!variant)
+ return NULL;
+
+ while((p = (variant[i++])) != NULL) {
+ if (p->type == JSON_VARIANT_STRING)
+ free(p->string);
+ free(p);
+ }
+
+ free(variant);
+
+ return NULL;
+}
+DEFINE_TRIVIAL_CLEANUP_FUNC(JsonVariant **, json_variant_array_unref);
+
+JsonVariant *json_variant_unref(JsonVariant *variant) {
+ if (!variant)
+ return NULL;
+
+ if (variant->type == JSON_VARIANT_ARRAY || variant->type == JSON_VARIANT_OBJECT)
+ return json_object_unref(variant);
+
+ else if (variant->type == JSON_VARIANT_STRING)
+ free(variant->string);
+
+ free(variant);
+
+ return NULL;
+}
+
+char *json_variant_string(JsonVariant *variant){
+ assert(variant);
+ assert(variant->type == JSON_VARIANT_STRING);
+
+ return variant->string;
+}
+
+bool json_variant_bool(JsonVariant *variant) {
+ assert(variant);
+ assert(variant->type == JSON_VARIANT_BOOLEAN);
+
+ return variant->value.boolean;
+}
+
+intmax_t json_variant_integer(JsonVariant *variant) {
+ assert(variant);
+ assert(variant->type == JSON_VARIANT_INTEGER);
+
+ return variant->value.integer;
+}
+
+double json_variant_real(JsonVariant *variant) {
+ assert(variant);
+ assert(variant->type == JSON_VARIANT_REAL);
+
+ return variant->value.real;
+}
+
+JsonVariant *json_variant_element(JsonVariant *variant, unsigned index) {
+ assert(variant);
+ assert(variant->type == JSON_VARIANT_ARRAY || variant->type == JSON_VARIANT_OBJECT);
+ assert(index < variant->size);
+ assert(variant->objects);
+
+ return &variant->objects[index];
+}
+
+JsonVariant *json_variant_value(JsonVariant *variant, const char *key) {
+ assert(variant);
+ assert(variant->type == JSON_VARIANT_OBJECT);
+ assert(variant->objects);
+
+ for (unsigned i = 0; i < variant->size; i += 2) {
+ JsonVariant *p = &variant->objects[i];
+ if (p->type == JSON_VARIANT_STRING && streq(key, p->string))
+ return &variant->objects[i + 1];
+ }
+
+ return NULL;
+}
static void inc_lines(unsigned *line, const char *s, size_t n) {
const char *p = s;
@@ -285,9 +439,6 @@ static int json_parse_number(const char **p, union json_value *ret) {
} while (strchr("0123456789", *c) && *c != 0);
}
- if (*c != 0)
- return -EINVAL;
-
*p = c;
if (is_double) {
@@ -310,6 +461,12 @@ int json_tokenize(
int t;
int r;
+ enum {
+ STATE_NULL,
+ STATE_VALUE,
+ STATE_VALUE_POST,
+ };
+
assert(p);
assert(*p);
assert(ret_string);
@@ -443,3 +600,258 @@ int json_tokenize(
}
}
+
+static bool json_is_value(JsonVariant *var) {
+ assert(var);
+
+ return var->type != JSON_VARIANT_CONTROL;
+}
+
+static int json_scoped_parse(JsonVariant **tokens, size_t *i, size_t n, JsonVariant *scope) {
+ bool arr = scope->type == JSON_VARIANT_ARRAY;
+ int terminator = arr ? JSON_ARRAY_CLOSE : JSON_OBJECT_CLOSE;
+ size_t allocated = 0, size = 0;
+ JsonVariant *key = NULL, *value = NULL, *var = NULL, *items = NULL;
+ enum {
+ STATE_KEY,
+ STATE_COLON,
+ STATE_COMMA,
+ STATE_VALUE
+ } state = arr ? STATE_VALUE : STATE_KEY;
+
+ assert(tokens);
+ assert(i);
+ assert(scope);
+
+ while((var = *i < n ? tokens[(*i)++] : NULL) != NULL) {
+ bool stopper = !json_is_value(var) && var->value.integer == terminator;
+ int r;
+
+ if (stopper) {
+ if (state != STATE_COMMA && size > 0)
+ goto error;
+
+ goto out;
+ }
+
+ if (state == STATE_KEY) {
+ if (var->type != JSON_VARIANT_STRING)
+ goto error;
+ else {
+ key = var;
+ state = STATE_COLON;
+ }
+ }
+ else if (state == STATE_COLON) {
+ if (key == NULL)
+ goto error;
+
+ if (json_is_value(var))
+ goto error;
+
+ if (var->value.integer != JSON_COLON)
+ goto error;
+
+ state = STATE_VALUE;
+ }
+ else if (state == STATE_VALUE) {
+ _cleanup_jsonunref_ JsonVariant *v = NULL;
+ size_t toadd = arr ? 1 : 2;
+
+ if (!json_is_value(var)) {
+ int type = (var->value.integer == JSON_ARRAY_OPEN) ? JSON_VARIANT_ARRAY : JSON_VARIANT_OBJECT;
+
+ r = json_variant_new(&v, type);
+ if (r < 0)
+ goto error;
+
+ r = json_scoped_parse(tokens, i, n, v);
+ if (r < 0)
+ goto error;
+
+ value = v;
+ }
+ else
+ value = var;
+
+ if(!GREEDY_REALLOC(items, allocated, size + toadd))
+ goto error;
+
+ if (arr) {
+ r = json_variant_deep_copy(&items[size], value);
+ if (r < 0)
+ goto error;
+ } else {
+ r = json_variant_deep_copy(&items[size], key);
+ if (r < 0)
+ goto error;
+
+ r = json_variant_deep_copy(&items[size+1], value);
+ if (r < 0)
+ goto error;
+ }
+
+ size += toadd;
+ state = STATE_COMMA;
+ }
+ else if (state == STATE_COMMA) {
+ if (json_is_value(var))
+ goto error;
+
+ if (var->value.integer != JSON_COMMA)
+ goto error;
+
+ key = NULL;
+ value = NULL;
+
+ state = arr ? STATE_VALUE : STATE_KEY;
+ }
+ }
+
+error:
+ json_raw_unref(items, size);
+ return -EBADMSG;
+
+out:
+ scope->size = size;
+ scope->objects = items;
+
+ return scope->type;
+}
+
+static int json_parse_tokens(JsonVariant **tokens, size_t ntokens, JsonVariant **rv) {
+ size_t it = 0;
+ int r;
+ JsonVariant *e;
+ _cleanup_jsonunref_ JsonVariant *p;
+
+ assert(tokens);
+ assert(ntokens);
+
+ e = tokens[it++];
+ r = json_variant_new(&p, JSON_VARIANT_OBJECT);
+ if (r < 0)
+ return r;
+
+ if (e->type != JSON_VARIANT_CONTROL && e->value.integer != JSON_OBJECT_OPEN)
+ return -EBADMSG;
+
+ r = json_scoped_parse(tokens, &it, ntokens, p);
+ if (r < 0)
+ return r;
+
+ *rv = p;
+ p = NULL;
+
+ return 0;
+}
+
+static int json_tokens(const char *string, size_t size, JsonVariant ***tokens, size_t *n) {
+ _cleanup_free_ char *buf = NULL;
+ _cleanup_(json_variant_array_unrefp) JsonVariant **items = NULL;
+ union json_value v = {};
+ void *json_state = NULL;
+ const char *p;
+ int t, r;
+ size_t allocated = 0, s = 0;
+
+ assert(string);
+ assert(n);
+
+ if (size <= 0)
+ return -EBADMSG;
+
+ buf = strndup(string, size);
+ if (!buf)
+ return -ENOMEM;
+
+ p = buf;
+ for (;;) {
+ _cleanup_free_ char *rstr = NULL;
+ _cleanup_jsonunref_ JsonVariant *var = NULL;
+
+ t = json_tokenize(&p, &rstr, &v, &json_state, NULL);
+
+ if (t < 0)
+ return t;
+ else if (t == JSON_END)
+ break;
+
+ if (t <= JSON_ARRAY_CLOSE) {
+ r = json_variant_new(&var, JSON_VARIANT_CONTROL);
+ if (r < 0)
+ return r;
+ var->value.integer = t;
+ } else {
+ switch (t) {
+ case JSON_STRING:
+ r = json_variant_new(&var, JSON_VARIANT_STRING);
+ if (r < 0)
+ return r;
+ var->size = strlen(rstr);
+ var->string = strdup(rstr);
+ if (!var->string) {
+ return -ENOMEM;
+ }
+ break;
+ case JSON_INTEGER:
+ r = json_variant_new(&var, JSON_VARIANT_INTEGER);
+ if (r < 0)
+ return r;
+ var->value = v;
+ break;
+ case JSON_REAL:
+ r = json_variant_new(&var, JSON_VARIANT_REAL);
+ if (r < 0)
+ return r;
+ var->value = v;
+ break;
+ case JSON_BOOLEAN:
+ r = json_variant_new(&var, JSON_VARIANT_BOOLEAN);
+ if (r < 0)
+ return r;
+ var->value = v;
+ break;
+ case JSON_NULL:
+ r = json_variant_new(&var, JSON_VARIANT_NULL);
+ if (r < 0)
+ return r;
+ break;
+ }
+ }
+
+ if (!GREEDY_REALLOC(items, allocated, s+2))
+ return -ENOMEM;
+
+ items[s++] = var;
+ items[s] = NULL;
+ var = NULL;
+ }
+
+ *n = s;
+ *tokens = items;
+ items = NULL;
+
+ return 0;
+}
+
+int json_parse(const char *string, JsonVariant **rv) {
+ _cleanup_(json_variant_array_unrefp) JsonVariant **s = NULL;
+ JsonVariant *v = NULL;
+ size_t n = 0;
+ int r;
+
+ assert(string);
+ assert(rv);
+
+ r = json_tokens(string, strlen(string), &s, &n);
+ if (r < 0)
+ return r;
+
+ r = json_parse_tokens(s, n, &v);
+ if (r < 0)
+ return r;
+
+ *rv = v;
+ return 0;
+}
diff --git a/src/shared/json.h b/src/shared/json.h
index 55976d5..2ce5642 100644
--- a/src/shared/json.h
+++ b/src/shared/json.h
@@ -22,6 +22,7 @@
***/
#include <stdbool.h>
+#include "util.h"
enum {
JSON_END,
@@ -38,12 +39,48 @@ enum {
JSON_NULL,
};
+typedef enum {
+ JSON_VARIANT_CONTROL,
+ JSON_VARIANT_STRING,
+ JSON_VARIANT_INTEGER,
+ JSON_VARIANT_BOOLEAN,
+ JSON_VARIANT_REAL,
+ JSON_VARIANT_ARRAY,
+ JSON_VARIANT_OBJECT,
+ JSON_VARIANT_NULL
+} JsonVariantType;
+
union json_value {
bool boolean;
double real;
intmax_t integer;
};
+typedef struct JsonVariant {
+ union {
+ char *string;
+ struct JsonVariant *objects;
+ union json_value value;
+ };
+ JsonVariantType type;
+ unsigned size;
+} JsonVariant;
+
+int json_variant_new(JsonVariant **ret, JsonVariantType type);
+JsonVariant *json_variant_unref(JsonVariant *);
+DEFINE_TRIVIAL_CLEANUP_FUNC(JsonVariant *, json_variant_unref);
+#define _cleanup_jsonunref_ _cleanup_(json_variant_unrefp)
+
+char *json_variant_string(JsonVariant *);
+bool json_variant_bool(JsonVariant *);
+intmax_t json_variant_integer(JsonVariant *);
+double json_variant_real(JsonVariant *);
+
+JsonVariant *json_variant_element(JsonVariant *, unsigned index);
+JsonVariant *json_variant_value(JsonVariant *, const char *key);
+
#define JSON_VALUE_NULL ((union json_value) {})
int json_tokenize(const char **p, char **ret_string, union json_value *ret_value, void **state, unsigned *line);
+int json_parse(const char *string, JsonVariant **rv);
+int json_parse_measure(const char *string, size_t *size);
commit 74eff91e5f118224c1e556d2716d5360dd97a87d
Author: Pavel Odvody <podvody at redhat.com>
Date: Tue May 19 16:29:29 2015 +0200
shared/import-util: Tag renamed to reference
Added (sha256) digest validation function
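A short sketch of what the new validators accept; the digest string below is a
made-up example of the expected form ("sha256:" plus 64 hex characters, 71
characters in total):

    /* Sketch of the validators added below; the digest is a synthetic example. */
    #include <assert.h>

    #include "import-util.h"

    int main(void) {
            assert(dkr_ref_is_valid("latest"));            /* plain tag */
            assert(dkr_ref_is_valid("sha256:"
                                    "0123456789abcdef0123456789abcdef"
                                    "0123456789abcdef0123456789abcdef")); /* digest form */
            assert(!dkr_digest_is_valid("sha256:abcd"));   /* wrong length */
            return 0;
    }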
diff --git a/src/shared/import-util.c b/src/shared/import-util.c
index 660d92a..001a8a3 100644
--- a/src/shared/import-util.c
+++ b/src/shared/import-util.c
@@ -150,6 +150,27 @@ int raw_strip_suffixes(const char *p, char **ret) {
return 0;
}
+bool dkr_digest_is_valid(const char *digest) {
+ /* 7 chars for prefix, 64 chars for the digest itself */
+ if (strlen(digest) != 71)
+ return false;
+
+ return startswith(digest, "sha256:") && in_charset(digest + 7, "0123456789abcdef");
+}
+
+bool dkr_ref_is_valid(const char *ref) {
+ const char *colon;
+
+ if (isempty(ref))
+ return false;
+
+ colon = strchr(ref, ':');
+ if (!colon)
+ return filename_is_valid(ref);
+
+ return dkr_digest_is_valid(ref);
+}
+
bool dkr_name_is_valid(const char *name) {
const char *slash, *p;
diff --git a/src/shared/import-util.h b/src/shared/import-util.h
index ff155b0..7bf7d4c 100644
--- a/src/shared/import-util.h
+++ b/src/shared/import-util.h
@@ -44,4 +44,6 @@ int raw_strip_suffixes(const char *name, char **ret);
bool dkr_name_is_valid(const char *name);
bool dkr_id_is_valid(const char *id);
+bool dkr_ref_is_valid(const char *ref);
+bool dkr_digest_is_valid(const char *digest);
#define dkr_tag_is_valid(tag) filename_is_valid(tag)