[systemd-commits] 6 commits - Makefile.am src/core src/import src/shared src/test
Lennart Poettering
lennart at kemper.freedesktop.org
Tue Jan 20 06:07:04 PST 2015
Makefile.am | 21
src/core/load-fragment.c | 2
src/import/import-dkr.c | 2
src/import/import-job.c | 664 +++++++++++++++++++++++++++++++
src/import/import-job.h | 110 +++++
src/import/import-raw.c | 990 ++++++++++++-----------------------------------
src/import/import-raw.h | 9
src/import/import-tar.c | 308 ++++++++++++++
src/import/import-tar.h | 36 +
src/import/import-util.c | 272 ++++++++++++
src/import/import-util.h | 38 +
src/import/import.c | 168 ++++++-
src/shared/btrfs-util.c | 22 -
src/shared/btrfs-util.h | 1
src/shared/util.c | 42 +
src/shared/util.h | 3
src/test/test-util.c | 18
17 files changed, 1905 insertions(+), 801 deletions(-)
New commits:
commit 85dbc41dc67ff49fd8a843dbac5b8b5cb0b61155
Author: Lennart Poettering <lennart at poettering.net>
Date: Tue Jan 20 15:06:34 2015 +0100
import: add a simple scheme for validating the SHA256 sums of downloaded raw files
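The scheme computes the SHA256 of the downloaded image and looks the digest up in a SHA256SUMS file fetched from the same directory, using the usual sha256sum output format ("<hex digest> *<filename>"). A minimal standalone sketch of that line lookup follows; the names and the shortened digests are illustrative only, the actual implementation is raw_import_verify_sha256sum() in the diff below.

    #define _GNU_SOURCE             /* for memmem() */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Return true if "<sha256> *<filename>\n" occurs at the start of a
     * line in the downloaded SHA256SUMS payload. */
    static bool checksum_line_found(const char *payload, size_t payload_size,
                                    const char *sha256, const char *filename) {
            char line[1024];
            const char *p;

            snprintf(line, sizeof(line), "%s *%s\n", sha256, filename);

            p = memmem(payload, payload_size, line, strlen(line));
            if (!p)
                    return false;

            return p == payload || p[-1] == '\n';
    }

    int main(void) {
            /* Digests shortened for illustration; real entries are 64 hex chars. */
            const char *sums =
                    "aaaa1111 *other.raw\n"
                    "bbbb2222 *image.raw\n";

            printf("%s\n", checksum_line_found(sums, strlen(sums),
                                               "bbbb2222", "image.raw") ? "valid" : "not found");
            return 0;
    }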
diff --git a/Makefile.am b/Makefile.am
index f165042..b6a4e3e 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -5238,6 +5238,9 @@ lib_LTLIBRARIES += \
libnss_mymachines.la
if HAVE_LIBCURL
+if HAVE_XZ
+if HAVE_ZLIB
+if HAVE_GCRYPT
bin_PROGRAMS += \
systemd-import
@@ -5265,7 +5268,8 @@ systemd_import_CFLAGS = \
$(AM_CFLAGS) \
$(LIBCURL_CFLAGS) \
$(XZ_CFLAGS) \
- $(ZLIB_CFLAGS)
+ $(ZLIB_CFLAGS) \
+ $(GCRYPT_CFLAGS)
systemd_import_LDADD = \
libsystemd-internal.la \
@@ -5273,11 +5277,9 @@ systemd_import_LDADD = \
libsystemd-shared.la \
$(LIBCURL_LIBS) \
$(XZ_LIBS) \
- $(ZLIB_LIBS)
-
-endif
+ $(ZLIB_LIBS) \
+ $(GCRYPT_LIBS)
-if HAVE_ZLIB
manual_tests += \
test-qcow2
@@ -5296,6 +5298,9 @@ test_qcow2_LDADD = \
libsystemd-shared.la \
$(ZLIB_LIBS)
endif
+endif
+endif
+endif
endif
diff --git a/src/import/import-job.c b/src/import/import-job.c
index 37f8ef7..6de3268 100644
--- a/src/import/import-job.c
+++ b/src/import/import-job.c
@@ -38,10 +38,14 @@ ImportJob* import_job_unref(ImportJob *j) {
else if (j->compressed == IMPORT_JOB_GZIP)
inflateEnd(&j->gzip);
+ if (j->hash_context)
+ gcry_md_close(j->hash_context);
+
free(j->url);
free(j->etag);
strv_free(j->old_etags);
free(j->payload);
+ free(j->sha256);
free(j);
@@ -94,6 +98,7 @@ void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
goto finish;
} else if (status == 304) {
log_info("Image already downloaded. Skipping download.");
+ j->etag_exists = true;
r = 0;
goto finish;
} else if (status >= 300) {
@@ -119,6 +124,25 @@ void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
goto finish;
}
+ if (j->hash_context) {
+ uint8_t *k;
+
+ k = gcry_md_read(j->hash_context, GCRY_MD_SHA256);
+ if (!k) {
+ log_error("Failed to get checksum.");
+ r = -EIO;
+ goto finish;
+ }
+
+ j->sha256 = hexmem(k, gcry_md_get_algo_dlen(GCRY_MD_SHA256));
+ if (!j->sha256) {
+ r = log_oom();
+ goto finish;
+ }
+
+ log_debug("SHA256 of %s is %s.", j->url, j->sha256);
+ }
+
if (j->disk_fd >= 0 && j->allow_sparse) {
/* Make sure the file size is right, in case the file was
* sparse and we just seeked for the last part */
@@ -151,14 +175,12 @@ finish:
import_job_finish(j, r);
}
-
static int import_job_write_uncompressed(ImportJob *j, void *p, size_t sz) {
ssize_t n;
assert(j);
assert(p);
assert(sz > 0);
- assert(j->disk_fd >= 0);
if (j->written_uncompressed + sz < j->written_uncompressed) {
log_error("File too large, overflow");
@@ -204,7 +226,6 @@ static int import_job_write_compressed(ImportJob *j, void *p, size_t sz) {
assert(j);
assert(p);
assert(sz > 0);
- assert(j->disk_fd >= 0);
if (j->written_compressed + sz < j->written_compressed) {
log_error("File too large, overflow");
@@ -222,6 +243,9 @@ static int import_job_write_compressed(ImportJob *j, void *p, size_t sz) {
return -EFBIG;
}
+ if (j->hash_context)
+ gcry_md_write(j->hash_context, p, sz);
+
switch (j->compressed) {
case IMPORT_JOB_UNCOMPRESSED:
@@ -311,6 +335,13 @@ static int import_job_open_disk(ImportJob *j) {
}
}
+ if (j->calc_hash) {
+ if (gcry_md_open(&j->hash_context, GCRY_MD_SHA256, 0) != 0) {
+ log_error("Failed to initialize hash context.");
+ return -EIO;
+ }
+ }
+
return 0;
}
@@ -459,6 +490,7 @@ static size_t import_job_header_callback(void *contents, size_t size, size_t nme
if (strv_contains(j->old_etags, j->etag)) {
log_info("Image already downloaded. Skipping download.");
+ j->etag_exists = true;
import_job_finish(j, 0);
return sz;
}
diff --git a/src/import/import-job.h b/src/import/import-job.h
index 843daa2..9ccaaf2 100644
--- a/src/import/import-job.h
+++ b/src/import/import-job.h
@@ -23,6 +23,7 @@
#include <lzma.h>
#include <zlib.h>
+#include <gcrypt.h>
#include "macro.h"
#include "curl-util.h"
@@ -42,6 +43,8 @@ typedef enum ImportJobState {
_IMPORT_JOB_STATE_INVALID = -1,
} ImportJobState;
+#define IMPORT_JOB_STATE_IS_COMPLETE(j) (IN_SET((j)->state, IMPORT_JOB_DONE, IMPORT_JOB_FAILED))
+
typedef enum ImportJobCompression {
IMPORT_JOB_UNCOMPRESSED,
IMPORT_JOB_XZ,
@@ -66,6 +69,7 @@ struct ImportJob {
char *etag;
char **old_etags;
+ bool etag_exists;
uint64_t content_length;
uint64_t written_compressed;
@@ -91,6 +95,11 @@ struct ImportJob {
usec_t last_status_usec;
bool allow_sparse;
+
+ bool calc_hash;
+ gcry_md_hd_t hash_context;
+
+ char *sha256;
};
int import_job_new(ImportJob **job, const char *url, CurlGlue *glue, void *userdata);
diff --git a/src/import/import-raw.c b/src/import/import-raw.c
index 635779c..8ca1091 100644
--- a/src/import/import-raw.c
+++ b/src/import/import-raw.c
@@ -22,6 +22,7 @@
#include <sys/xattr.h>
#include <linux/fs.h>
#include <curl/curl.h>
+#include <gcrypt.h>
#include "utf8.h"
#include "strv.h"
@@ -45,6 +46,7 @@ struct RawImport {
char *image_root;
ImportJob *raw_job;
+ ImportJob *sha256sums_job;
RawImportFinished on_finished;
void *userdata;
@@ -176,10 +178,11 @@ static int raw_import_make_local_copy(RawImport *i) {
if (!i->local)
return 0;
- if (i->raw_job->disk_fd >= 0) {
- if (lseek(i->raw_job->disk_fd, SEEK_SET, 0) == (off_t) -1)
- return log_error_errno(errno, "Failed to seek to beginning of vendor image: %m");
- } else {
+ if (i->raw_job->etag_exists) {
+ /* We have downloaded this one previously, reopen it */
+
+ assert(i->raw_job->disk_fd < 0);
+
if (!i->final_path) {
r = import_make_path(i->raw_job->url, i->raw_job->etag, i->image_root, ".raw-", ".raw", &i->final_path);
if (r < 0)
@@ -189,6 +192,13 @@ static int raw_import_make_local_copy(RawImport *i) {
i->raw_job->disk_fd = open(i->final_path, O_RDONLY|O_NOCTTY|O_CLOEXEC);
if (i->raw_job->disk_fd < 0)
return log_error_errno(errno, "Failed to open vendor image: %m");
+ } else {
+ /* We freshly downloaded the image, use it */
+
+ assert(i->raw_job->disk_fd >= 0);
+
+ if (lseek(i->raw_job->disk_fd, SEEK_SET, 0) == (off_t) -1)
+ return log_error_errno(errno, "Failed to seek to beginning of vendor image: %m");
}
p = strappenda(i->image_root, "/", i->local, ".raw");
@@ -235,7 +245,91 @@ static int raw_import_make_local_copy(RawImport *i) {
return 0;
}
-static void raw_import_job_on_finished(ImportJob *j) {
+static int raw_import_verify_sha256sum(RawImport *i) {
+ _cleanup_free_ char *fn = NULL;
+ const char *p, *line;
+ int r;
+
+ assert(i);
+
+ assert(i->raw_job);
+ assert(i->raw_job->sha256);
+
+ assert(i->sha256sums_job);
+ assert(i->sha256sums_job->payload);
+ assert(i->sha256sums_job->payload_size > 0);
+
+ r = import_url_last_component(i->raw_job->url, &fn);
+ if (r < 0)
+ return log_oom();
+
+ if (!filename_is_valid(fn)) {
+ log_error("Cannot verify checksum, could not determine valid server-side file name.");
+ return -EBADMSG;
+ }
+
+ line = strappenda(i->raw_job->sha256, " *", fn, "\n");
+
+ p = memmem(i->sha256sums_job->payload,
+ i->sha256sums_job->payload_size,
+ line,
+ strlen(line));
+
+ if (!p || (p != (char*) i->sha256sums_job->payload && p[-1] != '\n')) {
+ log_error("Checksum did not check out, payload has been tempered with.");
+ return -EBADMSG;
+ }
+
+ log_info("SHA256 checksum of %s is valid.", i->raw_job->url);
+
+ return 0;
+}
+
+static int raw_import_finalize(RawImport *i) {
+ int r;
+
+ assert(i);
+
+ if (!IMPORT_JOB_STATE_IS_COMPLETE(i->raw_job) ||
+ !IMPORT_JOB_STATE_IS_COMPLETE(i->sha256sums_job))
+ return 0;
+
+ if (!i->raw_job->etag_exists) {
+ assert(i->temp_path);
+ assert(i->final_path);
+ assert(i->raw_job->disk_fd >= 0);
+
+ r = raw_import_verify_sha256sum(i);
+ if (r < 0)
+ return r;
+
+ r = rename(i->temp_path, i->final_path);
+ if (r < 0)
+ return log_error_errno(errno, "Failed to move RAW file into place: %m");
+
+ free(i->temp_path);
+ i->temp_path = NULL;
+ }
+
+ r = raw_import_make_local_copy(i);
+ if (r < 0)
+ return r;
+
+ i->raw_job->disk_fd = safe_close(i->raw_job->disk_fd);
+
+ return 1;
+}
+
+static void raw_import_invoke_finished(RawImport *i, int r) {
+ assert(i);
+
+ if (i->on_finished)
+ i->on_finished(i, r, i->userdata);
+ else
+ sd_event_exit(i->event, r);
+}
+
+static void raw_import_raw_job_on_finished(ImportJob *j) {
RawImport *i;
int r;
@@ -250,9 +344,12 @@ static void raw_import_job_on_finished(ImportJob *j) {
/* This is invoked if either the download completed
* successfully, or the download was skipped because we
- * already have the etag. */
+ * already have the etag. In this case ->etag_exists is
+ * true. */
+
+ if (!j->etag_exists) {
+ assert(j->disk_fd >= 0);
- if (j->disk_fd >= 0) {
r = raw_import_maybe_convert_qcow2(i);
if (r < 0)
goto finish;
@@ -260,33 +357,45 @@ static void raw_import_job_on_finished(ImportJob *j) {
r = import_make_read_only_fd(j->disk_fd);
if (r < 0)
goto finish;
+ }
- r = rename(i->temp_path, i->final_path);
- if (r < 0) {
- r = log_error_errno(errno, "Failed to move RAW file into place: %m");
- goto finish;
- }
+ r = raw_import_finalize(i);
+ if (r < 0)
+ goto finish;
+ if (r == 0)
+ return;
- free(i->temp_path);
- i->temp_path = NULL;
+ r = 0;
+
+finish:
+ raw_import_invoke_finished(i, r);
+}
+
+static void raw_import_sha256sums_job_on_finished(ImportJob *j) {
+ RawImport *i;
+ int r;
+
+ assert(j);
+ assert(j->userdata);
+
+ i = j->userdata;
+ if (j->error != 0) {
+ r = j->error;
+ goto finish;
}
- r = raw_import_make_local_copy(i);
+ r = raw_import_finalize(i);
if (r < 0)
goto finish;
-
- j->disk_fd = safe_close(j->disk_fd);
+ if (r == 0)
+ return;
r = 0;
-
finish:
- if (i->on_finished)
- i->on_finished(i, r, i->userdata);
- else
- sd_event_exit(i->event, r);
+ raw_import_invoke_finished(i, r);
}
-static int raw_import_job_on_open_disk(ImportJob *j) {
+static int raw_import_raw_job_on_open_disk(ImportJob *j) {
RawImport *i;
int r;
@@ -317,6 +426,7 @@ static int raw_import_job_on_open_disk(ImportJob *j) {
}
int raw_import_pull(RawImport *i, const char *url, const char *local, bool force_local) {
+ _cleanup_free_ char *sha256sums_url = NULL;
int r;
assert(i);
@@ -333,19 +443,40 @@ int raw_import_pull(RawImport *i, const char *url, const char *local, bool force
r = free_and_strdup(&i->local, local);
if (r < 0)
return r;
-
i->force_local = force_local;
+ /* Queue job for the image itself */
r = import_job_new(&i->raw_job, url, i->glue, i);
if (r < 0)
return r;
- i->raw_job->on_finished = raw_import_job_on_finished;
- i->raw_job->on_open_disk = raw_import_job_on_open_disk;
+ i->raw_job->on_finished = raw_import_raw_job_on_finished;
+ i->raw_job->on_open_disk = raw_import_raw_job_on_open_disk;
+ i->raw_job->calc_hash = true;
r = import_find_old_etags(url, i->image_root, DT_REG, ".raw-", ".raw", &i->raw_job->old_etags);
if (r < 0)
return r;
- return import_job_begin(i->raw_job);
+ /* Queue job for the SHA256SUMS file for the image */
+ r = import_url_change_last_component(url, "SHA256SUMS", &sha256sums_url);
+ if (r < 0)
+ return r;
+
+ r = import_job_new(&i->sha256sums_job, sha256sums_url, i->glue, i);
+ if (r < 0)
+ return r;
+
+ i->sha256sums_job->on_finished = raw_import_sha256sums_job_on_finished;
+ i->sha256sums_job->uncompressed_max = i->sha256sums_job->compressed_max = 1ULL * 1024ULL * 1024ULL;
+
+ r = import_job_begin(i->raw_job);
+ if (r < 0)
+ return r;
+
+ r = import_job_begin(i->sha256sums_job);
+ if (r < 0)
+ return r;
+
+ return 0;
}
diff --git a/src/import/import-util.c b/src/import/import-util.c
index 341a566..1212025 100644
--- a/src/import/import-util.c
+++ b/src/import/import-util.c
@@ -218,3 +218,55 @@ int import_make_path(const char *url, const char *etag, const char *image_root,
*ret = path;
return 0;
}
+
+int import_url_last_component(const char *url, char **ret) {
+ const char *e, *p;
+ char *s;
+
+ e = strchrnul(url, '?');
+
+ while (e > url && e[-1] == '/')
+ e--;
+
+ p = e;
+ while (p > url && p[-1] != '/')
+ p--;
+
+ if (e <= p)
+ return -EINVAL;
+
+ s = strndup(p, e - p);
+ if (!s)
+ return -ENOMEM;
+
+ *ret = s;
+ return 0;
+}
+
+
+int import_url_change_last_component(const char *url, const char *suffix, char **ret) {
+ const char *e;
+ char *s;
+
+ assert(url);
+ assert(ret);
+
+ e = strchrnul(url, '?');
+
+ while (e > url && e[-1] == '/')
+ e--;
+
+ while (e > url && e[-1] != '/')
+ e--;
+
+ if (e <= url)
+ return -EINVAL;
+
+ s = new(char, (e - url) + strlen(suffix) + 1);
+ if (!s)
+ return -ENOMEM;
+
+ strcpy(mempcpy(s, url, e - url), suffix);
+ *ret = s;
+ return 0;
+}
diff --git a/src/import/import-util.h b/src/import/import-util.h
index a930ea4..a8a5ca5 100644
--- a/src/import/import-util.h
+++ b/src/import/import-util.h
@@ -33,3 +33,6 @@ int import_make_read_only_fd(int fd);
int import_make_read_only(const char *path);
int import_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret);
+
+int import_url_last_component(const char *url, char **ret);
+int import_url_change_last_component(const char *url, const char *suffix, char **ret);
diff --git a/src/import/import.c b/src/import/import.c
index 861a448..3362f4a 100644
--- a/src/import/import.c
+++ b/src/import/import.c
@@ -29,6 +29,7 @@
#include "import-tar.h"
#include "import-raw.h"
#include "import-dkr.h"
+#include "import-util.h"
static bool arg_force = false;
static const char *arg_image_root = "/var/lib/machines";
@@ -47,30 +48,6 @@ static void on_tar_finished(TarImport *import, int error, void *userdata) {
sd_event_exit(event, error);
}
-static int url_final_component(const char *url, char **ret) {
- const char *e, *p;
- char *s;
-
- e = strchrnul(url, '?');
-
- while (e > url && e[-1] == '/')
- e--;
-
- p = e;
- while (p > url && p[-1] != '/')
- p--;
-
- if (e <= p)
- return -EINVAL;
-
- s = strndup(p, e - p);
- if (!s)
- return -ENOMEM;
-
- *ret = s;
- return 0;
-}
-
static int strip_tar_suffixes(const char *name, char **ret) {
const char *e;
char *s;
@@ -112,7 +89,7 @@ static int pull_tar(int argc, char *argv[], void *userdata) {
if (argc >= 3)
local = argv[2];
else {
- r = url_final_component(url, &l);
+ r = import_url_last_component(url, &l);
if (r < 0)
return log_error_errno(r, "Failed to get final component of URL: %m");
@@ -238,7 +215,7 @@ static int pull_raw(int argc, char *argv[], void *userdata) {
if (argc >= 3)
local = argv[2];
else {
- r = url_final_component(url, &l);
+ r = import_url_last_component(url, &l);
if (r < 0)
return log_error_errno(r, "Failed to get final component of URL: %m");
commit 88a1aadc48e5bbefd2e689db099569ec4c3c1e4b
Author: Lennart Poettering <lennart at poettering.net>
Date: Tue Jan 20 15:06:01 2015 +0100
import: be less aggressive when allocating memory for downloaded payload
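Tracking the allocated size of the payload buffer separately from the bytes in use is what allows growing it in larger steps instead of resizing it on every write callback; the hunk below resets that counter once the buffered payload has been handed off. A minimal sketch of the used-vs-allocated growth pattern, with illustrative names (not systemd's GREEDY_REALLOC() helper itself):

    #include <stdlib.h>
    #include <string.h>

    struct buffer {
            void *data;
            size_t size;            /* bytes in use       (payload_size)      */
            size_t allocated;       /* bytes allocated    (payload_allocated) */
    };

    /* Append sz bytes, growing the allocation by at least a factor of two
     * so that many small appends cost O(n) copying instead of O(n^2). */
    static int buffer_append(struct buffer *b, const void *p, size_t sz) {
            if (b->size + sz > b->allocated) {
                    size_t n = b->allocated > 0 ? b->allocated * 2 : 64;
                    void *q;

                    if (n < b->size + sz)
                            n = b->size + sz;

                    q = realloc(b->data, n);
                    if (!q)
                            return -1;

                    b->data = q;
                    b->allocated = n;
            }

            memcpy((unsigned char*) b->data + b->size, p, sz);
            b->size += sz;
            return 0;
    }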
diff --git a/src/import/import-job.c b/src/import/import-job.c
index fa6dea5..37f8ef7 100644
--- a/src/import/import-job.c
+++ b/src/import/import-job.c
@@ -369,6 +369,7 @@ static int import_job_detect_compression(ImportJob *j) {
j->payload = NULL;
j->payload_size = 0;
+ j->payload_allocated = 0;
j->state = IMPORT_JOB_RUNNING;
commit 68c913fd751f5c750e3c786b4ed512154b354599
Author: Lennart Poettering <lennart at poettering.net>
Date: Tue Jan 20 15:05:25 2015 +0100
import: improve logging a bit
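Besides naming the URL in the download messages, the added dlnow < dltotal condition suppresses a final status line that would only duplicate the new "Download of ... complete." message; the once-per-second and only-on-change checks remain. A minimal sketch of the combined throttling condition, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    #define USEC_PER_SEC 1000000ULL
    typedef uint64_t usec_t;

    struct progress {
            unsigned percent;
            usec_t last_status_usec;
    };

    /* Emit a status line at most once per second, only when the percentage
     * changed, and never for the final 100% update (the completion message
     * covers that). */
    static bool progress_should_log(struct progress *p, usec_t now,
                                    uint64_t dlnow, uint64_t dltotal) {
            unsigned percent;

            if (dltotal == 0)
                    return false;

            percent = (unsigned) (100 * dlnow / dltotal);

            if (now <= p->last_status_usec + USEC_PER_SEC ||
                percent == p->percent ||
                dlnow >= dltotal)
                    return false;

            p->percent = percent;
            p->last_status_usec = now;
            return true;
    }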
diff --git a/src/import/import-job.c b/src/import/import-job.c
index 5a4ea69..fa6dea5 100644
--- a/src/import/import-job.c
+++ b/src/import/import-job.c
@@ -57,9 +57,10 @@ static void import_job_finish(ImportJob *j, int ret) {
j->state == IMPORT_JOB_FAILED)
return;
- if (ret == 0)
+ if (ret == 0) {
j->state = IMPORT_JOB_DONE;
- else {
+ log_info("Download of %s complete.", j->url);
+ } else {
j->state = IMPORT_JOB_FAILED;
j->error = ret;
}
@@ -481,7 +482,7 @@ static size_t import_job_header_callback(void *contents, size_t size, size_t nme
goto fail;
}
- log_info("Downloading %s.", format_bytes(bytes, sizeof(bytes), j->content_length));
+ log_info("Downloading %s for %s.", format_bytes(bytes, sizeof(bytes), j->content_length), j->url);
}
return sz;
@@ -518,7 +519,8 @@ static int import_job_progress_callback(void *userdata, curl_off_t dltotal, curl
n = now(CLOCK_MONOTONIC);
if (n > j->last_status_usec + USEC_PER_SEC &&
- percent != j->progress_percent) {
+ percent != j->progress_percent &&
+ dlnow < dltotal) {
char buf[FORMAT_TIMESPAN_MAX];
if (n - j->start_usec > USEC_PER_SEC && dlnow > 0) {
commit 0d6e763b48cabe8899a20823b015c9a988e38659
Author: Lennart Poettering <lennart at poettering.net>
Date: Tue Jan 20 03:00:07 2015 +0100
import: port pull-raw to helper tools implemented for pull-tar
This allows us to reuse a lot more code and simplify pull-raw
drastically.
diff --git a/src/import/import-raw.c b/src/import/import-raw.c
index 1fe27b6..635779c 100644
--- a/src/import/import-raw.c
+++ b/src/import/import-raw.c
@@ -22,242 +22,116 @@
#include <sys/xattr.h>
#include <linux/fs.h>
#include <curl/curl.h>
-#include <lzma.h>
-#include "hashmap.h"
#include "utf8.h"
-#include "curl-util.h"
-#include "qcow2-util.h"
#include "strv.h"
#include "copy.h"
+#include "btrfs-util.h"
+#include "util.h"
+#include "macro.h"
+#include "mkdir.h"
+#include "curl-util.h"
+#include "qcow2-util.h"
+#include "import-job.h"
#include "import-util.h"
#include "import-raw.h"
typedef struct RawImportFile RawImportFile;
-struct RawImportFile {
- RawImport *import;
-
- char *url;
- char *local;
-
- CURL *curl;
- struct curl_slist *request_header;
-
- char *temp_path;
- char *final_path;
- char *etag;
- char **old_etags;
-
- uint64_t content_length;
- uint64_t written_compressed;
- uint64_t written_uncompressed;
-
- void *payload;
- size_t payload_size;
-
- usec_t mtime;
-
- bool force_local;
- bool done;
-
- int disk_fd;
-
- lzma_stream lzma;
- bool compressed;
-
- unsigned progress_percent;
- usec_t start_usec;
- usec_t last_status_usec;
-};
-
struct RawImport {
sd_event *event;
CurlGlue *glue;
char *image_root;
- Hashmap *files;
- raw_import_on_finished on_finished;
- void *userdata;
+ ImportJob *raw_job;
- bool finished;
-};
+ RawImportFinished on_finished;
+ void *userdata;
-#define FILENAME_ESCAPE "/.#\"\'"
+ char *local;
+ bool force_local;
-#define RAW_MAX_SIZE (1024LLU*1024LLU*1024LLU*8) /* 8 GB */
+ char *temp_path;
+ char *final_path;
+};
-static RawImportFile *raw_import_file_unref(RawImportFile *f) {
- if (!f)
+RawImport* raw_import_unref(RawImport *i) {
+ if (!i)
return NULL;
- if (f->import)
- curl_glue_remove_and_free(f->import->glue, f->curl);
- curl_slist_free_all(f->request_header);
+ import_job_unref(i->raw_job);
- safe_close(f->disk_fd);
+ curl_glue_unref(i->glue);
+ sd_event_unref(i->event);
- free(f->final_path);
-
- if (f->temp_path) {
- unlink(f->temp_path);
- free(f->temp_path);
+ if (i->temp_path) {
+ (void) unlink(i->temp_path);
+ free(i->temp_path);
}
- lzma_end(&f->lzma);
- free(f->url);
- free(f->local);
- free(f->etag);
- strv_free(f->old_etags);
- free(f->payload);
- free(f);
+ free(i->final_path);
+ free(i->image_root);
+ free(i->local);
+ free(i);
return NULL;
}
-DEFINE_TRIVIAL_CLEANUP_FUNC(RawImportFile*, raw_import_file_unref);
-
-static void raw_import_finish(RawImport *import, int error) {
- assert(import);
-
- if (import->finished)
- return;
-
- import->finished = true;
-
- if (import->on_finished)
- import->on_finished(import, error, import->userdata);
- else
- sd_event_exit(import->event, error);
-}
-
-static int raw_import_file_make_final_path(RawImportFile *f) {
- _cleanup_free_ char *escaped_url = NULL, *escaped_etag = NULL;
-
- assert(f);
+int raw_import_new(RawImport **ret, sd_event *event, const char *image_root, RawImportFinished on_finished, void *userdata) {
+ _cleanup_(raw_import_unrefp) RawImport *i = NULL;
+ int r;
- if (f->final_path)
- return 0;
+ assert(ret);
- escaped_url = xescape(f->url, FILENAME_ESCAPE);
- if (!escaped_url)
+ i = new0(RawImport, 1);
+ if (!i)
return -ENOMEM;
- if (f->etag) {
- escaped_etag = xescape(f->etag, FILENAME_ESCAPE);
- if (!escaped_etag)
- return -ENOMEM;
+ i->on_finished = on_finished;
+ i->userdata = userdata;
- f->final_path = strjoin(f->import->image_root, "/.raw-", escaped_url, ".", escaped_etag, ".raw", NULL);
- } else
- f->final_path = strjoin(f->import->image_root, "/.raw-", escaped_url, ".raw", NULL);
- if (!f->final_path)
+ i->image_root = strdup(image_root ?: "/var/lib/machines");
+ if (!i->image_root)
return -ENOMEM;
- return 0;
-}
-
-static int raw_import_file_make_local_copy(RawImportFile *f) {
- _cleanup_free_ char *tp = NULL;
- _cleanup_close_ int dfd = -1;
- const char *p;
- int r;
-
- assert(f);
-
- if (!f->local)
- return 0;
-
- if (f->disk_fd >= 0) {
- if (lseek(f->disk_fd, SEEK_SET, 0) == (off_t) -1)
- return log_error_errno(errno, "Failed to seek to beginning of vendor image: %m");
- } else {
- r = raw_import_file_make_final_path(f);
+ if (event)
+ i->event = sd_event_ref(event);
+ else {
+ r = sd_event_default(&i->event);
if (r < 0)
- return log_oom();
-
- f->disk_fd = open(f->final_path, O_RDONLY|O_NOCTTY|O_CLOEXEC);
- if (f->disk_fd < 0)
- return log_error_errno(errno, "Failed to open vendor image: %m");
+ return r;
}
- p = strappenda(f->import->image_root, "/", f->local, ".raw");
- if (f->force_local)
- (void) rm_rf_dangerous(p, false, true, false);
-
- r = tempfn_random(p, &tp);
- if (r < 0)
- return log_oom();
-
- dfd = open(tp, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY|O_CLOEXEC, 0664);
- if (dfd < 0)
- return log_error_errno(errno, "Failed to create writable copy of image: %m");
-
- /* Turn off COW writing. This should greatly improve
- * performance on COW file systems like btrfs, since it
- * reduces fragmentation caused by not allowing in-place
- * writes. */
- r = chattr_fd(dfd, true, FS_NOCOW_FL);
+ r = curl_glue_new(&i->glue, i->event);
if (r < 0)
- log_warning_errno(errno, "Failed to set file attributes on %s: %m", tp);
-
- r = copy_bytes(f->disk_fd, dfd, (off_t) -1, true);
- if (r < 0) {
- unlink(tp);
- return log_error_errno(r, "Failed to make writable copy of image: %m");
- }
-
- (void) copy_times(f->disk_fd, dfd);
- (void) copy_xattr(f->disk_fd, dfd);
+ return r;
- dfd = safe_close(dfd);
+ i->glue->on_finished = import_job_curl_on_finished;
+ i->glue->userdata = i;
- r = rename(tp, p);
- if (r < 0) {
- unlink(tp);
- return log_error_errno(errno, "Failed to move writable image into place: %m");
- }
+ *ret = i;
+ i = NULL;
- log_info("Created new local image %s.", p);
return 0;
}
-static void raw_import_file_success(RawImportFile *f) {
- int r;
-
- assert(f);
-
- f->done = true;
-
- r = raw_import_file_make_local_copy(f);
- if (r < 0)
- goto finish;
-
- f->disk_fd = safe_close(f->disk_fd);
- r = 0;
-
-finish:
- raw_import_finish(f->import, r);
-}
-
-static int raw_import_maybe_convert_qcow2(RawImportFile *f) {
+static int raw_import_maybe_convert_qcow2(RawImport *i) {
_cleanup_close_ int converted_fd = -1;
_cleanup_free_ char *t = NULL;
int r;
- assert(f);
- assert(f->disk_fd);
- assert(f->temp_path);
+ assert(i);
+ assert(i->raw_job);
- r = qcow2_detect(f->disk_fd);
+ r = qcow2_detect(i->raw_job->disk_fd);
if (r < 0)
return log_error_errno(r, "Failed to detect whether this is a QCOW2 image: %m");
if (r == 0)
return 0;
/* This is a QCOW2 image, let's convert it */
- r = tempfn_random(f->final_path, &t);
+ r = tempfn_random(i->final_path, &t);
if (r < 0)
return log_oom();
@@ -265,677 +139,213 @@ static int raw_import_maybe_convert_qcow2(RawImportFile *f) {
if (converted_fd < 0)
return log_error_errno(errno, "Failed to create %s: %m", t);
+ r = chattr_fd(converted_fd, true, FS_NOCOW_FL);
+ if (r < 0)
+ log_warning_errno(errno, "Failed to set file attributes on %s: %m", t);
+
log_info("Unpacking QCOW2 file.");
- r = qcow2_convert(f->disk_fd, converted_fd);
+ r = qcow2_convert(i->raw_job->disk_fd, converted_fd);
if (r < 0) {
unlink(t);
return log_error_errno(r, "Failed to convert qcow2 image: %m");
}
- unlink(f->temp_path);
- free(f->temp_path);
+ unlink(i->temp_path);
+ free(i->temp_path);
- f->temp_path = t;
+ i->temp_path = t;
t = NULL;
- safe_close(f->disk_fd);
- f->disk_fd = converted_fd;
+ safe_close(i->raw_job->disk_fd);
+ i->raw_job->disk_fd = converted_fd;
converted_fd = -1;
return 1;
}
-static void raw_import_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
- RawImportFile *f = NULL;
- struct stat st;
- CURLcode code;
- long status;
- int r;
-
- if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &f) != CURLE_OK)
- return;
-
- if (!f || f->done)
- return;
-
- f->done = true;
-
- if (result != CURLE_OK) {
- log_error("Transfer failed: %s", curl_easy_strerror(result));
- r = -EIO;
- goto fail;
- }
-
- code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
- if (code != CURLE_OK) {
- log_error("Failed to retrieve response code: %s", curl_easy_strerror(code));
- r = -EIO;
- goto fail;
- } else if (status == 304) {
- log_info("Image already downloaded. Skipping download.");
- raw_import_file_success(f);
- return;
- } else if (status >= 300) {
- log_error("HTTP request to %s failed with code %li.", f->url, status);
- r = -EIO;
- goto fail;
- } else if (status < 200) {
- log_error("HTTP request to %s finished with unexpected code %li.", f->url, status);
- r = -EIO;
- goto fail;
- }
-
- if (f->disk_fd < 0) {
- log_error("No data received.");
- r = -EIO;
- goto fail;
- }
-
- if (f->content_length != (uint64_t) -1 &&
- f->content_length != f->written_compressed) {
- log_error("Download truncated.");
- r = -EIO;
- goto fail;
- }
-
- /* Make sure the file size is right, in case the file was
- * sparse and we just seeked for the last part */
- if (ftruncate(f->disk_fd, f->written_uncompressed) < 0) {
- log_error_errno(errno, "Failed to truncate file: %m");
- r = -errno;
- goto fail;
- }
-
- r = raw_import_maybe_convert_qcow2(f);
- if (r < 0)
- goto fail;
-
- if (f->etag)
- (void) fsetxattr(f->disk_fd, "user.source_etag", f->etag, strlen(f->etag), 0);
- if (f->url)
- (void) fsetxattr(f->disk_fd, "user.source_url", f->url, strlen(f->url), 0);
-
- if (f->mtime != 0) {
- struct timespec ut[2];
-
- timespec_store(&ut[0], f->mtime);
- ut[1] = ut[0];
- (void) futimens(f->disk_fd, ut);
-
- fd_setcrtime(f->disk_fd, f->mtime);
- }
-
- if (fstat(f->disk_fd, &st) < 0) {
- r = log_error_errno(errno, "Failed to stat file: %m");
- goto fail;
- }
-
- /* Mark read-only */
- (void) fchmod(f->disk_fd, st.st_mode & 07444);
-
- assert(f->temp_path);
- assert(f->final_path);
-
- r = rename(f->temp_path, f->final_path);
- if (r < 0) {
- r = log_error_errno(errno, "Failed to move RAW file into place: %m");
- goto fail;
- }
-
- free(f->temp_path);
- f->temp_path = NULL;
-
- log_info("Completed writing vendor image %s.", f->final_path);
-
- raw_import_file_success(f);
- return;
-
-fail:
- raw_import_finish(f->import, r);
-}
-
-static int raw_import_file_open_disk_for_write(RawImportFile *f) {
+static int raw_import_make_local_copy(RawImport *i) {
+ _cleanup_free_ char *tp = NULL;
+ _cleanup_close_ int dfd = -1;
+ const char *p;
int r;
- assert(f);
+ assert(i);
+ assert(i->raw_job);
- if (f->disk_fd >= 0)
+ if (!i->local)
return 0;
- r = raw_import_file_make_final_path(f);
- if (r < 0)
- return log_oom();
-
- if (!f->temp_path) {
- r = tempfn_random(f->final_path, &f->temp_path);
- if (r < 0)
- return log_oom();
- }
-
- f->disk_fd = open(f->temp_path, O_RDWR|O_CREAT|O_EXCL|O_NOCTTY|O_CLOEXEC, 0644);
- if (f->disk_fd < 0)
- return log_error_errno(errno, "Failed to create %s: %m", f->temp_path);
-
- r = chattr_fd(f->disk_fd, true, FS_NOCOW_FL);
- if (r < 0)
- log_warning_errno(errno, "Failed to set file attributes on %s: %m", f->temp_path);
-
- return 0;
-}
-
-static int raw_import_file_write_uncompressed(RawImportFile *f, void *p, size_t sz) {
- ssize_t n;
-
- assert(f);
- assert(p);
- assert(sz > 0);
- assert(f->disk_fd >= 0);
-
- if (f->written_uncompressed + sz < f->written_uncompressed) {
- log_error("File too large, overflow");
- return -EOVERFLOW;
- }
-
- if (f->written_uncompressed + sz > RAW_MAX_SIZE) {
- log_error("File overly large, refusing");
- return -EFBIG;
- }
-
- n = sparse_write(f->disk_fd, p, sz, 64);
- if (n < 0) {
- log_error_errno(errno, "Failed to write file: %m");
- return -errno;
- }
- if ((size_t) n < sz) {
- log_error("Short write");
- return -EIO;
- }
-
- f->written_uncompressed += sz;
-
- return 0;
-}
-
-static int raw_import_file_write_compressed(RawImportFile *f, void *p, size_t sz) {
- int r;
-
- assert(f);
- assert(p);
- assert(sz > 0);
- assert(f->disk_fd >= 0);
-
- if (f->written_compressed + sz < f->written_compressed) {
- log_error("File too large, overflow");
- return -EOVERFLOW;
- }
-
- if (f->content_length != (uint64_t) -1 &&
- f->written_compressed + sz > f->content_length) {
- log_error("Content length incorrect.");
- return -EFBIG;
- }
-
- if (!f->compressed) {
- r = raw_import_file_write_uncompressed(f, p, sz);
- if (r < 0)
- return r;
+ if (i->raw_job->disk_fd >= 0) {
+ if (lseek(i->raw_job->disk_fd, SEEK_SET, 0) == (off_t) -1)
+ return log_error_errno(errno, "Failed to seek to beginning of vendor image: %m");
} else {
- f->lzma.next_in = p;
- f->lzma.avail_in = sz;
-
- while (f->lzma.avail_in > 0) {
- uint8_t buffer[16 * 1024];
- lzma_ret lzr;
-
- f->lzma.next_out = buffer;
- f->lzma.avail_out = sizeof(buffer);
-
- lzr = lzma_code(&f->lzma, LZMA_RUN);
- if (lzr != LZMA_OK && lzr != LZMA_STREAM_END) {
- log_error("Decompression error.");
- return -EIO;
- }
-
- r = raw_import_file_write_uncompressed(f, buffer, sizeof(buffer) - f->lzma.avail_out);
+ if (!i->final_path) {
+ r = import_make_path(i->raw_job->url, i->raw_job->etag, i->image_root, ".raw-", ".raw", &i->final_path);
if (r < 0)
- return r;
+ return log_oom();
}
- }
-
- f->written_compressed += sz;
-
- return 0;
-}
-
-static int raw_import_file_detect_xz(RawImportFile *f) {
- static const uint8_t xz_signature[] = {
- '\xfd', '7', 'z', 'X', 'Z', '\x00'
- };
- lzma_ret lzr;
- int r;
-
- assert(f);
-
- if (f->payload_size < sizeof(xz_signature))
- return 0;
-
- f->compressed = memcmp(f->payload, xz_signature, sizeof(xz_signature)) == 0;
- log_debug("Stream is XZ compressed: %s", yes_no(f->compressed));
-
- if (f->compressed) {
- lzr = lzma_stream_decoder(&f->lzma, UINT64_MAX, LZMA_TELL_UNSUPPORTED_CHECK);
- if (lzr != LZMA_OK) {
- log_error("Failed to initialize LZMA decoder.");
- return -EIO;
- }
- }
-
- r = raw_import_file_open_disk_for_write(f);
- if (r < 0)
- return r;
-
- r = raw_import_file_write_compressed(f, f->payload, f->payload_size);
- if (r < 0)
- return r;
-
- free(f->payload);
- f->payload = NULL;
- f->payload_size = 0;
-
- return 0;
-}
-static size_t raw_import_file_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
- RawImportFile *f = userdata;
- size_t sz = size * nmemb;
- int r;
-
- assert(contents);
- assert(f);
-
- if (f->done) {
- r = -ESTALE;
- goto fail;
+ i->raw_job->disk_fd = open(i->final_path, O_RDONLY|O_NOCTTY|O_CLOEXEC);
+ if (i->raw_job->disk_fd < 0)
+ return log_error_errno(errno, "Failed to open vendor image: %m");
}
- if (f->disk_fd < 0) {
- uint8_t *p;
-
- /* We haven't opened the file yet, let's first check what it actually is */
+ p = strappenda(i->image_root, "/", i->local, ".raw");
- p = realloc(f->payload, f->payload_size + sz);
- if (!p) {
- r = log_oom();
- goto fail;
- }
-
- memcpy(p + f->payload_size, contents, sz);
- f->payload_size = sz;
- f->payload = p;
-
- r = raw_import_file_detect_xz(f);
- if (r < 0)
- goto fail;
-
- return sz;
+ if (i->force_local) {
+ (void) btrfs_subvol_remove(p);
+ (void) rm_rf_dangerous(p, false, true, false);
}
- r = raw_import_file_write_compressed(f, contents, sz);
+ r = tempfn_random(p, &tp);
if (r < 0)
- goto fail;
-
- return sz;
-
-fail:
- raw_import_finish(f->import, r);
- return 0;
-}
-
-static size_t raw_import_file_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
- RawImportFile *f = userdata;
- size_t sz = size * nmemb;
- _cleanup_free_ char *length = NULL, *last_modified = NULL;
- char *etag;
- int r;
-
- assert(contents);
- assert(f);
-
- if (f->done) {
- r = -ESTALE;
- goto fail;
- }
-
- r = curl_header_strdup(contents, sz, "ETag:", &etag);
- if (r < 0) {
- log_oom();
- goto fail;
- }
- if (r > 0) {
- free(f->etag);
- f->etag = etag;
-
- if (strv_contains(f->old_etags, f->etag)) {
- log_info("Image already downloaded. Skipping download.");
- raw_import_file_success(f);
- return sz;
- }
-
- return sz;
- }
-
- r = curl_header_strdup(contents, sz, "Content-Length:", &length);
- if (r < 0) {
- log_oom();
- goto fail;
- }
- if (r > 0) {
- (void) safe_atou64(length, &f->content_length);
+ return log_oom();
- if (f->content_length != (uint64_t) -1) {
- char bytes[FORMAT_BYTES_MAX];
- log_info("Downloading %s.", format_bytes(bytes, sizeof(bytes), f->content_length));
- }
+ dfd = open(tp, O_WRONLY|O_CREAT|O_EXCL|O_NOCTTY|O_CLOEXEC, 0664);
+ if (dfd < 0)
+ return log_error_errno(errno, "Failed to create writable copy of image: %m");
- return sz;
- }
+ /* Turn off COW writing. This should greatly improve
+ * performance on COW file systems like btrfs, since it
+ * reduces fragmentation caused by not allowing in-place
+ * writes. */
+ r = chattr_fd(dfd, true, FS_NOCOW_FL);
+ if (r < 0)
+ log_warning_errno(errno, "Failed to set file attributes on %s: %m", tp);
- r = curl_header_strdup(contents, sz, "Last-Modified:", &last_modified);
+ r = copy_bytes(i->raw_job->disk_fd, dfd, (off_t) -1, true);
if (r < 0) {
- log_oom();
- goto fail;
- }
- if (r > 0) {
- (void) curl_parse_http_time(last_modified, &f->mtime);
- return sz;
+ unlink(tp);
+ return log_error_errno(r, "Failed to make writable copy of image: %m");
}
- return sz;
-
-fail:
- raw_import_finish(f->import, r);
- return 0;
-}
-
-static int raw_import_file_progress_callback(void *userdata, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) {
- RawImportFile *f = userdata;
- unsigned percent;
- usec_t n;
-
- assert(f);
+ (void) copy_times(i->raw_job->disk_fd, dfd);
+ (void) copy_xattr(i->raw_job->disk_fd, dfd);
- if (dltotal <= 0)
- return 0;
-
- percent = ((100 * dlnow) / dltotal);
- n = now(CLOCK_MONOTONIC);
-
- if (n > f->last_status_usec + USEC_PER_SEC &&
- percent != f->progress_percent) {
- char buf[FORMAT_TIMESPAN_MAX];
-
- if (n - f->start_usec > USEC_PER_SEC && dlnow > 0) {
- usec_t left, done;
-
- done = n - f->start_usec;
- left = (usec_t) (((double) done * (double) dltotal) / dlnow) - done;
-
- log_info("Got %u%%. %s left.", percent, format_timespan(buf, sizeof(buf), left, USEC_PER_SEC));
- } else
- log_info("Got %u%%.", percent);
+ dfd = safe_close(dfd);
- f->progress_percent = percent;
- f->last_status_usec = n;
+ r = rename(tp, p);
+ if (r < 0) {
+ unlink(tp);
+ return log_error_errno(errno, "Failed to move writable image into place: %m");
}
+ log_info("Created new local image '%s'.", i->local);
return 0;
}
-static int raw_import_file_find_old_etags(RawImportFile *f) {
- _cleanup_free_ char *escaped_url = NULL;
- _cleanup_closedir_ DIR *d = NULL;
- struct dirent *de;
+static void raw_import_job_on_finished(ImportJob *j) {
+ RawImport *i;
int r;
- escaped_url = xescape(f->url, FILENAME_ESCAPE);
- if (!escaped_url)
- return -ENOMEM;
-
- d = opendir(f->import->image_root);
- if (!d) {
- if (errno == ENOENT)
- return 0;
+ assert(j);
+ assert(j->userdata);
- return -errno;
+ i = j->userdata;
+ if (j->error != 0) {
+ r = j->error;
+ goto finish;
}
- FOREACH_DIRENT_ALL(de, d, return -errno) {
- const char *a, *b;
- char *u;
-
- if (de->d_type != DT_UNKNOWN &&
- de->d_type != DT_REG)
- continue;
-
- a = startswith(de->d_name, ".raw-");
- if (!a)
- continue;
-
- a = startswith(a, escaped_url);
- if (!a)
- continue;
+ /* This is invoked if either the download completed
+ * successfully, or the download was skipped because we
+ * already have the etag. */
- a = startswith(a, ".");
- if (!a)
- continue;
-
- b = endswith(de->d_name, ".raw");
- if (!b)
- continue;
-
- if (a >= b)
- continue;
+ if (j->disk_fd >= 0) {
+ r = raw_import_maybe_convert_qcow2(i);
+ if (r < 0)
+ goto finish;
- u = cunescape_length(a, b - a);
- if (!u)
- return -ENOMEM;
+ r = import_make_read_only_fd(j->disk_fd);
+ if (r < 0)
+ goto finish;
- if (!http_etag_is_valid(u)) {
- free(u);
- continue;
+ r = rename(i->temp_path, i->final_path);
+ if (r < 0) {
+ r = log_error_errno(errno, "Failed to move RAW file into place: %m");
+ goto finish;
}
- r = strv_consume(&f->old_etags, u);
- if (r < 0)
- return r;
+ free(i->temp_path);
+ i->temp_path = NULL;
}
- return 0;
-}
-
-static int raw_import_file_begin(RawImportFile *f) {
- int r;
-
- assert(f);
- assert(!f->curl);
-
- log_info("Getting %s.", f->url);
-
- r = raw_import_file_find_old_etags(f);
+ r = raw_import_make_local_copy(i);
if (r < 0)
- return r;
-
- r = curl_glue_make(&f->curl, f->url, f);
- if (r < 0)
- return r;
-
- if (!strv_isempty(f->old_etags)) {
- _cleanup_free_ char *cc = NULL, *hdr = NULL;
-
- cc = strv_join(f->old_etags, ", ");
- if (!cc)
- return -ENOMEM;
+ goto finish;
- hdr = strappend("If-None-Match: ", cc);
- if (!hdr)
- return -ENOMEM;
+ j->disk_fd = safe_close(j->disk_fd);
- f->request_header = curl_slist_new(hdr, NULL);
- if (!f->request_header)
- return -ENOMEM;
+ r = 0;
- if (curl_easy_setopt(f->curl, CURLOPT_HTTPHEADER, f->request_header) != CURLE_OK)
- return -EIO;
- }
+finish:
+ if (i->on_finished)
+ i->on_finished(i, r, i->userdata);
+ else
+ sd_event_exit(i->event, r);
+}
- if (curl_easy_setopt(f->curl, CURLOPT_WRITEFUNCTION, raw_import_file_write_callback) != CURLE_OK)
- return -EIO;
+static int raw_import_job_on_open_disk(ImportJob *j) {
+ RawImport *i;
+ int r;
- if (curl_easy_setopt(f->curl, CURLOPT_WRITEDATA, f) != CURLE_OK)
- return -EIO;
+ assert(j);
+ assert(j->userdata);
- if (curl_easy_setopt(f->curl, CURLOPT_HEADERFUNCTION, raw_import_file_header_callback) != CURLE_OK)
- return -EIO;
+ i = j->userdata;
- if (curl_easy_setopt(f->curl, CURLOPT_HEADERDATA, f) != CURLE_OK)
- return -EIO;
+ r = import_make_path(j->url, j->etag, i->image_root, ".raw-", ".raw", &i->final_path);
+ if (r < 0)
+ return log_oom();
- if (curl_easy_setopt(f->curl, CURLOPT_XFERINFOFUNCTION, raw_import_file_progress_callback) != CURLE_OK)
- return -EIO;
+ r = tempfn_random(i->final_path, &i->temp_path);
+ if (r <0)
+ return log_oom();
- if (curl_easy_setopt(f->curl, CURLOPT_XFERINFODATA, f) != CURLE_OK)
- return -EIO;
+ mkdir_parents_label(i->temp_path, 0700);
- if (curl_easy_setopt(f->curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK)
- return -EIO;
+ j->disk_fd = open(i->temp_path, O_RDWR|O_CREAT|O_EXCL|O_NOCTTY|O_CLOEXEC, 0644);
+ if (j->disk_fd < 0)
+ return log_error_errno(errno, "Failed to create %s: %m", i->temp_path);
- r = curl_glue_add(f->import->glue, f->curl);
+ r = chattr_fd(j->disk_fd, true, FS_NOCOW_FL);
if (r < 0)
- return r;
+ log_warning_errno(errno, "Failed to set file attributes on %s: %m", i->temp_path);
return 0;
}
-int raw_import_new(RawImport **import, sd_event *event, const char *image_root, raw_import_on_finished on_finished, void *userdata) {
- _cleanup_(raw_import_unrefp) RawImport *i = NULL;
+int raw_import_pull(RawImport *i, const char *url, const char *local, bool force_local) {
int r;
- assert(import);
- assert(image_root);
+ assert(i);
- i = new0(RawImport, 1);
- if (!i)
- return -ENOMEM;
+ if (i->raw_job)
+ return -EBUSY;
- i->on_finished = on_finished;
- i->userdata = userdata;
+ if (!http_url_is_valid(url))
+ return -EINVAL;
- i->image_root = strdup(image_root);
- if (!i->image_root)
- return -ENOMEM;
+ if (local && !machine_name_is_valid(local))
+ return -EINVAL;
- if (event)
- i->event = sd_event_ref(event);
- else {
- r = sd_event_default(&i->event);
- if (r < 0)
- return r;
- }
-
- r = curl_glue_new(&i->glue, i->event);
+ r = free_and_strdup(&i->local, local);
if (r < 0)
return r;
- i->glue->on_finished = raw_import_curl_on_finished;
- i->glue->userdata = i;
-
- *import = i;
- i = NULL;
-
- return 0;
-}
-
-RawImport* raw_import_unref(RawImport *import) {
- RawImportFile *f;
-
- if (!import)
- return NULL;
-
- while ((f = hashmap_steal_first(import->files)))
- raw_import_file_unref(f);
- hashmap_free(import->files);
-
- curl_glue_unref(import->glue);
- sd_event_unref(import->event);
-
- free(import->image_root);
- free(import);
-
- return NULL;
-}
-
-int raw_import_cancel(RawImport *import, const char *url) {
- RawImportFile *f;
+ i->force_local = force_local;
- assert(import);
- assert(url);
-
- f = hashmap_remove(import->files, url);
- if (!f)
- return 0;
-
- raw_import_file_unref(f);
- return 1;
-}
-
-int raw_import_pull(RawImport *import, const char *url, const char *local, bool force_local) {
- _cleanup_(raw_import_file_unrefp) RawImportFile *f = NULL;
- int r;
-
- assert(import);
- assert(http_url_is_valid(url));
- assert(!local || machine_name_is_valid(local));
-
- if (hashmap_get(import->files, url))
- return -EEXIST;
-
- r = hashmap_ensure_allocated(&import->files, &string_hash_ops);
+ r = import_job_new(&i->raw_job, url, i->glue, i);
if (r < 0)
return r;
- f = new0(RawImportFile, 1);
- if (!f)
- return -ENOMEM;
-
- f->import = import;
- f->disk_fd = -1;
- f->content_length = (uint64_t) -1;
- f->start_usec = now(CLOCK_MONOTONIC);
-
- f->url = strdup(url);
- if (!f->url)
- return -ENOMEM;
-
- if (local) {
- f->local = strdup(local);
- if (!f->local)
- return -ENOMEM;
-
- f->force_local = force_local;
- }
+ i->raw_job->on_finished = raw_import_job_on_finished;
+ i->raw_job->on_open_disk = raw_import_job_on_open_disk;
- r = hashmap_put(import->files, f->url, f);
+ r = import_find_old_etags(url, i->image_root, DT_REG, ".raw-", ".raw", &i->raw_job->old_etags);
if (r < 0)
return r;
- r = raw_import_file_begin(f);
- if (r < 0) {
- raw_import_cancel(import, f->url);
- f = NULL;
- return r;
- }
-
- f = NULL;
- return 0;
+ return import_job_begin(i->raw_job);
}
diff --git a/src/import/import-raw.h b/src/import/import-raw.h
index 17f7a1a..9e23142 100644
--- a/src/import/import-raw.h
+++ b/src/import/import-raw.h
@@ -1,5 +1,7 @@
/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+#pragma once
+
/***
This file is part of systemd.
@@ -24,12 +26,11 @@
typedef struct RawImport RawImport;
-typedef void (*raw_import_on_finished)(RawImport *import, int error, void *userdata);
+typedef void (*RawImportFinished)(RawImport *import, int error, void *userdata);
-int raw_import_new(RawImport **import, sd_event *event, const char *image_root, raw_import_on_finished on_finished, void *userdata);
+int raw_import_new(RawImport **import, sd_event *event, const char *image_root, RawImportFinished on_finished, void *userdata);
RawImport* raw_import_unref(RawImport *import);
DEFINE_TRIVIAL_CLEANUP_FUNC(RawImport*, raw_import_unref);
int raw_import_pull(RawImport *import, const char *url, const char *local, bool force_local);
-int raw_import_cancel(RawImport *import, const char *name);
diff --git a/src/import/import-tar.c b/src/import/import-tar.c
index 43227f6..08839ca 100644
--- a/src/import/import-tar.c
+++ b/src/import/import-tar.c
@@ -22,7 +22,6 @@
#include <sys/prctl.h>
#include <curl/curl.h>
-#include "hashmap.h"
#include "utf8.h"
#include "strv.h"
#include "copy.h"
@@ -46,8 +45,6 @@ struct TarImport {
TarImportFinished on_finished;
void *userdata;
- bool finished;
-
char *local;
bool force_local;
@@ -74,6 +71,7 @@ TarImport* tar_import_unref(TarImport *i) {
if (i->temp_path) {
(void) btrfs_subvol_remove(i->temp_path);
(void) rm_rf_dangerous(i->temp_path, false, true, false);
+ free(i->temp_path);
}
free(i->final_path);
@@ -124,6 +122,28 @@ int tar_import_new(TarImport **ret, sd_event *event, const char *image_root, Tar
return 0;
}
+static int tar_import_make_local_copy(TarImport *i) {
+ int r;
+
+ assert(i);
+ assert(i->tar_job);
+
+ if (!i->local)
+ return 0;
+
+ if (!i->final_path) {
+ r = import_make_path(i->tar_job->url, i->tar_job->etag, i->image_root, ".tar-", NULL, &i->final_path);
+ if (r < 0)
+ return log_oom();
+
+ r = import_make_local_copy(i->final_path, i->image_root, i->local, i->force_local);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
static void tar_import_job_on_finished(ImportJob *j) {
TarImport *i;
int r;
@@ -132,7 +152,6 @@ static void tar_import_job_on_finished(ImportJob *j) {
assert(j->userdata);
i = j->userdata;
-
if (j->error != 0) {
r = j->error;
goto finish;
@@ -160,25 +179,18 @@ static void tar_import_job_on_finished(ImportJob *j) {
r = log_error_errno(errno, "Failed to rename to final image name: %m");
goto finish;
}
- }
- if (i->local) {
- if (!i->final_path) {
- r = import_make_path(j->url, j->etag, i->image_root, ".tar-", NULL, &i->final_path);
- if (r < 0)
- goto finish;
- }
-
- r = import_make_local_copy(i->final_path, i->image_root, i->local, i->force_local);
- if (r < 0)
- goto finish;
+ free(i->temp_path);
+ i->temp_path = NULL;
}
+ r = tar_import_make_local_copy(i);
+ if (r < 0)
+ goto finish;
+
r = 0;
finish:
- i->finished = true;
-
if (i->on_finished)
i->on_finished(i, r, i->userdata);
else
diff --git a/src/import/import-util.c b/src/import/import-util.c
index 24c8ac1..341a566 100644
--- a/src/import/import-util.c
+++ b/src/import/import-util.c
@@ -147,28 +147,47 @@ int import_make_local_copy(const char *final, const char *image_root, const char
return 0;
}
-int import_make_read_only(const char *path) {
+int import_make_read_only_fd(int fd) {
int r;
- r = btrfs_subvol_set_read_only(path, true);
- if (r == -ENOTTY) {
+ assert(fd >= 0);
+
+ /* First, let's make this a read-only subvolume if it refers
+ * to a subvolume */
+ r = btrfs_subvol_set_read_only_fd(fd, true);
+ if (r == -ENOTTY || r == -ENOTDIR || r == -EINVAL) {
struct stat st;
- r = stat(path, &st);
+ /* This doesn't refer to a subvolume, or the file
+ * system isn't even btrfs. In that case, fall back to
+ * chmod()ing */
+
+ r = fstat(fd, &st);
if (r < 0)
return log_error_errno(errno, "Failed to stat temporary image: %m");
- if (chmod(path, st.st_mode & 0755) < 0)
+ /* Drop "w" flag */
+ if (fchmod(fd, st.st_mode & 07555) < 0)
return log_error_errno(errno, "Failed to chmod() final image: %m");
return 0;
- }
- if (r < 0)
- return log_error_errno(r, "Failed to mark final image read-only: %m");
+
+ } else if (r < 0)
+ return log_error_errno(r, "Failed to make subvolume read-only: %m");
return 0;
}
+int import_make_read_only(const char *path) {
+ _cleanup_close_ int fd = -1;
+
+ fd = open(path, O_RDONLY|O_NOCTTY|O_CLOEXEC);
+ if (fd < 0)
+ return log_error_errno(errno, "Failed to open %s: %m", path);
+
+ return import_make_read_only_fd(fd);
+}
+
int import_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret) {
_cleanup_free_ char *escaped_url = NULL;
char *path;
diff --git a/src/import/import-util.h b/src/import/import-util.h
index ad5ab50..a930ea4 100644
--- a/src/import/import-util.h
+++ b/src/import/import-util.h
@@ -26,6 +26,10 @@
bool http_etag_is_valid(const char *etag);
int import_make_local_copy(const char *final, const char *root, const char *local, bool force_local);
+
int import_find_old_etags(const char *url, const char *root, int dt, const char *prefix, const char *suffix, char ***etags);
+
+int import_make_read_only_fd(int fd);
int import_make_read_only(const char *path);
+
int import_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret);
diff --git a/src/import/import.c b/src/import/import.c
index c0fc224..861a448 100644
--- a/src/import/import.c
+++ b/src/import/import.c
@@ -226,7 +226,7 @@ static int pull_raw(int argc, char *argv[], void *userdata) {
_cleanup_(raw_import_unrefp) RawImport *import = NULL;
_cleanup_event_unref_ sd_event *event = NULL;
const char *url, *local;
- _cleanup_free_ char *l = NULL;
+ _cleanup_free_ char *l = NULL, *ll = NULL;
int r;
url = argv[1];
@@ -238,44 +238,37 @@ static int pull_raw(int argc, char *argv[], void *userdata) {
if (argc >= 3)
local = argv[2];
else {
- const char *e, *p;
-
- e = url + strlen(url);
- while (e > url && e[-1] == '/')
- e--;
-
- p = e;
- while (p > url && p[-1] != '/')
- p--;
+ r = url_final_component(url, &l);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get final component of URL: %m");
- local = strndupa(p, e - p);
+ local = l;
}
if (isempty(local) || streq(local, "-"))
local = NULL;
if (local) {
- const char *p;
-
- r = strip_raw_suffixes(local, &l);
+ r = strip_raw_suffixes(local, &ll);
if (r < 0)
return log_oom();
- local = l;
+ local = ll;
if (!machine_name_is_valid(local)) {
log_error("Local image name '%s' is not valid.", local);
return -EINVAL;
}
- p = strappenda(arg_image_root, "/", local, ".raw");
- if (laccess(p, F_OK) >= 0) {
- if (!arg_force) {
- log_info("Image '%s' already exists.", local);
- return 0;
+ if (!arg_force) {
+ r = image_find(local, NULL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to check whether image '%s' exists: %m", local);
+ else if (r > 0) {
+ log_error_errno(EEXIST, "Image '%s' already exists.", local);
+ return -EEXIST;
}
- } else if (errno != ENOENT)
- return log_error_errno(errno, "Can't check if image '%s' already exists: %m", local);
+ }
log_info("Pulling '%s', saving as '%s'.", url, local);
} else
@@ -417,7 +410,7 @@ static int help(int argc, char *argv[], void *userdata) {
" --image-root= Image root directory\n"
" --dkr-index-url=URL Specify index URL to use for downloads\n\n"
"Commands:\n"
- " pull-tar URL Download a TAR image\n"
+ " pull-tar URL [NAME] Download a TAR image\n"
" pull-raw URL [NAME] Download a RAW image\n"
" pull-dkr REMOTE [NAME] Download a DKR image\n",
program_invocation_short_name);
diff --git a/src/shared/btrfs-util.c b/src/shared/btrfs-util.c
index 254483c..b34ac8b 100644
--- a/src/shared/btrfs-util.c
+++ b/src/shared/btrfs-util.c
@@ -228,14 +228,18 @@ int btrfs_subvol_remove(const char *path) {
return 0;
}
-int btrfs_subvol_set_read_only(const char *path, bool b) {
- _cleanup_close_ int fd = -1;
+int btrfs_subvol_set_read_only_fd(int fd, bool b) {
uint64_t flags, nflags;
+ struct stat st;
- fd = open(path, O_RDONLY|O_NOCTTY|O_CLOEXEC);
- if (fd < 0)
+ assert(fd >= 0);
+
+ if (fstat(fd, &st) < 0)
return -errno;
+ if (!S_ISDIR(st.st_mode) || st.st_ino != 256)
+ return -EINVAL;
+
if (ioctl(fd, BTRFS_IOC_SUBVOL_GETFLAGS, &flags) < 0)
return -errno;
@@ -253,6 +257,16 @@ int btrfs_subvol_set_read_only(const char *path, bool b) {
return 0;
}
+int btrfs_subvol_set_read_only(const char *path, bool b) {
+ _cleanup_close_ int fd = -1;
+
+ fd = open(path, O_RDONLY|O_NOCTTY|O_CLOEXEC|O_DIRECTORY);
+ if (fd < 0)
+ return -errno;
+
+ return btrfs_subvol_set_read_only_fd(fd, b);
+}
+
int btrfs_subvol_get_read_only_fd(int fd) {
uint64_t flags;
diff --git a/src/shared/btrfs-util.h b/src/shared/btrfs-util.h
index 28946c6..d5249d1 100644
--- a/src/shared/btrfs-util.h
+++ b/src/shared/btrfs-util.h
@@ -48,6 +48,7 @@ int btrfs_subvol_make_label(const char *path);
int btrfs_subvol_remove(const char *path);
int btrfs_subvol_snapshot(const char *old_path, const char *new_path, bool read_only, bool fallback_copy);
+int btrfs_subvol_set_read_only_fd(int fd, bool b);
int btrfs_subvol_set_read_only(const char *path, bool b);
int btrfs_subvol_get_read_only_fd(int fd);
int btrfs_subvol_get_id_fd(int fd, uint64_t *ret);
commit 56ebfaf1ca185a93ffb372b6e1a1fa3a957d93cd
Author: Lennart Poettering <lennart at poettering.net>
Date: Tue Jan 20 01:36:11 2015 +0100
import: add support for pulling raw tarballs as containers
Ubuntu provides their cloud images optionally as tarballs, hence also
support downloading those.
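The new generic ImportJob downloader buffers the start of the payload and picks the decompressor (xz, gzip, or none) from its first bytes. A minimal sketch of that detection, with illustrative names; the actual code is import_job_detect_compression() in the new import-job.c below:

    #include <stddef.h>
    #include <string.h>

    enum compression { UNCOMPRESSED, XZ, GZIP };

    /* Decide on the stream format from the first payload bytes.  If fewer
     * than six bytes (the longer signature) are available, fall back to
     * "uncompressed"; the real job keeps buffering until it can decide. */
    static enum compression detect_compression(const unsigned char *p, size_t sz) {
            static const unsigned char xz_signature[]   = { 0xfd, '7', 'z', 'X', 'Z', 0x00 };
            static const unsigned char gzip_signature[] = { 0x1f, 0x8b };

            if (sz < sizeof(xz_signature))
                    return UNCOMPRESSED;

            if (memcmp(p, xz_signature, sizeof(xz_signature)) == 0)
                    return XZ;
            if (memcmp(p, gzip_signature, sizeof(gzip_signature)) == 0)
                    return GZIP;

            return UNCOMPRESSED;
    }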
diff --git a/Makefile.am b/Makefile.am
index 788e634..f165042 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -5246,8 +5246,14 @@ systemd_import_SOURCES = \
src/import/import.c \
src/import/import-raw.c \
src/import/import-raw.h \
+ src/import/import-tar.c \
+ src/import/import-tar.h \
src/import/import-dkr.c \
src/import/import-dkr.h \
+ src/import/import-job.c \
+ src/import/import-job.h \
+ src/import/import-util.c \
+ src/import/import-util.h \
src/import/curl-util.c \
src/import/curl-util.h \
src/import/aufs-util.c \
diff --git a/src/import/import-dkr.c b/src/import/import-dkr.c
index b54a1a6..8dfd270 100644
--- a/src/import/import-dkr.c
+++ b/src/import/import-dkr.c
@@ -422,7 +422,7 @@ static void dkr_import_name_maybe_finish(DkrImportName *name) {
return;
}
- log_info("Created new local image %s.", p);
+ log_info("Created new local image '%s'.", name->local);
}
dkr_import_finish(name->import, 0);
diff --git a/src/import/import-job.c b/src/import/import-job.c
new file mode 100644
index 0000000..5a4ea69
--- /dev/null
+++ b/src/import/import-job.c
@@ -0,0 +1,629 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+/***
+ This file is part of systemd.
+
+ Copyright 2015 Lennart Poettering
+
+ systemd is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version.
+
+ systemd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include <sys/xattr.h>
+
+#include "strv.h"
+#include "import-job.h"
+
+ImportJob* import_job_unref(ImportJob *j) {
+ if (!j)
+ return NULL;
+
+ curl_glue_remove_and_free(j->glue, j->curl);
+ curl_slist_free_all(j->request_header);
+
+ safe_close(j->disk_fd);
+
+ if (j->compressed == IMPORT_JOB_XZ)
+ lzma_end(&j->xz);
+ else if (j->compressed == IMPORT_JOB_GZIP)
+ inflateEnd(&j->gzip);
+
+ free(j->url);
+ free(j->etag);
+ strv_free(j->old_etags);
+ free(j->payload);
+
+ free(j);
+
+ return NULL;
+}
+
+DEFINE_TRIVIAL_CLEANUP_FUNC(ImportJob*, import_job_unref);
+
+static void import_job_finish(ImportJob *j, int ret) {
+ assert(j);
+
+ if (j->state == IMPORT_JOB_DONE ||
+ j->state == IMPORT_JOB_FAILED)
+ return;
+
+ if (ret == 0)
+ j->state = IMPORT_JOB_DONE;
+ else {
+ j->state = IMPORT_JOB_FAILED;
+ j->error = ret;
+ }
+
+ if (j->on_finished)
+ j->on_finished(j);
+}
+
+void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result) {
+ ImportJob *j = NULL;
+ CURLcode code;
+ long status;
+ int r;
+
+ if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, &j) != CURLE_OK)
+ return;
+
+ if (!j || j->state == IMPORT_JOB_DONE || j->state == IMPORT_JOB_FAILED)
+ return;
+
+ if (result != CURLE_OK) {
+ log_error("Transfer failed: %s", curl_easy_strerror(result));
+ r = -EIO;
+ goto finish;
+ }
+
+ code = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
+ if (code != CURLE_OK) {
+ log_error("Failed to retrieve response code: %s", curl_easy_strerror(code));
+ r = -EIO;
+ goto finish;
+ } else if (status == 304) {
+ log_info("Image already downloaded. Skipping download.");
+ r = 0;
+ goto finish;
+ } else if (status >= 300) {
+ log_error("HTTP request to %s failed with code %li.", j->url, status);
+ r = -EIO;
+ goto finish;
+ } else if (status < 200) {
+ log_error("HTTP request to %s finished with unexpected code %li.", j->url, status);
+ r = -EIO;
+ goto finish;
+ }
+
+ if (j->state != IMPORT_JOB_RUNNING) {
+ log_error("Premature connection termination.");
+ r = -EIO;
+ goto finish;
+ }
+
+ if (j->content_length != (uint64_t) -1 &&
+ j->content_length != j->written_compressed) {
+ log_error("Download truncated.");
+ r = -EIO;
+ goto finish;
+ }
+
+ if (j->disk_fd >= 0 && j->allow_sparse) {
+ /* Make sure the file size is right, in case the file was
+ * sparse and we just seeked for the last part */
+
+ if (ftruncate(j->disk_fd, j->written_uncompressed) < 0) {
+ log_error_errno(errno, "Failed to truncate file: %m");
+ r = -errno;
+ goto finish;
+ }
+
+ if (j->etag)
+ (void) fsetxattr(j->disk_fd, "user.source_etag", j->etag, strlen(j->etag), 0);
+ if (j->url)
+ (void) fsetxattr(j->disk_fd, "user.source_url", j->url, strlen(j->url), 0);
+
+ if (j->mtime != 0) {
+ struct timespec ut[2];
+
+ timespec_store(&ut[0], j->mtime);
+ ut[1] = ut[0];
+ (void) futimens(j->disk_fd, ut);
+
+ (void) fd_setcrtime(j->disk_fd, j->mtime);
+ }
+ }
+
+ r = 0;
+
+finish:
+ import_job_finish(j, r);
+}
+
+
+static int import_job_write_uncompressed(ImportJob *j, void *p, size_t sz) {
+ ssize_t n;
+
+ assert(j);
+ assert(p);
+ assert(sz > 0);
+ assert(j->disk_fd >= 0);
+
+ if (j->written_uncompressed + sz < j->written_uncompressed) {
+ log_error("File too large, overflow");
+ return -EOVERFLOW;
+ }
+
+ if (j->written_uncompressed + sz > j->uncompressed_max) {
+ log_error("File overly large, refusing");
+ return -EFBIG;
+ }
+
+ if (j->disk_fd >= 0) {
+
+ if (j->allow_sparse)
+ n = sparse_write(j->disk_fd, p, sz, 64);
+ else
+ n = write(j->disk_fd, p, sz);
+ if (n < 0) {
+ log_error_errno(errno, "Failed to write file: %m");
+ return -errno;
+ }
+ if ((size_t) n < sz) {
+ log_error("Short write");
+ return -EIO;
+ }
+ } else {
+
+ if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz))
+ return log_oom();
+
+ memcpy((uint8_t*) j->payload + j->payload_size, p, sz);
+ j->payload_size += sz;
+ }
+
+ j->written_uncompressed += sz;
+
+ return 0;
+}
+
+static int import_job_write_compressed(ImportJob *j, void *p, size_t sz) {
+ int r;
+
+ assert(j);
+ assert(p);
+ assert(sz > 0);
+ assert(j->disk_fd >= 0);
+
+ if (j->written_compressed + sz < j->written_compressed) {
+ log_error("File too large, overflow");
+ return -EOVERFLOW;
+ }
+
+ if (j->written_compressed + sz > j->compressed_max) {
+ log_error("File overly large, refusing.");
+ return -EFBIG;
+ }
+
+ if (j->content_length != (uint64_t) -1 &&
+ j->written_compressed + sz > j->content_length) {
+ log_error("Content length incorrect.");
+ return -EFBIG;
+ }
+
+ switch (j->compressed) {
+
+ case IMPORT_JOB_UNCOMPRESSED:
+ r = import_job_write_uncompressed(j, p, sz);
+ if (r < 0)
+ return r;
+
+ break;
+
+ case IMPORT_JOB_XZ:
+ j->xz.next_in = p;
+ j->xz.avail_in = sz;
+
+ while (j->xz.avail_in > 0) {
+ uint8_t buffer[16 * 1024];
+ lzma_ret lzr;
+
+ j->xz.next_out = buffer;
+ j->xz.avail_out = sizeof(buffer);
+
+ lzr = lzma_code(&j->xz, LZMA_RUN);
+ if (lzr != LZMA_OK && lzr != LZMA_STREAM_END) {
+ log_error("Decompression error.");
+ return -EIO;
+ }
+
+ r = import_job_write_uncompressed(j, buffer, sizeof(buffer) - j->xz.avail_out);
+ if (r < 0)
+ return r;
+ }
+
+ break;
+
+ case IMPORT_JOB_GZIP:
+ j->gzip.next_in = p;
+ j->gzip.avail_in = sz;
+
+ while (j->gzip.avail_in > 0) {
+ uint8_t buffer[16 * 1024];
+
+ j->gzip.next_out = buffer;
+ j->gzip.avail_out = sizeof(buffer);
+
+ r = inflate(&j->gzip, Z_NO_FLUSH);
+ if (r != Z_OK && r != Z_STREAM_END) {
+ log_error("Decompression error.");
+ return -EIO;
+ }
+
+ r = import_job_write_uncompressed(j, buffer, sizeof(buffer) - j->gzip.avail_out);
+ if (r < 0)
+ return r;
+ }
+
+ break;
+
+ default:
+ assert_not_reached("Unknown compression");
+ }
+
+ j->written_compressed += sz;
+
+ return 0;
+}
+
+static int import_job_open_disk(ImportJob *j) {
+ int r;
+
+ assert(j);
+
+ if (j->on_open_disk) {
+ r = j->on_open_disk(j);
+ if (r < 0)
+ return r;
+ }
+
+ if (j->disk_fd >= 0) {
+ /* Check if we can do sparse files */
+
+ if (lseek(j->disk_fd, 0, SEEK_SET) == 0)
+ j->allow_sparse = true;
+ else {
+ if (errno != ESPIPE)
+ return log_error_errno(errno, "Failed to seek on file descriptor: %m");
+
+ j->allow_sparse = false;
+ }
+ }
+
+ return 0;
+}
+
+static int import_job_detect_compression(ImportJob *j) {
+ static const uint8_t xz_signature[] = {
+ 0xfd, '7', 'z', 'X', 'Z', 0x00
+ };
+ static const uint8_t gzip_signature[] = {
+ 0x1f, 0x8b
+ };
+
+ _cleanup_free_ uint8_t *stub = NULL;
+ size_t stub_size;
+
+ int r;
+
+ assert(j);
+
+ if (j->payload_size < MAX(sizeof(xz_signature), sizeof(gzip_signature)))
+ return 0;
+
+ if (memcmp(j->payload, xz_signature, sizeof(xz_signature)) == 0)
+ j->compressed = IMPORT_JOB_XZ;
+ else if (memcmp(j->payload, gzip_signature, sizeof(gzip_signature)) == 0)
+ j->compressed = IMPORT_JOB_GZIP;
+ else
+ j->compressed = IMPORT_JOB_UNCOMPRESSED;
+
+ log_debug("Stream is XZ compressed: %s", yes_no(j->compressed == IMPORT_JOB_XZ));
+ log_debug("Stream is GZIP compressed: %s", yes_no(j->compressed == IMPORT_JOB_GZIP));
+
+ if (j->compressed == IMPORT_JOB_XZ) {
+ lzma_ret xzr;
+
+ xzr = lzma_stream_decoder(&j->xz, UINT64_MAX, LZMA_TELL_UNSUPPORTED_CHECK);
+ if (xzr != LZMA_OK) {
+ log_error("Failed to initialize XZ decoder.");
+ return -EIO;
+ }
+ }
+ if (j->compressed == IMPORT_JOB_GZIP) {
+ r = inflateInit2(&j->gzip, 15+16);
+ if (r != Z_OK) {
+ log_error("Failed to initialize gzip decoder.");
+ return -EIO;
+ }
+ }
+
+ r = import_job_open_disk(j);
+ if (r < 0)
+ return r;
+
+ /* Now, take the payload we read so far, and decompress it */
+ stub = j->payload;
+ stub_size = j->payload_size;
+
+ j->payload = NULL;
+ j->payload_size = 0;
+
+ j->state = IMPORT_JOB_RUNNING;
+
+ r = import_job_write_compressed(j, stub, stub_size);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static size_t import_job_write_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
+ ImportJob *j = userdata;
+ size_t sz = size * nmemb;
+ int r;
+
+ assert(contents);
+ assert(j);
+
+ switch (j->state) {
+
+ case IMPORT_JOB_ANALYZING:
+ /* Let's first check what it actually is */
+
+ if (!GREEDY_REALLOC(j->payload, j->payload_allocated, j->payload_size + sz)) {
+ r = log_oom();
+ goto fail;
+ }
+
+ memcpy((uint8_t*) j->payload + j->payload_size, contents, sz);
+ j->payload_size += sz;
+
+ r = import_job_detect_compression(j);
+ if (r < 0)
+ goto fail;
+
+ break;
+
+ case IMPORT_JOB_RUNNING:
+
+ r = import_job_write_compressed(j, contents, sz);
+ if (r < 0)
+ goto fail;
+
+ break;
+
+ case IMPORT_JOB_DONE:
+ case IMPORT_JOB_FAILED:
+ r = -ESTALE;
+ goto fail;
+
+ default:
+ assert_not_reached("Impossible state.");
+ }
+
+ return sz;
+
+fail:
+ import_job_finish(j, r);
+ return 0;
+}
+
+static size_t import_job_header_callback(void *contents, size_t size, size_t nmemb, void *userdata) {
+ ImportJob *j = userdata;
+ size_t sz = size * nmemb;
+ _cleanup_free_ char *length = NULL, *last_modified = NULL;
+ char *etag;
+ int r;
+
+ assert(contents);
+ assert(j);
+
+ if (j->state == IMPORT_JOB_DONE || j->state == IMPORT_JOB_FAILED) {
+ r = -ESTALE;
+ goto fail;
+ }
+
+ assert(j->state == IMPORT_JOB_ANALYZING);
+
+ r = curl_header_strdup(contents, sz, "ETag:", &etag);
+ if (r < 0) {
+ log_oom();
+ goto fail;
+ }
+ if (r > 0) {
+ free(j->etag);
+ j->etag = etag;
+
+ if (strv_contains(j->old_etags, j->etag)) {
+ log_info("Image already downloaded. Skipping download.");
+ import_job_finish(j, 0);
+ return sz;
+ }
+
+ return sz;
+ }
+
+ r = curl_header_strdup(contents, sz, "Content-Length:", &length);
+ if (r < 0) {
+ log_oom();
+ goto fail;
+ }
+ if (r > 0) {
+ (void) safe_atou64(length, &j->content_length);
+
+ if (j->content_length != (uint64_t) -1) {
+ char bytes[FORMAT_BYTES_MAX];
+
+ if (j->content_length > j->compressed_max) {
+ log_error("Content too large.");
+ r = -EFBIG;
+ goto fail;
+ }
+
+ log_info("Downloading %s.", format_bytes(bytes, sizeof(bytes), j->content_length));
+ }
+
+ return sz;
+ }
+
+ r = curl_header_strdup(contents, sz, "Last-Modified:", &last_modified);
+ if (r < 0) {
+ log_oom();
+ goto fail;
+ }
+ if (r > 0) {
+ (void) curl_parse_http_time(last_modified, &j->mtime);
+ return sz;
+ }
+
+ return sz;
+
+fail:
+ import_job_finish(j, r);
+ return 0;
+}
+
+static int import_job_progress_callback(void *userdata, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow) {
+ ImportJob *j = userdata;
+ unsigned percent;
+ usec_t n;
+
+ assert(j);
+
+ if (dltotal <= 0)
+ return 0;
+
+ percent = ((100 * dlnow) / dltotal);
+ n = now(CLOCK_MONOTONIC);
+
+ if (n > j->last_status_usec + USEC_PER_SEC &&
+ percent != j->progress_percent) {
+ char buf[FORMAT_TIMESPAN_MAX];
+
+ if (n - j->start_usec > USEC_PER_SEC && dlnow > 0) {
+ usec_t left, done;
+
+ done = n - j->start_usec;
+ left = (usec_t) (((double) done * (double) dltotal) / dlnow) - done;
+
+ log_info("Got %u%% of %s. %s left.", percent, j->url, format_timespan(buf, sizeof(buf), left, USEC_PER_SEC));
+ } else
+ log_info("Got %u%% of %s.", percent, j->url);
+
+ j->progress_percent = percent;
+ j->last_status_usec = n;
+ }
+
+ return 0;
+}
+
+int import_job_new(ImportJob **ret, const char *url, CurlGlue *glue, void *userdata) {
+ _cleanup_(import_job_unrefp) ImportJob *j = NULL;
+
+ assert(url);
+ assert(glue);
+ assert(ret);
+
+ j = new0(ImportJob, 1);
+ if (!j)
+ return -ENOMEM;
+
+ j->state = IMPORT_JOB_INIT;
+ j->disk_fd = -1;
+ j->userdata = userdata;
+ j->glue = glue;
+ j->content_length = (uint64_t) -1;
+ j->start_usec = now(CLOCK_MONOTONIC);
+ j->compressed_max = j->uncompressed_max = 8LLU * 1024LLU * 1024LLU * 1024LLU; /* 8GB */
+
+ j->url = strdup(url);
+ if (!j->url)
+ return -ENOMEM;
+
+ *ret = j;
+ j = NULL;
+
+ return 0;
+}
+
+int import_job_begin(ImportJob *j) {
+ int r;
+
+ assert(j);
+
+ if (j->state != IMPORT_JOB_INIT)
+ return -EBUSY;
+
+ r = curl_glue_make(&j->curl, j->url, j);
+ if (r < 0)
+ return r;
+
+ if (!strv_isempty(j->old_etags)) {
+ _cleanup_free_ char *cc = NULL, *hdr = NULL;
+
+ cc = strv_join(j->old_etags, ", ");
+ if (!cc)
+ return -ENOMEM;
+
+ hdr = strappend("If-None-Match: ", cc);
+ if (!hdr)
+ return -ENOMEM;
+
+ j->request_header = curl_slist_new(hdr, NULL);
+ if (!j->request_header)
+ return -ENOMEM;
+
+ if (curl_easy_setopt(j->curl, CURLOPT_HTTPHEADER, j->request_header) != CURLE_OK)
+ return -EIO;
+ }
+
+ if (curl_easy_setopt(j->curl, CURLOPT_WRITEFUNCTION, import_job_write_callback) != CURLE_OK)
+ return -EIO;
+
+ if (curl_easy_setopt(j->curl, CURLOPT_WRITEDATA, j) != CURLE_OK)
+ return -EIO;
+
+ if (curl_easy_setopt(j->curl, CURLOPT_HEADERFUNCTION, import_job_header_callback) != CURLE_OK)
+ return -EIO;
+
+ if (curl_easy_setopt(j->curl, CURLOPT_HEADERDATA, j) != CURLE_OK)
+ return -EIO;
+
+ if (curl_easy_setopt(j->curl, CURLOPT_XFERINFOFUNCTION, import_job_progress_callback) != CURLE_OK)
+ return -EIO;
+
+ if (curl_easy_setopt(j->curl, CURLOPT_XFERINFODATA, j) != CURLE_OK)
+ return -EIO;
+
+ if (curl_easy_setopt(j->curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK)
+ return -EIO;
+
+ r = curl_glue_add(j->glue, j->curl);
+ if (r < 0)
+ return r;
+
+ j->state = IMPORT_JOB_ANALYZING;
+
+ return 0;
+}
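
The generic download job above is a small state machine: import_job_begin() arms the curl handle and puts the job into IMPORT_JOB_ANALYZING, in which the write callback only buffers incoming data in ->payload until the xz or gzip magic bytes (or their absence) identify the stream. import_job_detect_compression() then initializes the matching decoder, lets the caller open a destination via the on_open_disk hook, switches the job to IMPORT_JOB_RUNNING and replays the buffered payload through import_job_write_compressed(); further data is decompressed and written to ->disk_fd as it arrives. import_job_finish() finally moves the job to DONE or FAILED and invokes the on_finished callback.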
diff --git a/src/import/import-job.h b/src/import/import-job.h
new file mode 100644
index 0000000..843daa2
--- /dev/null
+++ b/src/import/import-job.h
@@ -0,0 +1,101 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+#pragma once
+
+/***
+ This file is part of systemd.
+
+ Copyright 2015 Lennart Poettering
+
+ systemd is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version.
+
+ systemd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include <lzma.h>
+#include <zlib.h>
+
+#include "macro.h"
+#include "curl-util.h"
+
+typedef struct ImportJob ImportJob;
+
+typedef void (*ImportJobFinished)(ImportJob *job);
+typedef int (*ImportJobOpenDisk)(ImportJob *job);
+
+typedef enum ImportJobState {
+ IMPORT_JOB_INIT,
+ IMPORT_JOB_ANALYZING, /* Still reading into ->payload, to figure out what we have */
+ IMPORT_JOB_RUNNING, /* Writing to destination */
+ IMPORT_JOB_DONE,
+ IMPORT_JOB_FAILED,
+ _IMPORT_JOB_STATE_MAX,
+ _IMPORT_JOB_STATE_INVALID = -1,
+} ImportJobState;
+
+typedef enum ImportJobCompression {
+ IMPORT_JOB_UNCOMPRESSED,
+ IMPORT_JOB_XZ,
+ IMPORT_JOB_GZIP,
+ _IMPORT_JOB_COMPRESSION_MAX,
+ _IMPORT_JOB_COMPRESSION_INVALID = -1,
+} ImportJobCompression;
+
+struct ImportJob {
+ ImportJobState state;
+ int error;
+
+ char *url;
+
+ void *userdata;
+ ImportJobFinished on_finished;
+ ImportJobOpenDisk on_open_disk;
+
+ CurlGlue *glue;
+ CURL *curl;
+ struct curl_slist *request_header;
+
+ char *etag;
+ char **old_etags;
+
+ uint64_t content_length;
+ uint64_t written_compressed;
+ uint64_t written_uncompressed;
+
+ uint64_t uncompressed_max;
+ uint64_t compressed_max;
+
+ uint8_t *payload;
+ size_t payload_size;
+ size_t payload_allocated;
+
+ int disk_fd;
+
+ usec_t mtime;
+
+ ImportJobCompression compressed;
+ lzma_stream xz;
+ z_stream gzip;
+
+ unsigned progress_percent;
+ usec_t start_usec;
+ usec_t last_status_usec;
+
+ bool allow_sparse;
+};
+
+int import_job_new(ImportJob **job, const char *url, CurlGlue *glue, void *userdata);
+ImportJob* import_job_unref(ImportJob *job);
+
+int import_job_begin(ImportJob *j);
+
+void import_job_curl_on_finished(CurlGlue *g, CURL *curl, CURLcode result);
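
A minimal sketch, not part of the commit, of how a consumer is expected to drive this API; it mirrors the pattern import-tar.c below follows, and the names my_on_finished and pull_one are made up for illustration:

#include "sd-event.h"
#include "curl-util.h"
#include "import-job.h"

/* Hypothetical completion callback: the job's userdata is the event loop here. */
static void my_on_finished(ImportJob *j) {
        sd_event_exit(j->userdata, j->error);
}

/* Hypothetical helper: download one URL and run the event loop until done. */
static int pull_one(sd_event *event, const char *url) {
        ImportJob *job = NULL;
        CurlGlue *glue = NULL;
        int r;

        r = curl_glue_new(&glue, event);
        if (r < 0)
                return r;

        /* Route curl completion events into the generic job logic */
        glue->on_finished = import_job_curl_on_finished;

        r = import_job_new(&job, url, glue, event);
        if (r < 0)
                goto finish;

        job->on_finished = my_on_finished;
        /* job->on_open_disk could be set here to direct the payload into a file or pipe */

        r = import_job_begin(job);
        if (r < 0)
                goto finish;

        r = sd_event_loop(event);

finish:
        import_job_unref(job);   /* free the job before the glue it is registered with */
        curl_glue_unref(glue);
        return r;
}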
diff --git a/src/import/import-raw.c b/src/import/import-raw.c
index 486157a..1fe27b6 100644
--- a/src/import/import-raw.c
+++ b/src/import/import-raw.c
@@ -30,6 +30,7 @@
#include "qcow2-util.h"
#include "strv.h"
#include "copy.h"
+#include "import-util.h"
#include "import-raw.h"
typedef struct RawImportFile RawImportFile;
@@ -691,17 +692,6 @@ static int raw_import_file_progress_callback(void *userdata, curl_off_t dltotal,
return 0;
}
-static bool etag_is_valid(const char *etag) {
-
- if (!endswith(etag, "\""))
- return false;
-
- if (!startswith(etag, "\"") && !startswith(etag, "W/\""))
- return false;
-
- return true;
-}
-
static int raw_import_file_find_old_etags(RawImportFile *f) {
_cleanup_free_ char *escaped_url = NULL;
_cleanup_closedir_ DIR *d = NULL;
@@ -751,7 +741,7 @@ static int raw_import_file_find_old_etags(RawImportFile *f) {
if (!u)
return -ENOMEM;
- if (!etag_is_valid(u)) {
+ if (!http_etag_is_valid(u)) {
free(u);
continue;
}
diff --git a/src/import/import-tar.c b/src/import/import-tar.c
new file mode 100644
index 0000000..43227f6
--- /dev/null
+++ b/src/import/import-tar.c
@@ -0,0 +1,296 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+/***
+ This file is part of systemd.
+
+ Copyright 2015 Lennart Poettering
+
+ systemd is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version.
+
+ systemd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include <sys/prctl.h>
+#include <curl/curl.h>
+
+#include "hashmap.h"
+#include "utf8.h"
+#include "strv.h"
+#include "copy.h"
+#include "btrfs-util.h"
+#include "util.h"
+#include "macro.h"
+#include "mkdir.h"
+#include "curl-util.h"
+#include "import-job.h"
+#include "import-util.h"
+#include "import-tar.h"
+
+struct TarImport {
+ sd_event *event;
+ CurlGlue *glue;
+
+ char *image_root;
+
+ ImportJob *tar_job;
+
+ TarImportFinished on_finished;
+ void *userdata;
+
+ bool finished;
+
+ char *local;
+ bool force_local;
+
+ pid_t tar_pid;
+
+ char *temp_path;
+ char *final_path;
+};
+
+TarImport* tar_import_unref(TarImport *i) {
+ if (!i)
+ return NULL;
+
+ if (i->tar_pid > 0) {
+ kill(i->tar_pid, SIGKILL);
+ wait_for_terminate(i->tar_pid, NULL);
+ }
+
+ import_job_unref(i->tar_job);
+
+ curl_glue_unref(i->glue);
+ sd_event_unref(i->event);
+
+ if (i->temp_path) {
+ (void) btrfs_subvol_remove(i->temp_path);
+ (void) rm_rf_dangerous(i->temp_path, false, true, false);
+ }
+
+ free(i->final_path);
+ free(i->image_root);
+ free(i->local);
+
+ free(i);
+
+ return NULL;
+}
+
+int tar_import_new(TarImport **ret, sd_event *event, const char *image_root, TarImportFinished on_finished, void *userdata) {
+ _cleanup_(tar_import_unrefp) TarImport *i = NULL;
+ int r;
+
+ assert(ret);
+ assert(event);
+
+ i = new0(TarImport, 1);
+ if (!i)
+ return -ENOMEM;
+
+ i->on_finished = on_finished;
+ i->userdata = userdata;
+
+ i->image_root = strdup(image_root ?: "/var/lib/machines");
+ if (!i->image_root)
+ return -ENOMEM;
+
+ if (event)
+ i->event = sd_event_ref(event);
+ else {
+ r = sd_event_default(&i->event);
+ if (r < 0)
+ return r;
+ }
+
+ r = curl_glue_new(&i->glue, i->event);
+ if (r < 0)
+ return r;
+
+ i->glue->on_finished = import_job_curl_on_finished;
+ i->glue->userdata = i;
+
+ *ret = i;
+ i = NULL;
+
+ return 0;
+}
+
+static void tar_import_job_on_finished(ImportJob *j) {
+ TarImport *i;
+ int r;
+
+ assert(j);
+ assert(j->userdata);
+
+ i = j->userdata;
+
+ if (j->error != 0) {
+ r = j->error;
+ goto finish;
+ }
+
+ /* This is invoked if either the download completed
+ * successfully, or the download was skipped because we
+ * already have the etag. */
+
+ j->disk_fd = safe_close(j->disk_fd);
+
+ if (i->tar_pid > 0) {
+ r = wait_for_terminate_and_warn("tar", i->tar_pid, true);
+ i->tar_pid = 0;
+ if (r < 0)
+ goto finish;
+ }
+
+ if (i->temp_path) {
+ r = import_make_read_only(i->temp_path);
+ if (r < 0)
+ goto finish;
+
+ if (rename(i->temp_path, i->final_path) < 0) {
+ r = log_error_errno(errno, "Failed to rename to final image name: %m");
+ goto finish;
+ }
+ }
+
+ if (i->local) {
+ if (!i->final_path) {
+ r = import_make_path(j->url, j->etag, i->image_root, ".tar-", NULL, &i->final_path);
+ if (r < 0)
+ goto finish;
+ }
+
+ r = import_make_local_copy(i->final_path, i->image_root, i->local, i->force_local);
+ if (r < 0)
+ goto finish;
+ }
+
+ r = 0;
+
+finish:
+ i->finished = true;
+
+ if (i->on_finished)
+ i->on_finished(i, r, i->userdata);
+ else
+ sd_event_exit(i->event, r);
+}
+
+static int tar_import_job_on_open_disk(ImportJob *j) {
+ _cleanup_close_pair_ int pipefd[2] = { -1 , -1 };
+ TarImport *i;
+ int r;
+
+ assert(j);
+ assert(j->userdata);
+
+ i = j->userdata;
+
+ r = import_make_path(j->url, j->etag, i->image_root, ".tar-", NULL, &i->final_path);
+ if (r < 0)
+ return log_oom();
+
+ r = tempfn_random(i->final_path, &i->temp_path);
+ if (r < 0)
+ return log_oom();
+
+ mkdir_parents_label(i->temp_path, 0700);
+
+ r = btrfs_subvol_make(i->temp_path);
+ if (r == -ENOTTY) {
+ if (mkdir(i->temp_path, 0755) < 0)
+ return log_error_errno(errno, "Failed to create directory %s: %m", i->temp_path);
+ } else if (r < 0)
+ return log_error_errno(r, "Failed to create subvolume %s: %m", i->temp_path);
+
+ if (pipe2(pipefd, O_CLOEXEC) < 0)
+ return log_error_errno(errno, "Failed to create pipe for tar: %m");
+
+ i->tar_pid = fork();
+ if (i->tar_pid < 0)
+ return log_error_errno(errno, "Failed to fork off tar: %m");
+ if (i->tar_pid == 0) {
+ int null_fd;
+
+ reset_all_signal_handlers();
+ reset_signal_mask();
+ assert_se(prctl(PR_SET_PDEATHSIG, SIGTERM) == 0);
+
+ pipefd[1] = safe_close(pipefd[1]);
+
+ if (dup2(pipefd[0], STDIN_FILENO) != STDIN_FILENO) {
+ log_error_errno(errno, "Failed to dup2() fd: %m");
+ _exit(EXIT_FAILURE);
+ }
+
+ if (pipefd[0] != STDIN_FILENO)
+ safe_close(pipefd[0]);
+
+ null_fd = open("/dev/null", O_WRONLY|O_NOCTTY);
+ if (null_fd < 0) {
+ log_error_errno(errno, "Failed to open /dev/null: %m");
+ _exit(EXIT_FAILURE);
+ }
+
+ if (dup2(null_fd, STDOUT_FILENO) != STDOUT_FILENO) {
+ log_error_errno(errno, "Failed to dup2() fd: %m");
+ _exit(EXIT_FAILURE);
+ }
+
+ if (null_fd != STDOUT_FILENO)
+ safe_close(null_fd);
+
+ execlp("tar", "tar", "--numeric-owner", "-C", i->temp_path, "-px", NULL);
+ _exit(EXIT_FAILURE);
+ }
+
+ pipefd[0] = safe_close(pipefd[0]);
+
+ j->disk_fd = pipefd[1];
+ pipefd[1] = -1;
+
+ return 0;
+}
+
+int tar_import_pull(TarImport *i, const char *url, const char *local, bool force_local) {
+ int r;
+
+ assert(i);
+
+ if (i->tar_job)
+ return -EBUSY;
+
+ if (!http_url_is_valid(url))
+ return -EINVAL;
+
+ if (local && !machine_name_is_valid(local))
+ return -EINVAL;
+
+ r = free_and_strdup(&i->local, local);
+ if (r < 0)
+ return r;
+
+ i->force_local = force_local;
+
+ r = import_job_new(&i->tar_job, url, i->glue, i);
+ if (r < 0)
+ return r;
+
+ i->tar_job->on_finished = tar_import_job_on_finished;
+ i->tar_job->on_open_disk = tar_import_job_on_open_disk;
+
+ r = import_find_old_etags(url, i->image_root, DT_DIR, ".tar-", NULL, &i->tar_job->old_etags);
+ if (r < 0)
+ return r;
+
+ return import_job_begin(i->tar_job);
+}
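
The on_open_disk hook is what lets the tar case avoid a temporary archive on disk: tar_import_job_on_open_disk() creates the destination as a btrfs subvolume (falling back to a plain directory), forks off "tar --numeric-owner -C <dir> -px" with its stdin connected to a pipe, and hands the pipe's write end back as the job's disk_fd, so the decompressed download is streamed straight into the unpacking tar process. Once the transfer completes, tar_import_job_on_finished() closes the pipe, waits for tar to exit, marks the tree read-only and renames it to its final, etag-derived name.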
diff --git a/src/import/import-tar.h b/src/import/import-tar.h
new file mode 100644
index 0000000..6a7477f
--- /dev/null
+++ b/src/import/import-tar.h
@@ -0,0 +1,36 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+#pragma once
+
+/***
+ This file is part of systemd.
+
+ Copyright 2015 Lennart Poettering
+
+ systemd is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version.
+
+ systemd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include "sd-event.h"
+#include "macro.h"
+
+typedef struct TarImport TarImport;
+
+typedef void (*TarImportFinished)(TarImport *import, int error, void *userdata);
+
+int tar_import_new(TarImport **import, sd_event *event, const char *image_root, TarImportFinished on_finished, void *userdata);
+TarImport* tar_import_unref(TarImport *import);
+
+DEFINE_TRIVIAL_CLEANUP_FUNC(TarImport*, tar_import_unref);
+
+int tar_import_pull(TarImport *import, const char *url, const char *local, bool force_local);
diff --git a/src/import/import-util.c b/src/import/import-util.c
new file mode 100644
index 0000000..24c8ac1
--- /dev/null
+++ b/src/import/import-util.c
@@ -0,0 +1,201 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+/***
+ This file is part of systemd.
+
+ Copyright 2015 Lennart Poettering
+
+ systemd is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version.
+
+ systemd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include "util.h"
+#include "strv.h"
+#include "copy.h"
+#include "btrfs-util.h"
+#include "import-util.h"
+
+#define FILENAME_ESCAPE "/.#\"\'"
+
+bool http_etag_is_valid(const char *etag) {
+ if (!endswith(etag, "\""))
+ return false;
+
+ if (!startswith(etag, "\"") && !startswith(etag, "W/\""))
+ return false;
+
+ return true;
+}
+
+int import_find_old_etags(const char *url, const char *image_root, int dt, const char *prefix, const char *suffix, char ***etags) {
+ _cleanup_free_ char *escaped_url = NULL;
+ _cleanup_closedir_ DIR *d = NULL;
+ _cleanup_strv_free_ char **l = NULL;
+ struct dirent *de;
+ int r;
+
+ assert(url);
+ assert(etags);
+
+ if (!image_root)
+ image_root = "/var/lib/machines";
+
+ escaped_url = xescape(url, FILENAME_ESCAPE);
+ if (!escaped_url)
+ return -ENOMEM;
+
+ d = opendir(image_root);
+ if (!d) {
+ if (errno == ENOENT) {
+ *etags = NULL;
+ return 0;
+ }
+
+ return -errno;
+ }
+
+ FOREACH_DIRENT_ALL(de, d, return -errno) {
+ const char *a, *b;
+ char *u;
+
+ if (de->d_type != DT_UNKNOWN &&
+ de->d_type != dt)
+ continue;
+
+ if (prefix) {
+ a = startswith(de->d_name, prefix);
+ if (!a)
+ continue;
+ } else
+ a = de->d_name;
+
+ a = startswith(a, escaped_url);
+ if (!a)
+ continue;
+
+ a = startswith(a, ".");
+ if (!a)
+ continue;
+
+ if (suffix) {
+ b = endswith(de->d_name, suffix);
+ if (!b)
+ continue;
+ } else
+ b = strchr(de->d_name, 0);
+
+ if (a >= b)
+ continue;
+
+ u = cunescape_length(a, b - a);
+ if (!u)
+ return -ENOMEM;
+
+ if (!http_etag_is_valid(u)) {
+ free(u);
+ continue;
+ }
+
+ r = strv_consume(&l, u);
+ if (r < 0)
+ return r;
+ }
+
+ *etags = l;
+ l = NULL;
+
+ return 0;
+}
+
+int import_make_local_copy(const char *final, const char *image_root, const char *local, bool force_local) {
+ const char *p;
+ int r;
+
+ assert(final);
+ assert(local);
+
+ if (!image_root)
+ image_root = "/var/lib/machines";
+
+ p = strappenda(image_root, "/", local);
+
+ if (force_local) {
+ (void) btrfs_subvol_remove(p);
+ (void) rm_rf_dangerous(p, false, true, false);
+ }
+
+ r = btrfs_subvol_snapshot(final, p, false, false);
+ if (r == -ENOTTY) {
+ r = copy_tree(final, p, false);
+ if (r < 0)
+ return log_error_errno(r, "Failed to copy image: %m");
+ } else if (r < 0)
+ return log_error_errno(r, "Failed to create local image: %m");
+
+ log_info("Created new local image '%s'.", local);
+
+ return 0;
+}
+
+int import_make_read_only(const char *path) {
+ int r;
+
+ r = btrfs_subvol_set_read_only(path, true);
+ if (r == -ENOTTY) {
+ struct stat st;
+
+ r = stat(path, &st);
+ if (r < 0)
+ return log_error_errno(errno, "Failed to stat temporary image: %m");
+
+ if (chmod(path, st.st_mode & 0755) < 0)
+ return log_error_errno(errno, "Failed to chmod() final image: %m");
+
+ return 0;
+ }
+ if (r < 0)
+ return log_error_errno(r, "Failed to mark final image read-only: %m");
+
+ return 0;
+}
+
+int import_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret) {
+ _cleanup_free_ char *escaped_url = NULL;
+ char *path;
+
+ assert(url);
+ assert(ret);
+
+ if (!image_root)
+ image_root = "/var/lib/machines";
+
+ escaped_url = xescape(url, FILENAME_ESCAPE);
+ if (!escaped_url)
+ return -ENOMEM;
+
+ if (etag) {
+ _cleanup_free_ char *escaped_etag = NULL;
+
+ escaped_etag = xescape(etag, FILENAME_ESCAPE);
+ if (!escaped_etag)
+ return -ENOMEM;
+
+ path = strjoin(image_root, "/", strempty(prefix), escaped_url, ".", escaped_etag, strempty(suffix), NULL);
+ } else
+ path = strjoin(image_root, "/", strempty(prefix), escaped_url, strempty(suffix), NULL);
+ if (!path)
+ return -ENOMEM;
+
+ *ret = path;
+ return 0;
+}
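
A small illustrative snippet, not part of this commit, exercising the new helpers; assert_se and _cleanup_free_ come from the shared util headers, and since the exact escaped form produced by import_make_path() depends on xescape(), the result is only printed rather than asserted:

#include <stdio.h>

#include "util.h"
#include "import-util.h"

int main(void) {
        _cleanup_free_ char *path = NULL;

        /* ETags must be quoted, optionally with a weak-validator prefix */
        assert_se(http_etag_is_valid("\"abc\""));
        assert_se(http_etag_is_valid("W/\"abc\""));
        assert_se(!http_etag_is_valid("abc"));

        /* Yields <image_root>/<prefix><escaped URL>.<escaped etag><suffix>, with
         * '/', '.', '#', '"' and '\'' in URL and etag escaped for use as a file name. */
        assert_se(import_make_path("http://example.com/foo.tar", "\"abc\"",
                                   "/var/lib/machines", ".tar-", NULL, &path) >= 0);
        printf("%s\n", path);

        return 0;
}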
diff --git a/src/import/import-util.h b/src/import/import-util.h
new file mode 100644
index 0000000..ad5ab50
--- /dev/null
+++ b/src/import/import-util.h
@@ -0,0 +1,31 @@
+/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
+
+#pragma once
+
+/***
+ This file is part of systemd.
+
+ Copyright 2015 Lennart Poettering
+
+ systemd is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as published by
+ the Free Software Foundation; either version 2.1 of the License, or
+ (at your option) any later version.
+
+ systemd is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with systemd; If not, see <http://www.gnu.org/licenses/>.
+***/
+
+#include <stdbool.h>
+
+bool http_etag_is_valid(const char *etag);
+
+int import_make_local_copy(const char *final, const char *root, const char *local, bool force_local);
+int import_find_old_etags(const char *url, const char *root, int dt, const char *prefix, const char *suffix, char ***etags);
+int import_make_read_only(const char *path);
+int import_make_path(const char *url, const char *etag, const char *image_root, const char *prefix, const char *suffix, char **ret);
diff --git a/src/import/import.c b/src/import/import.c
index 9b10de5..c0fc224 100644
--- a/src/import/import.c
+++ b/src/import/import.c
@@ -25,6 +25,8 @@
#include "event-util.h"
#include "verbs.h"
#include "build.h"
+#include "machine-image.h"
+#include "import-tar.h"
#include "import-raw.h"
#include "import-dkr.h"
@@ -33,6 +35,144 @@ static const char *arg_image_root = "/var/lib/machines";
static const char* arg_dkr_index_url = DEFAULT_DKR_INDEX_URL;
+static void on_tar_finished(TarImport *import, int error, void *userdata) {
+ sd_event *event = userdata;
+ assert(import);
+
+ if (error == 0)
+ log_info("Operation completed successfully.");
+ else
+ log_error_errno(error, "Operation failed: %m");
+
+ sd_event_exit(event, error);
+}
+
+static int url_final_component(const char *url, char **ret) {
+ const char *e, *p;
+ char *s;
+
+ e = strchrnul(url, '?');
+
+ while (e > url && e[-1] == '/')
+ e--;
+
+ p = e;
+ while (p > url && p[-1] != '/')
+ p--;
+
+ if (e <= p)
+ return -EINVAL;
+
+ s = strndup(p, e - p);
+ if (!s)
+ return -ENOMEM;
+
+ *ret = s;
+ return 0;
+}
+
+static int strip_tar_suffixes(const char *name, char **ret) {
+ const char *e;
+ char *s;
+
+ e = endswith(name, ".tar");
+ if (!e)
+ e = endswith(name, ".tar.gz");
+ if (!e)
+ e = endswith(name, ".tar.xz");
+ if (!e)
+ e = endswith(name, ".tgz");
+ if (!e)
+ e = strchr(name, 0);
+
+ if (e <= name)
+ return -EINVAL;
+
+ s = strndup(name, e - name);
+ if (!s)
+ return -ENOMEM;
+
+ *ret = s;
+ return 0;
+}
+
+static int pull_tar(int argc, char *argv[], void *userdata) {
+ _cleanup_(tar_import_unrefp) TarImport *import = NULL;
+ _cleanup_event_unref_ sd_event *event = NULL;
+ const char *url, *local;
+ _cleanup_free_ char *l = NULL, *ll = NULL;
+ int r;
+
+ url = argv[1];
+ if (!http_url_is_valid(url)) {
+ log_error("URL '%s' is not valid.", url);
+ return -EINVAL;
+ }
+
+ if (argc >= 3)
+ local = argv[2];
+ else {
+ r = url_final_component(url, &l);
+ if (r < 0)
+ return log_error_errno(r, "Failed get final component of URL: %m");
+
+ local = l;
+ }
+
+ if (isempty(local) || streq(local, "-"))
+ local = NULL;
+
+ if (local) {
+ r = strip_tar_suffixes(local, &ll);
+ if (r < 0)
+ return log_oom();
+
+ local = ll;
+
+ if (!machine_name_is_valid(local)) {
+ log_error("Local image name '%s' is not valid.", local);
+ return -EINVAL;
+ }
+
+ if (!arg_force) {
+ r = image_find(local, NULL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to check whether image '%s' exists: %m", local);
+ else if (r > 0) {
+ log_error_errno(EEXIST, "Image '%s' already exists.", local);
+ return -EEXIST;
+ }
+ }
+
+ log_info("Pulling '%s', saving as '%s'.", url, local);
+ } else
+ log_info("Pulling '%s'.", url);
+
+ r = sd_event_default(&event);
+ if (r < 0)
+ return log_error_errno(r, "Failed to allocate event loop: %m");
+
+ assert_se(sigprocmask_many(SIG_BLOCK, SIGTERM, SIGINT, -1) == 0);
+ sd_event_add_signal(event, NULL, SIGTERM, NULL, NULL);
+ sd_event_add_signal(event, NULL, SIGINT, NULL, NULL);
+
+ r = tar_import_new(&import, event, arg_image_root, on_tar_finished, event);
+ if (r < 0)
+ return log_error_errno(r, "Failed to allocate importer: %m");
+
+ r = tar_import_pull(import, url, local, arg_force);
+ if (r < 0)
+ return log_error_errno(r, "Failed to pull image: %m");
+
+ r = sd_event_loop(event);
+ if (r < 0)
+ return log_error_errno(r, "Failed to run event loop: %m");
+
+ log_info("Exiting.");
+
+ return 0;
+}
+
static void on_raw_finished(RawImport *import, int error, void *userdata) {
sd_event *event = userdata;
assert(import);
@@ -40,7 +180,7 @@ static void on_raw_finished(RawImport *import, int error, void *userdata) {
if (error == 0)
log_info("Operation completed successfully.");
else
- log_info_errno(error, "Operation failed: %m");
+ log_error_errno(error, "Operation failed: %m");
sd_event_exit(event, error);
}
@@ -173,7 +313,7 @@ static void on_dkr_finished(DkrImport *import, int error, void *userdata) {
if (error == 0)
log_info("Operation completed successfully.");
else
- log_info_errno(error, "Operation failed: %m");
+ log_error_errno(error, "Operation failed: %m");
sd_event_exit(event, error);
}
@@ -277,8 +417,9 @@ static int help(int argc, char *argv[], void *userdata) {
" --image-root= Image root directory\n"
" --dkr-index-url=URL Specify index URL to use for downloads\n\n"
"Commands:\n"
- " pull-dkr REMOTE [NAME] Download a DKR image\n"
- " pull-raw URL [NAME] Download a RAW image\n",
+ " pull-tar URL Download a TAR image\n"
+ " pull-raw URL [NAME] Download a RAW image\n"
+ " pull-dkr REMOTE [NAME] Download a DKR image\n",
program_invocation_short_name);
return 0;
@@ -350,8 +491,9 @@ static int import_main(int argc, char *argv[]) {
static const Verb verbs[] = {
{ "help", VERB_ANY, VERB_ANY, 0, help },
- { "pull-dkr", 2, 3, 0, pull_dkr },
+ { "pull-tar", 2, 3, 0, pull_tar },
{ "pull-raw", 2, 3, 0, pull_raw },
+ { "pull-dkr", 2, 3, 0, pull_dkr },
{}
};
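
With the verb table extended as above, a tarball can be pulled with, for example, "systemd-import pull-tar http://example.com/rootfs.tar.xz rootfs" (URL and image name are placeholders). If the local name is omitted it is derived from the final component of the URL with any .tar, .tar.gz, .tar.xz or .tgz suffix stripped, and passing "-" as the name suppresses the local copy entirely.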
commit a2e0337875addaf08225fbf9b231435ba12a88b5
Author: Lennart Poettering <lennart at poettering.net>
Date: Mon Jan 19 20:45:27 2015 +0100
util: make http url validity checks more generic, and move them to util.c
diff --git a/src/core/load-fragment.c b/src/core/load-fragment.c
index 242b684..516731a 100644
--- a/src/core/load-fragment.c
+++ b/src/core/load-fragment.c
@@ -2298,7 +2298,7 @@ int config_parse_documentation(const char *unit,
for (a = b = u->documentation; a && *a; a++) {
- if (is_valid_documentation_url(*a))
+ if (documentation_url_is_valid(*a))
*(b++) = *a;
else {
log_syntax(unit, LOG_ERR, filename, line, EINVAL,
diff --git a/src/import/import-raw.c b/src/import/import-raw.c
index 80fdbb7..486157a 100644
--- a/src/import/import-raw.c
+++ b/src/import/import-raw.c
@@ -28,9 +28,9 @@
#include "utf8.h"
#include "curl-util.h"
#include "qcow2-util.h"
-#include "import-raw.h"
#include "strv.h"
#include "copy.h"
+#include "import-raw.h"
typedef struct RawImportFile RawImportFile;
@@ -904,7 +904,7 @@ int raw_import_pull(RawImport *import, const char *url, const char *local, bool
int r;
assert(import);
- assert(raw_url_is_valid(url));
+ assert(http_url_is_valid(url));
assert(!local || machine_name_is_valid(local));
if (hashmap_get(import->files, url))
@@ -949,14 +949,3 @@ int raw_import_pull(RawImport *import, const char *url, const char *local, bool
f = NULL;
return 0;
}
-
-bool raw_url_is_valid(const char *url) {
- if (isempty(url))
- return false;
-
- if (!startswith(url, "http://") &&
- !startswith(url, "https://"))
- return false;
-
- return ascii_is_valid(url);
-}
diff --git a/src/import/import-raw.h b/src/import/import-raw.h
index a423ec0..17f7a1a 100644
--- a/src/import/import-raw.h
+++ b/src/import/import-raw.h
@@ -33,5 +33,3 @@ DEFINE_TRIVIAL_CLEANUP_FUNC(RawImport*, raw_import_unref);
int raw_import_pull(RawImport *import, const char *url, const char *local, bool force_local);
int raw_import_cancel(RawImport *import, const char *name);
-
-bool raw_url_is_valid(const char *url);
diff --git a/src/import/import.c b/src/import/import.c
index af8d0ec..9b10de5 100644
--- a/src/import/import.c
+++ b/src/import/import.c
@@ -90,7 +90,7 @@ static int pull_raw(int argc, char *argv[], void *userdata) {
int r;
url = argv[1];
- if (!raw_url_is_valid(url)) {
+ if (!http_url_is_valid(url)) {
log_error("URL '%s' is not valid.", url);
return -EINVAL;
}
diff --git a/src/shared/util.c b/src/shared/util.c
index fd54023..5157b94 100644
--- a/src/shared/util.c
+++ b/src/shared/util.c
@@ -5458,25 +5458,43 @@ int getenv_for_pid(pid_t pid, const char *field, char **_value) {
return r;
}
-bool is_valid_documentation_url(const char *url) {
- assert(url);
+bool http_url_is_valid(const char *url) {
+ const char *p;
- if (startswith(url, "http://") && url[7])
- return true;
+ if (isempty(url))
+ return false;
- if (startswith(url, "https://") && url[8])
- return true;
+ p = startswith(url, "http://");
+ if (!p)
+ p = startswith(url, "https://");
+ if (!p)
+ return false;
- if (startswith(url, "file:") && url[5])
- return true;
+ if (isempty(p))
+ return false;
- if (startswith(url, "info:") && url[5])
- return true;
+ return ascii_is_valid(p);
+}
- if (startswith(url, "man:") && url[4])
+bool documentation_url_is_valid(const char *url) {
+ const char *p;
+
+ if (isempty(url))
+ return false;
+
+ if (http_url_is_valid(url))
return true;
- return false;
+ p = startswith(url, "file:/");
+ if (!p)
+ p = startswith(url, "info:");
+ if (!p)
+ p = startswith(url, "man:");
+
+ if (isempty(p))
+ return false;
+
+ return ascii_is_valid(p);
}
bool in_initrd(void) {
diff --git a/src/shared/util.h b/src/shared/util.h
index 2e662c9..d40c0b0 100644
--- a/src/shared/util.h
+++ b/src/shared/util.h
@@ -651,7 +651,8 @@ int setrlimit_closest(int resource, const struct rlimit *rlim);
int getenv_for_pid(pid_t pid, const char *field, char **_value);
-bool is_valid_documentation_url(const char *url) _pure_;
+bool http_url_is_valid(const char *url) _pure_;
+bool documentation_url_is_valid(const char *url) _pure_;
bool in_initrd(void);
diff --git a/src/test/test-util.c b/src/test/test-util.c
index 0c0d2f6..8471d31 100644
--- a/src/test/test-util.c
+++ b/src/test/test-util.c
@@ -918,15 +918,15 @@ static void test_files_same(void) {
}
static void test_is_valid_documentation_url(void) {
- assert_se(is_valid_documentation_url("http://www.freedesktop.org/wiki/Software/systemd"));
- assert_se(is_valid_documentation_url("https://www.kernel.org/doc/Documentation/binfmt_misc.txt"));
- assert_se(is_valid_documentation_url("file:foo"));
- assert_se(is_valid_documentation_url("man:systemd.special(7)"));
- assert_se(is_valid_documentation_url("info:bar"));
-
- assert_se(!is_valid_documentation_url("foo:"));
- assert_se(!is_valid_documentation_url("info:"));
- assert_se(!is_valid_documentation_url(""));
+ assert_se(documentation_url_is_valid("http://www.freedesktop.org/wiki/Software/systemd"));
+ assert_se(documentation_url_is_valid("https://www.kernel.org/doc/Documentation/binfmt_misc.txt"));
+ assert_se(documentation_url_is_valid("file:/foo/foo"));
+ assert_se(documentation_url_is_valid("man:systemd.special(7)"));
+ assert_se(documentation_url_is_valid("info:bar"));
+
+ assert_se(!documentation_url_is_valid("foo:"));
+ assert_se(!documentation_url_is_valid("info:"));
+ assert_se(!documentation_url_is_valid(""));
}
static void test_file_in_same_dir(void) {