From a0b971dd9c19819d4f7a3a8ab102be9d7101e3e0 Mon Sep 17 00:00:00 2001
From: Eelco Dolstra <edolstra@gmail.com>
Date: Wed, 8 Aug 2018 21:39:11 +0200
Subject: [PATCH] S3BinaryCacheStore: Don't use the transfer status callback

This callback is executed on a different thread, so exceptions thrown
from it are never caught on the thread that started the transfer:

  Aug 08 16:25:48 chef hydra-queue-runner[11967]: terminate called after throwing an instance of 'nix::Error'
  Aug 08 16:25:48 chef hydra-queue-runner[11967]:   what():  AWS error: failed to upload 's3://nix-cache/19dbddlfb0vp68g68y19p9fswrgl0bg7.ls'
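
The crash itself is ordinary C++ thread behaviour rather than anything
S3-specific: an exception that escapes a function running on a worker
thread has no handler on that thread, so the runtime calls
std::terminate(), which is exactly the "terminate called after throwing
an instance of 'nix::Error'" message above. A minimal stand-alone
sketch of that behaviour (hypothetical names, no AWS SDK involved):

  #include <stdexcept>
  #include <thread>

  int main()
  {
      // The "callback" runs on a worker thread; the exception it throws
      // never reaches the thread that started the work, and with no
      // handler on the worker thread std::terminate() is called.
      std::thread worker([] {
          throw std::runtime_error("failed to upload");
      });
      worker.join();
  }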

Therefore, just check the transfer status after the transfer completes.
Also include the S3 error message in the exception.
---
 src/libstore/s3-binary-cache-store.cc | 37 ++++++++++-----------------
 1 file changed, 13 insertions(+), 24 deletions(-)

diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 6d95c1fa8..ef41e413f 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -296,36 +296,13 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
                 const std::shared_ptr<const TransferHandle>
                     &transferHandle) {
               //FIXME: find a way to properly abort the multipart upload.
-              checkInterrupt();
+              //checkInterrupt();
               debug("upload progress ('%s'): '%d' of '%d' bytes",
                              path,
                              transferHandle->GetBytesTransferred(),
                              transferHandle->GetBytesTotalSize());
             };
 
-        transferConfig.transferStatusUpdatedCallback =
-            [&](const TransferManager *,
-                const std::shared_ptr<const TransferHandle>
-                    &transferHandle) {
-              switch (transferHandle->GetStatus()) {
-                  case TransferStatus::COMPLETED:
-                      printTalkative("upload of '%s' completed", path);
-                      stats.put++;
-                      stats.putBytes += data.size();
-                      break;
-                  case TransferStatus::IN_PROGRESS:
-                      break;
-                  case TransferStatus::FAILED:
-                      throw Error("AWS error: failed to upload 's3://%s/%s'",
-                                  bucketName, path);
-                      break;
-                  default:
-                      throw Error("AWS error: transfer status of 's3://%s/%s' "
-                                  "in unexpected state",
-                                  bucketName, path);
-              };
-            };
-
         std::shared_ptr<TransferManager> transferManager =
             TransferManager::Create(transferConfig);
 
@@ -339,6 +316,16 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
 
         transferHandle->WaitUntilFinished();
 
+        if (transferHandle->GetStatus() == TransferStatus::FAILED)
+            throw Error("AWS error: failed to upload 's3://%s/%s': %s",
+                bucketName, path, transferHandle->GetLastError().GetMessage());
+
+        if (transferHandle->GetStatus() != TransferStatus::COMPLETED)
+            throw Error("AWS error: transfer status of 's3://%s/%s' in unexpected state",
+                bucketName, path);
+
+        printTalkative("upload of '%s' completed", path);
+
         auto now2 = std::chrono::steady_clock::now();
 
         auto duration =
@@ -349,6 +336,8 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
                   bucketName % path % data.size() % duration);
 
         stats.putTimeMs += duration;
+        stats.putBytes += data.size();
+        stats.put++;
     }
 
     void upsertFile(const std::string & path, const std::string & data,
-- 
GitLab