Previously we assumed that all data in the ovsdb log belongs to the
upper layers (e.g., raft).
In order to also support metadata that concerns only the log itself in
the future, we wrap the data in a nested JSON structure. This allows us
to keep passing the upper layers the data that is relevant to them
while still leaving room for log metadata.
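
For illustration, based on the updated test expectations below: a
record whose payload is {"x":0} used to be written to the log as

    {"x":0}

and is now written as

    {"logdata":{"x":0}}

with the record header's length and SHA-1 computed over the wrapped
body.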

We can still parse old data that is not yet wrapped in such a nested
JSON structure. It is converted with the next snapshot.

Signed-off-by: Felix Huettner <felix.huettner@stackit.cloud>
---
 ovsdb/log.c           | 46 ++++++++++++++++++++++++++++++++-----------
 tests/ovsdb-log.at    | 12 +++++------
 tests/ovsdb-server.at | 42 +++++++++++++++++++--------------------
 tests/ovsdb-tool.at   | 38 +++++++++++++++++------------------
 4 files changed, 80 insertions(+), 58 deletions(-)

diff --git a/ovsdb/log.c b/ovsdb/log.c
index 0c3577119..754aa7892 100644
--- a/ovsdb/log.c
+++ b/ovsdb/log.c
@@ -40,6 +40,9 @@
 #include "transaction.h"
 #include "util.h"
 
+#define OVSDB_LOG_DATA_KEY "logdata"
+#define OVSDB_LOG_META_KEY "logmeta"
+
 VLOG_DEFINE_THIS_MODULE(ovsdb_log);
 
 /* State in a log's state machine.
@@ -506,9 +509,17 @@ ovsdb_log_read(struct ovsdb_log *file, struct json **jsonp)
         goto error;
     }
 
+    struct json *data = shash_find_and_delete(json_object(json),
+                                              OVSDB_LOG_DATA_KEY);
+    if (data) {
+        *jsonp = data;
+        json_destroy(json);
+    } else {
+        *jsonp = json;
+    }
+
     file->prev_offset = file->offset;
     file->offset = data_offset + data_length;
-    *jsonp = json;
     return NULL;
 
 error:
@@ -595,17 +606,8 @@ ovsdb_log_compose_record(const struct json *json,
                   magic, data->length, SHA1_ARGS(sha1));
 }
 
-/* Writes log record 'json' to 'file'.  Returns NULL if successful or an error
- * (which the caller must eventually destroy) on failure.
- *
- * If the log contains some records that have not yet been read, then calling
- * this function truncates them.
- *
- * Log writes are atomic.  A client may use ovsdb_log_commit() to ensure that
- * they are durable.
- */
-struct ovsdb_error *
-ovsdb_log_write(struct ovsdb_log *file, const struct json *json)
+static struct ovsdb_error *
+ovsdb_log_write_(struct ovsdb_log *file, const struct json *json)
 {
     switch (file->state) {
     case OVSDB_LOG_WRITE:
@@ -663,6 +665,26 @@ ovsdb_log_write(struct ovsdb_log *file, const struct json *json)
     return NULL;
 }
 
+/* Writes log record 'json' to 'file'.  Returns NULL if successful or an error
+ * (which the caller must eventually destroy) on failure.
+ *
+ * If the log contains some records that have not yet been read, then calling
+ * this function truncates them.
+ *
+ * Log writes are atomic.  A client may use ovsdb_log_commit() to ensure that
+ * they are durable.
+ */
+struct ovsdb_error *
+ovsdb_log_write(struct ovsdb_log *file, const struct json *json)
+{
+    struct json *outer = json_object_create();
+    json_object_put(outer, OVSDB_LOG_DATA_KEY, (struct json *) json);
+    struct ovsdb_error *error = ovsdb_log_write_(file, outer);
+    shash_find_and_delete_assert(json_object(outer), OVSDB_LOG_DATA_KEY);
+    json_destroy(outer);
+    return error;
+}
+
 struct ovsdb_error * OVS_WARN_UNUSED_RESULT
 ovsdb_log_write_and_free(struct ovsdb_log *log, struct json *json)
 {
diff --git a/tests/ovsdb-log.at b/tests/ovsdb-log.at
index 835ea728b..d00a64cb2 100644
--- a/tests/ovsdb-log.at
+++ b/tests/ovsdb-log.at
@@ -252,7 +252,7 @@ AT_CHECK(
 file: read: {"x":0}
 file: read: {"x":1}
 file: read: {"x":2}
-file: read failed: syntax error: file: parse error at offset 186 in header line "xxx"
+file: read failed: syntax error: file: parse error at offset 225 in header line "xxx"
 ]], [ignore])
 AT_CHECK([test -f .file.~lock~])
 AT_CLEANUP
@@ -274,7 +274,7 @@ AT_CHECK(
 file: read: {"x":0}
 file: read: {"x":1}
 file: read: {"x":2}
-file: read failed: syntax error: file: parse error at offset 186 in header line "xxx"
+file: read failed: syntax error: file: parse error at offset 225 in header line "xxx"
 file: write:{"x":3} successful
 ]], [ignore])
 AT_CHECK(
@@ -308,7 +308,7 @@ AT_CHECK(
   [[file: open successful
 file: read: {"x":0}
 file: read: {"x":1}
-file: read failed: syntax error: file: 8 bytes starting at offset 178 have SHA-1 hash 2683fd63b5b9fd49df4f2aa25bf7db5cbbebbe6f but should have hash 3d8ed30f471ad1b7b4b571cb0c7d5ed3e81350aa
+file: read failed: syntax error: file: 20 bytes starting at offset 205 have SHA-1 hash 9611766b297a4c0e018f0f7bb8ddb140f3b6fc73 but should have hash d112cf97136ed93021698d0582a4b237ef2a6a64
 file: write:{"longer data":0} successful
 ]], [ignore])
 AT_CHECK(
@@ -334,14 +334,14 @@ file: write:{"x":2} successful
 ]], [ignore])
 AT_CHECK([[sed 's/{"x":2}/2/' < file > file.tmp]])
 AT_CHECK([mv file.tmp file])
-AT_CHECK([[grep -c '^2$' file]], [0], [1
+AT_CHECK([[grep -c '^{"logdata":2}$' file]], [0], [1
 ])
 AT_CHECK(
   [[test-ovsdb log-io file read/write read read read 'write:{"longer data":0}']], [0],
   [[file: open successful
 file: read: {"x":0}
 file: read: {"x":1}
-file: read failed: I/O error: file: error reading 8 bytes starting at offset 178 (End of file)
+file: read failed: I/O error: file: error reading 20 bytes starting at offset 205 (End of file)
 file: write:{"longer data":0} successful
 ]], [ignore])
 AT_CHECK(
@@ -372,7 +372,7 @@ AT_CHECK(
 file: read: {"x":0}
 file: read: {"x":1}
 file: read: {"x":2}
-file: read failed: syntax error: file: 5 bytes starting at offset 240 are not valid JSON (line 0, column 4, byte 4: syntax error at beginning of input)
+file: read failed: syntax error: file: 5 bytes starting at offset 279 are not valid JSON (line 0, column 4, byte 4: syntax error at beginning of input)
 file: write:{"replacement data":0} successful
 ]], [ignore])
 AT_CHECK(
diff --git a/tests/ovsdb-server.at b/tests/ovsdb-server.at
index ca6e931be..bf9d25fa2 100644
--- a/tests/ovsdb-server.at
+++ b/tests/ovsdb-server.at
@@ -121,8 +121,8 @@ AT_CHECK([ovsdb-server --remote=punix:socket db --run="sh txnfile"], [0], [stdou
 cat stdout >> output
 dnl Add some crap to the database log and run another transaction, which should
 dnl ignore the crap and truncate it out of the log.
-echo 'OVSDB JSON 15 ffbcdae4b0386265f9ea3280dd7c8f0b72a20e56
-{"invalid":{}}' >> db
+bash -c "echo 'OVSDB JSON 26 d1cd8c1ecd3aff070019a75973f35832ff0cfe15
+{\"logdata\":{\"invalid\":{}}}' >> db"
 AT_DATA([txnfile], [[ovsdb-client transact unix:socket \
 '["ordinals",
   {"op": "insert",
@@ -1103,25 +1103,25 @@ ovsdb_check_online_compaction() {
         AT_CHECK([[uuidfilt db | grep -v ^OVSDB | \
             sed 's/"_date":[0-9]*/"_date":0/' |  sed 's/"_is_diff":true,//' | \
             ovstest test-json --multiple -]], [0],
-[[{"cksum":"12345678 9","name":"ordinals","tables":{"ordinals":{"columns":{"name":{"type":"string"},"number":{"type":"integer"}},"indexes":[["number"],["number","name"]]}},"version":"5.1.3"}
-{"_comment":"add row for zero 0","_date":0,"ordinals":{"<0>":{"name":"zero"}}}
-{"_comment":"delete row for 0","_date":0,"ordinals":{"<0>":null}}
-{"_comment":"add back row for zero 0","_date":0,"ordinals":{"<1>":{"name":"zero"}}}
-{"_comment":"add row for one 1","_date":0,"ordinals":{"<2>":{"name":"one","number":1}}}
-{"_comment":"delete row for 1","_date":0,"ordinals":{"<2>":null}}
-{"_comment":"add back row for one 1","_date":0,"ordinals":{"<3>":{"name":"one","number":1}}}
-{"_comment":"add row for two 2","_date":0,"ordinals":{"<4>":{"name":"two","number":2}}}
-{"_comment":"delete row for 2","_date":0,"ordinals":{"<4>":null}}
-{"_comment":"add back row for two 2","_date":0,"ordinals":{"<5>":{"name":"two","number":2}}}
-{"_comment":"add row for three 3","_date":0,"ordinals":{"<6>":{"name":"three","number":3}}}
-{"_comment":"delete row for 3","_date":0,"ordinals":{"<6>":null}}
-{"_comment":"add back row for three 3","_date":0,"ordinals":{"<7>":{"name":"three","number":3}}}
-{"_comment":"add row for four 4","_date":0,"ordinals":{"<8>":{"name":"four","number":4}}}
-{"_comment":"delete row for 4","_date":0,"ordinals":{"<8>":null}}
-{"_comment":"add back row for four 4","_date":0,"ordinals":{"<9>":{"name":"four","number":4}}}
-{"_comment":"add row for five 5","_date":0,"ordinals":{"<10>":{"name":"five","number":5}}}
-{"_comment":"delete row for 5","_date":0,"ordinals":{"<10>":null}}
-{"_comment":"add back row for five 5","_date":0,"ordinals":{"<11>":{"name":"five","number":5}}}
+[[{"logdata":{"cksum":"12345678 9","name":"ordinals","tables":{"ordinals":{"columns":{"name":{"type":"string"},"number":{"type":"integer"}},"indexes":[["number"],["number","name"]]}},"version":"5.1.3"}}
+{"logdata":{"_comment":"add row for zero 0","_date":0,"ordinals":{"<0>":{"name":"zero"}}}}
+{"logdata":{"_comment":"delete row for 0","_date":0,"ordinals":{"<0>":null}}}
+{"logdata":{"_comment":"add back row for zero 0","_date":0,"ordinals":{"<1>":{"name":"zero"}}}}
+{"logdata":{"_comment":"add row for one 1","_date":0,"ordinals":{"<2>":{"name":"one","number":1}}}}
+{"logdata":{"_comment":"delete row for 1","_date":0,"ordinals":{"<2>":null}}}
+{"logdata":{"_comment":"add back row for one 1","_date":0,"ordinals":{"<3>":{"name":"one","number":1}}}}
+{"logdata":{"_comment":"add row for two 2","_date":0,"ordinals":{"<4>":{"name":"two","number":2}}}}
+{"logdata":{"_comment":"delete row for 2","_date":0,"ordinals":{"<4>":null}}}
+{"logdata":{"_comment":"add back row for two 2","_date":0,"ordinals":{"<5>":{"name":"two","number":2}}}}
+{"logdata":{"_comment":"add row for three 3","_date":0,"ordinals":{"<6>":{"name":"three","number":3}}}}
+{"logdata":{"_comment":"delete row for 3","_date":0,"ordinals":{"<6>":null}}}
+{"logdata":{"_comment":"add back row for three 3","_date":0,"ordinals":{"<7>":{"name":"three","number":3}}}}
+{"logdata":{"_comment":"add row for four 4","_date":0,"ordinals":{"<8>":{"name":"four","number":4}}}}
+{"logdata":{"_comment":"delete row for 4","_date":0,"ordinals":{"<8>":null}}}
+{"logdata":{"_comment":"add back row for four 4","_date":0,"ordinals":{"<9>":{"name":"four","number":4}}}}
+{"logdata":{"_comment":"add row for five 5","_date":0,"ordinals":{"<10>":{"name":"five","number":5}}}}
+{"logdata":{"_comment":"delete row for 5","_date":0,"ordinals":{"<10>":null}}}
+{"logdata":{"_comment":"add back row for five 5","_date":0,"ordinals":{"<11>":{"name":"five","number":5}}}}
 ]])
     else
         dnl Check that at least there's a lot of transactions.
diff --git a/tests/ovsdb-tool.at b/tests/ovsdb-tool.at
index cf4a0fd2e..693524017 100644
--- a/tests/ovsdb-tool.at
+++ b/tests/ovsdb-tool.at
@@ -95,25 +95,25 @@ AT_CHECK(
 dnl Check that all the crap is in fact in the database log.
 AT_CHECK([[uuidfilt db | grep -v ^OVSDB | sed 's/"_date":[0-9]*/"_date":0/' | \
             sed 's/"_is_diff":true,//' | ovstest test-json --multiple -]], [0],
-  [[{"cksum":"12345678 9","name":"ordinals","tables":{"ordinals":{"columns":{"name":{"type":"string"},"number":{"type":"integer"}},"indexes":[["number"],["number","name"]]}},"version":"5.1.3"}
-{"_comment":"add row for zero 0","_date":0,"ordinals":{"<0>":{"name":"zero"}}}
-{"_comment":"delete row for 0","_date":0,"ordinals":{"<0>":null}}
-{"_comment":"add back row for zero 0","_date":0,"ordinals":{"<1>":{"name":"zero"}}}
-{"_comment":"add row for one 1","_date":0,"ordinals":{"<2>":{"name":"one","number":1}}}
-{"_comment":"delete row for 1","_date":0,"ordinals":{"<2>":null}}
-{"_comment":"add back row for one 1","_date":0,"ordinals":{"<3>":{"name":"one","number":1}}}
-{"_comment":"add row for two 2","_date":0,"ordinals":{"<4>":{"name":"two","number":2}}}
-{"_comment":"delete row for 2","_date":0,"ordinals":{"<4>":null}}
-{"_comment":"add back row for two 2","_date":0,"ordinals":{"<5>":{"name":"two","number":2}}}
-{"_comment":"add row for three 3","_date":0,"ordinals":{"<6>":{"name":"three","number":3}}}
-{"_comment":"delete row for 3","_date":0,"ordinals":{"<6>":null}}
-{"_comment":"add back row for three 3","_date":0,"ordinals":{"<7>":{"name":"three","number":3}}}
-{"_comment":"add row for four 4","_date":0,"ordinals":{"<8>":{"name":"four","number":4}}}
-{"_comment":"delete row for 4","_date":0,"ordinals":{"<8>":null}}
-{"_comment":"add back row for four 4","_date":0,"ordinals":{"<9>":{"name":"four","number":4}}}
-{"_comment":"add row for five 5","_date":0,"ordinals":{"<10>":{"name":"five","number":5}}}
-{"_comment":"delete row for 5","_date":0,"ordinals":{"<10>":null}}
-{"_comment":"add back row for five 5","_date":0,"ordinals":{"<11>":{"name":"five","number":5}}}
+  [[{"logdata":{"cksum":"12345678 9","name":"ordinals","tables":{"ordinals":{"columns":{"name":{"type":"string"},"number":{"type":"integer"}},"indexes":[["number"],["number","name"]]}},"version":"5.1.3"}}
+{"logdata":{"_comment":"add row for zero 0","_date":0,"ordinals":{"<0>":{"name":"zero"}}}}
+{"logdata":{"_comment":"delete row for 0","_date":0,"ordinals":{"<0>":null}}}
+{"logdata":{"_comment":"add back row for zero 0","_date":0,"ordinals":{"<1>":{"name":"zero"}}}}
+{"logdata":{"_comment":"add row for one 1","_date":0,"ordinals":{"<2>":{"name":"one","number":1}}}}
+{"logdata":{"_comment":"delete row for 1","_date":0,"ordinals":{"<2>":null}}}
+{"logdata":{"_comment":"add back row for one 1","_date":0,"ordinals":{"<3>":{"name":"one","number":1}}}}
+{"logdata":{"_comment":"add row for two 2","_date":0,"ordinals":{"<4>":{"name":"two","number":2}}}}
+{"logdata":{"_comment":"delete row for 2","_date":0,"ordinals":{"<4>":null}}}
+{"logdata":{"_comment":"add back row for two 2","_date":0,"ordinals":{"<5>":{"name":"two","number":2}}}}
+{"logdata":{"_comment":"add row for three 3","_date":0,"ordinals":{"<6>":{"name":"three","number":3}}}}
+{"logdata":{"_comment":"delete row for 3","_date":0,"ordinals":{"<6>":null}}}
+{"logdata":{"_comment":"add back row for three 3","_date":0,"ordinals":{"<7>":{"name":"three","number":3}}}}
+{"logdata":{"_comment":"add row for four 4","_date":0,"ordinals":{"<8>":{"name":"four","number":4}}}}
+{"logdata":{"_comment":"delete row for 4","_date":0,"ordinals":{"<8>":null}}}
+{"logdata":{"_comment":"add back row for four 4","_date":0,"ordinals":{"<9>":{"name":"four","number":4}}}}
+{"logdata":{"_comment":"add row for five 5","_date":0,"ordinals":{"<10>":{"name":"five","number":5}}}}
+{"logdata":{"_comment":"delete row for 5","_date":0,"ordinals":{"<10>":null}}}
+{"logdata":{"_comment":"add back row for five 5","_date":0,"ordinals":{"<11>":{"name":"five","number":5}}}}
 ]])
 
 dnl Dump out and check the actual database contents.
-- 
2.43.0


_______________________________________________
dev mailing list
d...@openvswitch.org
https://mail.openvswitch.org/mailman/listinfo/ovs-dev

Reply via email to