author    Daniel Friesel <derf@finalrewind.org>    2018-11-21 08:22:03 +0100
committer Daniel Friesel <derf@finalrewind.org>    2018-11-21 08:22:03 +0100
commit    5846344fab742ef97c4c0e22e530afab5a6aa219 (patch)
tree      25605703afdf83b3429b0a30b92e0fd5ecdb9f01 /src
parent    c225a738fc6ce90d167c98a131f999a8f629a0e1 (diff)
working minimal capnp_c test case
Diffstat (limited to 'src')
-rw-r--r--    src/app/prototest/Makefile.inc    20
-rw-r--r--    src/app/prototest/main.cc         113
-rw-r--r--    src/lib/capnp-c/capn-malloc.c     409
-rw-r--r--    src/lib/capnp-c/capn-stream.c     217
-rw-r--r--    src/lib/capnp-c/capn.c            1115
5 files changed, 1851 insertions, 23 deletions
diff --git a/src/app/prototest/Makefile.inc b/src/app/prototest/Makefile.inc
index a3477e6..4658af6 100644
--- a/src/app/prototest/Makefile.inc
+++ b/src/app/prototest/Makefile.inc
@@ -21,6 +21,15 @@ ifeq (${prototest_arduinojson}, 1)
COMMON_FLAGS += -DPROTOTEST_ARDUINOJSON
endif
+ifeq (${prototest_capnproto_c}, 1)
+ COMMON_FLAGS += -DPROTOTEST_CAPNPROTO_C
+ CXX_TARGETS += src/app/prototest/capnp_c_bench.capnp.cc
+ CXX_TARGETS += src/lib/capnp-c/capn.c
+ CXX_TARGETS += src/lib/capnp-c/capn-malloc.c
+ CXX_TARGETS += src/lib/capnp-c/capn-stream.c
+ INCLUDES += -Iinclude/lib/capnp-c
+endif
+
ifeq (${prototest_modernjson}, 1)
COMMON_FLAGS += -DPROTOTEST_MODERNJSON
ostream = 1
@@ -50,8 +59,19 @@ ifeq (${prototest_xdr}, 1)
CXX_TARGETS += src/os/object/xdrstream.cc src/os/object/xdrinput.cc
endif
+# Don't try to make .capnp from .capnp.c
+%.capnp: ;
+
+# Don't try to make .proto from .proto.c
+%.proto: ;
+
%.pb.cc: %.proto
protoc --plugin=protoc-gen-nanopb=${HOME}/var/ess/protocol-modeling/nanopb/generator/protoc-gen-nanopb --nanopb_out=. src/app/prototest/nanopbbench.proto
mv src/app/prototest/nanopbbench.pb.c src/app/prototest/nanopbbench.pb.cc
sed -i 's!src/app/prototest/!!' src/app/prototest/nanopbbench.pb.cc
sed -i 's!\#include "src/app/prototest/nanopb.pb.h"!!' src/app/prototest/nanopbbench.pb.h
+
+%.capnp.cc: %.capnp
+ capnp compile -oc $<
+ mv $<.c $<.cc
+ cp $<.cc /tmp
diff --git a/src/app/prototest/main.cc b/src/app/prototest/main.cc
index b4d2254..ee907ab 100644
--- a/src/app/prototest/main.cc
+++ b/src/app/prototest/main.cc
@@ -6,6 +6,10 @@
#ifdef PROTOTEST_ARDUINOJSON
#include "lib/ArduinoJson.h"
#endif
+#ifdef PROTOTEST_CAPNPROTO_C
+#include <capnp_c.h>
+#include "capnp_c_bench.capnp.h"
+#endif
#ifdef PROTOTEST_MODERNJSON
#include "lib/modernjson/json.h"
#endif
@@ -40,6 +44,19 @@ char buf[256];
char buf[256];
#endif
+#ifdef PROTOTEST_NANOPB
+bool encode_hurr(pb_ostream_t *stream, const pb_field_t *field, void * const *arg)
+{
+ if (!pb_encode_tag_for_field(stream, field)) return false;
+ return pb_encode_string(stream, (uint8_t*)"durr", 4);
+}
+bool encode_sensor(pb_ostream_t *stream, const pb_field_t *field, void * const *arg)
+{
+ if (!pb_encode_tag_for_field(stream, field)) return false;
+ return pb_encode_string(stream, (uint8_t*)"gps", 3);
+}
+#endif
+
// TODOs
//
// Code -> JSON
@@ -91,29 +108,65 @@ kout << "\"sensor\":\"gps\"" << ",";
kout << "\"time\":" << dec << ts;
kout << "}" << endl;
+#ifdef PROTOTEST_CAPNPROTO_C
+ uint8_t buf[1024];
+ for (unsigned int i = 0; i < sizeof(buf); i++) {
+ buf[i] = 0;
+ }
+ struct capn c;
+ capn_init_malloc(&c);
+ capn_ptr cr = capn_root(&c);
+ struct capn_segment *cs = cr.seg;
-#ifdef PROTOTEST_XDR
- BufferOutput<XDRStream> foostream(buf);
- XDRInput input(buf);
+ struct Benchmark benchmark;
+ //struct Benchmark_Nesting benchmark_nesting;
+ //struct Benchmark_Nesting_Foo benchmark_nesting_foo;
+ benchmark.time = ts + 1;
+
+ //benchmark.nesting = new_Benchmark_Nesting(cs);
+ //benchmark_nesting.foo = new_Benchmark_Nesting_Foo(cs);
- char test[] = "Obai World!";
+ //write_Benchmark_Nesting_Foo(&benchmark_nesting_foo, benchmark_nesting.foo);
+ //write_Benchmark_Nesting(&benchmark_nesting, benchmark.nesting);
- //counter.start();
- foostream << (uint32_t)123 << (int16_t)-2 << ts << (uint16_t)0 << (uint64_t)4294967296 << (uint16_t)0;
- foostream.setNextArrayLen(3);
- foostream << variable << "Hai";
- foostream.setNextArrayLen(sizeof(test));
- foostream << variable << (char const *)test;
+ Benchmark_ptr benchmark_ptr = new_Benchmark(cs);
+ write_Benchmark(&benchmark, benchmark_ptr);
- //tmp = counter.stop();
- //kout << "XDR serialization took " << tmp << " >" << counter.overflowed << " cycles" << endl;
- kout << "foostream is " << hex;
+ capn_write_mem(&c, buf, sizeof(buf), 0 /* packed */);
+ capn_free(&c);
+
+ kout << "capnproto is " << hex;
+ for (unsigned int i = 0; i < sizeof(buf); i++) {
+ kout << buf[i];
+ }
+ kout << endl;
+#endif
+
+#ifdef PROTOTEST_XDR
+ BufferOutput<XDRStream> xdrstream(buf);
+ xdrstream.setNextArrayLen(2);
+ xdrstream << 48.75608;
+ xdrstream << 2.302038;
+ xdrstream.setNextArrayLen(3);
+ xdrstream.setNextArrayLen(1);
+ xdrstream << 1;
+ xdrstream.setNextArrayLen(2);
+ xdrstream << 2;
+ xdrstream << 2;
+ xdrstream.setNextArrayLen(4);
+ xdrstream << variable << "durr";
+ xdrstream.setNextArrayLen(3);
+ xdrstream << variable << "gps";
+ xdrstream << ts;
+
+ kout << "xdrstream is " << hex;
for (unsigned int i = 0; i < 64; i += 4) {
kout << (unsigned char)buf[i] << (unsigned char)buf[i+1];
kout << (unsigned char)buf[i+2] << (unsigned char)buf[i+3] << " ";
}
kout << endl;
+ XDRInput input(buf);
kout << dec;
kout << "foostream = " << input.get_uint32() << " = " << 123;
kout << ", " << input.get_int32() << " = " << -2;
@@ -233,10 +286,15 @@ kout << "}" << endl;
bool status;
{
- TestMessage msg = TestMessage_init_zero;
+ Benchmark msg = Benchmark_init_zero;
pb_ostream_t stream = pb_ostream_from_buffer(buf, sizeof(buf));
- msg.number = 423;
- status = pb_encode(&stream, TestMessage_fields, &msg);
+ msg.data[0] = 48.75608;
+ msg.data[1] = 2.302038;
+ msg.nesting.foo.hurr.funcs.encode = encode_hurr;
+ msg.nesting.foo.qwop = 9001;
+ msg.sensor.funcs.encode = encode_sensor;
+ msg.time = ts;
+ status = pb_encode(&stream, Benchmark_fields, &msg);
len = stream.bytes_written;
kout << len << " bytes written" << endl;
}
@@ -254,17 +312,26 @@ kout << "}" << endl;
}
mpack_writer_t writer;
mpack_writer_init(&writer, buf, sizeof(buf));
-
- start = uptime.get_cycles();
- mpack_start_map(&writer, 2);
- stop = uptime.get_cycles();
- kout << stop - start << endl;
- mpack_write_cstr(&writer, "gps");
- mpack_write_uint(&writer, ts);
+ mpack_start_map(&writer, 4);
+ mpack_write_cstr(&writer, "data");
mpack_start_array(&writer, 2);
mpack_write_float(&writer, 48.756080);
mpack_write_float(&writer, 2.302038);
mpack_finish_array(&writer);
+ mpack_write_cstr(&writer, "nesting");
+ mpack_start_map(&writer, 1);
+ mpack_write_cstr(&writer, "foo");
+ mpack_start_map(&writer, 2);
+ mpack_write_cstr(&writer, "hurr");
+ mpack_write_cstr(&writer, "durr");
+ mpack_write_cstr(&writer, "qwop");
+ mpack_write_uint(&writer, 9001);
+ mpack_finish_map(&writer);
+ mpack_finish_map(&writer);
+ mpack_write_cstr(&writer, "sensor");
+ mpack_write_cstr(&writer, "gps");
+ mpack_write_cstr(&writer, "time");
+ mpack_write_uint(&writer, ts);
mpack_finish_map(&writer);
if (mpack_writer_destroy(&writer) != mpack_ok) {
diff --git a/src/lib/capnp-c/capn-malloc.c b/src/lib/capnp-c/capn-malloc.c
new file mode 100644
index 0000000..7308061
--- /dev/null
+++ b/src/lib/capnp-c/capn-malloc.c
@@ -0,0 +1,409 @@
+/* vim: set sw=8 ts=8 sts=8 noet: */
+/* capn-malloc.c
+ *
+ * Copyright (C) 2013 James McKaskill
+ * Copyright (C) 2014 Steve Dee
+ *
+ * This software may be modified and distributed under the terms
+ * of the MIT license. See the LICENSE file for details.
+ */
+
+#ifdef __GNUC__
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+#include "capnp_c.h"
+#include "capnp_priv.h"
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <errno.h>
+
+/*
+ * 8 byte alignment is required for struct capn_segment.
+ * This struct check_segment_alignment verifies this at compile time.
+ *
+ * Unless capn_segment is defined with 8 byte alignment, check_segment_alignment
+ * fails to compile in x86 mode (or on another CPU with 32-bit pointers),
+ * as (sizeof(struct capn_segment)&7) -> (44 & 7) evaluates to 4.
+ * It compiles in x64 mode (or on another CPU with 64-bit pointers),
+ * as (sizeof(struct capn_segment)&7) -> (80 & 7) evaluates to 0.
+ */
+struct check_segment_alignment {
+ unsigned int foo : (sizeof(struct capn_segment)&7) ? -1 : 1;
+};
+
+static struct capn_segment *create(void *u, uint32_t id, int sz) {
+ struct capn_segment *s;
+ sz += sizeof(*s);
+ if (sz < 4096) {
+ sz = 4096;
+ } else {
+ sz = (sz + 4095) & ~4095;
+ }
+ s = (struct capn_segment*) calloc(1, sz);
+ s->data = (char*) (s+1);
+ s->cap = sz - sizeof(*s);
+ s->user = s;
+ return s;
+}
+
+static struct capn_segment *create_local(void *u, int sz) {
+ return create(u, 0, sz);
+}
+
+void capn_init_malloc(struct capn *c) {
+ memset(c, 0, sizeof(*c));
+ c->create = &create;
+ c->create_local = &create_local;
+}
+
+void capn_free(struct capn *c) {
+ struct capn_segment *s = c->seglist;
+ while (s != NULL) {
+ struct capn_segment *n = s->next;
+ free(s->user);
+ s = n;
+ }
+ capn_reset_copy(c);
+}
+
+void capn_reset_copy(struct capn *c) {
+ struct capn_segment *s = c->copylist;
+ while (s != NULL) {
+ struct capn_segment *n = s->next;
+ free(s->user);
+ s = n;
+ }
+ c->copy = NULL;
+ c->copylist = NULL;
+}
+
+#define ZBUF_SZ 4096
+
+static int read_fp(void *p, size_t sz, FILE *f, struct capn_stream *z, uint8_t* zbuf, int packed) {
+ if (f && packed) {
+ z->next_out = (uint8_t*) p;
+ z->avail_out = sz;
+
+ while (z->avail_out && capn_inflate(z) == CAPN_NEED_MORE) {
+ int r;
+ memmove(zbuf, z->next_in, z->avail_in);
+ r = fread(zbuf+z->avail_in, 1, ZBUF_SZ - z->avail_in, f);
+ if (r <= 0)
+ return -1;
+ z->avail_in += r;
+ }
+ return 0;
+
+ } else if (f && !packed) {
+ return fread(p, sz, 1, f) != 1;
+
+ } else if (packed) {
+ z->next_out = (uint8_t*) p;
+ z->avail_out = sz;
+ return capn_inflate(z) != 0;
+
+ } else {
+ if (z->avail_in < sz)
+ return -1;
+ memcpy(p, z->next_in, sz);
+ z->next_in += sz;
+ z->avail_in -= sz;
+ return 0;
+ }
+}
+
+static int init_fp(struct capn *c, FILE *f, struct capn_stream *z, int packed) {
+ /*
+ * Initialize 'c' from the contents of 'f', assuming the message has been
+ * serialized with the standard framing format. From https://capnproto.org/encoding.html:
+ *
+ * When transmitting over a stream, the following should be sent. All integers are unsigned and little-endian.
+ * (4 bytes) The number of segments, minus one (since there is always at least one segment).
+ * (N * 4 bytes) The size of each segment, in words.
+ * (0 or 4 bytes) Padding up to the next word boundary.
+ * The content of each segment, in order.
+ */
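+
+ /* Worked example (illustrative, not part of the upstream capnp-c
+ * sources): a message consisting of a single two-word segment is
+ * framed as
+ *
+ *   00 00 00 00   segment count minus one (0 -> one segment)
+ *   02 00 00 00   size of segment 0 in words (here 2)
+ *   <16 bytes>    content of segment 0
+ *
+ * With one segment the count word and the size word already total
+ * 8 bytes, so no padding is needed before the content. */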
+
+ struct capn_segment *s = NULL;
+ uint32_t i, segnum, total = 0;
+ uint32_t hdr[1024];
+ uint8_t zbuf[ZBUF_SZ];
+ char *data = NULL;
+
+ capn_init_malloc(c);
+
+ /* Read the first four bytes to know how many headers we have */
+ if (read_fp(&segnum, 4, f, z, zbuf, packed))
+ goto err;
+
+ segnum = capn_flip32(segnum);
+ if (segnum > 1023)
+ goto err;
+ segnum++; /* The wire encoding was zero-based */
+
+ /* Read the header list */
+ if (read_fp(hdr, 8 * (segnum/2) + 4, f, z, zbuf, packed))
+ goto err;
+
+ for (i = 0; i < segnum; i++) {
+ uint32_t n = capn_flip32(hdr[i]);
+ if (n > INT_MAX/8 || n > UINT32_MAX/8 || UINT32_MAX - total < n*8)
+ goto err;
+ hdr[i] = n*8;
+ total += hdr[i];
+ }
+
+ /* Allocate space for the data and the capn_segment structs */
+ s = (struct capn_segment*) calloc(1, total + (sizeof(*s) * segnum));
+ if (!s)
+ goto err;
+
+ /* Now read the data and setup the capn_segment structs */
+ data = (char*) (s+segnum);
+ if (read_fp(data, total, f, z, zbuf, packed))
+ goto err;
+
+ for (i = 0; i < segnum; i++) {
+ s[i].len = s[i].cap = hdr[i];
+ s[i].data = data;
+ data += s[i].len;
+ capn_append_segment(c, &s[i]);
+ }
+
+ /* Set the entire region to be freed on the last segment */
+ s[segnum-1].user = s;
+
+ return 0;
+
+err:
+ memset(c, 0, sizeof(*c));
+ free(s);
+ return -1;
+}
+
+int capn_init_fp(struct capn *c, FILE *f, int packed) {
+ struct capn_stream z;
+ memset(&z, 0, sizeof(z));
+ return init_fp(c, f, &z, packed);
+}
+
+int capn_init_mem(struct capn *c, const uint8_t *p, size_t sz, int packed) {
+ struct capn_stream z;
+ memset(&z, 0, sizeof(z));
+ z.next_in = p;
+ z.avail_in = sz;
+ return init_fp(c, NULL, &z, packed);
+}
+
+static void header_calc(struct capn *c, uint32_t *headerlen, size_t *headersz)
+{
+ /* segnum == 1:
+ * [segnum][segsiz]
+ * segnum == 2:
+ * [segnum][segsiz][segsiz][zeroes]
+ * segnum == 3:
+ * [segnum][segsiz][segsiz][segsiz]
+ * segnum == 4:
+ * [segnum][segsiz][segsiz][segsiz][segsiz][zeroes]
+ */
+ *headerlen = ((2 + c->segnum) / 2) * 2;
+ *headersz = 4 * *headerlen;
+}
+
+static int header_render(struct capn *c, struct capn_segment *seg, uint32_t *header, uint32_t headerlen, size_t *datasz)
+{
+ size_t i;
+
+ header[0] = capn_flip32(c->segnum - 1);
+ header[headerlen-1] = 0; /* Zero out the spare position in the header sizes */
+ for (i = 0; i < c->segnum; i++, seg = seg->next) {
+ if (0 == seg)
+ return -1;
+ *datasz += seg->len;
+ header[1 + i] = capn_flip32(seg->len / 8);
+ }
+ if (0 != seg)
+ return -1;
+
+ return 0;
+}
+
+static int capn_write_mem_packed(struct capn *c, uint8_t *p, size_t sz)
+{
+ struct capn_segment *seg;
+ struct capn_ptr root;
+ uint32_t headerlen;
+ size_t headersz, datasz = 0;
+ uint32_t *header;
+ struct capn_stream z;
+ int ret;
+
+ root = capn_root(c);
+ header_calc(c, &headerlen, &headersz);
+ header = (uint32_t*) (p + headersz + 2); /* must reserve two bytes for worst case expansion */
+
+ if (sz < headersz*2 + 2) /* We must have space for temporary writing of header to deflate */
+ return -1;
+
+ ret = header_render(c, root.seg, header, headerlen, &datasz);
+ if (ret != 0)
+ return -1;
+
+ memset(&z, 0, sizeof(z));
+ z.next_in = (uint8_t *)header;
+ z.avail_in = headersz;
+ z.next_out = p;
+ z.avail_out = sz;
+
+ // pack the headers
+ ret = capn_deflate(&z);
+ if (ret != 0 || z.avail_in != 0)
+ return -1;
+
+ for (seg = root.seg; seg; seg = seg->next) {
+ z.next_in = (uint8_t *)seg->data;
+ z.avail_in = seg->len;
+ ret = capn_deflate(&z);
+ if (ret != 0 || z.avail_in != 0)
+ return -1;
+ }
+
+ return sz - z.avail_out;
+}
+
+int
+capn_write_mem(struct capn *c, uint8_t *p, size_t sz, int packed)
+{
+ struct capn_segment *seg;
+ struct capn_ptr root;
+ uint32_t headerlen;
+ size_t headersz, datasz = 0;
+ uint32_t *header;
+ int ret;
+
+ if (c->segnum == 0)
+ return -1;
+
+ if (packed)
+ return capn_write_mem_packed(c, p, sz);
+
+ root = capn_root(c);
+ header_calc(c, &headerlen, &headersz);
+ header = (uint32_t*) p;
+
+ if (sz < headersz)
+ return -1;
+
+ ret = header_render(c, root.seg, header, headerlen, &datasz);
+ if (ret != 0)
+ return -1;
+
+ if (sz < headersz + datasz)
+ return -1;
+
+ p += headersz;
+
+ for (seg = root.seg; seg; seg = seg->next) {
+ memcpy(p, seg->data, seg->len);
+ p += seg->len;
+ }
+
+ return headersz+datasz;
+}
+
+static int _write_fd(ssize_t (*write_fd)(int fd, const void *p, size_t count), int fd, void *p, size_t count)
+{
+ ssize_t ret;
+ size_t sent = 0;
+
+ while (sent < count) {
+ ret = write_fd(fd, ((uint8_t*)p)+sent, count-sent);
+ if (ret < 0) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ else
+ return -1;
+ }
+ sent += ret;
+ }
+
+ return 0;
+}
+
+int capn_write_fd(struct capn *c, ssize_t (*write_fd)(int fd, const void *p, size_t count), int fd, int packed)
+{
+ unsigned char buf[4096];
+ struct capn_segment *seg;
+ struct capn_ptr root;
+ uint32_t headerlen;
+ size_t headersz, datasz = 0;
+ int ret;
+ struct capn_stream z;
+ unsigned char *p;
+
+ if (c->segnum == 0)
+ return -1;
+
+ root = capn_root(c);
+ header_calc(c, &headerlen, &headersz);
+
+ if (sizeof(buf) < headersz)
+ return -1;
+
+ ret = header_render(c, root.seg, (uint32_t*)buf, headerlen, &datasz);
+ if (ret != 0)
+ return -1;
+
+ if (packed) {
+ const int headerrem = sizeof(buf) - headersz;
+ const int maxpack = headersz + 2;
+ if (headerrem < maxpack)
+ return -1;
+
+ memset(&z, 0, sizeof(z));
+ z.next_in = buf;
+ z.avail_in = headersz;
+ z.next_out = buf + headersz;
+ z.avail_out = headerrem;
+ ret = capn_deflate(&z);
+ if (ret != 0)
+ return -1;
+
+ p = buf + headersz;
+ headersz = headerrem - z.avail_out;
+ } else {
+ p = buf;
+ }
+
+ ret = _write_fd(write_fd, fd, p, headersz);
+ if (ret < 0)
+ return -1;
+
+ datasz = headersz;
+ for (seg = root.seg; seg; seg = seg->next) {
+ size_t bufsz;
+ if (packed) {
+ memset(&z, 0, sizeof(z));
+ z.next_in = (uint8_t*)seg->data;
+ z.avail_in = seg->len;
+ z.next_out = buf;
+ z.avail_out = sizeof(buf);
+ ret = capn_deflate(&z);
+ if (ret != 0)
+ return -1;
+ p = buf;
+ bufsz = sizeof(buf) - z.avail_out;
+ } else {
+ p = (uint8_t*)seg->data;
+ bufsz = seg->len;
+ }
+ ret = _write_fd(write_fd, fd, p, bufsz);
+ if (ret < 0)
+ return -1;
+ datasz += bufsz;
+ }
+
+ return datasz;
+}
diff --git a/src/lib/capnp-c/capn-stream.c b/src/lib/capnp-c/capn-stream.c
new file mode 100644
index 0000000..135d1b2
--- /dev/null
+++ b/src/lib/capnp-c/capn-stream.c
@@ -0,0 +1,217 @@
+/* vim: set sw=8 ts=8 sts=8 noet: */
+/* capn-stream.c
+ *
+ * Copyright (C) 2013 James McKaskill
+ *
+ * This software may be modified and distributed under the terms
+ * of the MIT license. See the LICENSE file for details.
+ */
+
+#include "capnp_c.h"
+#include "capnp_priv.h"
+#include <string.h>
+
+#ifndef min
+static unsigned min(unsigned a, unsigned b) { return (a < b) ? a : b; }
+#endif
+
+int capn_deflate(struct capn_stream* s) {
+ if (s->avail_in % 8) {
+ return CAPN_MISALIGNED;
+ }
+
+ while (s->avail_in) {
+ int i;
+ size_t sz;
+ uint8_t hdr = 0;
+ uint8_t *p;
+
+ if (!s->avail_out)
+ return CAPN_NEED_MORE;
+
+ if (s->raw > 0) {
+ sz = min(s->raw, min(s->avail_in, s->avail_out));
+ memcpy(s->next_out, s->next_in, sz);
+ s->next_out += sz;
+ s->next_in += sz;
+ s->avail_out -= sz;
+ s->avail_in -= sz;
+ s->raw -= sz;
+ continue;
+ }
+
+ if (s->avail_in < 8)
+ return CAPN_NEED_MORE;
+
+ sz = 0;
+ for (i = 0; i < 8; i++) {
+ if (s->next_in[i]) {
+ sz ++;
+ hdr |= 1 << i;
+ }
+ }
+
+ switch (sz) {
+ case 0:
+ if (s->avail_out < 2)
+ return CAPN_NEED_MORE;
+
+ s->next_out[0] = 0;
+ for (sz = 1; sz < min(s->avail_in/8, 256); sz++) {
+ if (((uint64_t*) s->next_in)[sz] != 0) {
+ break;
+ }
+ }
+
+ s->next_out[1] = (uint8_t) (sz-1);
+ s->next_in += sz*8;
+ s->avail_in -= sz*8;
+ s->next_out += 2;
+ s->avail_out -= 2;
+ continue;
+
+ case 8:
+ if (s->avail_out < 10)
+ return CAPN_NEED_MORE;
+
+ s->next_out[0] = 0xFF;
+ memcpy(s->next_out+1, s->next_in, 8);
+ s->next_in += 8;
+ s->avail_in -= 8;
+
+ s->raw = min(s->avail_in, 256*8);
+ if ((p = (uint8_t*) memchr(s->next_in, 0, s->raw)) != NULL) {
+ s->raw = (p - s->next_in) & ~7;
+ }
+
+ s->next_out[9] = (uint8_t) (s->raw/8);
+ s->next_out += 10;
+ s->avail_out -= 10;
+ continue;
+
+ default:
+ if (s->avail_out < 1U + sz)
+ return CAPN_NEED_MORE;
+
+ *(s->next_out++) = hdr;
+ for (i = 0; i < 8; i++) {
+ if (s->next_in[i]) {
+ *(s->next_out++) = s->next_in[i];
+ }
+ }
+ s->avail_out -= sz + 1;
+ s->next_in += 8;
+ s->avail_in -= 8;
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+int capn_inflate(struct capn_stream* s) {
+ while (s->avail_out) {
+ int i;
+ size_t sz;
+ uint8_t hdr;
+ uint8_t *wr;
+
+ if (s->avail_buf && s->avail_out >= s->avail_buf) {
+ memcpy(s->next_out, s->inflate_buf, s->avail_buf);
+ s->next_out += s->avail_buf;
+ s->avail_out -= s->avail_buf;
+ s->avail_buf = 0;
+ if (!s->avail_out)
+ return 0;
+ }
+ if (s->avail_buf && s->avail_out < s->avail_buf) {
+ memcpy(s->next_out, s->inflate_buf, s->avail_out);
+ memmove(s->inflate_buf, s->inflate_buf + s->avail_out,
+ s->avail_buf - s->avail_out);
+ s->avail_buf -= s->avail_out;
+ s->avail_out = 0;
+ return 0;
+ }
+
+ if (s->zeros > 0) {
+ sz = min(s->avail_out, s->zeros);
+ memset(s->next_out, 0, sz);
+ s->next_out += sz;
+ s->avail_out -= sz;
+ s->zeros -= sz;
+ continue;
+ }
+
+ if (s->raw > 0) {
+ if (s->avail_in == 0)
+ return CAPN_NEED_MORE;
+
+ sz = min(min(s->avail_out, s->raw), s->avail_in);
+ memcpy(s->next_out, s->next_in, sz);
+ s->next_in += sz;
+ s->next_out += sz;
+ s->avail_in -= sz;
+ s->avail_out -= sz;
+ s->raw -= sz;
+ continue;
+ }
+
+ if (s->avail_in == 0)
+ return 0;
+ else if (s->avail_in < 2)
+ return CAPN_NEED_MORE;
+
+ switch (s->next_in[0]) {
+ case 0xFF:
+ /* 0xFF is followed by 8 bytes raw, followed by
+ * a byte with length in words to read raw */
+ if (s->avail_in < 10)
+ return CAPN_NEED_MORE;
+
+ memcpy(s->inflate_buf, s->next_in+1, 8);
+ s->avail_buf = 8;
+
+ s->raw = s->next_in[9] * 8;
+ s->next_in += 10;
+ s->avail_in -= 10;
+ continue;
+
+ case 0x00:
+ /* 0x00 is followed by a single byte indicating
+ * the count of consecutive zero value words
+ * minus 1 */
+ s->zeros = (s->next_in[1] + 1) * 8;
+ s->next_in += 2;
+ s->avail_in -= 2;
+ continue;
+
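+ /* Worked example (illustrative; taken from the packing example on
+ * capnproto.org/encoding.html, not from the upstream sources): the two
+ * unpacked words
+ *   08 00 00 00 03 00 02 00   19 00 00 00 aa 01 00 00
+ * pack to
+ *   51 08 03 02   31 19 aa 01
+ * i.e. tag bit i is set when byte i of the word is nonzero, and only
+ * the nonzero bytes follow the tag. A single all-zero word packs to
+ * the two bytes 00 00. */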
+ default:
+ hdr = s->next_in[0];
+ sz = 0;
+ for (i = 0; i < 8; i++) {
+ if (hdr & (1 << i))
+ sz++;
+ }
+ if (s->avail_in < 1U + sz)
+ return CAPN_NEED_MORE;
+
+ s->next_in += 1;
+
+ wr = s->inflate_buf;
+ for (i = 0; i < 8; i++) {
+ if (hdr & (1 << i)) {
+ *wr++ = *s->next_in++;
+ } else {
+ *wr++ = 0;
+ }
+ }
+
+ s->avail_buf = 8;
+ s->avail_in -= 1 + sz;
+ continue;
+ }
+ }
+
+ return 0;
+}
+
diff --git a/src/lib/capnp-c/capn.c b/src/lib/capnp-c/capn.c
new file mode 100644
index 0000000..6e0f196
--- /dev/null
+++ b/src/lib/capnp-c/capn.c
@@ -0,0 +1,1115 @@
+/* vim: set sw=8 ts=8 sts=8 noet: */
+/* capn.c
+ *
+ * Copyright (C) 2013 James McKaskill
+ *
+ * This software may be modified and distributed under the terms
+ * of the MIT license. See the LICENSE file for details.
+ */
+
+#ifdef __GNUC__
+#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+#endif
+
+#include "capnp_c.h"
+
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <sys/param.h>
+#endif
+
+#define STRUCT_PTR 0
+#define LIST_PTR 1
+#define FAR_PTR 2
+#define DOUBLE_PTR 6
+
+#define VOID_LIST 0
+#define BIT_1_LIST 1
+#define BYTE_1_LIST 2
+#define BYTE_2_LIST 3
+#define BYTE_4_LIST 4
+#define BYTE_8_LIST 5
+#define PTR_LIST 6
+#define COMPOSITE_LIST 7
+
+#define U64(val) ((uint64_t) (val))
+#define I64(val) ((int64_t) (val))
+#define U32(val) ((uint32_t) (val))
+#define I32(val) ((int32_t) (val))
+#define U16(val) ((uint16_t) (val))
+#define I16(val) ((int16_t) (val))
+
+#ifndef min
+static int min(int a, int b) { return (a < b) ? a : b; }
+#endif
+
+#ifdef BYTE_ORDER
+#define CAPN_LITTLE (BYTE_ORDER == LITTLE_ENDIAN)
+#elif defined(__BYTE_ORDER)
+#define CAPN_LITTLE (__BYTE_ORDER == __LITTLE_ENDIAN)
+#else
+#define CAPN_LITTLE 0
+#endif
+
+struct capn_tree *capn_tree_insert(struct capn_tree *root, struct capn_tree *n) {
+ n->red = 1;
+ n->link[0] = n->link[1] = NULL;
+
+ for (;;) {
+ /* parent, uncle, grandparent, great grandparent link */
+ struct capn_tree *p, *u, *g, **gglink;
+ int dir;
+
+ /* Case 1: N is root */
+ p = n->parent;
+ if (!p) {
+ n->red = 0;
+ root = n;
+ break;
+ }
+
+ /* Case 2: p is black */
+ if (!p->red) {
+ break;
+ }
+
+ g = p->parent;
+ dir = (p == g->link[1]);
+
+ /* Case 3: P and U are red, switch g to red, but must
+ * loop as G could be root or have a red parent
+ *     g              G
+ *    / \            / \
+ *   P   U   to     p   u
+ *  /               /
+ * N               N
+ */
+ u = g->link[!dir];
+ if (u != NULL && u->red) {
+ p->red = 0;
+ u->red = 0;
+ g->red = 1;
+ n = g;
+ continue;
+ }
+
+ if (!g->parent) {
+ gglink = &root;
+ } else if (g->parent->link[1] == g) {
+ gglink = &g->parent->link[1];
+ } else {
+ gglink = &g->parent->link[0];
+ }
+
+ if (dir != (n == p->link[1])) {
+ /* Case 4: rotate on P, then on g
+ * here dir is /
+ *      g             g           n
+ *     / \           / \         / \
+ *    P   u  to     N   u  to   P   G
+ *   / \           / \         /|  / \
+ *  1   N         P   3       1 2 3   u
+ *     / \       / \
+ *    2   3     1   2
+ */
+ struct capn_tree *two = n->link[dir];
+ struct capn_tree *three = n->link[!dir];
+ p->link[!dir] = two;
+ g->link[dir] = three;
+ n->link[dir] = p;
+ n->link[!dir] = g;
+ *gglink = n;
+ n->parent = g->parent;
+ p->parent = n;
+ g->parent = n;
+ if (two)
+ two->parent = p;
+ if (three)
+ three->parent = g;
+ n->red = 0;
+ g->red = 1;
+ } else {
+ /* Case 5: rotate on g
+ * here dir is /
+ *       g             p
+ *      / \           / \
+ *     P   u   to    N   G
+ *    / \           /|   |\
+ *   N   3        1 2   3 u
+ *  / \
+ * 1   2
+ */
+ struct capn_tree *three = p->link[!dir];
+ g->link[dir] = three;
+ p->link[!dir] = g;
+ *gglink = p;
+ p->parent = g->parent;
+ g->parent = p;
+ if (three)
+ three->parent = g;
+ g->red = 1;
+ p->red = 0;
+ }
+
+ break;
+ }
+
+ return root;
+}
+
+void capn_append_segment(struct capn *c, struct capn_segment *s) {
+ s->id = c->segnum++;
+ s->capn = c;
+ s->next = NULL;
+
+ if (c->lastseg) {
+ c->lastseg->next = s;
+ c->lastseg->hdr.link[1] = &s->hdr;
+ s->hdr.parent = &c->lastseg->hdr;
+ } else {
+ c->seglist = s;
+ s->hdr.parent = NULL;
+ }
+
+ c->lastseg = s;
+ c->segtree = capn_tree_insert(c->segtree, &s->hdr);
+}
+
+static char *new_data(struct capn *c, int sz, struct capn_segment **ps) {
+ struct capn_segment *s;
+
+ /* find a segment with sufficient data */
+ for (s = c->seglist; s != NULL; s = s->next) {
+ if (s->len + sz <= s->cap) {
+ goto end;
+ }
+ }
+
+ s = c->create ? c->create(c->user, c->segnum, sz) : NULL;
+ if (!s) {
+ *ps = NULL;
+ return NULL;
+ }
+
+ capn_append_segment(c, s);
+end:
+ *ps = s;
+ s->len += sz;
+ return s->data + s->len - sz;
+}
+
+static struct capn_segment *lookup_segment(struct capn* c, struct capn_segment *s, uint32_t id) {
+ struct capn_tree **x;
+ struct capn_segment *y = NULL;
+
+ if (s && s->id == id)
+ return s;
+ if (!c)
+ return NULL;
+
+ if (id < c->segnum) {
+ x = &c->segtree;
+ while (*x) {
+ y = (struct capn_segment*) *x;
+ if (id == y->id) {
+ return y;
+ } else if (id < y->id) {
+ x = &y->hdr.link[0];
+ } else {
+ x = &y->hdr.link[1];
+ }
+ }
+ } else {
+ /* Otherwise `x` may be uninitialized */
+ return NULL;
+ }
+
+ s = c->lookup ? c->lookup(c->user, id) : NULL;
+ if (!s)
+ return NULL;
+
+ if (id < c->segnum) {
+ s->id = id;
+ s->capn = c;
+ s->next = c->seglist;
+ c->seglist = s;
+ s->hdr.parent = &y->hdr;
+ *x = &s->hdr;
+ c->segtree = capn_tree_insert(c->segtree, &s->hdr);
+ } else {
+ c->segnum = id;
+ capn_append_segment(c, s);
+ }
+
+ return s;
+}
+
+static uint64_t lookup_double(struct capn_segment **s, char **d, uint64_t val) {
+ uint64_t far, tag;
+ size_t off = (U32(val) >> 3) * 8;
+ char *p;
+
+ if ((*s = lookup_segment((*s)->capn, *s, U32(val >> 32))) == NULL) {
+ return 0;
+ }
+
+ p = (*s)->data + off;
+ if (off + 16 > (*s)->len) {
+ return 0;
+ }
+
+ far = capn_flip64(*(uint64_t*) p);
+ tag = capn_flip64(*(uint64_t*) (p+8));
+
+ /* the far tag should not be another double, and the tag
+ * should be struct/list and have no offset */
+ if ((far&7) != FAR_PTR || U32(tag) > LIST_PTR) {
+ return 0;
+ }
+
+ if ((*s = lookup_segment((*s)->capn, *s, U32(far >> 32))) == NULL) {
+ return 0;
+ }
+
+ /* -8 because far pointers reference from the start of
+ * the segment, but offsets reference the end of the
+ * pointer data. Here *d points to where an equivalent
+ * ptr would be.
+ */
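+ /* Illustrative example (not from the upstream sources): if the far
+ * pointer stored in the landing pad targets word 3 of this segment, the
+ * value returned below carries offset 3; with *d set to data - 8 the
+ * caller computes d + 3*8 + 8, which lands exactly on word 3. */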
+ *d = (*s)->data - 8;
+ return U64(U32(far) >> 3 << 2) | tag;
+}
+
+static uint64_t lookup_far(struct capn_segment **s, char **d, uint64_t val) {
+ size_t off = (U32(val) >> 3) * 8;
+
+ if ((*s = lookup_segment((*s)->capn, *s, U32(val >> 32))) == NULL) {
+ return 0;
+ }
+
+ if (off + 8 > (*s)->len) {
+ return 0;
+ }
+
+ *d = (*s)->data + off;
+ return capn_flip64(*(uint64_t*)*d);
+}
+
+static char *struct_ptr(struct capn_segment *s, char *d, int minsz) {
+ uint64_t val = capn_flip64(*(uint64_t*)d);
+ uint16_t datasz;
+
+ switch (val&7) {
+ case FAR_PTR:
+ val = lookup_far(&s, &d, val);
+ break;
+ case DOUBLE_PTR:
+ val = lookup_double(&s, &d, val);
+ break;
+ }
+
+ datasz = U16(val >> 32);
+ d += (I32(U32(val)) << 1) + 8;
+
+ if (val != 0 && (val&3) != STRUCT_PTR && datasz >= minsz && s->data <= d && d < s->data + s->len) {
+ return d;
+ }
+
+ return NULL;
+}
+
+static capn_ptr read_ptr(struct capn_segment *s, char *d) {
+ capn_ptr ret = {CAPN_NULL};
+ uint64_t val;
+ char *e = 0;
+
+ val = capn_flip64(*(uint64_t*) d);
+
+ switch (val&7) {
+ case FAR_PTR:
+ val = lookup_far(&s, &d, val);
+ ret.has_ptr_tag = (U32(val) >> 2) == 0;
+ break;
+ case DOUBLE_PTR:
+ val = lookup_double(&s, &d, val);
+ break;
+ }
+
+ d += (I32(U32(val)) >> 2) * 8 + 8;
+
+ if (d < s->data) {
+ goto err;
+ }
+
+ switch (val & 3) {
+ case STRUCT_PTR:
+ ret.type = val ? CAPN_STRUCT : CAPN_NULL;
+ goto struct_common;
+
+ struct_common:
+ ret.datasz = U32(U16(val >> 32)) * 8;
+ ret.ptrs = U32(U16(val >> 48));
+ e = d + ret.datasz + 8 * ret.ptrs;
+ break;
+
+ case LIST_PTR:
+ ret.type = CAPN_LIST;
+ ret.len = val >> 35;
+
+ switch ((val >> 32) & 7) {
+ case VOID_LIST:
+ e = d;
+ break;
+ case BIT_1_LIST:
+ ret.type = CAPN_BIT_LIST;
+ ret.datasz = (ret.len+7)/8;
+ e = d + ret.datasz;
+ break;
+ case BYTE_1_LIST:
+ ret.datasz = 1;
+ e = d + ret.len;
+ break;
+ case BYTE_2_LIST:
+ ret.datasz = 2;
+ e = d + ret.len * 2;
+ break;
+ case BYTE_4_LIST:
+ ret.datasz = 4;
+ e = d + ret.len * 4;
+ break;
+ case BYTE_8_LIST:
+ ret.datasz = 8;
+ e = d + ret.len * 8;
+ break;
+ case PTR_LIST:
+ ret.type = CAPN_PTR_LIST;
+ e = d + ret.len * 8;
+ break;
+ case COMPOSITE_LIST:
+ if ((size_t)((d+8) - s->data) > s->len) {
+ goto err;
+ }
+
+ val = capn_flip64(*(uint64_t*) d);
+
+ d += 8;
+ e = d + ret.len * 8;
+
+ ret.datasz = U32(U16(val >> 32)) * 8;
+ ret.ptrs = U32(U16(val >> 48));
+ ret.len = U32(val) >> 2;
+ ret.is_composite_list = 1;
+
+ if ((ret.datasz + 8*ret.ptrs) * ret.len != e - d) {
+ goto err;
+ }
+ break;
+ }
+ break;
+
+ default:
+ goto err;
+ }
+
+ if ((size_t)(e - s->data) > s->len)
+ goto err;
+
+ ret.data = d;
+ ret.seg = s;
+ return ret;
+err:
+ memset(&ret, 0, sizeof(ret));
+ return ret;
+}
+
+void capn_resolve(capn_ptr *p) {
+ if (p->type == CAPN_FAR_POINTER) {
+ *p = read_ptr(p->seg, p->data);
+ }
+}
+
+/* TODO: should this handle CAPN_BIT_LIST? */
+capn_ptr capn_getp(capn_ptr p, int off, int resolve) {
+ capn_ptr ret = {CAPN_FAR_POINTER};
+ ret.seg = p.seg;
+
+ capn_resolve(&p);
+
+ switch (p.type) {
+ case CAPN_LIST:
+ /* Return an inner pointer */
+ if (off < p.len) {
+ capn_ptr ret = {CAPN_STRUCT};
+ ret.is_list_member = 1;
+ ret.data = p.data + off * (p.datasz + 8*p.ptrs);
+ ret.seg = p.seg;
+ ret.datasz = p.datasz;
+ ret.ptrs = p.ptrs;
+ return ret;
+ } else {
+ goto err;
+ }
+
+ case CAPN_STRUCT:
+ if (off >= p.ptrs) {
+ goto err;
+ }
+ ret.data = p.data + p.datasz + 8*off;
+ break;
+
+ case CAPN_PTR_LIST:
+ if (off >= p.len) {
+ goto err;
+ }
+ ret.data = p.data + 8*off;
+ break;
+
+ default:
+ goto err;
+ }
+
+ if (resolve) {
+ ret = read_ptr(ret.seg, ret.data);
+ }
+
+ return ret;
+
+err:
+ memset(&p, 0, sizeof(p));
+ return p;
+}
+
+static void write_ptr_tag(char *d, capn_ptr p, int off) {
+ uint64_t val = U64(U32(I32(off/8) << 2));
+
+ switch (p.type) {
+ case CAPN_STRUCT:
+ val |= STRUCT_PTR | (U64(p.datasz/8) << 32) | (U64(p.ptrs) << 48);
+ break;
+
+ case CAPN_LIST:
+ if (p.is_composite_list) {
+ val |= LIST_PTR | (U64(COMPOSITE_LIST) << 32) | (U64(p.len * (p.datasz/8 + p.ptrs)) << 35);
+ } else {
+ val |= LIST_PTR | (U64(p.len) << 35);
+
+ switch (p.datasz) {
+ case 8:
+ val |= (U64(BYTE_8_LIST) << 32);
+ break;
+ case 4:
+ val |= (U64(BYTE_4_LIST) << 32);
+ break;
+ case 2:
+ val |= (U64(BYTE_2_LIST) << 32);
+ break;
+ case 1:
+ val |= (U64(BYTE_1_LIST) << 32);
+ break;
+ case 0:
+ val |= (U64(VOID_LIST) << 32);
+ break;
+ }
+ }
+ break;
+
+ case CAPN_BIT_LIST:
+ val |= LIST_PTR | (U64(BIT_1_LIST) << 32) | (U64(p.len) << 35);
+ break;
+
+ case CAPN_PTR_LIST:
+ val |= LIST_PTR | (U64(PTR_LIST) << 32) | (U64(p.len) << 35);
+ break;
+
+ default:
+ val = 0;
+ break;
+ }
+
+ *(uint64_t*) d = capn_flip64(val);
+}
+
+static void write_far_ptr(char *d, struct capn_segment *s, char *tgt) {
+ *(uint64_t*) d = capn_flip64(FAR_PTR | U64(tgt - s->data) | (U64(s->id) << 32));
+}
+
+static void write_double_far(char *d, struct capn_segment *s, char *tgt) {
+ *(uint64_t*) d = capn_flip64(DOUBLE_PTR | U64(tgt - s->data) | (U64(s->id) << 32));
+}
+
+#define NEED_TO_COPY 1
+
+static int write_ptr(struct capn_segment *s, char *d, capn_ptr p) {
+ /* note p.seg can be NULL if it's a ptr to static data */
+ char *pdata = p.data - 8*p.is_composite_list;
+
+ if (p.type == CAPN_NULL || (p.type == CAPN_STRUCT && p.datasz == 0 && p.ptrs == 0)) {
+ write_ptr_tag(d, p, 0);
+ return 0;
+
+ } else if (!p.seg || p.seg->capn != s->capn || p.is_list_member) {
+ return NEED_TO_COPY;
+
+ } else if (p.seg == s) {
+ write_ptr_tag(d, p, pdata - d - 8);
+ return 0;
+
+ } else if (p.has_ptr_tag) {
+ /* By lucky chance, the data has a tag in front
+ * of it. This happens when new_object had to move
+ * the data to a new segment. */
+ write_far_ptr(d, p.seg, pdata-8);
+ return 0;
+
+ } else if (p.seg->len + 8 <= p.seg->cap) {
+ /* The target segment has enough room for tag */
+ char *t = p.seg->data + p.seg->len;
+ write_ptr_tag(t, p, pdata - t - 8);
+ write_far_ptr(d, p.seg, t);
+ p.seg->len += 8;
+ return 0;
+
+ } else {
+ /* have to allocate room for a double far
+ * pointer */
+ char *t;
+
+ if (s->len + 16 <= s->cap) {
+ /* Try and allocate in the src segment
+ * first. This should improve lookup on
+ * read. */
+ t = s->data + s->len;
+ s->len += 16;
+ } else {
+ t = new_data(s->capn, 16, &s);
+ if (!t) return -1;
+ }
+
+ write_far_ptr(t, p.seg, pdata);
+ write_ptr_tag(t+8, p, 0);
+ write_double_far(d, s, t);
+ return 0;
+ }
+}
+
+struct copy {
+ struct capn_tree hdr;
+ struct capn_ptr to, from;
+ char *fbegin, *fend;
+};
+
+static capn_ptr new_clone(struct capn_segment *s, capn_ptr p) {
+ switch (p.type) {
+ case CAPN_STRUCT:
+ return capn_new_struct(s, p.datasz, p.ptrs);
+ case CAPN_PTR_LIST:
+ return capn_new_ptr_list(s, p.len);
+ case CAPN_BIT_LIST:
+ return capn_new_list1(s, p.len).p;
+ case CAPN_LIST:
+ return capn_new_list(s, p.len, p.datasz, p.ptrs);
+ default:
+ return p;
+ }
+}
+
+static int is_ptr_equal(const struct capn_ptr *a, const struct capn_ptr *b) {
+ return a->data == b->data
+ && a->type == b->type
+ && a->len == b->len
+ && a->datasz == b->datasz
+ && a->ptrs == b->ptrs;
+}
+
+static int data_size(struct capn_ptr p) {
+ switch (p.type) {
+ case CAPN_BIT_LIST:
+ return p.datasz;
+ case CAPN_PTR_LIST:
+ return p.len*8;
+ case CAPN_STRUCT:
+ return p.datasz + 8*p.ptrs;
+ case CAPN_LIST:
+ return p.len * (p.datasz + 8*p.ptrs) + 8*p.is_composite_list;
+ default:
+ return 0;
+ }
+}
+
+static int copy_ptr(struct capn_segment *seg, char *data, struct capn_ptr *t, struct capn_ptr *f, int *dep) {
+ struct capn *c = seg->capn;
+ struct copy *cp = NULL;
+ struct capn_tree **xcp;
+ char *fbegin = f->data - 8*f->is_composite_list;
+ char *fend = fbegin + data_size(*f);
+ int zero_sized = (fend == fbegin);
+
+ /* We always copy list members as it would otherwise be an
+ * overlapped pointer (the data is owned by the enclosing list).
+ * We do not bother with the overlapped lookup for zero sized
+ * structures/lists as they never overlap. Nor do we add them to
+ * the copy list as there is no data to be shared by multiple
+ * pointers.
+ */
+
+ xcp = &c->copy;
+ while (*xcp && !zero_sized) {
+ cp = (struct copy*) *xcp;
+ if (fend <= cp->fbegin) {
+ xcp = &cp->hdr.link[0];
+ } else if (cp->fend <= fbegin) {
+ xcp = &cp->hdr.link[1];
+ } else if (is_ptr_equal(f, &cp->from)) {
+ /* we already have a copy so just point to that */
+ return write_ptr(seg, data, cp->to);
+ } else {
+ /* pointer to overlapped data */
+ return -1;
+ }
+ }
+
+ /* no copy found - have to create a new copy */
+ *t = new_clone(seg, *f);
+
+ if (write_ptr(seg, data, *t))
+ return -1;
+
+ /* add the copy to the copy tree so we can look for overlapping
+ * source pointers and handle recursive structures */
+ if (!zero_sized) {
+ struct copy *n;
+ struct capn_segment *cs = c->copylist;
+
+ /* need to allocate a struct copy */
+ if (!cs || cs->len + (int)sizeof(*n) > cs->cap) {
+ cs = c->create_local ? c->create_local(c->user, sizeof(*n)) : NULL;
+ if (!cs) {
+ /* can't allocate a copy structure */
+ return -1;
+ }
+ cs->next = c->copylist;
+ c->copylist = cs;
+ }
+
+ n = (struct copy*) (cs->data + cs->len);
+ cs->len += sizeof(*n);
+
+ n->from = *f;
+ n->to = *t;
+ n->fbegin = fbegin;
+ n->fend = fend;
+
+ *xcp = &n->hdr;
+ n->hdr.parent = &cp->hdr;
+
+ c->copy = capn_tree_insert(c->copy, &n->hdr);
+ }
+
+ /* minimize the number of types the main copy routine has to
+ * deal with to just CAPN_LIST and CAPN_PTR_LIST. ptr list only
+ * needs t->type, t->len, t->data, t->seg, f->data, f->seg to
+ * be valid */
+ switch (t->type) {
+ case CAPN_STRUCT:
+ if (t->datasz) {
+ memcpy(t->data, f->data, t->datasz);
+ t->data += t->datasz;
+ f->data += t->datasz;
+ }
+ if (t->ptrs) {
+ t->type = CAPN_PTR_LIST;
+ t->len = t->ptrs;
+ (*dep)++;
+ }
+ return 0;
+
+ case CAPN_BIT_LIST:
+ memcpy(t->data, f->data, t->datasz);
+ return 0;
+
+ case CAPN_LIST:
+ if (!t->len) {
+ /* empty list - nothing to copy */
+ } else if (t->ptrs && t->datasz) {
+ (*dep)++;
+ } else if (t->datasz) {
+ memcpy(t->data, f->data, t->len * t->datasz);
+ } else if (t->ptrs) {
+ t->type = CAPN_PTR_LIST;
+ t->len *= t->ptrs;
+ (*dep)++;
+ }
+ return 0;
+
+ case CAPN_PTR_LIST:
+ if (t->len) {
+ (*dep)++;
+ }
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+
+static void copy_list_member(capn_ptr* t, capn_ptr *f, int *dep) {
+ /* copy struct data */
+ int sz = min(t->datasz, f->datasz);
+ memcpy(t->data, f->data, sz);
+ memset(t->data + sz, 0, t->datasz - sz);
+ t->data += t->datasz;
+ f->data += f->datasz;
+
+ /* reset excess pointers */
+ sz = min(t->ptrs, f->ptrs);
+ memset(t->data + sz, 0, 8*(t->ptrs - sz));
+
+ /* create a pointer list for the main loop to copy */
+ if (t->ptrs) {
+ t->type = CAPN_PTR_LIST;
+ t->len = t->ptrs;
+ (*dep)++;
+ }
+}
+
+#define MAX_COPY_DEPTH 32
+
+/* TODO: handle CAPN_BIT_LIST and setting from an inner bit list member */
+int capn_setp(capn_ptr p, int off, capn_ptr tgt) {
+ struct capn_ptr to[MAX_COPY_DEPTH], from[MAX_COPY_DEPTH];
+ char *data;
+ int err, dep = 0;
+
+ capn_resolve(&p);
+
+ if (tgt.type == CAPN_FAR_POINTER && tgt.seg->capn == p.seg->capn) {
+ uint64_t val = capn_flip64(*(uint64_t*) tgt.data);
+ if ((val & 3) == FAR_PTR) {
+ *(uint64_t*) p.data = *(uint64_t*) tgt.data;
+ return 0;
+ }
+ }
+
+ capn_resolve(&tgt);
+
+ switch (p.type) {
+ case CAPN_LIST:
+ if (off >= p.len || tgt.type != CAPN_STRUCT)
+ return -1;
+
+ to[0] = p;
+ to[0].data += off * (p.datasz + 8*p.ptrs);
+ from[0] = tgt;
+ copy_list_member(to, from, &dep);
+ break;
+
+ case CAPN_PTR_LIST:
+ if (off >= p.len)
+ return -1;
+ data = p.data + 8*off;
+ goto copy_ptr;
+
+ case CAPN_STRUCT:
+ if (off >= p.ptrs)
+ return -1;
+ data = p.data + p.datasz + 8*off;
+ goto copy_ptr;
+
+ copy_ptr:
+ err = write_ptr(p.seg, data, tgt);
+ if (err != NEED_TO_COPY)
+ return err;
+
+ /* Depth first copy the source whilst using a pointer stack to
+ * maintain the ptr to set and size left to copy at each level.
+ * We also maintain a rbtree (capn->copy) of the copies indexed
+ * by the source data. This way we can detect overlapped
+ * pointers in the source (and bail) and recursive structures
+ * (and point to the previous copy).
+ */
+
+ from[0] = tgt;
+ if (copy_ptr(p.seg, data, to, from, &dep))
+ return -1;
+ break;
+
+ default:
+ return -1;
+ }
+
+ while (dep) {
+ struct capn_ptr *tc = &to[dep-1], *tn = &to[dep];
+ struct capn_ptr *fc = &from[dep-1], *fn = &from[dep];
+
+ if (dep+1 == MAX_COPY_DEPTH) {
+ return -1;
+ }
+
+ if (!tc->len) {
+ dep--;
+ continue;
+ }
+
+ if (tc->type == CAPN_LIST) {
+ *fn = capn_getp(*fc, 0, 1);
+ *tn = capn_getp(*tc, 0, 1);
+
+ copy_list_member(tn, fn, &dep);
+
+ fc->data += fc->datasz + 8*fc->ptrs;
+ tc->data += tc->datasz + 8*tc->ptrs;
+ tc->len--;
+
+ } else { /* CAPN_PTR_LIST */
+ *fn = read_ptr(fc->seg, fc->data);
+
+ if (fn->type && copy_ptr(tc->seg, tc->data, tn, fn, &dep))
+ return -1;
+
+ fc->data += 8;
+ tc->data += 8;
+ tc->len--;
+ }
+ }
+
+ return 0;
+}
+
+/* TODO: handle CAPN_LIST, CAPN_PTR_LIST for bit lists */
+
+int capn_get1(capn_list1 l, int off) {
+ return l.p.type == CAPN_BIT_LIST
+ && off < l.p.len
+ && (l.p.data[off/8] & (1 << (off%8))) != 0;
+}
+
+int capn_set1(capn_list1 l, int off, int val) {
+ if (l.p.type != CAPN_BIT_LIST || off >= l.p.len)
+ return -1;
+ if (val) {
+ l.p.data[off/8] |= 1 << (off%8);
+ } else {
+ l.p.data[off/8] &= ~(1 << (off%8));
+ }
+ return 0;
+}
+
+int capn_getv1(capn_list1 l, int off, uint8_t *data, int sz) {
+ /* Note we only support aligned reads */
+ int bsz;
+ capn_ptr p;
+ capn_resolve(&l.p);
+ p = l.p;
+ if (p.type != CAPN_BIT_LIST || (off & 7) != 0)
+ return -1;
+
+ bsz = (sz + 7) / 8;
+ off /= 8;
+
+ if (off + sz > p.datasz) {
+ memcpy(data, p.data + off, p.datasz - off);
+ return p.len - off*8;
+ } else {
+ memcpy(data, p.data + off, bsz);
+ return sz;
+ }
+}
+
+int capn_setv1(capn_list1 l, int off, const uint8_t *data, int sz) {
+ /* Note we only support aligned writes */
+ int bsz;
+ capn_ptr p = l.p;
+ if (p.type != CAPN_BIT_LIST || (off & 7) != 0)
+ return -1;
+
+ bsz = (sz + 7) / 8;
+ off /= 8;
+
+ if (off + sz > p.datasz) {
+ memcpy(p.data + off, data, p.datasz - off);
+ return p.len - off*8;
+ } else {
+ memcpy(p.data + off, data, bsz);
+ return sz;
+ }
+}
+
+/* pull out whether we add a tag or not as a define so the unit test can
+ * test double far pointers by not creating tags */
+#ifndef ADD_TAG
+#define ADD_TAG 1
+#endif
+
+static void new_object(capn_ptr *p, int bytes) {
+ struct capn_segment *s = p->seg;
+
+ if (!s) {
+ memset(p, 0, sizeof(*p));
+ return;
+ }
+
+ /* pointer needs to be initialised to get a valid offset on write */
+ if (!bytes) {
+ p->data = s->data + s->len;
+ return;
+ }
+
+ /* all allocations are 8 byte aligned */
+ bytes = (bytes + 7) & ~7;
+
+ if (s->len + bytes <= s->cap) {
+ p->data = s->data + s->len;
+ s->len += bytes;
+ return;
+ }
+
+ /* add a tag whenever we switch segments so that write_ptr can
+ * use it */
+ p->data = new_data(s->capn, bytes + ADD_TAG*8, &p->seg);
+ if (!p->data) {
+ memset(p, 0, sizeof(*p));
+ return;
+ }
+
+ if (ADD_TAG) {
+ write_ptr_tag(p->data, *p, 0);
+ p->data += 8;
+ p->has_ptr_tag = 1;
+ }
+}
+
+capn_ptr capn_root(struct capn *c) {
+ capn_ptr r = {CAPN_PTR_LIST};
+ r.seg = lookup_segment(c, NULL, 0);
+ r.data = r.seg ? r.seg->data : new_data(c, 8, &r.seg);
+ r.len = 1;
+
+ if (!r.seg || r.seg->cap < 8) {
+ memset(&r, 0, sizeof(r));
+ } else if (r.seg->len < 8) {
+ r.seg->len = 8;
+ }
+
+ return r;
+}
+
+capn_ptr capn_new_struct(struct capn_segment *seg, int datasz, int ptrs) {
+ capn_ptr p = {CAPN_STRUCT};
+ p.seg = seg;
+ p.datasz = (datasz + 7) & ~7;
+ p.ptrs = ptrs;
+ new_object(&p, p.datasz + 8*p.ptrs);
+ return p;
+}
+
+capn_ptr capn_new_list(struct capn_segment *seg, int sz, int datasz, int ptrs) {
+ capn_ptr p = {CAPN_LIST};
+ p.seg = seg;
+ p.len = sz;
+
+ if (ptrs || datasz > 8) {
+ p.is_composite_list = 1;
+ p.datasz = (datasz + 7) & ~7;
+ p.ptrs = ptrs;
+ new_object(&p, p.len * (p.datasz + 8*p.ptrs) + 8);
+ if (p.data) {
+ uint64_t hdr = STRUCT_PTR | (U64(p.len) << 2) | (U64(p.datasz/8) << 32) | (U64(ptrs) << 48);
+ *(uint64_t*) p.data = capn_flip64(hdr);
+ p.data += 8;
+ }
+ } else if (datasz > 4) {
+ p.datasz = 8;
+ new_object(&p, p.len * 8);
+ } else if (datasz > 2) {
+ p.datasz = 4;
+ new_object(&p, p.len * 4);
+ } else {
+ p.datasz = datasz;
+ new_object(&p, p.len * datasz);
+ }
+
+ return p;
+}
+
+capn_list1 capn_new_list1(struct capn_segment *seg, int sz) {
+ capn_list1 l = {{CAPN_BIT_LIST}};
+ l.p.seg = seg;
+ l.p.datasz = (sz+7)/8;
+ l.p.len = sz;
+ new_object(&l.p, l.p.datasz);
+ return l;
+}
+
+capn_ptr capn_new_ptr_list(struct capn_segment *seg, int sz) {
+ capn_ptr p = {CAPN_PTR_LIST};
+ p.seg = seg;
+ p.len = sz;
+ p.ptrs = 0;
+ p.datasz = 0;
+ new_object(&p, sz*8);
+ return p;
+}
+
+capn_ptr capn_new_string(struct capn_segment *seg, const char *str, ssize_t sz) {
+ capn_ptr p = {CAPN_LIST};
+ p.seg = seg;
+ p.len = ((sz >= 0) ? (size_t)sz : strlen(str)) + 1;
+ p.datasz = 1;
+ new_object(&p, p.len);
+ if (p.data) {
+ memcpy(p.data, str, p.len - 1);
+ p.data[p.len - 1] = '\0';
+ }
+ return p;
+}
+
+capn_text capn_get_text(capn_ptr p, int off, capn_text def) {
+ capn_ptr m = capn_getp(p, off, 1);
+ capn_text ret = def;
+ if (m.type == CAPN_LIST && m.datasz == 1 && m.len && m.data[m.len - 1] == 0) {
+ ret.seg = m.seg;
+ ret.str = m.data;
+ ret.len = m.len - 1;
+ }
+ return ret;
+}
+
+int capn_set_text(capn_ptr p, int off, capn_text tgt) {
+ capn_ptr m = {CAPN_NULL};
+ if (tgt.seg) {
+ m.type = CAPN_LIST;
+ m.seg = tgt.seg;
+ m.data = (char*)tgt.str;
+ m.len = tgt.len + 1;
+ m.datasz = 1;
+ } else if (tgt.str) {
+ m = capn_new_string(p.seg, tgt.str, tgt.len);
+ }
+ return capn_setp(p, off, m);
+}
+
+capn_data capn_get_data(capn_ptr p, int off) {
+ capn_data ret;
+ ret.p = capn_getp(p, off, 1);
+ if (ret.p.type != CAPN_LIST || ret.p.datasz != 1) {
+ memset(&ret, 0, sizeof(ret));
+ }
+ return ret;
+}
+
+#define SZ 8
+#include "capn-list.inc"
+#undef SZ
+
+#define SZ 16
+#include "capn-list.inc"
+#undef SZ
+
+#define SZ 32
+#include "capn-list.inc"
+#undef SZ
+
+#define SZ 64
+#include "capn-list.inc"
+#undef SZ