From 9e234521f24b6e12cedb8b6a1332dae3385a2f95 Mon Sep 17 00:00:00 2001
From: Kostyantyn Vorobyov <kostyantyn.vorobyov@cea.fr>
Date: Wed, 23 Nov 2016 16:04:53 +0100
Subject: [PATCH] Import of segment memory model

---
 .../segment_model/e_acsl_segment_mmodel.c     |  185 +++
 .../segment_model/e_acsl_segment_tracking.h   | 1248 +++++++++++++++++
 .../segment_model/e_acsl_shadow_layout.h      |  400 ++++++
 3 files changed, 1833 insertions(+)
 create mode 100644 src/plugins/e-acsl/share/e-acsl/segment_model/e_acsl_segment_mmodel.c
 create mode 100644 src/plugins/e-acsl/share/e-acsl/segment_model/e_acsl_segment_tracking.h
 create mode 100644 src/plugins/e-acsl/share/e-acsl/segment_model/e_acsl_shadow_layout.h

diff --git a/src/plugins/e-acsl/share/e-acsl/segment_model/e_acsl_segment_mmodel.c b/src/plugins/e-acsl/share/e-acsl/segment_model/e_acsl_segment_mmodel.c
new file mode 100644
index 00000000000..65132f44835
--- /dev/null
+++ b/src/plugins/e-acsl/share/e-acsl/segment_model/e_acsl_segment_mmodel.c
@@ -0,0 +1,185 @@
+/**************************************************************************/
+/*                                                                        */
+/*  This file is part of the Frama-C's E-ACSL plug-in.                    */
+/*                                                                        */
+/*  Copyright (C) 2012-2015                                               */
+/*    CEA (Commissariat à l'énergie atomique et aux énergies              */
+/*         alternatives)                                                  */
+/*                                                                        */
+/*  you can redistribute it and/or modify it under the terms of the GNU   */
+/*  Lesser General Public License as published by the Free Software       */
+/*  Foundation, version 2.1.                                              */
+/*                                                                        */
+/*  It is distributed in the hope that it will be useful,                 */
+/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
+/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
+/*  GNU Lesser General Public License for more details.                   */
+/*                                                                        */
+/*  See the GNU Lesser General Public License version 2.1                 */
+/*  for more details (enclosed in the file license/LGPLv2.1).             */
+/*                                                                        */
+/**************************************************************************/
+
+/*! ***********************************************************************
+ * \file   e_acsl_segment_mmodel.c
+ * \brief  Implementation of E-ACSL public API for a segment (shadow) memory
+ *   model. See e_acsl_mmodel_api.h for details.
+***************************************************************************/
+
+#include <sys/mman.h>
+#include <errno.h>
+#include <sys/resource.h>
+
+static int valid(void * ptr, size_t size);
+
+#include "e_acsl_string.h"
+#include "e_acsl_bits.h"
+#include "e_acsl_printf.h"
+#include "e_acsl_assert.h"
+#include "e_acsl_debug.h"
+#include "e_acsl_malloc.h"
+#include "e_acsl_shadow_layout.h"
+#include "e_acsl_segment_tracking.h"
+#include "e_acsl_mmodel_api.h"
+
+static void * store_block(void * ptr, size_t size) {
+  /* Only stack-global memory blocks are recorded explicitly via this function.
+   * Heap blocks should be tracked internally using memory allocation functions
+   * such as malloc or calloc. */
+  shadow_alloca(ptr, size, IS_ON_GLOBAL(ptr));
+  return ptr;
+}
+
+static void delete_block(void * ptr) {
+  /* Block deletion should be performed on stack/global addresses only,
+   * heap blocks should be deallocated manually via free/cfree/realloc. */
+  if (IS_ON_STACK(ptr)) {
+    stack_shadow_freea(ptr);
+  } else if (IS_ON_GLOBAL(ptr)) {
+    global_shadow_freea(ptr);
+  }
+}
+
+static void full_init(void * ptr) {
+  initialize(ptr, block_length(ptr));
+}
+
+static void readonly(void * ptr) {
+  mark_readonly((uintptr_t)ptr, block_length(ptr));
+}
+
+/* ****************** */
+/* E-ACSL annotations */
+/* ****************** */
+
+static int valid(void * ptr, size_t size) {
+  uintptr_t addr = (uintptr_t)ptr;
+  if (!IS_ON_VALID(addr))
+    return 0;
+  TRY_SEGMENT(addr,
+    return heap_allocated(addr, size),
+    return stack_allocated(addr, size),
+    return global_allocated(addr, size) && !global_readonly(addr));
+  return 0;
+}
+
+static int valid_read(void * ptr, size_t size) {
+  uintptr_t addr = (uintptr_t)ptr;
+  if (!IS_ON_VALID(addr))
+    return 0;
+  TRY_SEGMENT(addr,
+    return heap_allocated(addr, size),
+    return stack_allocated(addr, size),
+    return global_allocated(addr, size));
+  return 0;
+}
+
+/*! NB: The implementation for this function can also be specified via
+ * \p base_addr macro that will eventually call ::TRY_SEGMENT. The following
+ * implementation is preferred for performance reasons. */
+static void * segment_base_addr(void * ptr) {
+  TRY_SEGMENT(ptr,
+    return (void*)heap_info((uintptr_t)ptr, 'B'),
+    return (void*)stack_info((uintptr_t)ptr, 'B'),
+    return (void*)global_info((uintptr_t)ptr, 'B'));
+  return NULL;
+}
+
+/*! NB: Implementation of the following function can also be specified
+ * via \p block_length macro. A more direct approach via ::TRY_SEGMENT
+ * is preffered for performance reasons. */
+static size_t segment_block_length(void * ptr) {
+  TRY_SEGMENT(ptr,
+    return heap_info((uintptr_t)ptr, 'L'),
+    return stack_info((uintptr_t)ptr, 'L'),
+    return global_info((uintptr_t)ptr, 'L'))
+  return 0;
+}
+
+/*! NB: Implementation of the following function can also be specified
+ * via \p offset macro. A more direct approach via ::TRY_SEGMENT
+ * is preffered for performance reasons. */
+static int segment_offset(void *ptr) {
+  TRY_SEGMENT(ptr,
+    return heap_info((uintptr_t)ptr, 'O'),
+    return stack_info((uintptr_t)ptr, 'O'),
+    return global_info((uintptr_t)ptr, 'O'));
+  return 0;
+}
+
+static int initialized(void * ptr, size_t size) {
+  uintptr_t addr = (uintptr_t)ptr;
+  TRY_SEGMENT_WEAK(addr,
+    return heap_initialized(addr, size),
+    return stack_initialized(addr, size),
+    return global_initialized(addr, size));
+  return 0;
+}
+
+static size_t get_heap_size(void) {
+  return heap_size;
+}
+
+static void memory_init(int *argc_ref, char *** argv_ref, size_t ptr_size) {
+  arch_assert(ptr_size);
+  initialize_report_file(argc_ref, argv_ref);
+  init_pgm_layout(argc_ref, argv_ref);
+  if (argc_ref && argv_ref)
+    argv_alloca(*argc_ref, *argv_ref);
+}
+
+static void memory_clean(void) {
+  clean_pgm_layout();
+}
+
+/* API BINDINGS {{{ */
+/* Heap allocation (native) */
+strong_alias(shadow_malloc, malloc)
+strong_alias(shadow_calloc, calloc)
+strong_alias(shadow_realloc, realloc)
+strong_alias(shadow_free, free)
+strong_alias(shadow_free, cfree)
+strong_alias(shadow_aligned_alloc, aligned_alloc)
+strong_alias(shadow_posix_memalign, posix_memalign)
+/* Explicit tracking */
+public_alias(delete_block)
+public_alias(store_block)
+/* Predicates */
+public_alias2(segment_offset, offset)
+public_alias2(segment_base_addr, base_addr)
+public_alias2(segment_block_length, block_length)
+public_alias(valid_read)
+public_alias(valid)
+public_alias(initialized)
+public_alias(freeable)
+public_alias(readonly)
+/* Block initialization  */
+public_alias(initialize)
+public_alias(full_init)
+/* Memory state initialization */
+public_alias(memory_clean)
+public_alias(memory_init)
+/* Heap size */
+public_alias(get_heap_size)
+public_alias(heap_size)
+/* }}} */
diff --git a/src/plugins/e-acsl/share/e-acsl/segment_model/e_acsl_segment_tracking.h b/src/plugins/e-acsl/share/e-acsl/segment_model/e_acsl_segment_tracking.h
new file mode 100644
index 00000000000..46de70eb46b
--- /dev/null
+++ b/src/plugins/e-acsl/share/e-acsl/segment_model/e_acsl_segment_tracking.h
@@ -0,0 +1,1248 @@
+/**************************************************************************/
+/*                                                                        */
+/*  This file is part of the Frama-C's E-ACSL plug-in.                    */
+/*                                                                        */
+/*  Copyright (C) 2012-2015                                               */
+/*    CEA (Commissariat à l'énergie atomique et aux énergies              */
+/*         alternatives)                                                  */
+/*                                                                        */
+/*  you can redistribute it and/or modify it under the terms of the GNU   */
+/*  Lesser General Public License as published by the Free Software       */
+/*  Foundation, version 2.1.                                              */
+/*                                                                        */
+/*  It is distributed in the hope that it will be useful,                 */
+/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
+/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
+/*  GNU Lesser General Public License for more details.                   */
+/*                                                                        */
+/*  See the GNU Lesser General Public License version 2.1                 */
+/*  for more details (enclosed in the file license/LGPLv2.1).             */
+/*                                                                        */
+/**************************************************************************/
+
+/*! ***********************************************************************
+ * \file  e_acsl_segment_tracking.h
+ * \brief Core functionality of the segment-based memory model
+***************************************************************************/
+
+/* Segment settings and shadow values interpretation {{{ */
+
+/*! @brief Byte size of a heap segment.
+ * This size is potentially used as an argument to `memalign`.
+ * It SHOULD be a multiple of 2 and a multiple of a pointer size.
+ *
+ * \b FIXME: in the current implementation there might be issues with segment
+ * size greater than 64 bytes. This is because presently some initialization
+ * functionality relies on the fact that initialization per segment can be set
+ * and/or evaluated using 8-byte bitmask. */
+#define HEAP_SEGMENT 16
+
+/*! \brief Number of required to represent initialization of a segment. */
+#define INIT_BYTES (HEAP_SEGMENT/8)
+
+/*! \brief Size (in bytes) of a long block on the stack. */
+#define LONG_BLOCK 8
+
+/*! \brief Bit offset in a short shadow byte that represents initialization. */
+#define INIT_BIT 0
+
+/*! \brief Bit offset in a short shadow byte that represents read-only or
+ * read-write access.
+ *
+ * This is such that the value of 1 is read-only, and 0 is read/write */
+#define READONLY_BIT 1
+
+/*! \brief Evaluate to a non-zero value if the size of a memory
+ * block indicates that it is a long one */
+#define IS_LONG_BLOCK(_size) (_size > LONG_BLOCK)
+
+/*! \brief Offset within a long block that identifies the portion of the block
+ * that does not have a corresponding shadow and reuse the shadow of a previous
+ * segment.
+ * E.g., given a long block of 11 bytes the boundary is 8. Then, bytes [0,7] of
+ * the block are shadowed (storing block offset and size) and bytes 8-10 are
+ * not. This is because 3 bytes are not sufficient to store size and offset.
+ * These remaining bytes reuse the shadow of [0,7]. */
+#define LONG_BLOCK_BOUNDARY(_size) (_size - _size%LONG_BLOCK)
+
+/*! \brief Short shadow of a long block consists of a 8-byte segment + a
+ * remainder. For instance, a 18-byte block is represented by two 8-byte
+ * segments + 2 bytes.  Each byte of a segment stores an offset in the long
+ * shadow. The offsets for each such segment can be expressed using the
+ * following number obtained by compressing all eight bytes with offsets set
+ * into a single block. */
+#define LONG_BLOCK_MASK 15913703276567643328UL
+
+/*! \brief 6 higher bytes of a memory cell on stack that belongs to a long
+ * memory block store offsets relative to meta-data in long shadow. The offsets
+ * start with the below number. E.g., if the bits store 51, then the offset at
+ * which to read meta-data is (51 - 48). */
+#define LONG_BLOCK_INDEX_START 48
+
+/*! \brief Increase the size to a multiple of a segment size. */
+#define ALIGNED_SIZE(_s) \
+  (_s + ((_s%HEAP_SEGMENT) ? (HEAP_SEGMENT - _s%HEAP_SEGMENT) : 0))
+
+/*! \brief Given the size of a block return the number of segments
+ * that represent that block in the heap shadow */
+#define BLOCK_SEGMENTS(_s) (ALIGNED_SIZE(_s)/HEAP_SEGMENT)
+
+/* \brief Maximal size_t value that does not cause overflow via addition
+ * when segment size is added. */
+const size_t max_allocated = SIZE_MAX - HEAP_SEGMENT;
+
+/* \brief Return actual allocation size which takes into account meta-block.
+ * In the present implementation it is the requested size of a heap block +
+ * the size if a segment */
+#define ALLOC_SIZE(_s) (_s > 0 && _s < max_allocated ? _s + HEAP_SEGMENT : 0)
+
+/*! \brief The number of bytes used to represent a heap block in its shadow.
+ * In the current implementation it aligned size + meta-segment size */
+#define HEAP_SHADOW_BLOCK_SIZE(_s) ((BLOCK_SEGMENTS(_s) + 1)*HEAP_SEGMENT)
+
+/*! \brief For short blocks numbers 1 to 36 represent lengths and offsets,
+ * such that:
+ * - 0 -> length 0, offset 0
+ * - 1 -> length 1, offset 0,
+ * - 2 -> length 2, offset 0,
+ * - 3 -> length 2, offset 1 and so on.
+ *
+ * The below data is used to identify lengths and offsets:
+ * Given x is a number from [1, 36] range:
+ *   - short_lengths[x] -> length of a block
+ *   - short_offsets[x] -> offset within a block */
+static const char short_lengths[] = {
+  0,
+  1,
+  2,2,
+  3,3,3,
+  4,4,4,4,
+  5,5,5,5,5,
+  6,6,6,6,6,6,
+  7,7,7,7,7,7,7,
+  8,8,8,8,8,8,8,8
+};
+
+static const char short_offsets[] = {
+  0,
+  0,
+  0,1,
+  0,1,2,
+  0,1,2,3,
+  0,1,2,3,4,
+  0,1,2,3,4,5,
+  0,1,2,3,4,5,6,
+  0,1,2,3,4,5,6,7
+};
+
+/*! \brief Mask for marking a heap segment as initialized.
+ * For instance, let `uintptr_t *p' point to the start of a heap segment
+ * in the heap shadow, then 'p[1] | heap_init_mask` sets initialization bits.
+ * NOTE: This approach cannot deal with segments larger than 64 bits. */
+const uint64_t heap_init_mask = ~(ONE << HEAP_SEGMENT);
+
+/*! \brief Masks for checking of initialization of global/stack allocated blocks.
+ * A byte allocated globally or on stack is deemed initialized if its
+ * least significant bit is set to `1' and uninitialized otherwise.
+ * The binary representation is then as follows (assuming the leftmost
+ * bit is the least significant one):
+ *
+ *  00000000 00000000 00000000 00000000 ... (0)
+ *  10000000 00000000 00000000 00000000 ... (1)
+ *  10000000 10000000 00000000 00000000 ... (257)
+ *  10000000 10000000 10000000 00000000 ... (65793)
+ *  10000000 10000000 10000000 10000000 ... (16843009)
+ *  ...
+ *
+ * For instance, mark first X bytes of a number N as initialised:
+ *    N |= static_init_masks[X] */
+const uint64_t static_init_masks [] = {
+  0, /* 0 bytes */
+  1,  /* 1 byte */
+  257,  /* 2 bytes */
+  65793,  /* 3 bytes */
+  16843009,  /* 4 bytes */
+  4311810305,  /* 5 bytes */
+  1103823438081,  /* 6 bytes */
+  282578800148737,	/* 7 bytes */
+  72340172838076673		/* 8 bytes */
+};
+
+/*! \brief Bit masks for setting read-only (second least significant) bits.
+ * Binary representation (assuming the least significant bit is the
+ * leftmost bit) is follows:
+ *
+ *  00000000 00000000 00000000 00000000 ... (0)
+ *  01000000 00000000 00000000 00000000 ... (2)
+ *  01000000 01000000 00000000 00000000 ... (514)
+ *  01000000 01000000 01000000 00000000 ... (131586)
+ *  01000000 01000000 01000000 01000000 ... (33686018)
+ *  ...
+ *
+ *  For instance, mark first X bytes of a number N as read-only:
+ *    N |= static_readonly_masks[X] */
+const uint64_t static_readonly_masks [] = {
+  0, /* 0 bytes */
+  2, /* 1 byte */
+  514, /* 2 bytes */
+  131586, /* 3 bytes */
+  33686018, /* 4 bytes */
+  8623620610, /* 5 bytes */
+  2207646876162, /* 6 bytes */
+  565157600297474, /* 7 bytes */
+  144680345676153346 /* 8 bytes */
+};
+/* }}} */
+
+/* E-ACSL predicates {{{ */
+/* See definitions for documentation */
+static uintptr_t heap_info(uintptr_t addr, char type);
+static uintptr_t static_info(uintptr_t addr, char type, int global);
+static int heap_allocated(uintptr_t addr, size_t size);
+static int static_allocated(uintptr_t addr, long size, int global);
+static int freeable(void *ptr);
+
+/*! \brief Wrapper around ::static_allocated for stack addresses. */
+#define stack_allocated(_addr, _size)  static_allocated(_addr, _size, 0)
+/*! \brief Wrapper around ::static_allocated for global addresses. */
+#define global_allocated(_addr, _size) static_allocated(_addr, _size, 1)
+
+/*! \brief Quick test to check if a static location belongs to allocation.
+ * This macro really belongs where static_allocated is defined, but
+ * since it is used across this whole file it needs to be defined here. */
+#define static_allocated_one(addr, global) \
+  ((int)SHORT_SHADOW(addr, global))
+
+/* Runtime assertions */
+#define VALIDATE_ALIGNMENT(_addr) \
+  vassert(((uintptr_t)_addr) % HEAP_SEGMENT == 0,  \
+      "Heap base address %a is unaligned", _addr)
+
+/* Debug-level macros for validating memory accesses and allocation */
+#ifdef E_ACSL_DEBUG
+#  define DVALIDATE_SHADOW_LAYOUT \
+    DVASSERT(pgm_layout.initialized != 0, "Un-initialized shadow layout", NULL)
+
+#  define DVALIDATE_HEAP_ACCESS(_addr, _size) \
+     DVALIDATE_SHADOW_LAYOUT; \
+     DVASSERT(IS_ON_HEAP((uintptr_t)_addr), \
+       "Expected heap location: %a\n   ", _addr); \
+     DVASSERT(heap_allocated((uintptr_t)_addr, _size), \
+       "Operation on unallocated heap block [%a + %lu]\n   ",  _addr, _size)
+
+#  define segstr(_global)  ((_global) ? "global" : "stack")
+#  define DVALIDATE_STATIC_ACCESS(_addr, _size, _global) \
+     DVALIDATE_SHADOW_LAYOUT; \
+     DVASSERT(IS_ON_STATIC((uintptr_t)_addr, _global), \
+       "Expected %s location: %a\n   ", segstr(_global), _addr); \
+     DVASSERT(static_allocated((uintptr_t)_addr, _size, _global), \
+       "Operation on unallocated %s block [%a + %lu]\n   ", \
+        segstr(_global),  _addr, _size)
+
+#  define DVALIDATE_STATIC_LOCATION(_addr, _global) \
+     DVALIDATE_SHADOW_LAYOUT; \
+     DVASSERT(IS_ON_STATIC((uintptr_t)_addr, _global), \
+       "Expected %s location: %a\n   ", segstr(_global), _addr); \
+     DVASSERT(static_allocated_one((uintptr_t)_addr, _global), \
+       "Operation on unallocated %s block [%a]\n   ", segstr(_global),  _addr)
+#else
+/*! \cond exclude from doxygen */
+#  define DVALIDATE_SHADOW_LAYOUT
+#  define DVALIDATE_HEAP_ACCESS
+#  define DVALIDATE_STATIC_ACCESS
+#  define DVALIDATE_STATIC_LOCATION
+/*! \endcond */
+#endif
+
+/*! \brief Shortcut for executing statements based on the segment a given
+ * address belongs to.
+ * \param intptr_t _addr - a memory address
+ * \param code_block _heap_stmt - code executed if `_addr` is a heap address
+ * \param code_block _stack_stmt - code executed if `_addr` is a stack address
+ * \param code_block _glob_stmt - code executed if `_addr` is a global address */
+#define TRY_SEGMENT_WEAK(_addr, _heap_stmt, _stack_stmt, _glob_stmt)  \
+  if (IS_ON_HEAP(_addr)) { \
+    _heap_stmt; \
+  } else if (IS_ON_STACK(_addr)) { \
+    _stack_stmt; \
+  } else if (IS_ON_GLOBAL(_addr)) { \
+    _glob_stmt; \
+  } \
+
+/*! \brief Same as TRY_SEGMENT but performs additional checks aborting the
+ * execution if the given address is `NULL` or does not belong to known
+ * segments. Note that `NULL` also does not belong to any of the tracked
+ * segments but it is treated separately for debugging purposes.
+ *
+ * The \b WEAK notion refers to the behaviour where no action is performed if
+ * the given address does not belong to any of the known segments. */
+#define TRY_SEGMENT(_addr, _heap_stmt, _stack_stmt, _glob_stmt) { \
+  TRY_SEGMENT_WEAK(_addr, _heap_stmt, _stack_stmt, _glob_stmt) \
+  else if (_addr == 0) { \
+    vassert(0, "Unexpected null pointer\n", NULL); \
+  } else { \
+    vassert(0, "Address %a not found in known segments\n", _addr); \
+  } \
+}
+
+/*! \brief Wrapper around ::heap_info and ::static_info functions that
+ * dispatches one of the above functions based on the type of supplied memory
+ * address (`addr`) (static, global or heap). For the case when the supplied
+ * address does not belong to the track segments 0 is returned.
+ *
+ * \param uintptr_t addr - a memory address
+ * \param char p - predicate type. See ::static_info for further details. */
+static uintptr_t predicate(uintptr_t addr, char p) {
+  TRY_SEGMENT(
+    addr,
+    return heap_info((uintptr_t)addr, p),
+    return static_info((uintptr_t)addr, p, 0),
+    return static_info((uintptr_t)addr, p, 1));
+  return 0;
+}
+
+/*! \brief Return a byte length of a memory block address `_addr` belongs to
+ * \param uintptr_t _addr - a memory address */
+#define block_length(_addr) predicate((uintptr_t)_addr, 'L')
+/*! \brief Return a base address of a memory block address `_addr` belongs to
+ * \param uintptr_t _addr - a memory address */
+#define base_addr(_addr)    predicate((uintptr_t)_addr, 'B')
+/*! \brief Return a byte offset of a memory address given by `_addr` within
+ * its block
+ * \param uintptr_t _addr - a memory address */
+#define offset(_addr)       predicate((uintptr_t)_addr, 'O')
+/* }}} */
+
+/* Static allocation {{{ */
+
+/** The below numbers identify offset "bases" for short block lengths.
+ * An offset base is a number (a code) that represents the length of a
+ * short block with a byte offset of `0`.
+ * For instance, for a block of 4 bytes its offset base if 7, that is
+ *  length 4, offset 0 => 7,
+ *  length 4, offset 1 => 8,
+ *  length 4, offset 2 => 9,
+ *  length 4, offset 3 => 10,
+ * and then for a block of 5 bytes its base offset if 11 etc. */
+static const char short_offsets_base [] = { 0, 1, 2, 4, 7, 11, 16, 22, 29 };
+
+/*! \brief Static (stack or global) memory-allocation. Record allocation of a
+ * given memory block and update shadow regions.
+ *
+ * \param ptr - pointer to a base memory address of the stack memory block.
+ * \param size - size of the stack memory block.
+ * \param global - if evaluates to a non-zero value then global region is used,
+ * otherwise program stack is used. */
+static void shadow_alloca(void *ptr, size_t size, int global) {
+  DVALIDATE_SHADOW_LAYOUT;
+
+  unsigned char *short_shadowed =
+    (unsigned char*)SHORT_SHADOW((uintptr_t)ptr, global);
+  uint64_t *short_shadowed_alt =
+    (uint64_t *)SHORT_SHADOW((uintptr_t)ptr, global);
+  unsigned int *long_shadowed =
+    (unsigned int*)LONG_SHADOW((uintptr_t)ptr, global);
+
+  /* Flip read-only bit for zero-size blocks. That is, physically it exists
+   * but one cannot write to it. Further, the flipped read-only bit will also
+   * identify such block as allocated */
+  if (!size)
+    setbit(READONLY_BIT, short_shadowed[0]);
+
+  unsigned int i, j = 0, k = 0;
+  if (IS_LONG_BLOCK(size)) { /* Long blocks */
+    int boundary = LONG_BLOCK_BOUNDARY(size);
+    for (i = 0; i < boundary; i += LONG_BLOCK) {
+      /* Set values for a long shadow */
+      long_shadowed[j++] = size;
+      long_shadowed[j++] = i;
+      /* Set offsets in the short shadow */
+      short_shadowed_alt[k++] = LONG_BLOCK_MASK;
+    }
+
+    /* Write out the remainder */
+    for (i = boundary; i < size; i++) {
+      unsigned char offset = i%LONG_BLOCK + LONG_BLOCK_INDEX_START + LONG_BLOCK;
+      short_shadowed[i] = (offset << 2);
+    }
+  } else { /* Short blocks */
+    for (i = 0; i < size; i++) {
+      unsigned char code = short_offsets_base[size] + i;
+      short_shadowed[i] = (code << 2);
+    }
+  }
+}
+
+/*! \brief  Wrapper around ::shadow_alloca for stack blocks */
+#define stack_shadow_alloca(_ptr, _size) shadow_alloca(_ptr, _size, 0)
+/*! \brief  Wrapper around ::shadow_alloca for global blocks */
+#define global_shadow_alloca(_ptr, _size) shadow_alloca(_ptr, _size, 1)
+/* }}} */
+
+/* Deletion of static blocks {{{ */
+
+/*! \brief Nullifies shadow regions of a memory block given by its address.
+ * \param ptr - base memory address of the stack memory block.
+ * \param global - if non-zero them `ptr` is assumed to carry a global address
+ *  and a stack address otherwise */
+static void static_shadow_freea(void *ptr, int global) {
+  DVALIDATE_STATIC_LOCATION(ptr, global);
+  DASSERT(ptr == (void*)base_addr(ptr));
+
+  size_t size = block_length(ptr);
+
+  memset((void*)SHORT_SHADOW(ptr, global), 0, size);
+  memset((void*)LONG_SHADOW(ptr, global), 0, size);
+}
+
+/*! \brief Wrapper around ::static_shadow_freea for stack addresses */
+#define global_shadow_freea(_addr) static_shadow_freea(_addr, 1)
+/*! \brief Wrapper around ::static_shadow_freea for global addresses */
+#define stack_shadow_freea(_addr) static_shadow_freea(_addr, 0)
+/* }}} */
+
+/* Static querying {{{ */
+
+/*! \brief Return a non-zero value if a memory region of length `size`
+ * starting at address `addr` belongs to an allocated (tracked) stack or
+ * global memory block and 0 otherwise. Note, this function is only safe if
+ * applied to a stack or global address. */
+static int static_allocated(uintptr_t addr, long size, int global) {
+  DVALIDATE_SHADOW_LAYOUT;
+  unsigned char *short_shadowed = (unsigned char*)SHORT_SHADOW(addr, global);
+  /* Unless the address belongs to tracked allocation 0 is returned */
+  if (short_shadowed[0]) {
+    unsigned int code = (short_shadowed[0] >> 2);
+    unsigned int long_block = (code >= LONG_BLOCK_INDEX_START);
+    size_t length, offset;
+    if (long_block) {
+      offset = code - LONG_BLOCK_INDEX_START;
+      unsigned int *long_shadowed =
+        (unsigned int*)LONG_SHADOW(addr - offset, global) ;
+      length = long_shadowed[0];
+      offset = long_shadowed[1] + offset;
+    } else {
+      offset = short_offsets[code];
+      length = short_lengths[code];
+    }
+    return offset + size <= length;
+  }
+  return 0;
+}
+
+/*! \brief Return a non-zero value if a statically allocated memory block starting
+ * at `addr` of length `size` is fully initialized (i.e., each of its cells
+ * is initialized). If the third argument (`global`) evaluates to a non-zero
+ * value then the memory block is assumed to belong to the global allocation
+ * and to the stack allocation otherwise. A 0  is returned if the given address
+ * does not belong to static allocation */
+static int static_initialized(uintptr_t addr, long size, int global) {
+  /* Return 0 right away if the address does not belong to
+   * static allocation */
+  if (!static_allocated(addr, size, global))
+    return 0;
+  DVALIDATE_STATIC_ACCESS(addr, size, global);
+
+  int result = 1;
+  uint64_t *shadow = (uint64_t*)SHORT_SHADOW(addr, global);
+  while (size > 0) {
+    int rem = (size >= ULONG_BYTES) ? ULONG_BYTES : size;
+    uint64_t mask = static_init_masks[rem];
+    size -= ULONG_BYTES;
+    /* Note that most of the blocks checked for initialization will be smaller
+    * than 64 bits, therefore in most cases it is more efficient to complete
+    * the loop rather than do a test and return if the result is false */
+    result = result && (((*shadow) & mask) == mask);
+    shadow++;
+  }
+  return result;
+}
+
+/*! \brief Wrapper around ::static_initialized for checking initialization of
+ * stack blocks */
+#define stack_initialized(_addr, _size)  static_initialized(_addr, _size, 0)
+/*! \brief Wrapper around ::static_initialized for checking initialization of
+ * global blocks */
+#define global_initialized(_addr, _size) static_initialized(_addr, _size, 1)
+
+/*! \brief Checking whether a globally allocated memory block containing an
+ * address _addr has read-only access. Note, this is light checking that
+ * relies on the fact that a single block cannot contain read/write and
+ * read-only parts, that is to check whether the block has read-only access it
+ * is sufficient to check any of its bytes. */
+#define global_readonly(_addr) \
+  checkbit(READONLY_BIT, (*(char*)SHORT_GLOBAL_SHADOW(addr)))
+
+/*! \brief Querying information about a specific global or stack memory address
+ * (based on the value of parameter `global'). The return value is interpreted
+ * based on the second argument that specifies parameters of the query:
+ *
+ * - 'B' - return the base address of the memory block `addr` belongs to or `0`
+ *     if `addr` lies outside of tracked allocation.
+ * - 'O' - return the offset of `addr` within its memory block or `0`
+ *     if `addr` lies outside of tracked allocation.
+ * - 'L' - return the size in bytes of the memory block `addr` belongs to or `0`
+ *     if `addr` lies outside of tracked allocation.
+ *
+ * NB: One should make sure that a given address if valid before querying.
+ * That is, for the cases when addr does not refer to a valid memory address
+ * belonging to static allocation the return value for this function is
+ * unspecified. */
+static uintptr_t static_info(uintptr_t addr, char type, int global) {
+  DVALIDATE_STATIC_LOCATION(addr, global);
+  unsigned char *short_shadowed = (unsigned char*)SHORT_SHADOW(addr, global);
+
+  /* Unless the address belongs to tracked allocation 0 is returned */
+  if (short_shadowed[0]) {
+    unsigned int code = (short_shadowed[0] >> 2);
+    unsigned int long_block = (code >= LONG_BLOCK_INDEX_START);
+    if (long_block) {
+      unsigned int offset = code - LONG_BLOCK_INDEX_START;
+      unsigned int *long_shadowed =
+        (unsigned int*)LONG_SHADOW(addr - offset, global) ;
+      switch(type) {
+        case 'B': /* Base address */
+          return addr - offset - long_shadowed[1];
+        case 'O': /* Offset */
+          return long_shadowed[1] + offset;
+        case 'L': /* Length */
+          return long_shadowed[0];
+        default:
+          DASSERT(0 && "Unknown static query type");
+      }
+    } else {
+      switch(type) {
+        case 'B': /* Base address */
+          return addr - short_offsets[code];
+        case 'O': /* Offset */
+          return short_offsets[code];
+        case 'L': /* Length */
+          return short_lengths[code];
+        default:
+          DASSERT(0 && "Unknown static query type");
+      }
+    }
+  }
+  return 0;
+}
+
+/*! \brief Wrapper around ::static_info for stack addresses */
+#define stack_info(_addr, _pred)  static_info(_addr, _pred, 0)
+/*! \brief Wrapper around ::static_info for global addresses */
+#define global_info(_addr, _pred)  static_info(_addr, _pred, 1)
+/* }}} */
+
+/* Static initialization {{{ */
+/*! \brief The following function marks n bytes starting from the address
+ * given by addr as initialized. `size` equating to zero indicates that the
+ * whole block should be marked as initialized. Parameter global indicates
+ * the shadow  region addr can be found in, such that 0 indicates stack
+ * allocation and a non-zero value indicates global allocation. */
+static void initialize_static_region(uintptr_t addr, long size, int global) {
+  DVALIDATE_STATIC_ACCESS(addr, size, global);
+  DVASSERT(!(addr - base_addr(addr) + size > block_length(addr)),
+    "Attempt to initialize %lu bytes past block boundaries\n"
+    "starting at %a with block length %lu at base address %a\n",
+    size, addr, block_length(addr), base_addr(addr));
+
+  /* Below code marks `size` bytes following `addr` in the stack shadow as
+   * initialized. That is, least significant bits of all 9 bytes following
+   * `addr` should be flipped to ones. While this is a common pattern in this
+   * program, here are some explanations.
+   *
+   * Here we grab a shadow region and initialize 8 (::ULONG_SIZE) bits at a
+   * time using masks stored in ::static_init_masks. This few lines below are
+   * better explained using an example. Let's say we need to mark 9 bytes as
+   * initialized starting from some address `addr`.
+   *
+   * In order to do that we first grab a shadow region storing it in `shadow`.
+   * For the first 8 bytes we grab a mask stored at ::static_init_masks[8]:
+   *   `10000000 10000000 10000000 10000000 10000000 10000000 10000000 10000000`
+   * That is, `*shadow |= static_init_masks[8]` sets 8 lowest significant bits
+   * of the 8 bytes following *shadow to ones.
+   *
+   * After that we need to mark the remaining 1 bite as initialized. For that
+   * we grab mask ::static_init_masks[1]:
+   *  `10000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000`
+   * That is, `*shadow |= static_init_masks[1]` will set only the least
+   * significant bit in *shadow. */
+
+  uint64_t *shadow = (uint64_t*)SHORT_SHADOW(addr, global);
+  while (size > 0) {
+    int rem = (size >= ULONG_BYTES) ? ULONG_BYTES : size;
+    size -= ULONG_BYTES;
+    *shadow |= static_init_masks[rem];
+    shadow++;
+  }
+}
+
+/*! \brief Wrapper around ::initialize_static_region for stack regions */
+#define initialize_stack_region(_addr, _n) \
+  initialize_static_region(_addr, _n, 0)
+
+/*! \brief Wrapper around ::initialize_static_region for global regions */
+#define initialize_global_region(_addr, _n) \
+  initialize_static_region(_addr, _n, 1)
+
+/* }}} */
+
+/* Read-only {{{ */
+/*! \brief Mark n bytes starting from the address given by `ptr` as initialized.
+ * NOTE: This function has many similarities with ::initialize_static_region
+ * The functionality, however is preferred to be kept separate
+ * because the ::mark_readonly should operate only on the global shadow. */
+static void mark_readonly (uintptr_t addr, long size) {
+  /* Since read-only blocks can only be stored in the globals  segments (e.g.,
+   * TEXT), this function required ptr carry a global address. */
+  DVALIDATE_SHADOW_LAYOUT;
+  DASSERT(IS_ON_GLOBAL(addr));
+  DASSERT(global_allocated(addr, 1));
+  DVASSERT(!(addr - base_addr(addr) + size > block_length(addr)),
+    "Attempt to mark read-only %lu bytes past block boundaries\n"
+    "starting at %a with block length %lu at base address %a\n",
+    size, addr, block_length(addr), base_addr(addr));
+
+  /* See comments in ::initialize_static_region for details */
+  uint64_t *shadow = (uint64_t*)SHORT_GLOBAL_SHADOW(addr);
+  while (size > 0) {
+    int rem = (size >= ULONG_BYTES) ? ULONG_BYTES : size;
+    size -= ULONG_BYTES;
+    *shadow |= static_readonly_masks[rem];
+    shadow++;
+  }
+}
+/* }}} */
+
+/* Track program arguments (ARGC/ARGV) {{{ */
+static void argv_alloca(int argc,  char ** argv) {
+  /* Track a top-level container (i.e., `*argv`) */
+  stack_shadow_alloca(argv, (argc + 1)*sizeof(char*));
+  initialize_stack_region((uintptr_t)argv, (argc + 1)*sizeof(char*));
+  /* Track argument strings */
+  while (*argv) {
+    size_t arglen = strlen(*argv) + 1;
+    stack_shadow_alloca(*argv, arglen);
+    initialize_stack_region((uintptr_t)*argv, arglen);
+    argv++;
+  }
+}
+/* }}} */
+
+/* Heap allocation {{{ (malloc/calloc) */
+
+/*! \brief Amount of heap memory allocated by the program.
+ * Variable visible externally. */
+static size_t heap_size = 0;
+
+/*! \brief Create a heap shadow for an allocated memory block starting at `ptr`
+ * and of length `size`. Optionally mark it as initialized if `init`
+ * evaluates to a non-zero value.
+ * \b WARNING: Current implementation assumes that the size of a heap segment
+ * does not exceed 64 bytes. */
+static void set_heap_segment(void *ptr, size_t size, size_t init) {
+  if (!ptr)
+    return;
+
+  VALIDATE_ALIGNMENT(ptr); /* Make sure alignment is right */
+  heap_size += size; /* Adjuct tracked allocation size */
+
+  /* Get aligned size of the block, i.e., an actual size of the
+   * allocated block */
+  size_t aligned_size = ALIGNED_SIZE(size);
+  unsigned char *shadowed = (unsigned char*)HEAP_SHADOW(ptr);
+  uintptr_t *shadow_meta = (uintptr_t*)(shadowed - HEAP_SEGMENT);
+
+  /* Write the actual length to the meta-segment. First 8 (or 4 in a 32-bit
+   * system) bytes of the segment are nullified to indicate that this segment
+   * does not correspond to an allocated memory block. The following block (at
+   * index 1) captures the size of the segment in bytes. */
+  shadow_meta[0] = ZERO;
+  shadow_meta[1] = size;
+
+  /* The overall number of block segments in a tracked memory block  */
+  size_t block_segments = aligned_size/HEAP_SEGMENT;
+  uintptr_t *block_segment = NULL;
+  int i;
+
+  /* Write the offsets per segment */
+  for (i = 0; i < block_segments; i++) {
+    block_segment = (uintptr_t*)(shadowed + i*HEAP_SEGMENT);
+    /* Write down the segment offset to the first 8 (or 4) bytes of the segment.
+     * Here it is necessary to increment the offset by one. If there is no
+     * increment, then the offset of the first segment is 0, which is the same
+     * as for a segment that does not belong to a program's allocation. */
+    *block_segment = i*HEAP_SEGMENT + 1;
+  }
+
+  /* If init is a non-zero value then mark all allocated bytes as initialized */
+  if (init) {
+    for (i = 0; i < block_segments; i++) {
+      block_segment = (uintptr_t*)(shadowed + i*HEAP_SEGMENT);
+      block_segment[1] |= heap_init_mask;
+    }
+    /* If the last segment is partially allocated, nullify the unallocated
+     * bits back. */
+    block_segment[1] &= ~(ONE << size%HEAP_SEGMENT);
+  }
+}
+
+/*! \brief Replacement for a malloc function that additionally tracks the
+ * allocated memory block.
+ *
+ * NOTE: This malloc returns a `NULL` pointer if the requested size is `0`.
+ * Such behaviour is compliant with the C99 standard, however it differs from
+ * the behaviour of the GLIBC malloc, which returns a zero-size block instead.
+ * The standard indicates that a return value for a zero-sized allocation
+ * is implementation specific:
+ *    "If the size of the space requested is zero, the behaviour is
+ *    implementation-defined: either a null pointer is returned, or the
+ *    behaviour is as if the size were some non-zero value, except that the
+ *    returned pointer shall not be used to access an object." */
+static void* shadow_malloc(size_t size) {
+  size_t alloc_size = ALLOC_SIZE(size);
+  /* Return NULL if the size is too large to be aligned */
+  char* res = alloc_size ? (char*)native_malloc(alloc_size) : NULL;
+
+  if (res) {
+    res += HEAP_SEGMENT;
+    set_heap_segment(res, size, 0);
+  }
+
+  return res;
+}
+
+/*! \brief  Replacement for `calloc` that enables memory tracking */
+static void* shadow_calloc(size_t nmemb, size_t size) {
+  /* Since both `nmemb` and `size` are both of size `size_t` the multiplication
+   * of the arguments (which gives the actual allocation size) might lead to an
+   * integer overflow. The below code checks for an overflow and sets the
+   * `alloc_size` (agrument a memory allocation function) to zero. */
+  size = (size && nmemb > SIZE_MAX/size) ? 0 : nmemb*size;
+
+  /* Since aligned size is required by the model do the allocation through
+   * `malloc` and nullify the memory space by hand */
+  char* res = size ? (char*)native_malloc(ALLOC_SIZE(size)) : NULL;
+
+  if (res) {
+    res += HEAP_SEGMENT;
+    memset(res, 0, size);
+    set_heap_segment(res, size, 1);
+  }
+
+  return res;
+}
+/* }}} */
+
+/* Heap deallocation (free) {{{ */
+static void shadow_free(void *res) {
+  char *ptr = (char *)res;
+  DVALIDATE_SHADOW_LAYOUT;
+  if (ptr != NULL) { /* NULL is a valid behaviour */
+    if (freeable(ptr)) {
+      size_t size = block_length(ptr);
+      void *meta_shadow = (void*)(HEAP_SHADOW(ptr) - HEAP_SEGMENT);
+      memset(meta_shadow, 0, HEAP_SHADOW_BLOCK_SIZE(size));
+      native_free(ptr - HEAP_SEGMENT);
+      /* Adjuct tracked allocation size carried via `__e_acsl_heap_size` */
+      heap_size -= size;
+    } else {
+      vabort("Not a start of block (%a) in free\n", ptr);
+    }
+  }
+}
+/* }}} */
+
+/* Heap reallocation (realloc) {{{ */
+static void* shadow_realloc(void *p, size_t size) {
+  DVALIDATE_SHADOW_LAYOUT;
+  char *ptr = (char*)p;
+  char *res = NULL; /* Resulting pointer */
+  /* If the pointer is NULL then realloc is equivalent to malloc(size) */
+  if (ptr == NULL)
+    return shadow_malloc(size);
+  /* If the pointer is not NULL and the size is zero then realloc is
+   * equivalent to free(ptr) */
+  else if (ptr != NULL && size == 0) {
+    shadow_free(ptr);
+  } else {
+    if (freeable(ptr)) { /* ... and valid for free  */
+      size_t alloc_size = ALLOC_SIZE(size);
+      res = native_realloc(ptr - HEAP_SEGMENT, alloc_size);
+      VALIDATE_ALIGNMENT(res);
+
+      /* realloc succeeds, otherwise nothing needs to be done */
+      if (res != NULL) {
+        res += HEAP_SEGMENT;
+        /* Tracked size of the old allocation */
+        size_t old_size = block_length((uintptr_t)ptr);
+        /* Number of block segments in the old allocation */
+        size_t old_segments = BLOCK_SEGMENTS(old_size);
+        /* Number of block segments in the new allocation */
+        size_t new_segments = BLOCK_SEGMENTS(size);
+        /* Adjuct tracked allocation size */
+        heap_size += -old_size + size;
+
+        /* Address of the meta-block in old allocation */
+        uintptr_t *meta_shadow = (uintptr_t*)(HEAP_SHADOW(ptr) - HEAP_SEGMENT);
+
+        /* The case when realloc displaces the base address of a pointer */
+        if (ptr != res) {
+          /* Address of the new shadow meta-block */
+          uintptr_t *new_meta_shadow =
+            (uintptr_t*)(HEAP_SHADOW(res) - HEAP_SEGMENT);
+          size_t inflen = HEAP_SEGMENT*(old_segments + 1);
+          /* Copy information over to the new shadow ... */
+          memcpy(new_meta_shadow, meta_shadow, inflen);
+          /* ... and nuke the old shadow */
+          memset(meta_shadow, 0, inflen);
+          meta_shadow = new_meta_shadow;
+        }
+
+        /* Set new size */
+        meta_shadow[1] = size;
+
+        /* Realloc truncates allocation, need to truncate the shadow as well:
+         *  1. Get the last segment in the new allocation and clear all init
+         *    bits from the old allocation that do not belong in the new.
+         *  2. Nullify segments tracking old allocation but which do not
+         *    belong to the new one. These will be past the last segment
+         *    (as above). */
+        if (old_segments > new_segments) {
+          /* Last segment in the new allocation and the number of bits to
+           * clear out */
+          size_t last_index = new_segments - 1;
+          size_t clear_bits = HEAP_SEGMENT - (size - last_index*HEAP_SEGMENT);
+
+          /* Reset initialization in shadow past the new allocation (1). */
+          void *seg_last = (void*)
+            (HEAP_SHADOW(res) +
+            last_index*HEAP_SEGMENT + INIT_BYTES + PTR_SZ);
+          clearbits_right(seg_last, clear_bits);
+
+          /* Clear out remaining shadow data from old allocation that is
+           * not used in the new allocation (2) */
+          int diff = old_segments - new_segments;
+          char *seg_next = (char*)(HEAP_SHADOW(res)
+            + (last_index+1)*HEAP_SEGMENT);
+          memset(seg_next, 0, diff*HEAP_SEGMENT);
+       /* Realloc increases allocation, only segment indices need to be added */
+        } else if (old_segments < new_segments) {
+          /* Just add indices to shadow. Realloc does not initialize
+           * allocated data past the old allocation. */
+          int i;
+          for (i = old_segments; i < new_segments; i++) {
+            uintptr_t *seg_next = (uintptr_t*)(HEAP_SHADOW(res)
+              + i*HEAP_SEGMENT);
+            seg_next[0] = HEAP_SEGMENT*i + 1;
+          }
+        }
+      }
+    } else {
+      vabort("Not a start of block (%a) in realloc\n", ptr);
+    }
+  }
+  return res;
+}
+/* }}} */
+
+/* Heap aligned allocation (aligned_alloc) {{{ */
+/*! \brief Replacement for `aligned_alloc` with memory tracking */
+static void *shadow_aligned_alloc(size_t alignment, size_t size) {
+  /* Check if:
+   *  - size and alignment are greater than zero
+   *  - alignment is a power of 2
+   *  - size is a multiple of alignment */
+  if (!size || !alignment || !powof2(alignment) || (size%alignment))
+    return NULL;
+
+  char *res = native_aligned_alloc(alignment, size);
+  if (res) {
+    res += HEAP_SEGMENT;
+    set_heap_segment(res, size, 0);
+  }
+  return (void*)res;
+}
+/* }}} */
+
+/* Heap aligned allocation (posix_memalign) {{{ */
+/*! \brief Replacement for `posix_memalign` with memory tracking */
+static int shadow_posix_memalign(void **memptr, size_t alignment, size_t size) {
+ /* Check if:
+   *  - size and alignment are greater than zero
+   *  - alignment is a power of 2 and a multiple of sizeof(void*) */
+  if (!size || !alignment || !powof2(alignment) || alignment%sizeof(void*))
+    return -1;
+
+  /* Make sure that the first argument to posix memalign is indeed allocated */
+  vassert(valid(memptr, sizeof(void*)),
+      "\\invalid memptr in  posix_memalign", NULL);
+
+  int res = native_posix_memalign(memptr, alignment, size);
+  if (!res) {
+    *memptr += HEAP_SEGMENT;
+    set_heap_segment(*memptr, size, 0);
+  }
+  return res;
+}
+/* }}} */
+
+/* Heap querying {{{ */
+/*! \brief Return a non-zero value if a memory region of length `size`
+ * starting at address `addr` belongs to an allocated (tracked) heap memory
+ * block and a 0 otherwise. Note, this function is only safe if applied to a
+ * heap address. */
+static int heap_allocated(uintptr_t addr, size_t size) {
+  DVALIDATE_SHADOW_LAYOUT;
+  /* Offset within the segment */
+  size_t block_offset = addr%HEAP_SEGMENT;
+  /* Address of the shadow segment the address belongs to */
+  uintptr_t *aligned_shadow =
+    (uintptr_t*)HEAP_SHADOW(addr - block_offset);
+
+  /* Non-zero if the segment belongs to heap allocation */
+  if (aligned_shadow[0]) {
+    /* Offset stored by the segment (real offset from base incremented by 1) */
+    size_t segment_offset = aligned_shadow[0];
+    /* Base address */
+    uintptr_t base_addr = addr - block_offset - segment_offset + 1;
+    /* Pointer to the meta-segment */
+    uintptr_t *meta_segment =
+      (uintptr_t*)HEAP_SHADOW(base_addr - HEAP_SEGMENT);
+    /* Size of the block stored by meta-segment */
+    size_t length = meta_segment[1];
+    /* Offset of a given address within its block */
+    size_t offset = segment_offset - 1 + block_offset;
+    return offset + size <= length;
+  }
+  return 0;
+}
+
+/*! \brief  Return a non-zero value if a given address is a base address of a
+ * heap-allocated memory block that `addr` belongs to.
+ *
+ * As some of the other functions, \b \\freeable can be expressed using
+ * ::IS_ON_HEAP, ::heap_allocated and ::base_addr. Here direct
+ * implementation is preferred for performance reasons. */
+static int freeable(void *ptr) {
+  uintptr_t addr = (uintptr_t)ptr;
+  DVALIDATE_SHADOW_LAYOUT;
+  if (!IS_ON_HEAP(addr))
+    return 0;
+  /* Offset within the segment */
+  size_t block_offset = addr%HEAP_SEGMENT;
+  /* Address of the shadow segment the address belongs to */
+  uintptr_t *aligned_shadow = (uintptr_t*)HEAP_SHADOW(addr - block_offset);
+  /* Non-zero if the segment belongs to heap allocation */
+  if (aligned_shadow[0])
+    return addr == addr - block_offset - aligned_shadow[0] + 1;
+  return 0;
+}
+
+/*! \brief Implementation of the \b \\initialized predicate for heap-allocated
+ * memory. NB: If `addr` does not belong to a valid heap region this function
+ * returns 0. */
+static int heap_initialized(uintptr_t addr, long len) {
+  DVALIDATE_SHADOW_LAYOUT;
+  /* Unallocated heap address */
+  if (!heap_allocated(addr, len))
+    return 0;
+
+  /* Do validate a block in debug mode otherwise */
+  DVALIDATE_HEAP_ACCESS(addr, len);
+  int result = 1;
+
+  /* Offset within the segment. Also the number of bits
+   * that needs to be skipped when evaluating init */
+  int skipbits = addr%HEAP_SEGMENT;
+
+  /* Base address of a shadow segment ptr belongs to */
+  uintptr_t shadow_init = (uintptr_t)(HEAP_SHADOW(addr) + PTR_SZ - skipbits);
+
+  int rem = HEAP_SEGMENT - skipbits;
+  /* The number of bits that need to be checked in the `first' segment */
+  int setbits = (len >= rem) ? rem : len;
+
+  len -= setbits;
+
+  /* Bit-mask for setting values. Tested bits are set to ones, the rest
+   * are zeroes. */
+  uint64_t mask = (ONE >> (ULONG_BITS - setbits)) << skipbits;
+
+  uint64_t *init = (uint64_t*)shadow_init;
+  result = result && ((mask & *init) == mask);
+  shadow_init += HEAP_SEGMENT;
+
+  while (len > 0) {
+    /* Recompute the number of bits to be set */
+    setbits = (len > HEAP_SEGMENT) ? HEAP_SEGMENT : len;
+    /* Reset the mask */
+    mask = ONE >> (ULONG_BITS - setbits);
+    /* Current position of shadow initialization */
+    init = (uint64_t*)shadow_init;
+
+    result = result && ((mask & *init) == mask);
+    len -= setbits;
+    shadow_init += HEAP_SEGMENT;
+  }
+  return result;
+}
+
+/*! \brief Querying information about a specific heap memory address.
+ * This function is similar to ::static_info except it returns data
+ * associated with heap-allocated memory.
+ * See in-line documentation for ::static_info for further details. */
+static uintptr_t heap_info(uintptr_t addr, char type) {
+  DVALIDATE_HEAP_ACCESS(addr, 1);
+  /* Offset within the segment */
+  size_t block_offset = addr%HEAP_SEGMENT;
+  /* Address of the shadow segment the address belongs to */
+  uintptr_t *aligned_shadow =
+    (uintptr_t*)HEAP_SHADOW(addr - block_offset);
+  /* Offset stored by the segment (real offset from base incremented by 1) */
+  size_t segment_offset = aligned_shadow[0];
+  /* Base address */
+  uintptr_t base_addr = addr - block_offset - segment_offset + 1;
+
+  switch(type) {
+    case 'B': /* Base address */
+      return base_addr;
+    case 'L': { /* Block length */
+      /* Pointer to meta-segment */
+      uintptr_t *meta_segment =
+        (uintptr_t*)HEAP_SHADOW(base_addr - HEAP_SEGMENT);
+      /* Size of the block stored by meta-segment */
+      return meta_segment[1];
+    }
+    case 'O':
+      /* Offset of a given address within its block */
+      return segment_offset - 1 + block_offset;
+    default:
+      DASSERT(0 && "Unknown heap query type");
+  }
+  return 0;
+}
+/* }}} */
+
+/* Heap initialization {{{ */
+/*! \brief Mark n bytes on the heap starting from address addr as initialized */
+static void initialize_heap_region(uintptr_t addr, long len) {
+  DVALIDATE_HEAP_ACCESS(addr, len);
+  DVASSERT(!(addr - base_addr(addr) + len > block_length(addr)),
+    "Attempt to initialize %lu bytes past block boundaries\n"
+    "starting at %a with block length %lu at base address %a\n",
+    len, addr, block_length(addr), base_addr(addr));
+
+  /* Offset within the segment. Also the number of bits
+   * that needs to be skipped when evaluating init */
+  int skipbits = (addr)%HEAP_SEGMENT;
+
+  /* Base address of a shadow segment addr belongs to */
+  uintptr_t shadow_init = (uintptr_t)(HEAP_SHADOW(addr) + PTR_SZ - skipbits);
+
+  int rem = HEAP_SEGMENT - skipbits;
+  /* The number of bits that need to be checked in the `first' segment */
+  int setbits = (len >= rem) ? rem : len;
+
+  len -= setbits;
+
+  /* Bit-mask for setting values. Bits that are tested are set to ones, the
+   * rest are zeroes */
+  uint64_t mask = (ONE >> (ULONG_BITS - setbits)) << skipbits;
+
+  *(uint64_t*)shadow_init = *(uint64_t*)shadow_init | mask;
+  shadow_init += HEAP_SEGMENT;
+
+  while (len > 0) {
+    /* Recompute the number of bits to be set */
+    setbits = (len > HEAP_SEGMENT) ? HEAP_SEGMENT : len;
+    /* Reset the mask */
+    mask = ONE >> (ULONG_BITS - setbits);
+    *(uint64_t*)shadow_init = *(uint64_t*)shadow_init | mask;
+    len -= setbits;
+    shadow_init += HEAP_SEGMENT;
+  }
+}
+/* }}} */
+
+/* Any initialization {{{ */
+/*! \brief Initialize a chunk of memory given by its start address (`addr`)
+ * and byte length (`n`). */
+static void initialize(void *ptr, size_t n) {
+  DVALIDATE_SHADOW_LAYOUT;
+  TRY_SEGMENT(
+    (uintptr_t)ptr,
+    initialize_heap_region((uintptr_t)ptr, n),
+    initialize_stack_region((uintptr_t)ptr, n),
+    initialize_global_region((uintptr_t)ptr, n)
+  )
+}
+/* }}} */
+
+/* Internal state print {{{ */
+#ifdef E_ACSL_DEBUG
+/* ! \brief Print human-readable representation of a byte in a short (byte)
+ * shadow */
+static void printbyte(unsigned char c, char buf[]) {
+  if (c >> 2 < LONG_BLOCK_INDEX_START) {
+    sprintf(buf, "SHORT: I{%u} RO{%u} OF{%2u} => %u[%u]",
+      checkbit(INIT_BIT,c), checkbit(READONLY_BIT,c), c >> 2,
+      short_lengths[c >> 2], short_offsets[c >> 2]);
+  } else {
+    sprintf(buf, "LONG:  I{%u} RO{%u} OF{%u} => %4u",
+      checkbit(INIT_BIT,c), checkbit(READONLY_BIT,c),
+      (c >> 2), (c >> 2) - LONG_BLOCK_INDEX_START);
+  }
+}
+
+/*! \brief Print human-readable (well, ish) representation of a memory block
+ * using short (byte) and long (block) shadow regions */
+static void print_static_shadows(uintptr_t addr, size_t size, int global) {
+  char short_buf[256];
+  char long_buf[256];
+
+  unsigned char *short_shadowed = (unsigned char*)SHORT_SHADOW(addr, global);
+  unsigned int *long_shadowed = (unsigned int*)LONG_SHADOW(addr, global);
+
+  int i, j = 0;
+  for (i = 0; i < size; i++) {
+    long_buf[0] = '\0';
+    printbyte(short_shadowed[i], short_buf);
+    if (IS_LONG_BLOCK(size) && (i%LONG_BLOCK) == 0) {
+      j += 2;
+      if (i < LONG_BLOCK_BOUNDARY(size)) {
+        sprintf(long_buf, " %a  SZ{%u} OF{%u}",
+          &long_shadowed[j], long_shadowed[j-2], long_shadowed[j-1]);
+      }
+      if (i) {
+        DLOG("---------------------------------------------\n");
+      }
+    }
+    DLOG("| [%2d] %a | %s || %s\n", i, &short_shadowed[i], short_buf, long_buf);
+  }
+}
+
+#define print_stack_shadows(addr, size) print_static_shadows(addr, size, 0)
+#define print_global_shadows(addr, size) print_static_shadows(addr, size, 1)
+
+/*! \brief Print human-readable representation of a heap shadow region for a
+ * memory block of length `size` starting at address `addr`.  */
+static void print_heap_shadows(uintptr_t addr, size_t size) {
+  unsigned char *shadowed = (unsigned char*)HEAP_SHADOW(addr);
+  uintptr_t *shadow_meta = (uintptr_t*)(shadowed - HEAP_SEGMENT);
+
+  size_t block_segments = (ALIGNED_SIZE(size))/HEAP_SEGMENT;
+  DLOG("Meta: %3lu", shadow_meta[1]);
+
+  size_t i;
+  for (i = 0; i < block_segments; i++) {
+    uintptr_t *block_segment = (uintptr_t*)(shadowed + i*HEAP_SEGMENT);
+    DLOG("%s | Block %2lu, Offset %3lu, Init: %"
+      TOSTRING(HEAP_SEGMENT) "b\t\t || %a \n", i ? "         "  : "", i,
+      block_segment[0], block_segment[1],  block_segment);
+  }
+  DLOG("\n");
+}
+
+static void print_shadows(uintptr_t addr, size_t size) {
+  if (IS_ON_STACK(addr))
+    print_stack_shadows(addr, size);
+  if (IS_ON_GLOBAL(addr))
+    print_global_shadows(addr, size);
+  else if (IS_ON_HEAP(addr))
+    print_heap_shadows(addr, size);
+}
+
+static void print_pgm_layout() {
+  DLOG("| --- Stack ------------------------------------\n");
+  DLOG("|Stack Shadow Size:          %16lu MB\n",
+    MB_SZ(pgm_layout.stack_shadow_size));
+  DLOG("|Stack Start:                %19lu\n", pgm_layout.stack_start);
+  DLOG("|Stack End:                  %19lu\n", pgm_layout.stack_end);
+  DLOG("|Stack Soft Bound:           %19lu\n", pgm_layout.stack_soft_bound);
+  DLOG("| --- Short Stack Shadow -----------------------\n");
+  DLOG("|Short Stack Shadow Offset:  %19lu\n",
+    pgm_layout.short_stack_shadow_offset);
+  DLOG("|Short Stack Shadow Start:   %19lu\n",
+    pgm_layout.short_stack_shadow_start);
+  DLOG("|Short Stack Shadow End:     %19lu\n",
+    pgm_layout.short_stack_shadow_end);
+  DLOG("| --- Long Stack Shadow ------------------------\n");
+  DLOG("|Long Stack Shadow Offset:   %19lu\n",
+    pgm_layout.long_stack_shadow_offset);
+  DLOG("|Long Stack Shadow Start:    %19lu\n",
+    pgm_layout.long_stack_shadow_start);
+  DLOG("|Long Stack Shadow End:      %19lu\n",
+    pgm_layout.long_stack_shadow_end);
+  DLOG("| --- Heap -------------------------------------\n");
+  DLOG("|Heap Shadow Size:           %16lu MB\n",
+    MB_SZ(pgm_layout.heap_shadow_size));
+  DLOG("|Heap Shadow Offset:         %19lu\n", pgm_layout.heap_shadow_offset);
+  DLOG("|Heap Start:                 %19lu\n", pgm_layout.heap_start);
+  DLOG("|Heap End:                   %19lu\n", pgm_layout.heap_end);
+  DLOG("|Heap Shadow Start:          %19lu\n", pgm_layout.heap_shadow_start);
+  DLOG("|Heap Shadow End:            %19lu\n", pgm_layout.heap_shadow_end);
+  DLOG("| --- Global -----------------------------------\n");
+  DLOG("|Global Shadow Size:         %16lu MB\n",
+    MB_SZ(pgm_layout.global_shadow_size));
+  DLOG("|Global Start:               %19lu\n", pgm_layout.global_start);
+  DLOG("|Global End:                 %19lu\n", pgm_layout.global_end);
+  DLOG("| --- Short Global Shadow ----------------------\n");
+  DLOG("|Short Global Shadow Offset: %19lu\n",
+    pgm_layout.short_global_shadow_offset);
+  DLOG("|Short Global Shadow Start:  %19lu\n",
+    pgm_layout.short_global_shadow_start);
+  DLOG("|Short Global Shadow End:    %19lu\n",
+    pgm_layout.short_global_shadow_end);
+  DLOG("| --- Long Global Shadow -----------------------\n");
+  DLOG("|Long Global Shadow Offset:  %19lu\n",
+    pgm_layout.long_global_shadow_offset);
+  DLOG("|Long Global Shadow Start:   %19lu\n",
+    pgm_layout.long_global_shadow_start);
+  DLOG("|Long Global Shadow End:     %19lu\n",
+    pgm_layout.long_global_shadow_end);
+  DLOG("------------------------------------------------\n");
+}
+
+/*! \brief Output the shadow segment the address belongs to */
+static void which_segment(uintptr_t addr) {
+  char *loc = NULL;
+  if (IS_ON_STACK(addr))
+    loc = "stack";
+  else if (IS_ON_HEAP(addr))
+    loc = "heap";
+  else if (IS_ON_GLOBAL(addr))
+    loc = "global";
+  else
+    loc = "untracked";
+  DLOG("Address: %a -> %s\n", addr, loc);
+}
+
+/* NOTE: Above functions are designed to be used only through the following
+ * macros or debug functions included/defined based on the value of the
+ * E_ACSL_DEBUG macro. */
+
+/*! \brief Print program layout. This function outputs start/end addresses of
+ * various program segments, their shadow counterparts and sizes of shadow
+ * regions used. */
+#define D_LAYOUT print_pgm_layout()
+void ___e_acsl_d_layout() { D_LAYOUT; }
+
+/*! \brief Print the shadow segment address addr belongs to */
+#define D_LOC(_addr) which_segment(_addr)
+void ___e_acsl_d_loc(uintptr_t addr) { D_LOC(addr); }
+
+/*! \brief Print human-readable representation of a shadow region corresponding
+ * to a memory address addr. The second argument (size) if the size of the
+ * shadow region to be printed. Normally addr argument is a base address of a
+ * memory block and size is its length. */
+#define D_SHADOW(addr, size) print_shadows((uintptr_t)addr, (size_t)size)
+void ___e_acsl_d_shadow(uintptr_t addr, size_t size) { D_SHADOW(addr, size); }
+
+#else
+/* \cond exclude from doxygen */
+#define D_SHADOW(addr, size)
+#define D_INFO(addr)
+#define D_LAYOUT
+/* \endcond */
+#endif
+/* }}} */
diff --git a/src/plugins/e-acsl/share/e-acsl/segment_model/e_acsl_shadow_layout.h b/src/plugins/e-acsl/share/e-acsl/segment_model/e_acsl_shadow_layout.h
new file mode 100644
index 00000000000..aab73d833c9
--- /dev/null
+++ b/src/plugins/e-acsl/share/e-acsl/segment_model/e_acsl_shadow_layout.h
@@ -0,0 +1,400 @@
+/**************************************************************************/
+/*                                                                        */
+/*  This file is part of the Frama-C's E-ACSL plug-in.                    */
+/*                                                                        */
+/*  Copyright (C) 2012-2015                                               */
+/*    CEA (Commissariat à l'énergie atomique et aux énergies              */
+/*         alternatives)                                                  */
+/*                                                                        */
+/*  you can redistribute it and/or modify it under the terms of the GNU   */
+/*  Lesser General Public License as published by the Free Software       */
+/*  Foundation, version 2.1.                                              */
+/*                                                                        */
+/*  It is distributed in the hope that it will be useful,                 */
+/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
+/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
+/*  GNU Lesser General Public License for more details.                   */
+/*                                                                        */
+/*  See the GNU Lesser General Public License version 2.1                 */
+/*  for more details (enclosed in the file license/LGPLv2.1).             */
+/*                                                                        */
+/**************************************************************************/
+
+/*! ***********************************************************************
+ * \file  e_acsl_shadow_layout.h
+ * \brief Setup for memory tracking using shadowing
+***************************************************************************/
+
+/*! \brief The first address past the end of the uninitialized data (BSS)
+ * segment . */
+extern char end;
+
+/* \cond */
+void *sbrk(intptr_t increment);
+char *strerror(int errnum);
+
+/* MAP_ANONYMOUS is a mmap flag indicating that the contents of allocated blocks
+ * should be nullified. Set value from <bits/mman-linux.h>, if MAP_ANONYMOUS is
+ * undefined */
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS 0x20
+#endif
+/* \endcond */
+
+/* Block size units in bytes */
+#define KB (1024) //!< Bytes in a kilobyte
+#define MB (1024*KB) //!< Bytes in a megabyte
+#define GB (1024*MB) //!< Bytes in a gigabyte
+#define KB_SZ(_s) (_s/MB) //!< Convert bytes to kilobytes
+#define MB_SZ(_s) (_s/MB) //!< Convert bytes to megabytes
+#define GB_SZ(_s) (_s/GB) //!< Convert bytes to gigabytes
+
+/*! \brief Size of the heap shadow */
+#define HEAP_SHADOW_SIZE (1024 * MB)
+
+/*! \brief Size of a stack shadow */
+#define STACK_SHADOW_SIZE (72 * MB)
+
+/*! \brief Pointer size (in bytes) for a given system */
+#define PTR_SZ sizeof(uintptr_t)
+
+/*! \brief Bit and byte sizes of a unsigned long type, i.e., the largest
+ * integer type that can be used with bit-level operators.
+ * NB: Some architectures allow 128-bit (i.e., 16 byte integers),
+ * but the availability of such integer types is implementation specific. */
+#define ULONG_BYTES 8
+#define ULONG_BITS 64
+
+/* Program stack information {{{ */
+extern char ** environ;
+
+/*! \brief Return greatest (known) address on a program's stack.
+ * This function presently determines the address using the address of the
+ * last string in `environ`. That is, it assumes that argc and argv are
+ * stored below environ, which holds for GCC/GLIBC but is not necessarily
+ * true. In general, a reliable way of detecting the upper bound of a stack
+ * segment is needed. */
+static uintptr_t get_stack_end(int *argc_ref,  char *** argv_ref) {
+  char **env = environ;
+  while (env[1])
+    env++;
+  uintptr_t addr = (uintptr_t)*env + strlen(*env);
+
+  /* When returning the end stack addess we need to make sure that
+   * ::ULONG_BITS past that address are actually writable. This is
+   * to be able to set initialization and read-only bits ::ULONG_BITS
+   * at a time. If not respected, this may cause a segfault in
+   * ::argv_alloca. */
+  return addr + ULONG_BITS;
+}
+
+/*! \brief Get the stack size (in bytes) as used by the program. The return
+ * value is the soft stack limit, i.e., it can be programmatically increased
+ * at runtime */
+static size_t get_stack_size() {
+  struct rlimit rlim;
+  assert(!getrlimit(RLIMIT_STACK, &rlim));
+  return rlim.rlim_cur;
+}
+
+/*! \brief Set a new stack limit
+ * \param size - new stack size in bytes */
+static void increase_stack_limit(const size_t size) {
+  const rlim_t stacksz = (rlim_t)size;
+  struct rlimit rl;
+  int result = getrlimit(RLIMIT_STACK, &rl);
+  if (result == 0) {
+    if (rl.rlim_cur < stacksz) {
+      rl.rlim_cur = stacksz;
+      result = setrlimit(RLIMIT_STACK, &rl);
+      if (result != 0) {
+        vabort("setrlimit returned result = %d\n", result);
+      }
+    }
+  }
+}
+/* }}} */
+
+/** MMAP allocation {{{ */
+/*! \brief Allocate a memory block of `size` bytes with `mmap` and return a
+ * pointer to its base address. Since this function is used to set-up shadowing
+ * the program is aborted if `mmap` fails to allocate a new memory block. */
+static void *do_mmap(size_t size) {
+  void *res = mmap(0, size, PROT_READ|PROT_WRITE,
+    MAP_ANONYMOUS|MAP_PRIVATE, -1, (size_t)0);
+  if (res == MAP_FAILED)
+      vabort("mmap error: %s\n", strerror(errno));
+  else
+      memset(res, 0, size);
+  return res;
+}
+/* }}} */
+
+/* Program Layout {{{ */
+
+/* Memory segments ************************************************************
+ *  ----------------------------------------> Maximal address
+ *  Kernel Segment
+ *  ---------------------------------------->
+ *  Environment Segment [ argc, argv, env ]
+ * -----------------------------------------> Stack End Address
+ *  Program Stack Segment [ Used stack ]
+ * -----------------------------------------> Stack Start Address
+ *  Unused Stack Segment
+ * -----------------------------------------> Stack Soft Bound Address
+ *  Stack Grow Segment
+ * ----------------------------------------->
+ *  Lib Segment
+ * -----------------------------------------> Stack Shadow Start Address
+ *  Stack Shadow Region
+ * -----------------------------------------> Stack Shadow End Address
+ *  Unused Space Segment
+ * ----------------------------------------->
+ *  Heap Shadow Region
+ * -----------------------------------------> Heap Shadow Start Address
+ * -----------------------------------------> Heap End Address
+ *  Heap
+ * -----------------------------------------> Heap Start Address [Initial Brk]
+ *  BSS Segment  [ Uninitialised Globals ]
+ * ----------------------------------------->
+ *  Data Segment [ Initialised Globals   ]
+ * ----------------------------------------->
+ *  Text Segment [ Constants ]
+ * -----------------------------------------> NULL (minimal address)
+******************************************************************************/
+
+static struct program_layout pgm_layout;
+
+/*! \brief Structure for tracking shadow regions and boundaries of memory
+ * segments. */
+struct program_layout {
+  uintptr_t stack_start; //!< Least address in program stack
+  uintptr_t stack_end; //!< Greatest address in program stack.
+  uintptr_t stack_soft_bound; /*!< \brief Address of the soft stack bound.
+   A program using stack addresses beyond (i.e., less than) the soft bound
+   runs into a stack overflow. */
+  size_t stack_shadow_size; /*!< \brief Size (in bytes) of a stack shadow.
+    Same size if used for short and long shadow regions. */
+
+  uintptr_t short_stack_shadow_start; //!< Least address in short shadow region
+  uintptr_t short_stack_shadow_end; //!< Greatest address in short shadow region
+  uintptr_t short_stack_shadow_offset; /*!< \brief Short shadow offset.
+    The offset is kept as a positive (unsigned) integer
+    relative to the stack shadow region situated below the stack. That is,
+    to get a shadow address from an actual one the offset needs to be
+    subtracted. */
+
+  uintptr_t long_stack_shadow_start; //!< Least address in long shadow region
+  uintptr_t long_stack_shadow_end; //!< Greatest address in long shadow region
+  uintptr_t long_stack_shadow_offset; /*!< \brief Long shadow offset. Similar
+    to #short_stack_shadow_offset */
+
+  uintptr_t heap_start; //!<  Least address in a program's heap.
+  uintptr_t heap_end; //!<  Greatest address in a program's heap.
+
+  uintptr_t heap_shadow_start; /*!< \brief Least address in a program's shadow
+                                 heap region. */
+  uintptr_t heap_shadow_end; /*!< \brief Greatest address in a program's
+                               shadow heap region. */
+  size_t heap_shadow_size; //!< Size (in bytes) of the heap shadow region. */
+  uintptr_t heap_shadow_offset;  /*!< \brief Short shadow offset.
+   * The offset is kept as a positive (unsigned) integer
+   * relative to the heap shadow region situated above the heap. That is,
+   * unlike stack shadow, to get a shadow address from an actual one the
+   * offset needs to be added. */
+
+  /* Same as `stack_...` but for shadowing globals */
+  uintptr_t global_end;
+  uintptr_t global_start;
+  size_t global_shadow_size;
+
+  uintptr_t short_global_shadow_end;
+  uintptr_t short_global_shadow_start;
+  uintptr_t short_global_shadow_offset;
+
+  uintptr_t long_global_shadow_end;
+  uintptr_t long_global_shadow_start;
+  uintptr_t long_global_shadow_offset;
+
+  /*! Carries a non-zero value if shadow layout has been initialized and
+   * evaluates to zero otehrwise. */
+  int initialized;
+};
+
+/*! \brief Setting program layout for analysys, namely:
+ *  - Detect boundaries of different program segments
+ *  - Allocate shadow regions (1 heap region + 2 stack regions)
+ *  - Compute shadow offsets
+ *
+ *  *TODO*: This function should be a part of ::__e_acsl_memory_init visible
+ *  externally */
+static void init_pgm_layout(int *argc_ref, char ***argv_ref) {
+  DLOG("<<< Initialize shadow layout >>>\n");
+  /* Setting up stack. Stack grows downwards starting from the stack end
+   * address (i.e., the address of the stack pointer) at the time of
+   * initialization. */
+  pgm_layout.stack_shadow_size = STACK_SHADOW_SIZE;
+  pgm_layout.stack_end = get_stack_end(argc_ref, argv_ref);
+  pgm_layout.stack_start =
+    pgm_layout.stack_end - pgm_layout.stack_shadow_size;
+  /* Soft bound can be increased programmatically, thus detecting
+   * stack overflows using this stack bound needs to take into account such
+   * changes. Very approximate for the moment,  */
+  pgm_layout.stack_soft_bound = pgm_layout.stack_end - get_stack_size();
+
+  /* Short (byte) stack shadow region */
+  void * short_stack_shadow = do_mmap(pgm_layout.stack_shadow_size);
+  pgm_layout.short_stack_shadow_start = (uintptr_t)short_stack_shadow;
+  pgm_layout.short_stack_shadow_end =
+    (uintptr_t)short_stack_shadow + pgm_layout.stack_shadow_size;
+  pgm_layout.short_stack_shadow_offset =
+    pgm_layout.stack_end - pgm_layout.short_stack_shadow_end;
+
+  /* Long (block) stack shadow region */
+  void *long_stack_shadow  = do_mmap(pgm_layout.stack_shadow_size);
+  pgm_layout.long_stack_shadow_start = (uintptr_t)long_stack_shadow;
+  pgm_layout.long_stack_shadow_end =
+    (uintptr_t)long_stack_shadow + pgm_layout.stack_shadow_size;
+  pgm_layout.long_stack_shadow_offset =
+    pgm_layout.stack_end - pgm_layout.long_stack_shadow_end;
+
+  /* Setting heap. Heap grows upwards starting from the current program break.
+   Alternative approach would be to use the end of BSS but in fact the end of
+   BSS and the current program break are not really close to each other, so
+   some space will be wasted.  Another reason is that sbrk is likely to return
+   an aligned address which makes the access faster. */
+  pgm_layout.heap_start = (uintptr_t)sbrk(0);
+  pgm_layout.heap_shadow_size = HEAP_SHADOW_SIZE;
+  pgm_layout.heap_end = pgm_layout.heap_start + pgm_layout.heap_shadow_size;
+
+  void* heap_shadow = do_mmap(pgm_layout.heap_shadow_size);
+  pgm_layout.heap_shadow_start = (uintptr_t)heap_shadow;
+  pgm_layout.heap_shadow_end =
+    (uintptr_t)heap_shadow + pgm_layout.heap_shadow_size;
+  pgm_layout.heap_shadow_offset =
+    pgm_layout.heap_shadow_start - pgm_layout.heap_start;
+
+  /* Setting shadow for globals */
+  /* In all likelihood it is reasonably safe to assume that first
+   * 2x*pointer-size bytes of the memory space will not be used. */
+  pgm_layout.global_start = PTR_SZ*2;
+  pgm_layout.global_end = (uintptr_t)&end;
+  pgm_layout.global_shadow_size
+    = pgm_layout.global_end - pgm_layout.global_start;
+
+  /* Short (byte) global shadow region */
+  void * short_global_shadow = do_mmap(pgm_layout.global_shadow_size);
+  pgm_layout.short_global_shadow_start = (uintptr_t)short_global_shadow;
+  pgm_layout.short_global_shadow_end =
+    (uintptr_t)short_global_shadow + pgm_layout.global_shadow_size;
+  pgm_layout.short_global_shadow_offset =
+    pgm_layout.short_global_shadow_start - pgm_layout.global_start;
+
+  /* Long (block) global shadow region */
+  void *long_global_shadow  = do_mmap(pgm_layout.global_shadow_size);
+  pgm_layout.long_global_shadow_start = (uintptr_t)long_global_shadow;
+  pgm_layout.long_global_shadow_end =
+    (uintptr_t)long_global_shadow + pgm_layout.global_shadow_size;
+  pgm_layout.long_global_shadow_offset =
+    pgm_layout.long_global_shadow_end - pgm_layout.global_end;
+
+  /* The shadow layout has been initialized */
+  pgm_layout.initialized = 1;
+}
+
+/*! \brief Unmap (de-allocate) shadow regions used by runtime analysis */
+static void clean_pgm_layout() {
+  DLOG("<<< Clean shadow layout >>>\n");
+  /* Unmap global shadows */
+  if (pgm_layout.long_global_shadow_start)
+    munmap((void*)pgm_layout.long_global_shadow_start,
+      pgm_layout.global_shadow_size);
+  if (pgm_layout.short_global_shadow_start)
+    munmap((void*)pgm_layout.short_global_shadow_start,
+      pgm_layout.global_shadow_size);
+  /* Unmap stack shadows */
+  if (pgm_layout.long_stack_shadow_start)
+    munmap((void*)pgm_layout.long_stack_shadow_start,
+      pgm_layout.stack_shadow_size);
+  if (pgm_layout.short_stack_shadow_start)
+    munmap((void*)pgm_layout.short_stack_shadow_start,
+      pgm_layout.stack_shadow_size);
+  /* Unmap heap shadow */
+  if (pgm_layout.heap_shadow_start)
+    munmap((void*)pgm_layout.heap_shadow_start, pgm_layout.heap_shadow_size);
+}
+/* }}} */
+
+/* Shadow access {{{
+ *
+ * In a typical case shadow regions reside in the high memory but below the
+ * stack segment. Provided that shadow displacement offsets are stored using
+ * unsigned, integers computing some shadow address `S` of an application-space
+ * address `A` using a shadow displacement offset `OFF` is as follows:
+ *
+ *  Stack address:
+ *    S = A - OFF
+ *  Global or heap address:
+ *    S = A + OFF
+ *
+ * Conversions between application-space and shadow memory addresses
+ * are given using the following macros.
+*/
+
+/*! \brief Convert a stack address into its short (byte) shadow counterpart */
+#define SHORT_STACK_SHADOW(_addr)  \
+  ((uintptr_t)((uintptr_t)_addr - pgm_layout.short_stack_shadow_offset))
+/*! \brief Convert a stack address into its long (block) shadow counterpart */
+#define LONG_STACK_SHADOW(_addr) \
+  ((uintptr_t)((uintptr_t)_addr - pgm_layout.long_stack_shadow_offset))
+
+/*! \brief Convert a global address into its short (byte) shadow counterpart */
+#define SHORT_GLOBAL_SHADOW(_addr)  \
+  ((uintptr_t)((uintptr_t)_addr + pgm_layout.short_global_shadow_offset))
+/*! \brief Convert a global address into its long (block) shadow counterpart */
+#define LONG_GLOBAL_SHADOW(_addr) \
+  ((uintptr_t)((uintptr_t)_addr + pgm_layout.long_global_shadow_offset))
+
+/*! \brief Select stack or global shadow based on the value of `_global`
+ *
+ * - SHORT_SHADOW(_addr, 0) is equivalent to STACK_SHORT_SHADOW(_addr)
+ * - SHORT_SHADOW(_addr, 1) is equivalent to GLOBAL_SHORT_SHADOW(_addr) */
+#define SHORT_SHADOW(_addr, _global) \
+  (_global ? SHORT_GLOBAL_SHADOW(_addr) : SHORT_STACK_SHADOW(_addr))
+
+/*! \brief Same as above but for long stack/global shadows */
+#define LONG_SHADOW(_addr, _global) \
+  (_global ? LONG_GLOBAL_SHADOW(_addr) : LONG_STACK_SHADOW(_addr))
+
+/*! \brief Convert a heap address into its shadow counterpart */
+#define HEAP_SHADOW(_addr)  \
+  ((uintptr_t)((uintptr_t)_addr + pgm_layout.heap_shadow_offset))
+
+/*! \brief Evaluate to a true value if a given address is a heap address */
+#define IS_ON_HEAP(_addr) ( \
+  ((uintptr_t)_addr) >= pgm_layout.heap_start && \
+  ((uintptr_t)_addr) <= pgm_layout.heap_end \
+)
+
+/*! \brief Evaluate to a true value if a given address is a stack address */
+#define IS_ON_STACK(_addr) ( \
+  ((uintptr_t)_addr) >= pgm_layout.stack_start && \
+  ((uintptr_t)_addr) <= pgm_layout.stack_end \
+)
+
+/*! \brief Evaluate to a true value if a given address is a global address */
+#define IS_ON_GLOBAL(_addr) ( \
+  ((uintptr_t)_addr) >= pgm_layout.global_start && \
+  ((uintptr_t)_addr) <= pgm_layout.global_end \
+)
+
+/*! \brief Shortcut for evaluating an address via ::IS_ON_STACK or
+ * ::IS_ON_GLOBAL based on the valua of the second parameter */
+#define IS_ON_STATIC(_addr, _global) \
+  (_global ? IS_ON_GLOBAL(_addr) : IS_ON_STACK(_addr))
+
+/*! \brief Evaluate to a true value if a given address belongs to tracked
+ * allocation (i.e., found within stack, heap oor globally) */
+#define IS_ON_VALID(_addr) \
+  (IS_ON_STACK(_addr) || IS_ON_HEAP(_addr) || IS_ON_GLOBAL(_addr))
+/* }}} */
-- 
GitLab