/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#ifndef UTIL_H
#define UTIL_H 1
+#include <inttypes.h>
#include <limits.h>
#include <stdarg.h>
#include <stdbool.h>
/* Returns X rounded up to the nearest multiple of Y. */
#define ROUND_UP(X, Y) (DIV_ROUND_UP(X, Y) * (Y))
+/* Returns the least number that, when added to X, yields a multiple of Y. */
+#define PAD_SIZE(X, Y) (ROUND_UP(X, Y) - (X))
+
/* Returns X rounded down to the nearest multiple of Y. */
#define ROUND_DOWN(X, Y) ((X) / (Y) * (Y))
#define RDP2_4(X) (RDP2_5(X) | (RDP2_5(X) >> 2))
#define RDP2_5(X) ( (X) | ( (X) >> 1))
+/* This system's cache line size, in bytes.
+ * Being wrong hurts performance but not correctness. */
+#define CACHE_LINE_SIZE 64
+BUILD_ASSERT_DECL(IS_POW2(CACHE_LINE_SIZE));
+
#ifndef MIN
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
#endif
-#define NOT_REACHED() abort()
+#define OVS_NOT_REACHED() abort()
/* Expands to a string that looks like "<file>:<line>", e.g. "tmp.c:10".
*
* (void *). This is to suppress the alignment warning issued by clang. */
#define ALIGNED_CAST(TYPE, ATTR) ((TYPE) (void *) (ATTR))
+/* Use "%"PRIuSIZE to format size_t with printf(). */
+#ifdef _WIN32
+#define PRIdSIZE "Id"
+#define PRIiSIZE "Ii"
+#define PRIoSIZE "Io"
+#define PRIuSIZE "Iu"
+#define PRIxSIZE "Ix"
+#define PRIXSIZE "IX"
+#else
+#define PRIdSIZE "zd"
+#define PRIiSIZE "zi"
+#define PRIoSIZE "zo"
+#define PRIuSIZE "zu"
+#define PRIxSIZE "zx"
+#define PRIXSIZE "zX"
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
char *xvasprintf(const char *format, va_list) PRINTF_FORMAT(1, 0) MALLOC_LIKE;
void *x2nrealloc(void *p, size_t *n, size_t s);
+void *xmalloc_cacheline(size_t) MALLOC_LIKE;
+void *xzalloc_cacheline(size_t) MALLOC_LIKE;
+void free_cacheline(void *);
+
void ovs_strlcpy(char *dst, const char *src, size_t size);
void ovs_strzcpy(char *dst, const char *src, size_t size);
bool str_to_int(const char *, int base, int *);
bool str_to_long(const char *, int base, long *);
bool str_to_llong(const char *, int base, long long *);
-bool str_to_uint(const char *, int base, unsigned int *);
-bool str_to_ulong(const char *, int base, unsigned long *);
-bool str_to_ullong(const char *, int base, unsigned long long *);
-bool ovs_scan(const char *s, const char *template, ...) SCANF_FORMAT(2, 3);
+/* Parses 's' as an unsigned int in base 'base' into '*u'; returns true on
+ * success.  NOTE(review): delegates to the signed str_to_int() through a
+ * pointer cast, so the accepted range presumably follows the signed type --
+ * confirm callers do not need the full unsigned range. */
+static inline bool
+str_to_uint(const char *s, int base, unsigned int *u)
+{
+ return str_to_int(s, base, (int *) u);
+}
+
+/* Same as str_to_uint() but for 'unsigned long', via str_to_long(). */
+static inline bool
+str_to_ulong(const char *s, int base, unsigned long *ul)
+{
+ return str_to_long(s, base, (long *) ul);
+}
+
+/* Same as str_to_uint() but for 'unsigned long long', via str_to_llong(). */
+static inline bool
+str_to_ullong(const char *s, int base, unsigned long long *ull)
+{
+ return str_to_llong(s, base, (long long *) ull);
+}
+
+bool ovs_scan(const char *s, const char *format, ...) SCANF_FORMAT(2, 3);
bool str_to_double(const char *, double *);
char *base_name(const char *file_name);
char *abs_file_name(const char *dir, const char *file_name);
-char *xreadlink(const char *filename);
char *follow_symlinks(const char *filename);
void ignore(bool x OVS_UNUSED);
\f
/* Bitwise tests. */
-int log_2_floor(uint32_t);
-int log_2_ceil(uint32_t);
-unsigned int popcount(uint32_t);
-
/* Returns the number of trailing 0-bits in 'n'. Undefined if 'n' == 0. */
#if __GNUC__ >= 4
static inline int
? __builtin_ctz(n)
: __builtin_ctzll(n));
}
-#else
-/* Defined in util.c. */
-int raw_ctz(uint64_t n);
-#endif
-#if __GNUC__ >= 4
static inline int
-popcount64(uint64_t n)
+raw_clz64(uint64_t n)
{
- return __builtin_popcountll(n);
+ /* Undefined when 'n' == 0 (__builtin_clzll() precondition). */
+ return __builtin_clzll(n);
}
#else
-/* Defined using the 32-bit counterparts. */
-static inline int
-popcount64(uint64_t n)
-{
- return popcount(n) + popcount(n >> 32);
-}
+/* Defined in util.c. */
+int raw_ctz(uint64_t n);
+int raw_clz64(uint64_t n);
#endif
/* Returns the number of trailing 0-bits in 'n', or 32 if 'n' is 0. */
static inline int
-ctz(uint32_t n)
+ctz32(uint32_t n)
{
+ /* raw_ctz() is undefined at 0, so 0 is special-cased here. */
 return n ? raw_ctz(n) : 32;
}
return n ? raw_ctz(n) : 64;
}
+/* Returns the number of leading 0-bits in 'n', or 32 if 'n' is 0.
+ * ('n' promotes to 64 bits, so raw_clz64() reports 32 extra leading
+ * zeros; the "- 32" compensates for them.) */
+static inline int
+clz32(uint32_t n)
+{
+ return n ? raw_clz64(n) - 32 : 32;
+}
+
+/* Returns the number of leading 0-bits in 'n', or 64 if 'n' is 0.
+ * (raw_clz64() itself is undefined at 0, hence the guard.) */
+static inline int
+clz64(uint64_t n)
+{
+ return n ? raw_clz64(n) : 64;
+}
+
+/* Given a word 'n', calculates floor(log_2('n')). This is equivalent
+ * to finding the bit position of the most significant one bit in 'n'. It is
+ * an error to call this function with 'n' == 0 (raw_clz64() is undefined
+ * at 0). */
+static inline int
+log_2_floor(uint64_t n)
+{
+ return 63 - raw_clz64(n);
+}
+
+/* Given a word 'n', calculates ceil(log_2('n')). It is an error to
+ * call this function with 'n' == 0.
+ * (One more than the floor, unless 'n' is an exact power of 2.) */
+static inline int
+log_2_ceil(uint64_t n)
+{
+ return log_2_floor(n) + !is_pow2(n);
+}
+
+/* unsigned int count_1bits(uint64_t x):
+ *
+ * Returns the number of 1-bits in 'x', between 0 and 64 inclusive. */
+#if UINTPTR_MAX == UINT64_MAX
+/* Pointer width is used here as a proxy for fast native 64-bit
+ * arithmetic: count all 64 bits in a single pass. */
+static inline unsigned int
+count_1bits(uint64_t x)
+{
+#if __GNUC__ >= 4 && __POPCNT__
+ /* The compiler can emit a single POPCNT instruction. */
+ return __builtin_popcountll(x);
+#else
+ /* This portable implementation is the fastest one we know of for 64
+ * bits, and about 3x faster than GCC 4.7 __builtin_popcountll().
+ * (Classic SWAR popcount: sum bit counts in progressively wider
+ * fields, then add all bytes with one multiply.) */
+ const uint64_t h55 = UINT64_C(0x5555555555555555);
+ const uint64_t h33 = UINT64_C(0x3333333333333333);
+ const uint64_t h0F = UINT64_C(0x0F0F0F0F0F0F0F0F);
+ const uint64_t h01 = UINT64_C(0x0101010101010101);
+ x -= (x >> 1) & h55; /* Count of each 2 bits in-place. */
+ x = (x & h33) + ((x >> 2) & h33); /* Count of each 4 bits in-place. */
+ x = (x + (x >> 4)) & h0F; /* Count of each 8 bits in-place. */
+ return (x * h01) >> 56; /* Sum of all bytes. */
+#endif
+}
+#else /* Not 64-bit. */
+#if __GNUC__ >= 4 && __POPCNT__
+static inline unsigned int
+count_1bits_32__(uint32_t x)
+{
+ return __builtin_popcount(x);
+}
+#else
+#define NEED_COUNT_1BITS_8 1
+/* 256-entry table (defined in util.c): count_1bits_8[b] is the number of
+ * 1-bits in byte 'b'. */
+extern const uint8_t count_1bits_8[256];
+static inline unsigned int
+count_1bits_32__(uint32_t x)
+{
+ /* This portable implementation is the fastest one we know of for 32 bits,
+ * and faster than GCC __builtin_popcount(). */
+ return (count_1bits_8[x & 0xff] +
+ count_1bits_8[(x >> 8) & 0xff] +
+ count_1bits_8[(x >> 16) & 0xff] +
+ count_1bits_8[x >> 24]);
+}
+#endif
+/* On 32-bit builds, count each 32-bit half separately and sum them. */
+static inline unsigned int
+count_1bits(uint64_t x)
+{
+ return count_1bits_32__(x) + count_1bits_32__(x >> 32);
+}
+#endif
+
/* Returns the rightmost 1-bit in 'x' (e.g. 01011000 => 00001000), or 0 if 'x'
* is 0. */
static inline uintmax_t
static inline uint32_t
rightmost_1bit_idx(uint32_t x)
{
- return x ? ctz(x) : 32;
+ /* ctz32() already returns 32 when 'x' is 0, so no ternary is needed. */
+ return ctz32(x);
}
-/* Returns the index of the rightmost 1-bit in 'x' (e.g. 01011000 => 6), or 32
+/* Returns the index of the leftmost 1-bit in 'x' (e.g. 01011000 => 6), or 32
* if 'x' is 0.
*
* This function only works with 32-bit integers. */
uint64_t bitwise_get(const void *src, unsigned int src_len,
unsigned int src_ofs, unsigned int n_bits);
+void xsleep(unsigned int seconds);
+#ifdef _WIN32
+\f
+char *ovs_format_message(int error);
+char *ovs_lasterror_to_string(void);
+int ftruncate(int fd, off_t length);
+#endif
+
#ifdef __cplusplus
}
#endif