#define _CRT_SECURE_NO_WARNINGS
#pragma once
#if defined(_WIN32)
// MSVC compiler family
#define COMPILER_MSVC 1
#if _MSC_VER >= 1600
// <stdint.h> is only available in VS2010 and later
#define HAS_STDINT 1
#endif
#if _XBOX_VER >= 200
// Xbox 360
#define TARGET_XBOX_360 1
#define CPU_POWERPC 1
#define PTR_SIZE 4
#else
#if defined(_M_X64)
// x64
#define CPU_X64 1
#define PTR_SIZE 8
#elif defined(_M_IX86)
// x86
#define CPU_X86 1
#define PTR_SIZE 4
#else
#error Unrecognized platform!
#endif
#endif
#elif defined(__GNUC__)
// GCC compiler family
#define COMPILER_GCC 1
#define HAS_STDINT 1
#if defined(__llvm__)
// LLVM
#define COMPILER_LLVM 1
#if __has_feature(c_atomic)
// No need to mark atomic##_t members as volatile
#define HAS_C11_MEMORY_MODEL 1
#endif
#endif
#if defined(__APPLE__)
// Apple MacOS/iOS
#define IS_APPLE 1
#endif
#if defined(__x86_64__)
// x64
#define CPU_X64 1
#define PTR_SIZE 8
#elif defined(__i386__)
// x86
#define CPU_X86 1
#define PTR_SIZE 4
#elif defined(__arm__)
// ARM
#define CPU_ARM 1
#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__)
// ARMv7
#define CPU_ARM_VERSION 7
#define PTR_SIZE 4
#elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__)
// ARMv6
#define CPU_ARM_VERSION 6
#define PTR_SIZE 4
#else
// Earlier ARM versions could be supported using compiler barriers and the swp instruction
#error Unrecognized ARM CPU architecture version!
#endif
#if defined(__thumb__)
// Thumb instruction set mode
#define CPU_ARM_THUMB 1
#endif
#else
#error Unrecognized target CPU!
#endif
#else
#error Unrecognized compiler!
#endif
#if defined COMPILER_MSVC
#include <Windows.h>
#endif
#include <string>
#include <thread>
#include <cinttypes>
#include <vector>
#include <cmath>
#include <cassert>
#include <mutex>
#include <memory>
#include <iostream>
#include <cstdarg>  // va_list, used by StringAppendV
#include <cstdio>   // FILE, fopen, fscanf, fprintf, used in main
extern int DebugLevel;
void slog(int level, char const *format, ...);
// Return a C++ string
std::string StringPrintf(const char* format, ...);
// Store result into a supplied string and return it
std::string& SStringPrintf(std::string & dst, const char* format, ...);
// Append result to a supplied string
void StringAppendF(std::string & dst, const char* format, ...);
// Lower-level routine that takes a va_list and appends to a specified
// string. All other routines are just convenience wrappers around it.
void StringAppendV(std::string & dst, const char* format, va_list ap);
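// Minimal relaxed atomic primitives. load/store are plain volatile
// accesses: aligned 32-bit loads and stores are atomic on every CPU
// targeted above, so no special instructions are needed. The 64-bit and
// read-modify-write operations below fall back to cmpxchg8b or
// lock-prefixed instructions on 32-bit x86.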
inline long int load(volatile long int *value) {
return *value;
}
inline void store(volatile long int *value, long int desired) {
*value = desired;
}
inline void store64(volatile long long int *value, long long int desired) {
#if defined COMPILER_MSVC
#if CPU_X64 || TARGET_XBOX_360
*value = desired;
#else
// On 32-bit x86, the most compatible way to get an atomic 64-bit store is with cmpxchg8b.
// Essentially, we perform the equivalent of compareAndSwap64Old(value, *value, desired)
// in a loop until it returns the original value.
// According to the Linux kernel (atomic64_cx8_32.S), we don't need the "lock" prefix
// on cmpxchg8b since aligned 64-bit writes are already atomic on 586 and newer.
__asm
{
mov esi, value;
mov ebx, dword ptr desired;
mov ecx, dword ptr desired[4];
retry:
cmpxchg8b [esi];
jne retry;
}
#endif
#elif defined COMPILER_GCC
#if CPU_X64
*value = desired;
#else
auto expected = *value;
// expected must be tied to edx:eax as an in/out operand ("+A"):
// cmpxchg8b reloads those registers with the current memory value
// whenever the exchange fails, so declaring them input-only would
// let the compiler assume they are unchanged.
asm volatile("1: cmpxchg8b %0\n"
" jne 1b"
: "+m"(*value), "+A"(expected)
: "b"((unsigned long int) desired), "c"((unsigned long int) (desired >> 32)));
#endif
#endif
}
inline long long int load64(volatile long long int *value) {
#if defined COMPILER_MSVC
#if CPU_X64 || TARGET_XBOX_360
return *value;
#else
long long int result;
__asm
{
mov esi, value;
mov ebx, eax;
mov ecx, edx;
lock cmpxchg8b [esi];
mov dword ptr result, eax;
mov dword ptr result[4], edx;
}
return result;
#endif
#elif defined COMPILER_GCC
#if CPU_X64
return *value;
#else
long long int original;
asm volatile("movl %%ebx, %%eax\n"
"movl %%ecx, %%edx\n"
"lock; cmpxchg8b %1"
: "=&A"(original)
: "m"(*value));
return original;
#endif
#endif
}
inline long long int compareAndSwap64Old(volatile long long int *value, long long int expected, long long int desired) {
#if defined COMPILER_MSVC
return _InterlockedCompareExchange64(value, desired, expected);
#elif defined COMPILER_GCC
#if CPU_X64
long long int original;
asm volatile("lock; cmpxchgq %2, %1"
: "=a"(original), "+m"(*value)
: "q"(desired), "0"(expected));
return original;
#else
long long int original;
asm volatile("lock; cmpxchg8b %1"
: "=A"(original), "+m"(*value)
: "b"((long int)desired), "c"((long int)(desired >> 32)), "0"(expected));
return original;
#endif
#endif
}
inline long int compareAndSwapOld(volatile long int *value, long int expected, long int desired) {
#if defined COMPILER_MSVC
return _InterlockedCompareExchange(value, desired, expected);
#elif defined COMPILER_GCC
#if CPU_X64
long int original;
asm volatile("lock; cmpxchgq %2, %1"
: "=a"(original), "+m"(*value)
: "q"(desired), "0"(expected));
return original;
#else
long int original;
asm volatile("lock; cmpxchgl %2, %1"
: "=a"(original), "+m"(*value)
: "q"(desired), "0"(expected));
return original;
#endif
#endif
}
// Forward declaration: compareAndSwap64 is defined below (after the *Old
// primitives), but the 32-bit GCC fallback in fetchAdd64 needs it here.
inline bool compareAndSwap64(volatile long long int *value, long long int expected, long long int desired);
inline long long int fetchAdd64(volatile long long int *value, long long int operand) {
#if defined COMPILER_MSVC
#if CPU_X64 || TARGET_XBOX_360
return _InterlockedExchangeAdd64(value, operand);
#else
auto expected = *value;
for (;;)
{
auto original = _InterlockedCompareExchange64(value, expected + operand, expected);
if (original == expected)
return original;
expected = original;
}
#endif
#elif defined COMPILER_GCC
#if CPU_X64
long long int original;
asm volatile("lock; xaddq %0, %1"
: "=r"(original), "+m"(*value)
: "0"(operand));
return original;
#else
// This implementation generates an unnecessary cmp instruction after the cmpxchg8b.
// Could be optimized further using inline assembly.
for (;;)
{
long long original = *value;
if (compareAndSwap64(value, original, original + operand)) {
return original;
}
}
#endif
#endif
}
inline long int fetchAdd(volatile long int *value, long int operand) {
#if defined COMPILER_MSVC
return _InterlockedExchangeAdd(value, operand);
#elif defined COMPILER_GCC
long int original;
#if CPU_X64
asm volatile("lock; xaddq %0, %1"
: "=r"(original), "+m"(*value)
: "0"(operand));
return original;
#else
asm volatile("lock; xaddl %0, %1"
: "=r"(original), "+m"(*value)
: "0"(operand));
return original;
#endif
#endif
}
inline bool compareAndSwap(volatile long int *value, long int expected, long int desired) {
return compareAndSwapOld(value, expected, desired) == expected;
}
inline bool compareAndSwap64(volatile long long int *value, long long int expected, long long int desired) {
return compareAndSwap64Old(value, expected, desired) == expected;
}
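// Usage sketch (illustrative, not part of the original interface): a
// hypothetical retry loop that atomically doubles a counter using the
// primitives above. fetchAdd covers plain increments; a CAS loop like
// this handles read-modify-write operations that are not simple adds.
inline void exampleAtomicDouble(volatile long int *counter) {
for (;;) {
long int original = load(counter);
// Retry until no other thread has changed the value in between.
if (compareAndSwap(counter, original, original * 2)) {
return;
}
}
}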
long long tps();
double milliseconds();
long int timeSinceEpoch();
struct Spinlock {
volatile long int _value;
Spinlock() {
store(&_value, 0);
}
bool trylock() {
return compareAndSwap(&_value, 0, 1);
}
void lock() {
auto valuePtr = &_value;
while (true) {
// Spin on a plain read first so the CAS is only attempted when the
// lock looks free; yield to the scheduler after ~1024 failed probes.
for (int i = 0; i < 1024; i++) {
if (!*valuePtr) {
if (compareAndSwap(valuePtr, 0, 1)) {
return;
}
}
}
std::this_thread::yield();
}
}
void unlock() {
store(&_value, 0);
}
};
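// Usage sketch (illustrative): a minimal RAII guard for Spinlock so a
// scope cannot forget to unlock. The name SpinlockGuard is hypothetical
// and not used elsewhere in this file.
struct SpinlockGuard {
Spinlock &_lock;
explicit SpinlockGuard(Spinlock &lock) : _lock(lock) { _lock.lock(); }
~SpinlockGuard() { _lock.unlock(); }
SpinlockGuard(SpinlockGuard const &) = delete;
SpinlockGuard &operator=(SpinlockGuard const &) = delete;
};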
// A nonblocking hash map. It is thread-safe only on platforms with strong memory ordering (x86).
template <typename ValueType>
struct HT {
struct E {
unsigned long long int Key;
long int Value;
E() {
Key = 0LL;
Value = 0;
}
};
std::vector<Spinlock> _locks;
std::vector<ValueType> _values;
std::vector<E> _table;
long int _valueIndex;
long int _size;
long int _tableSize;
unsigned int _mask;
HT(unsigned int N) {
// Value indexing starts from 1 because index 0 is reserved as the
// "deleted/not present" sentinel.
_valueIndex = 1;
unsigned int n = N;
// Round n up to the next 2^k - 1 so it can serve as a bit mask.
while (n & (n + 1)) {
n |= (n + 1);
}
// The table is deliberately larger than N so collisions stay sparse:
// e.g. 65k elements get at least ~131k buckets, so roughly 90% of
// lookups hit on the first probe and ~99% within the next bucket.
_table.resize(size_t(n) + 1);
_tableSize = (long int)(n) + 1;
// Value index 0 is special: it means the entry does not exist.
_size = N + 1;
_values.resize(N + 1);
_locks.resize(N + 1);
_mask = n;
}
long int size() {
// _valueIndex starts at 1, so the number of stored values is one less.
return load(&_valueIndex) - 1;
}
ValueType* values() {
return _values.data() + 1;
}
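// This is the 64-bit finalizer (fmix64) of MurmurHash3: it mixes the
// key's bits so that nearby keys land in unrelated buckets.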
inline static unsigned long int hash(unsigned long long int h) {
h ^= h >> 33;
h *= 0xff51afd7ed558ccd;
h ^= h >> 33;
h *= 0xc4ceb9fe1a85ec53;
h ^= h >> 33;
return static_cast<unsigned long int>(h);
}
ValueType *begin() {
return _values.data();
}
ValueType *end() {
return _values.data() + _values.size();
}
ValueType *operator[](unsigned long long int key) {
auto h = hash(key);
for (long int steps = 0; steps < _size; steps++) {
h = h & _mask;
long long int kv = load64(reinterpret_cast<volatile long long int*>(&_table[h].Key));
if (kv == 0LL) {
return nullptr;
}
else if (static_cast<unsigned long long int>(kv) == key) {
long int vindex = load(&_table[h].Value);
if (vindex > 0) {
return &_values[static_cast<unsigned int>(vindex)];
}
return nullptr;
}
h++;
}
return nullptr;
}
void lock(ValueType *valuePtr) {
if (valuePtr > begin() && valuePtr < end()) {
auto lockPosition = valuePtr - begin();
_locks[lockPosition].lock();
}
}
void unlock(ValueType *valuePtr) {
if (valuePtr > begin() && valuePtr < end()) {
auto lockPosition = valuePtr - begin();
_locks[lockPosition].unlock();
}
}
bool contains(unsigned long long int key) {
auto h = hash(key);
for (long int steps = 0; steps < _size; steps++) {
h = h & _mask;
long long int kv = load64(reinterpret_cast<volatile long long int*>(&_table[h].Key));
if (kv == 0LL) {
return false;
}
else if (static_cast<unsigned long long int>(kv) == key) {
long int vindex = load(&_table[h].Value);
if (vindex > 0) {
return true;
}
}
h++;
}
return false;
}
bool insert(unsigned long long int key, ValueType const &value) {
if (!key) {
return false;
}
auto h = hash(key);
for (long int steps = 0; steps < _size; steps++) {
h = h & _mask;
// Make sure that we write the key into an empty slot atomically.
auto kv = static_cast<unsigned long long int>(compareAndSwap64Old(
reinterpret_cast<volatile long long int*>(&_table[h].Key), 0LL, static_cast<long long int>(key)));
if (kv == 0LL) {
// This code is guaranteed to be executed only once for each bucket.
auto volatile prefetch = load(&_valueIndex);
// Cannot insert: the value storage is full.
if (prefetch >= _size) {
return false;
}
auto volatile vindex =
fetchAdd(reinterpret_cast<volatile long int*>(&_valueIndex), 1);
store(reinterpret_cast<volatile long int*>(&_table[h].Value), vindex);
}
else {
auto vk = load(reinterpret_cast<volatile long int*>(&_table[h].Value));
if (vk < 0) {
auto vkz = compareAndSwapOld(reinterpret_cast<volatile long int*>(&_table[h].Value), vk, 0);
if (vkz == vk) {
store64(reinterpret_cast<volatile long long int*>(&_table[h].Key),
static_cast<long long int>(key));
store(reinterpret_cast<volatile long int*>(&_table[h].Value), -vk);
_values[static_cast<unsigned int>(-vk)] = value;
return true;
}
}
}
// We must be careful here because another thread might concurrently:
// 1. insert (and override) the same key, or
// 2. delete the key.
// If another thread inserts first, vindex is already != 0 and we simply
// don't write the value again. If another thread deletes while we are
// here, we undelete by making the value index positive again. Races can
// still happen when inserting and deleting the same key concurrently,
// which this design accepts.
if (kv == 0LL || static_cast<unsigned long long int>(kv) == key) {
auto volatile vindex = load(&_table[h].Value);
if (vindex < 0) {
vindex = -vindex;
// The slot was marked deleted; undelete it by restoring the positive index.
store(reinterpret_cast<volatile long int*>(&_table[h].Value), vindex);
// A race is still possible here; to be fully safe the bucket should be
// locked before writing the value.
_values[static_cast<unsigned int>(vindex)] = value;
return true;
}
else if (kv == 0LL) {
_values[static_cast<unsigned int>(vindex)] = value;
return true;
}
// The key already exists with a live value; don't overwrite it.
return false;
}
h++;
}
return false;
}
void remove(unsigned long long int key) {
auto h = hash(key);
for (long int steps = 0; steps < _size; steps++) {
h = h & _mask;
long long int kv = load64(reinterpret_cast<volatile long long int*>(&_table[h].Key));
// An empty bucket means the key was never inserted.
if (kv == 0) {
return;
}
// If the key matches we set the state to
// deleted by making the index negative
else if (static_cast<unsigned long long int>(kv) == key) {
auto volatile vindex = load(&_table[h].Value);
if (vindex > 0) {
vindex = -vindex;
store(reinterpret_cast<volatile long int*>(&_table[h].Value), vindex);
}
return;
}
h++;
}
}
};
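// Usage sketch (illustrative): typical read-modify-write access to a HT
// value. operator[] returns a pointer into the value storage (or
// nullptr), and lock()/unlock() take that same pointer to lock the
// matching per-value spinlock.
inline void exampleHTUsage() {
HT<long int> table(16);   // capacity and key are arbitrary for the example
table.insert(42ULL, 1);
if (auto *value = table[42ULL]) {
table.lock(value);
*value += 1;
table.unlock(value);
}
table.remove(42ULL);
}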
template <int Size>
class AdaptiveFilecache {
std::vector<int8_t> _bytes;
int _headOffset;
int _binCount;
std::vector<int> _chunkSizes;
std::vector<double> _weights;
std::vector<int> _chunkNumber;
std::vector<int> _startingOffsets;
std::vector<int> _currentOffsets;
// The adaptive behaviour is tweaked using these variables.
std::vector<double> _correctedWeights;
std::vector<double> _sizeWeights;
std::vector<double> _countWeights;
double _numberToSizeInterpolator;
double _totalSize;
double _totalNumber;
struct Node {
int* Next;
int* Prev;
int* LRUScore;
Node() : Next(nullptr), Prev(nullptr), LRUScore(nullptr) { }
};
std::vector<std::pair<int, Node>> _freeLists;
std::vector<std::pair<int, Node>> _usedLists;
int _LRUMax;
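// Each chunk is stored in _bytes with a 3-int header laid out in-band:
// [Next][Prev][LRUScore][...payload...]. readNode just aims the Node's
// pointers at that header; nothing is copied.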
int readNode(int memIndex, Node &node) {
node.Next = reinterpret_cast<int *>(&_bytes[memIndex]);
node.Prev = node.Next + 1;
node.LRUScore = node.Next + 2;
return memIndex + 3 * sizeof(int);
}
int8_t* allocatei(int bin) {
Node prev = _freeLists[bin].second, node, next;
if (*prev.Next == -1) {
return nullptr;
}
int iaddr = *prev.Next;
readNode(iaddr, node);
if (*node.Next != -1) {
readNode(*node.Next, next);
*next.Prev = *node.Prev;
}
*prev.Next = *node.Next;
prev = _usedLists[bin].second;
*node.Next = *prev.Next;
*node.Prev = _usedLists[bin].first;
*prev.Next = iaddr;
if (*node.Next != -1) {
readNode(*node.Next, next);
*next.Prev = iaddr;
}
return &_bytes[iaddr] + 3 * sizeof(int);
}
void freei(int bin, int8_t *address) {
int iaddr = address - _bytes.data() - 3 * sizeof(int);
Node current, prev, next;
readNode(iaddr, current);
if (*current.Next != -1) {
readNode(*current.Next, next);
*next.Prev = *current.Prev;
}
if (*current.Prev != -1) {
readNode(*current.Prev, prev);
*prev.Next = *current.Next;
}
Node head = _freeLists[bin].second;
if (*head.Next != -1) {
readNode(*head.Next, next);
*next.Prev = iaddr;
}
*current.Next = *head.Next;
*current.Prev = _freeLists[bin].first;
*head.Next = iaddr;
}
void initNodes() {
int k = 0;
int nodeAddress = 0;
while (k < _binCount) {
Node head;
auto prevAddress = nodeAddress;
nodeAddress = readNode(nodeAddress, head);
*head.Next = _startingOffsets[k];
*head.Prev = -1;
*head.LRUScore = 0;
_freeLists[k].first = prevAddress;
_freeLists[k].second = head;
Node it = head, prev = it;
auto counter = 0;
while (true) {
auto nextAddress = *it.Next;
if (nextAddress >= _startingOffsets[k + 1]) {
*it.Next = -1;
break;
}
readNode(nextAddress, it);
*it.Prev = prevAddress;
*it.Next = *prev.Next + _chunkSizes[k];
prev = it;
prevAddress = nextAddress;
counter++;
}
assert(counter == _chunkNumber[k]);
prevAddress = nodeAddress;
nodeAddress = readNode(nodeAddress, head);
*head.Next = -1;
*head.Prev = -1;
*head.LRUScore = 0;
_usedLists[k].first = prevAddress;
_usedLists[k].second = head;
k++;
}
}
public:
bool verifyIntegrity() {
for (int i = 0; i < _binCount; i++) {
int nodeCount = 0;
int prevNodeCount = 0;
Node it = _freeLists[i].second;
for (; *it.Next != -1; readNode(*it.Next, it)) {
if (*it.Next < _startingOffsets[i] || *it.Next >= _startingOffsets[i + 1]) {
return false;
}
if ((*it.Next - _startingOffsets[i]) % _chunkSizes[i] != 0) {
return false;
}
nodeCount++;
}
for (; *it.Prev != -1; readNode(*it.Prev, it)) {
if (*it.Prev != _freeLists[i].first) {
if (*it.Prev < _startingOffsets[i] || *it.Prev >= _startingOffsets[i + 1]) {
return false;
}
if ((*it.Prev - _startingOffsets[i]) % _chunkSizes[i] != 0) {
return false;
}
}
prevNodeCount++;
}
if (prevNodeCount != nodeCount) {
return false;
}
it = _usedLists[i].second;
for (; *it.Next != -1; readNode(*it.Next, it)) {
if (*it.Next < _startingOffsets[i] || *it.Next >= _startingOffsets[i + 1]) {
return false;
}
if ((*it.Next - _startingOffsets[i]) % _chunkSizes[i] != 0) {
return false;
}
nodeCount++;
}
for (; *it.Prev != -1; readNode(*it.Prev, it)) {
if (*it.Prev != _usedLists[i].first) {
if (*it.Prev < _startingOffsets[i] || *it.Prev >= _startingOffsets[i + 1]) {
return false;
}
if ((*it.Prev - _startingOffsets[i]) % _chunkSizes[i] != 0) {
return false;
}
}
prevNodeCount++;
}
if (prevNodeCount != nodeCount) {
return false;
}
if (_chunkNumber[i] != nodeCount) {
return false;
}
}
return true;
}
int8_t *allocate(int size) {
int binLoc = binLocation(size);
if (binLoc != -1)
return allocatei(binLoc);
return nullptr;
}
void free(int8_t *ptr) {
if (ptr < _bytes.data() ||
ptr >= _bytes.data() + _bytes.size()) {
return;
}
int iaddr = ptr - _bytes.data();
int binLoc = 0;
while (binLoc < _binCount && _startingOffsets[binLoc] < iaddr) {
binLoc++;
}
if (iaddr < _startingOffsets[binLoc]) {
binLoc--;
freei(binLoc, ptr);
}
}
AdaptiveFilecache(std::vector<std::pair<int, double>> const &chunkDescription) :
_bytes(chunkDescription.size() * (sizeof(int) * 6) + Size)
, _headOffset(0)
, _binCount(chunkDescription.size())
, _chunkSizes(chunkDescription.size())
, _weights(chunkDescription.size())
, _chunkNumber(chunkDescription.size())
, _startingOffsets(chunkDescription.size() + 1)
, _currentOffsets(chunkDescription.size() + 1, 0)
, _correctedWeights(chunkDescription.size())
, _sizeWeights(chunkDescription.size())
, _countWeights(chunkDescription.size())
, _numberToSizeInterpolator(0.7)
, _totalSize(0.0)
, _totalNumber(0.0)
, _freeLists(chunkDescription.size())
, _usedLists(chunkDescription.size())
, _LRUMax(32)
{
for (int i = 0; i < _binCount; i++) {
_chunkSizes[i] = chunkDescription[i].first;
_weights[i] = chunkDescription[i].second;
}
_headOffset = _bytes.size() - Size;
reorganize();
}
bool insertChunk(int size) {
auto binLoc = binLocation(size);
if (binLoc != -1) {
this->_countWeights[binLoc] += 1.0;
this->_sizeWeights[binLoc] += size;
_totalNumber += 1.0;
_totalSize += size;
// TODO: the actual insertion of the chunk would happen here.
return true;
}
return false;
}
void reorganize() {
int64_t sumAi = 0LL;
double sumWeights = 0.0;
double sumWeightsA = 0.0;
for (int i = 0; i < _binCount; i++) {
sumAi += _chunkSizes[i];
sumWeights += _weights[i];
sumWeightsA += _weights[i] * _chunkSizes[i];
}
assert(Size > sumAi);
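// Each bin i receives a share of the remaining space proportional to
// its weight: chunkNumber_i = ceil(w_i / (sumW * a_i) * (Size - sumA)),
// where a_i is the chunk size, so bin i holds ~w_i/sumW of the bytes.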
for (int i = 0; i < _binCount; i++) {
_chunkNumber[i] =
static_cast<int>(ceil(_weights[i] / (sumWeights * _chunkSizes[i]) * (Size - sumAi)));
}
_startingOffsets[0] = _headOffset;
_currentOffsets[0] = _headOffset;
for (int i = 1; i <= _binCount; i++) {
_currentOffsets[i] = _startingOffsets[i] =
_startingOffsets[i - 1] + _chunkNumber[i - 1] * _chunkSizes[i - 1];
}
initNodes();
}
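// Adaptive step: each bin's new weight blends how *often* it was hit
// with how many *bytes* it received, interpolated by
// _numberToSizeInterpolator (0.7 favors the hit count):
//   w_i = alpha * count_i / totalNumber + (1 - alpha) * size_i / totalSize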
void correctWeights() {
// Avoid dividing by zero when no chunks were recorded since the last reset.
if (_totalNumber == 0.0 || _totalSize == 0.0) {
return;
}
for (int i = 0; i < _binCount; i++) {
_correctedWeights[i] =
_countWeights[i] / _totalNumber * _numberToSizeInterpolator +
_sizeWeights[i] / _totalSize * (1.0 - _numberToSizeInterpolator);
_countWeights[i] = 0.0;
_sizeWeights[i] = 0.0;
}
_totalSize = 0;
_totalNumber = 0;
std::swap(_correctedWeights, _weights);
}
inline int binLocation(int size) {
size += 3 * sizeof(int);
if (size < 0) {
return -1;
}
int binLoc = 0;
while (binLoc < _binCount && _chunkSizes[binLoc] < size) {
binLoc++;
}
if (binLoc != _binCount) {
return binLoc;
}
return -1;
}
void print() {
using std::cout;
using std::endl;
using std::ios;
using std::mutex;
using std::lock_guard;
if (DebugLevel > 70) {
static mutex mtx;
lock_guard<mutex> lock(mtx);
cout << "Weights:\n";
for (int i = 0; i < _binCount; i++) {
cout << _weights[i] << endl;
}
cout << "Chunks:\n";
for (int i = 0; i < _binCount; i++) {
cout.precision(4);
cout.setf(ios::fixed);
cout << _chunkSizes[i] / 1024.0 << "kb x "
<< _chunkNumber[i] << " -> " << (_startingOffsets[i + 1] - _startingOffsets[i]) / 1024.0 / 1024.0 << "mb" <<
" offset: " << _startingOffsets[i] / 1024.0 / 1024.0 << "mb" << endl;
}
cout << _startingOffsets[_binCount] / 1024.0 / 1024.0 << "mb\n";
cout << "====================================\n";
}
}
std::vector<int8_t> &getBytes() {
return _bytes;
}
bool contains(int8_t *ptr) {
if (ptr < _bytes.data() ||
ptr >= _bytes.data() + _bytes.size()) {
return false;
}
return true;
}
};
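// Usage sketch (illustrative; the 1 MB size and the bin layout are made
// up for the example): three bins of 1 KB / 4 KB / 16 KB chunks with
// weights 0.5 / 0.3 / 0.2, one allocation and its matching free.
inline void exampleFilecacheUsage() {
std::vector<std::pair<int, double>> bins;
bins.push_back(std::make_pair(1024, 0.5));
bins.push_back(std::make_pair(4096, 0.3));
bins.push_back(std::make_pair(16384, 0.2));
AdaptiveFilecache<(1 << 20)> cache(bins);
int8_t *chunk = cache.allocate(1000);   // 1000 + 12-byte header fits the 1 KB bin
if (chunk) {
cache.free(chunk);
}
}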
template <int N>
class ThreadedAdaptiveFilecache {
int _threads;
std::vector<std::unique_ptr<AdaptiveFilecache<N>>> _caches;
std::vector<Spinlock> _locks;
public:
ThreadedAdaptiveFilecache(std::vector<std::pair<int, double>> const &chunkDescription, int threads)
: _threads(threads) {
for (int i = 0; i < threads; i++) {
_caches.emplace_back(new AdaptiveFilecache<N>(chunkDescription));
_locks.emplace_back();
}
}
std::vector<bool> verifyIntegrity() {
int idx = 0;
std::vector<bool> result;
std::vector<bool> verified(_threads, false);
while ((int)result.size() < _threads) {
if (!verified[idx] && _locks[idx].trylock()) {
result.push_back(_caches[idx]->verifyIntegrity());
_locks[idx].unlock();
verified[idx] = true;
}
idx = (idx + 1) % _threads;
}
return result;
}
int8_t *allocate(int size) {
int nullCount = 0;
int index = 0;
std::vector<bool> allocated(_threads, false);
while (nullCount < _threads) {
if (!allocated[index] && _locks[index].trylock()) {
auto ptr = _caches[index]->allocate(size);
_locks[index].unlock();
allocated[index] = true;
if (ptr == nullptr) {
nullCount++;
}
else {
return ptr;
}
}
index = (index + 1) % _threads;
}
return nullptr;
}
void free(int8_t * ptr) {
for (int i = 0; i < _threads; i++) {
if (_caches[i]->contains(ptr)) {
_locks[i].lock();
_caches[i]->free(ptr);
_locks[i].unlock();
return;
}
}
}
};
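// Usage sketch (illustrative; the size and shard count are made up):
// one AdaptiveFilecache shard per thread. allocate() tries shards under
// trylock until one succeeds; free() routes the pointer back to the
// shard whose buffer contains it.
inline void exampleThreadedFilecacheUsage(
std::vector<std::pair<int, double>> const &bins) {
ThreadedAdaptiveFilecache<(1 << 20)> cache(bins, 4);
int8_t *chunk = cache.allocate(1000);
if (chunk) {
cache.free(chunk);
}
}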
using namespace std;
int main() {
FILE *out = fopen("hashuri.out", "w");
FILE *in = fopen("hashuri.in", "r");
int n;
fscanf(in, "%d", &n);
HT<bool> ht(n);
for (int i = 0; i < n; i++) {
unsigned long long int op, c;
fscanf(in, "%llu%llu", &op, &c);
if (op == 1) {
ht.insert(c, true);
}
else if (op == 2) {
ht.remove(c);
}
else if (op == 3) {
if (ht.contains(c))
fprintf(out, "1\n");
else
fprintf(out, "0\n");
}
}
fclose(in);
fclose(out);
return 0;
}