#define _CRT_SECURE_NO_WARNINGS
#include <cstdio>
#include <cstddef>
#include <vector>
#if defined(_WIN32)
// MSVC compiler family
#define COMPILER_MSVC 1
#if _MSC_VER >= 1600
// <stdint.h> is only available in VS2010 and later
#define HAS_STDINT 1
#endif
#if _XBOX_VER >= 200
// Xbox 360
#define TARGET_XBOX_360 1
#define CPU_POWERPC 1
#define PTR_SIZE 4
#else
#if defined(_M_X64)
// x64
#define CPU_X64 1
#define PTR_SIZE 8
#elif defined(_M_IX86)
// x86
#define CPU_X86 1
#define PTR_SIZE 4
#else
#error Unrecognized platform!
#endif
#endif
#elif defined(__GNUC__)
// GCC compiler family
#define COMPILER_GCC 1
#define HAS_STDINT 1
#if defined(__llvm__)
// LLVM
#define COMPILER_LLVM 1
#if __has_feature(c_atomic)
// No need to mark atomic members as volatile
#define HAS_C11_MEMORY_MODEL 1
#endif
#endif
#if defined(__APPLE__)
// Apple MacOS/iOS
#define IS_APPLE 1
#endif
#if defined(__x86_64__)
// x64
#define CPU_X64 1
#define PTR_SIZE 8
#elif defined(__i386__)
// x86
#define CPU_X86 1
#define PTR_SIZE 4
#elif defined(__arm__)
// ARM
#define CPU_ARM 1
#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7EM__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__)
// ARMv7
#define CPU_ARM_VERSION 7
#define PTR_SIZE 4
#elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__)
// ARMv6
#define CPU_ARM_VERSION 6
#define PTR_SIZE 4
#else
// Earlier ARM versions could be supported at some point using compiler barriers and the swp instruction
#error Unrecognized ARM CPU architecture version!
#endif
#if defined(__thumb__)
// Thumb instruction set mode
#define CPU_ARM_THUMB 1
#endif
#else
#error Unrecognized target CPU!
#endif
#else
#error Unrecognized compiler!
#endif
#if defined COMPILER_MSVC
#include <Windows.h>
#endif
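// Portable atomic primitives (CAS, load/store, fetch-add), apparently
// adapted from Jeff Preshing's mintomic: compiler intrinsics on MSVC,
// inline assembly on GCC/Clang. These guarantee atomicity only; they do
// not issue explicit memory fences or compiler barriers.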
bool compareAndSwap(volatile long int *value, long int expected, long int desired);
bool compareAndSwap64(volatile long long int *value, long long int expected, long long int desired);
long int load(volatile long int *value);
void store(volatile long int *value, long int desired);
void store64(volatile long long int *value, long long int desired);
long long int load64(volatile long long int *value);
long long int compareAndSwap64Old(volatile long long int *value, long long int expected, long long int desired);
long int compareAndSwapOld(volatile long int *value, long int expected, long int desired);
long long int fetchAdd64(volatile long long int *value, long long int operand);
long int fetchAdd(volatile long int *value, long int operand);
bool compareAndSwap(volatile long int *value, long int expected, long int desired) {
return compareAndSwapOld(value, expected, desired) == expected;
}
bool compareAndSwap64(volatile long long int *value, long long int expected, long long int desired) {
return compareAndSwap64Old(value, expected, desired) == expected;
}
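// Illustrative usage of the CAS wrapper (hypothetical 'counter' variable):
// a classic CAS loop that retries until the swap succeeds, e.g.
//     long int old;
//     do {
//         old = load(&counter);
//     } while (!compareAndSwap(&counter, old, old + 1));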
long int load(volatile long int *value) {
return *value;
}
void store(volatile long int *value, long int desired) {
*value = desired;
}
void store64(volatile long long int *value, long long int desired) {
#if defined COMPILER_MSVC
#if CPU_X64 || TARGET_XBOX_360
*value = desired;
#else
// On 32-bit x86, the most compatible way to get an atomic 64-bit store is with cmpxchg8b.
// Essentially, we perform compareAndSwap64Old(value, *value, desired)
// in a loop until it returns the value we last observed, i.e. until the swap succeeds.
// According to the Linux kernel (atomic64_cx8_32.S), we don't need the "lock;" prefix
// on cmpxchg8b since aligned 64-bit writes are already atomic on 586 and newer.
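// A C-level sketch of the retry loop implemented in assembly below:
//     long long int prev = *value;
//     while (!compareAndSwap64(value, prev, desired))
//         prev = *value;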
__asm
{
mov esi, value;
mov ebx, dword ptr desired;
mov ecx, dword ptr desired[4];
retry:
cmpxchg8b[esi];
jne retry;
}
#endif
#elif defined COMPILER_GCC
#if CPU_X64
*value = desired;
#else
auto expected = *value;
asm volatile("1: cmpxchg8b %0\n"
" jne 1b"
: "=m"(*value)
: "b"((unsigned long int) desired), "c"((unsigned long int) (desired >> 32)), "A"(expected));
#endif
#endif
}
long long int load64(volatile long long int *value) {
#if defined COMPILER_MSVC
#if CPU_X64 || TARGET_XBOX_360
return *value;
#else
long long int result;
__asm
{
mov esi, value;
mov ebx, eax;
mov ecx, edx;
lock cmpxchg8b[esi];
mov dword ptr result, eax;
mov dword ptr result[4], edx;
}
return result;
#endif
#elif defined COMPILER_GCC
#if CPU_X64
return *value;
#else
long long int original;
asm volatile("movl %%ebx, %%eax\n"
"movl %%ecx, %%edx\n"
"lock; cmpxchg8b %1"
: "=&A"(original)
: "m"(*value));
return original;
#endif
#endif
}
long long int compareAndSwap64Old(volatile long long int *value, long long int expected, long long int desired) {
#if defined COMPILER_MSVC
return _InterlockedCompareExchange64(value, desired, expected);
#elif defined COMPILER_GCC
#if CPU_X64
long long int original;
asm volatile("lock; cmpxchgq %2, %1"
: "=a"(original), "+m"(*value)
: "q"(desired), "0"(expected));
return original;
#else
long long int original;
asm volatile("lock; cmpxchg8b %1"
: "=A"(original), "+m"(*value)
: "b"((long int)desired), "c"((long int)(desired >> 32)), "0"(expected));
return original;
#endif
#endif
}
long int compareAndSwapOld(volatile long int *value, long int expected, long int desired) {
#if defined COMPILER_MSVC
return _InterlockedCompareExchange(value, desired, expected);
#elif defined COMPILER_GCC
#if CPU_X64
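// Note: this path assumes an LP64 target, where long int is 64 bits and
// cmpxchgq matches the operand size; on LLP64 targets (e.g. MinGW-w64),
// long int stays 32 bits and cmpxchgl would be needed instead. The same
// caveat applies to the xaddq path in fetchAdd below.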
long int original;
asm volatile("lock; cmpxchgq %2, %1"
: "=a"(original), "+m"(*value)
: "q"(desired), "0"(expected));
return original;
#else
long int original;
asm volatile("lock; cmpxchgl %2, %1"
: "=a"(original), "+m"(*value)
: "q"(desired), "0"(expected));
return original;
#endif
#endif
}
long long int fetchAdd64(volatile long long int *value, long long int operand) {
#if defined COMPILER_MSVC
#if CPU_X64 || TARGET_XBOX_360
return _InterlockedExchangeAdd64(value, operand);
#else
auto expected = *value;
for (;;)
{
auto original = _InterlockedCompareExchange64(value, expected + operand, expected);
if (original == expected)
return original;
expected = original;
}
#endif
#elif defined COMPILER_GCC
#if CPU_X64
long long int original;
asm volatile("lock; xaddq %0, %1"
: "=r"(original), "+m"(*value)
: "0"(operand));
return original;
#else
// This implementation generates an unnecessary cmp instruction after the cmpxchg8b.
// Could be optimized further using inline assembly.
for (;;)
{
long long original = *value;
if (compareAndSwap64(value, original, original + operand)) {
return original;
}
}
#endif
#endif
}
long int fetchAdd(volatile long int *value, long int operand) {
#if defined COMPILER_MSVC
return _InterlockedExchangeAdd(value, operand);
#elif defined COMPILER_GCC
long int original;
#if CPU_X64
asm volatile("lock; xaddq %0, %1"
: "=r"(original), "+m"(*value)
: "0"(operand));
return original;
#else
asm volatile("lock; xaddl %0, %1"
: "=r"(original), "+m"(*value)
: "0"(operand));
return original;
#endif
#endif
}
using namespace std;
template <typename ValueType>
struct HT {
struct E {
unsigned long long int Key;
long int Value;
E() {
Key = 0LL;
Value = 0;
}
};
std::vector <ValueType> _values;
std::vector <E> _table;
long int _valueIndex;
unsigned int _mask;
HT(unsigned int N) {
// Value indexing starts from 1 because index 0 is reserved to mean "no value".
_valueIndex = 1;
unsigned int n = N;
while (n & (n + 1)) {
n |= (n + 1);
}
// Grow the table further so collisions stay sparse: e.g. 65k elements
// end up with roughly 256k buckets, so about 90% of lookups hit on the
// first probed bucket and about 99% by the next one.
n |= (n + 1);
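// Worked example: N = 1000 -> the loop rounds n up to 1023 (2^10 - 1),
// and the line above doubles that to 2047, giving 2048 buckets and
// _mask = 2047.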
_table.resize(size_t(n) + 1);
// Slot 0 of _values is reserved: a value index of 0 means the key has no value.
_values.resize(N + 1);
_mask = n;
}
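// This is the 64-bit finalizer from MurmurHash3 (fmix64): it avalanches
// the key so every input bit affects every output bit, keeping
// linear-probe runs short even for sequential keys.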
inline static unsigned long int hash(unsigned long long int h) {
h ^= h >> 33;
h *= 0xff51afd7ed558ccdULL;
h ^= h >> 33;
h *= 0xc4ceb9fe1a85ec53ULL;
h ^= h >> 33;
return static_cast<unsigned long int>(h);
}
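// Lookup via linear probing: Key == 0 marks a never-used bucket and ends
// the search; a matching key with a non-positive value index means the
// entry was deleted, so it is reported as absent.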
ValueType *operator[](unsigned long long int key) {
auto h = hash(key);
// The table must never be completely full,
// otherwise this probe loop would not terminate.
while (true) {
h = h & _mask;
if (_table[h].Key == 0) {
return nullptr;
}
else if (_table[h].Key == key) {
volatile auto vindex = _table[h].Value;
if (vindex > 0) {
return &_values[static_cast<unsigned int>(vindex)];
}
return nullptr;
}
h++;
}
}
bool contains(unsigned long long int key) {
auto h = hash(key);
// The table must never be completely full,
// otherwise this probe loop would not terminate.
while (true) {
h = h & _mask;
if (_table[h].Key == 0) {
return false;
}
else if (_table[h].Key == key && _table[h].Value > 0) {
return true;
}
h++;
}
}
void insert(unsigned long long int key, ValueType const &value) {
auto h = hash(key);
while (true) {
// The table must never be completely full,
// otherwise this probe loop would not terminate.
h = h & _mask;
// Make sure that we write the key into an empty slot atomically.
auto kv = static_cast<unsigned long long int>(compareAndSwap64Old(
reinterpret_cast<volatile long long int*>(&_table[h].Key), 0LL, static_cast<long long int>(key)));
if (kv == 0) {
// This code is guaranteed to be executed only once for each bucket.
auto volatile vindex = fetchAdd(reinterpret_cast<volatile long int*>(&_valueIndex), 1);
store(reinterpret_cast<volatile long int*>(&_table[h].Value), vindex);
}
// We must be very careful here because another thread might concurrently:
// 1. insert (and overwrite) the same key, or
// 2. delete the key.
// If another thread inserted this key first, its vindex is already set,
// so we skip allocating a new value slot and only overwrite the value.
// If a concurrent delete negated the index while we are here, we simply
// undelete by making vindex positive again. Races between inserts and
// deletes on the same key can still interleave, which is expected here.
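// Example interleaving: thread A claims the slot (kv == 0) and publishes
// a positive vindex; thread B concurrently calls remove(key) and negates
// Value; A then sees vindex < 0 below and restores the positive index
// before writing _values[vindex].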
if (kv == 0LL || kv == key) {
auto volatile vindex = static_cast<long int>(_table[h].Value);
if (vindex < 0) {
vindex = -vindex;
// Deleted values are undeleted
store(reinterpret_cast<volatile long int*>(&_table[h].Value), vindex);
}
_values[static_cast<unsigned int>(vindex)] = value;
return;
}
h++;
}
}
void remove(unsigned long long int key) {
auto h = hash(key);
while (true) {
h = h & _mask;
// We didn't find the element.
if (_table[h].Key == 0) {
return;
}
// If the key matches we set the state to
// deleted by making the index negative
else if (_table[h].Key == key) {
auto volatile vindex = _table[h].Value;
if (vindex > 0) {
vindex = -vindex;
store(reinterpret_cast<volatile long int*>(&_table[h].Value), vindex);
}
return;
}
h++;
}
}
};
int main() {
FILE *out = fopen("hashuri.out", "w");
FILE *in = fopen("hashuri.in", "r");
int n;
fscanf(in, "%d", &n);
HT<bool> ht(n);
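// Each of the n lines holds "op c": op 1 inserts c, op 2 erases c, and
// op 3 prints 1 if c is present and 0 otherwise (infoarena "hashuri"
// format, judging by the file names).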
for (int i = 0; i < n; i++) {
unsigned long long int op, c;
fscanf(in, "%llu%llu", &op, &c);
if (op == 1) {
ht.insert(c, true);
}
else if (op == 2) {
ht.remove(c);
}
else if (op == 3) {
if (ht.contains(c))
fprintf(out, "1\n");
else
fprintf(out, "0\n");
}
}
return 0;
}