diff --git a/.gitignore b/.gitignore index 8c42b0c2b..9623924d5 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ $tf*/ nuget/ packages/ *.ide/ +.vs/ fdb_c.dll diff --git a/FdbBurner/Program.cs b/FdbBurner/Program.cs index 9780e5abc..8e898300f 100644 --- a/FdbBurner/Program.cs +++ b/FdbBurner/Program.cs @@ -102,7 +102,7 @@ private static async Task BurnerThread(IFdbDatabase db, CancellationToken ct) ? rnd.Next() : pos + i; - tr.Set(folder.Pack(x, Suffix), Value); + tr.Set(folder.Keys.Encode(x, Suffix), Value); Interlocked.Increment(ref Keys); } pos += N; diff --git a/FdbShell/Commands/BasicCommands.cs b/FdbShell/Commands/BasicCommands.cs index f1478ab4c..ecbeac4ab 100644 --- a/FdbShell/Commands/BasicCommands.cs +++ b/FdbShell/Commands/BasicCommands.cs @@ -68,17 +68,17 @@ public static async Task Dir(string[] path, IFdbTuple extras, DirectoryBrowseOpt { if (!(subfolder is FdbDirectoryPartition)) { - long count = await Fdb.System.EstimateCountAsync(db, subfolder.ToRange(), ct); - log.WriteLine(" {0,-12} {1,-12} {3,9:N0} {2}", FdbKey.Dump(subfolder.Copy().Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name, count); + long count = await Fdb.System.EstimateCountAsync(db, subfolder.Keys.ToRange(), ct); + log.WriteLine(" {0,-12} {1,-12} {3,9:N0} {2}", FdbKey.Dump(FdbSubspace.Copy(subfolder).Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name, count); } else { - log.WriteLine(" {0,-12} {1,-12} {3,9} {2}", FdbKey.Dump(subfolder.Copy().Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name, "-"); + log.WriteLine(" {0,-12} {1,-12} {3,9} {2}", FdbKey.Dump(FdbSubspace.Copy(subfolder).Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name, "-"); } } else { - log.WriteLine(" {0,-12} {1,-12} {2}", FdbKey.Dump(subfolder.Copy().Key), subfolder.Layer.IsNullOrEmpty ? 
"-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name); + log.WriteLine(" {0,-12} {1,-12} {2}", FdbKey.Dump(FdbSubspace.Copy(subfolder).Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name); } } else @@ -115,7 +115,7 @@ public static async Task CreateDirectory(string[] path, IFdbTuple extras, IFdbDa log.WriteLine("- Created under {0} [{1}]", FdbKey.Dump(folder.Key), folder.Key.ToHexaString(' ')); // look if there is already stuff under there - var stuff = await db.ReadAsync((tr) => tr.GetRange(folder.ToRange()).FirstOrDefaultAsync(), cancellationToken: ct); + var stuff = await db.ReadAsync((tr) => tr.GetRange(folder.Keys.ToRange()).FirstOrDefaultAsync(), cancellationToken: ct); if (stuff.Key.IsPresent) { log.WriteLine("CAUTION: There is already some data under {0} !"); @@ -216,7 +216,7 @@ public static async Task Count(string[] path, IFdbTuple extras, IFdbDatabase db, return; } - var copy = folder.Copy(); + var copy = FdbSubspace.Copy(folder); log.WriteLine("# Counting keys under {0} ...", FdbKey.Dump(copy.Key)); var progress = new Progress>((state) => @@ -245,7 +245,7 @@ public static async Task Show(string[] path, IFdbTuple extras, bool reverse, IFd log.WriteLine("# Content of {0} [{1}]", FdbKey.Dump(folder.Key), folder.Key.ToHexaString(' ')); var keys = await db.QueryAsync((tr) => { - var query = tr.GetRange(folder.ToRange()); + var query = tr.GetRange(folder.Keys.ToRange()); return reverse ? 
query.Reverse().Take(count) : query.Take(count + 1); @@ -255,7 +255,7 @@ public static async Task Show(string[] path, IFdbTuple extras, bool reverse, IFd if (reverse) keys.Reverse(); foreach (var key in keys.Take(count)) { - log.WriteLine("...{0} = {1}", FdbKey.Dump(folder.Extract(key.Key)), key.Value.ToAsciiOrHexaString()); + log.WriteLine("...{0} = {1}", FdbKey.Dump(folder.ExtractKey(key.Key)), key.Value.ToAsciiOrHexaString()); } if (!reverse && keys.Count == count + 1) { @@ -329,7 +329,7 @@ public static async Task Map(string[] path, IFdbTuple extras, IFdbDatabase db, T return; } - var span = folder.DirectoryLayer.ContentSubspace.ToRange(); + var span = folder.DirectoryLayer.ContentSubspace.Keys.ToRange(); // note: this may break in future versions of the DL! Maybe we need a custom API to get a flat list of all directories in a DL that span a specific range ? @@ -554,7 +554,7 @@ public static async Task Shards(string[] path, IFdbTuple extras, IFdbDatabase db var folder = (await TryOpenCurrentDirectoryAsync(path, db, ct)) as FdbDirectorySubspace; if (folder != null) { - var r = FdbKeyRange.StartsWith(folder.Copy().Key); + var r = FdbKeyRange.StartsWith(FdbSubspace.Copy(folder).Key); Console.WriteLine("Searching for shards that intersect with /{0} ...", String.Join("/", path)); ranges = await Fdb.System.GetChunksAsync(db, r, ct); Console.WriteLine("Found {0} ranges intersecting {1}:", ranges.Count, r); @@ -589,7 +589,7 @@ public static async Task Sampling(string[] path, IFdbTuple extras, IFdbDatabase FdbKeyRange span; if (folder is FdbDirectorySubspace) { - span = FdbKeyRange.StartsWith((folder as FdbDirectorySubspace).Copy()); + span = FdbKeyRange.StartsWith(FdbSubspace.Copy(folder as FdbDirectorySubspace).Key); log.WriteLine("Reading list of shards for /{0} under {1} ...", String.Join("/", path), FdbKey.Dump(span.Begin)); } else diff --git a/FdbShell/Program.cs b/FdbShell/Program.cs index 09d7e32f1..d4165a0c7 100644 --- a/FdbShell/Program.cs +++ 
b/FdbShell/Program.cs @@ -305,7 +305,7 @@ private static async Task MainAsync(string[] args, CancellationToken cancel) var tokens = s.Trim().Split(new [] { ' ' }, StringSplitOptions.RemoveEmptyEntries); string cmd = tokens.Length > 0 ? tokens[0] : String.Empty; string prm = tokens.Length > 1 ? tokens[1] : String.Empty; - var extras = tokens.Length > 2 ? FdbTuple.CreateRange(tokens.Skip(2)) : FdbTuple.Empty; + var extras = tokens.Length > 2 ? FdbTuple.FromEnumerable(tokens.Skip(2)) : FdbTuple.Empty; var trimmedCommand = cmd.Trim().ToLowerInvariant(); switch (trimmedCommand) diff --git a/FoundationDB.Client.sln b/FoundationDB.Client.sln index 5d7dc23e5..54253c327 100644 --- a/FoundationDB.Client.sln +++ b/FoundationDB.Client.sln @@ -56,6 +56,11 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "FdbTop", "FdbTop\FdbTop.csp EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "FdbBurner", "FdbBurner\FdbBurner.csproj", "{60049BA1-A95F-4127-BAC5-74AF023D3082}" EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Documents", "Documents", "{1B6BEB71-8C92-4B06-9715-9DAF49907BA1}" + ProjectSection(SolutionItems) = preProject + Tuples.md = Tuples.md + EndProjectSection +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU diff --git a/FoundationDB.Client/Async/AsyncCancellableMutex.cs b/FoundationDB.Client/Async/AsyncCancellableMutex.cs index 2e4a16990..d73868a14 100644 --- a/FoundationDB.Client/Async/AsyncCancellableMutex.cs +++ b/FoundationDB.Client/Async/AsyncCancellableMutex.cs @@ -83,7 +83,7 @@ public AsyncCancelableMutex(CancellationToken ct) { if (ct.CanBeCanceled) { - m_ctr = ct.Register(s_cancellationCallback, new WeakReference(this), useSynchronizationContext: false); + m_ctr = ct.RegisterWithoutEC(s_cancellationCallback, new WeakReference(this)); } GC.SuppressFinalize(this); } @@ -130,12 +130,12 @@ public bool Abort(bool async = false) private static void 
SetDefered(AsyncCancelableMutex mutex) { - ThreadPool.QueueUserWorkItem((state) => ((AsyncCancelableMutex)state).TrySetResult(null), mutex); + ThreadPool.UnsafeQueueUserWorkItem((state) => ((AsyncCancelableMutex)state).TrySetResult(null), mutex); } private static void CancelDefered(AsyncCancelableMutex mutex) { - ThreadPool.QueueUserWorkItem((state) => ((AsyncCancelableMutex)state).TrySetCanceled(), mutex); + ThreadPool.UnsafeQueueUserWorkItem((state) => ((AsyncCancelableMutex)state).TrySetCanceled(), mutex); } } diff --git a/FoundationDB.Client/Async/TaskHelpers.cs b/FoundationDB.Client/Async/TaskHelpers.cs index 9334e5e1b..9fdc4635d 100644 --- a/FoundationDB.Client/Async/TaskHelpers.cs +++ b/FoundationDB.Client/Async/TaskHelpers.cs @@ -400,6 +400,36 @@ public static void Observe(Task task) } } + private delegate CancellationTokenRegistration RegisterWithoutECDelegate(ref CancellationToken ct, Action callback, object state); + private static readonly RegisterWithoutECDelegate RegisterWithoutECHandler = GetRegisterWithoutECDelegate(); + + [NotNull] + private static RegisterWithoutECDelegate GetRegisterWithoutECDelegate() + { + try + { + // CancellationToken.Register(..., useExecutionContext) is "private", and all the public version of Register pass true, which does costly allocations (capturing context, ...) + // There is however CancellationToken.InternalRegisterWithoutEC which is internal and pass false. + // => we will attempt to create a delegate to call the internal method - if possible - or fallback to the default version of Register, if this is not possible. 
+ var method = typeof(CancellationToken).GetMethod("InternalRegisterWithoutEC", System.Reflection.BindingFlags.Instance | System.Reflection.BindingFlags.NonPublic, null, new[] { typeof(Action), typeof(object) }, null); + if (method != null) + { + return (RegisterWithoutECDelegate)Delegate.CreateDelegate(typeof(RegisterWithoutECDelegate), null, method); + } + } + catch + { } + + return (ref CancellationToken token, Action callback, object state) => token.Register(callback, state); + } + + /// Version of CancellationToken.Register() that does not propagate the current ExecutionContext to the callback (faster, but unsafe!) + /// This should only be used with callbacks that do not execute user-provided code! + internal static CancellationTokenRegistration RegisterWithoutEC(this CancellationToken ct, [NotNull] Action callback, object state) + { + return RegisterWithoutECHandler(ref ct, callback, state); + } + /// Safely cancel a CancellationTokenSource /// CancellationTokenSource that needs to be cancelled public static void SafeCancel(this CancellationTokenSource source) diff --git a/FoundationDB.Client/Core/IFdbTransactionHandler.cs b/FoundationDB.Client/Core/IFdbTransactionHandler.cs index 49280b240..d26d0682b 100644 --- a/FoundationDB.Client/Core/IFdbTransactionHandler.cs +++ b/FoundationDB.Client/Core/IFdbTransactionHandler.cs @@ -51,7 +51,8 @@ public interface IFdbTransactionHandler : IDisposable void SetOption(FdbTransactionOption option, Slice data); /// Returns this transaction snapshot read version. - Task GetReadVersionAsync(CancellationToken cancellationToken); + /// Token used to cancel the operation from the outside, if different than the cancellation token of the transaction itself + Task GetReadVersionAsync(CancellationToken cancellationToken = default(CancellationToken)); /// Retrieves the database version number at which a given transaction was committed. 
/// CommitAsync() must have been called on this transaction and the resulting task must have completed successfully before this function is callged, or the behavior is undefined. @@ -71,30 +72,30 @@ public interface IFdbTransactionHandler : IDisposable /// Reads a get from the database /// Key to read /// Set to true for snapshot reads - /// + /// Token used to cancel the operation from the outside, if different than the cancellation token of the transaction itself /// - Task GetAsync(Slice key, bool snapshot, CancellationToken cancellationToken); + Task GetAsync(Slice key, bool snapshot, CancellationToken cancellationToken = default(CancellationToken)); /// Reads several values from the database snapshot represented by the current transaction /// Keys to be looked up in the database /// Set to true for snapshot reads - /// Token used to cancel the operation from the outside + /// Token used to cancel the operation from the outside, if different than the cancellation token of the transaction itself /// Task that will return an array of values, or an exception. Each item in the array will contain the value of the key at the same index in , or Slice.Nil if that key does not exist. - Task GetValuesAsync([NotNull] Slice[] keys, bool snapshot, CancellationToken cancellationToken); + Task GetValuesAsync([NotNull] Slice[] keys, bool snapshot, CancellationToken cancellationToken = default(CancellationToken)); /// Resolves a key selector against the keys in the database snapshot represented by the current transaction. 
/// Key selector to resolve /// Set to true for snapshot reads - /// Token used to cancel the operation from the outside + /// Token used to cancel the operation from the outside, if different than the cancellation token of the transaction itself /// Task that will return the key matching the selector, or an exception - Task GetKeyAsync(FdbKeySelector selector, bool snapshot, CancellationToken cancellationToken); + Task GetKeyAsync(FdbKeySelector selector, bool snapshot, CancellationToken cancellationToken = default(CancellationToken)); /// Resolves several key selectors against the keys in the database snapshot represented by the current transaction. /// Key selectors to resolve /// Set to true for snapshot reads - /// Token used to cancel the operation from the outside + /// Token used to cancel the operation from the outside, if different than the cancellation token of the transaction itself /// Task that will return an array of keys matching the selectors, or an exception - Task GetKeysAsync([NotNull] FdbKeySelector[] selectors, bool snapshot, CancellationToken cancellationToken); + Task GetKeysAsync([NotNull] FdbKeySelector[] selectors, bool snapshot, CancellationToken cancellationToken = default(CancellationToken)); /// Reads all key-value pairs in the database snapshot represented by transaction (potentially limited by Limit, TargetBytes, or Mode) which have a key lexicographically greater than or equal to the key resolved by the begin key selector and lexicographically less than the key resolved by the end key selector. /// key selector defining the beginning of the range @@ -102,15 +103,15 @@ public interface IFdbTransactionHandler : IDisposable /// Optionnal query options (Limit, TargetBytes, Mode, Reverse, ...) /// If streaming mode is FdbStreamingMode.Iterator, this parameter should start at 1 and be incremented by 1 for each successive call while reading this range. In all other cases it is ignored. 
/// Set to true for snapshot reads - /// Token used to cancel the operation from the outside + /// Token used to cancel the operation from the outside, if different than the cancellation token of the transaction itself /// - Task GetRangeAsync(FdbKeySelector beginInclusive, FdbKeySelector endExclusive, [NotNull] FdbRangeOptions options, int iteration, bool snapshot, CancellationToken cancellationToken); + Task GetRangeAsync(FdbKeySelector beginInclusive, FdbKeySelector endExclusive, [NotNull] FdbRangeOptions options, int iteration, bool snapshot, CancellationToken cancellationToken = default(CancellationToken)); /// Returns a list of public network addresses as strings, one for each of the storage servers responsible for storing and its associated value /// Name of the key whose location is to be queried. - /// Token used to cancel the operation from the outside + /// Token used to cancel the operation from the outside, if different than the cancellation token of the transaction itself /// Task that will return an array of strings, or an exception - Task GetAddressesForKeyAsync(Slice key, CancellationToken cancellationToken); + Task GetAddressesForKeyAsync(Slice key, CancellationToken cancellationToken = default(CancellationToken)); /// Modify the database snapshot represented by transaction to change the given key to have the given value. If the given key was not previously present in the database it is inserted. /// The modification affects the actual database only if transaction is later committed with CommitAsync(). @@ -153,19 +154,19 @@ public interface IFdbTransactionHandler : IDisposable /// The commit may or may not succeed – in particular, if a conflicting transaction previously committed, then the commit must fail in order to preserve transactional isolation. /// If the commit does succeed, the transaction is durably committed to the database and all subsequently started transactions will observe its effects. 
/// - /// Token used to cancel the operation from the outside + /// Token used to cancel the operation from the outside, if different than the cancellation token of the transaction itself /// Task that succeeds if the transaction was comitted successfully, or fails if the transaction failed to commit. /// As with other client/server databases, in some failure scenarios a client may be unable to determine whether a transaction succeeded. In these cases, CommitAsync() will throw CommitUnknownResult error. The OnErrorAsync() function treats this error as retryable, so retry loops that don’t check for CommitUnknownResult could execute the transaction twice. In these cases, you must consider the idempotence of the transaction. - Task CommitAsync(CancellationToken cancellationToken); + Task CommitAsync(CancellationToken cancellationToken = default(CancellationToken)); /// Implements the recommended retry and backoff behavior for a transaction. /// This function knows which of the error codes generated by other query functions represent temporary error conditions and which represent application errors that should be handled by the application. /// It also implements an exponential backoff strategy to avoid swamping the database cluster with excessive retries when there is a high level of conflict between transactions. /// /// FdbError code thrown by the previous command - /// Token used to cancel the operation from the outside + /// Token used to cancel the operation from the outside, if different than the cancellation token of the transaction itself /// Returns a task that completes if the operation can be safely retried, or that rethrows the original exception if the operation is not retryable. - Task OnErrorAsync(FdbError code, CancellationToken cancellationToken); + Task OnErrorAsync(FdbError code, CancellationToken cancellationToken = default(CancellationToken)); /// Reset transaction to its initial state. 
/// This is similar to disposing the transaction and recreating a new one. The only state that persists through a transaction reset is that which is related to the backoff logic used by OnErrorAsync() diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs deleted file mode 100644 index 8151f2ba6..000000000 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs +++ /dev/null @@ -1,157 +0,0 @@ -#region BSD Licence -/* Copyright (c) 2013, Doxense SARL -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Doxense nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#endregion - -namespace FoundationDB.Client -{ - using FoundationDB.Layers.Tuples; - using JetBrains.Annotations; - using System; - using System.Collections.Generic; - using System.Linq; - using System.Threading.Tasks; - - public class FdbEncoderSubspace : FdbSubspace, IKeyEncoder - { - protected readonly FdbSubspace m_parent; - protected readonly IKeyEncoder m_encoder; - - public FdbEncoderSubspace([NotNull] FdbSubspace subspace, [NotNull] IKeyEncoder encoder) - : base(subspace) - { - if (subspace == null) throw new ArgumentNullException("subspace"); - if (encoder == null) throw new ArgumentNullException("encoder"); - m_parent = subspace; - m_encoder = encoder; - } - - public IKeyEncoder Encoder - { - [NotNull] - get { return m_encoder; } - } - - #region Transaction Helpers... - - public void Set([NotNull] IFdbTransaction trans, T key, Slice value) - { - trans.Set(EncodeKey(key), value); - } - - public void Clear([NotNull] IFdbTransaction trans, T key) - { - trans.Clear(EncodeKey(key)); - } - - public Task GetAsync([NotNull] IFdbReadOnlyTransaction trans, T key) - { - return trans.GetAsync(EncodeKey(key)); - } - - public Task GetValuesAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] T[] keys) - { - return trans.GetValuesAsync(EncodeKeyRange(keys)); - } - - public Task GetValuesAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IEnumerable keys) - { - return trans.GetValuesAsync(EncodeKeyRange(keys)); - } - - #endregion - - #region Key Encoding/Decoding... 
- - public Slice EncodeKey(T key) - { - return this.Key + m_encoder.EncodeKey(key); - } - - [NotNull] - public Slice[] EncodeKeyRange([NotNull] T[] keys) - { - return FdbKey.Merge(this.Key, m_encoder.EncodeRange(keys)); - } - - [NotNull] - public Slice[] EncodeKeyRange([NotNull] TElement[] elements, Func selector) - { - return FdbKey.Merge(this.Key, m_encoder.EncodeRange(elements, selector)); - } - - [NotNull] - public Slice[] EncodeKeyRange([NotNull] IEnumerable keys) - { - return FdbKey.Merge(this.Key, m_encoder.EncodeRange(keys)); - } - - public T DecodeKey(Slice encoded) - { - return m_encoder.DecodeKey(this.ExtractAndCheck(encoded)); - } - - [NotNull] - public T[] DecodeKeyRange([NotNull] Slice[] encoded) - { - var extracted = new Slice[encoded.Length]; - for (int i = 0; i < encoded.Length; i++) - { - extracted[i] = ExtractAndCheck(encoded[i]); - } - return m_encoder.DecodeRange(extracted); - } - - [NotNull] - public IEnumerable DecodeKeys([NotNull] IEnumerable source) - { - return source.Select(key => m_encoder.DecodeKey(key)); - } - - public virtual FdbKeyRange ToRange(T key) - { - return FdbTuple.ToRange(EncodeKey(key)); - } - - [NotNull] - public FdbKeyRange[] ToRange([NotNull] T[] keys) - { - var packed = EncodeKeyRange(keys); - - var ranges = new FdbKeyRange[keys.Length]; - for (int i = 0; i < ranges.Length; i++) - { - ranges[i] = FdbTuple.ToRange(packed[i]); - } - return ranges; - } - - #endregion - - - } - -} diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs deleted file mode 100644 index 2fe4a5943..000000000 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs +++ /dev/null @@ -1,145 +0,0 @@ -#region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Doxense nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#endregion - -namespace FoundationDB.Client -{ - using FoundationDB.Layers.Tuples; - using JetBrains.Annotations; - using System; - using System.Threading.Tasks; - - public class FdbEncoderSubspace : FdbSubspace, ICompositeKeyEncoder - { - protected readonly FdbSubspace m_parent; - protected readonly ICompositeKeyEncoder m_encoder; - protected volatile FdbEncoderSubspace m_head; - - public FdbEncoderSubspace([NotNull] FdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) - : base(subspace) - { - if (subspace == null) throw new ArgumentNullException("subspace"); - if (encoder == null) throw new ArgumentNullException("encoder"); - m_parent = subspace; - m_encoder = encoder; - } - - /// Gets the key encoder - public ICompositeKeyEncoder Encoder - { - [NotNull] - get { return m_encoder; } - } - - /// Returns a partial encoder for (T1,) - public FdbEncoderSubspace Partial - { - [NotNull] - get { return m_head ?? (m_head = new FdbEncoderSubspace(m_parent, KeyValueEncoders.Head(m_encoder))); } - } - - #region Transaction Helpers... - - public void Set([NotNull] IFdbTransaction trans, T1 key1, T2 key2, Slice value) - { - trans.Set(EncodeKey(key1, key2), value); - } - - public void Set([NotNull] IFdbTransaction trans, FdbTuple key, Slice value) - { - trans.Set(EncodeKey(key), value); - } - - public void Clear([NotNull] IFdbTransaction trans, T1 key1, T2 key2) - { - trans.Clear(EncodeKey(key1, key2)); - } - - public void Clear([NotNull] IFdbTransaction trans, FdbTuple key) - { - trans.Clear(EncodeKey(key)); - } - - public Task GetAsync([NotNull] IFdbReadOnlyTransaction trans, T1 key1, T2 key2) - { - return trans.GetAsync(EncodeKey(key1, key2)); - } - - #endregion - - #region Key Encoding/Decoding... 
- - public virtual Slice EncodeKey(FdbTuple key) - { - return this.Key + m_encoder.EncodeKey(key); - } - - public virtual Slice EncodeKey(T1 key1, T2 key2) - { - return this.Key + m_encoder.EncodeKey(key1, key2); - } - - public virtual Slice EncodeKey(T1 key1) - { - return this.Key + m_encoder.EncodeComposite(FdbTuple.Create(key1, default(T2)), 1); - } - - Slice ICompositeKeyEncoder>.EncodeComposite(FdbTuple key, int items) - { - return this.Key + m_encoder.EncodeComposite(key, items); - } - - public virtual FdbTuple DecodeKey(Slice encoded) - { - return m_encoder.DecodeKey(this.ExtractAndCheck(encoded)); - } - - FdbTuple ICompositeKeyEncoder>.DecodeComposite(Slice encoded, int items) - { - return m_encoder.DecodeComposite(this.ExtractAndCheck(encoded), items); - } - - public virtual FdbKeyRange ToRange(FdbTuple key) - { - return FdbTuple.ToRange(this.EncodeKey(key)); - } - - public virtual FdbKeyRange ToRange(T1 key1, T2 key2) - { - return FdbTuple.ToRange(this.EncodeKey(key1, key2)); - } - - public virtual FdbKeyRange ToRange(T1 key1) - { - return FdbTuple.ToRange(this.EncodeKey(key1)); - } - - #endregion - - } - -} diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs deleted file mode 100644 index 0f6f1b080..000000000 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs +++ /dev/null @@ -1,135 +0,0 @@ -#region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. 
- * Neither the name of Doxense nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#endregion - -namespace FoundationDB.Client -{ - using FoundationDB.Layers.Tuples; - using JetBrains.Annotations; - using System; - using System.Threading.Tasks; - - public class FdbEncoderSubspace : FdbSubspace, ICompositeKeyEncoder - { - protected readonly FdbSubspace m_parent; - protected readonly ICompositeKeyEncoder m_encoder; - protected volatile FdbEncoderSubspace m_head; - protected volatile FdbEncoderSubspace m_partial; - - public FdbEncoderSubspace([NotNull] FdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) - : base(subspace) - { - if (subspace == null) throw new ArgumentNullException("subspace"); - if (encoder == null) throw new ArgumentNullException("encoder"); - m_parent = subspace; - m_encoder = encoder; - } - - public ICompositeKeyEncoder Encoder - { - [NotNull] - get { return m_encoder; } - } - - public FdbEncoderSubspace Head - { - [NotNull] - get { return m_head ?? (m_head = new FdbEncoderSubspace(m_parent, KeyValueEncoders.Head(m_encoder))); } - } - - public FdbEncoderSubspace Partial - { - [NotNull] - get { return m_partial ?? 
(m_partial = new FdbEncoderSubspace(m_parent, KeyValueEncoders.Pair(m_encoder))); } - } - - #region Transaction Helpers... - - public void Set([NotNull] IFdbTransaction trans, T1 key1, T2 key2, T3 key3, Slice value) - { - trans.Set(EncodeKey(key1, key2, key3), value); - } - - public void Set([NotNull] IFdbTransaction trans, FdbTuple key, Slice value) - { - trans.Set(EncodeKey(key), value); - } - - public void Clear([NotNull] IFdbTransaction trans, T1 key1, T2 key2, T3 key3) - { - trans.Clear(EncodeKey(key1, key2, key3)); - } - - public void Clear([NotNull] IFdbTransaction trans, FdbTuple key) - { - trans.Clear(EncodeKey(key)); - } - - public Task GetAsync([NotNull] IFdbReadOnlyTransaction trans, T1 key1, T2 key2, T3 key3) - { - return trans.GetAsync(EncodeKey(key1, key2, key3)); - } - - #endregion - - #region Key Encoding/Decoding... - - public virtual Slice EncodeKey(FdbTuple key) - { - return this.Key + m_encoder.EncodeKey(key); - } - - public virtual Slice EncodeKey(T1 key1, T2 key2, T3 key3) - { - return this.Key + m_encoder.EncodeKey(key1, key2, key3); - } - - Slice ICompositeKeyEncoder>.EncodeComposite(FdbTuple key, int items) - { - return this.Key + m_encoder.EncodeComposite(key, items); - } - - public virtual FdbTuple DecodeKey(Slice encoded) - { - return m_encoder.DecodeKey(this.ExtractAndCheck(encoded)); - } - - FdbTuple ICompositeKeyEncoder>.DecodeComposite(Slice encoded, int items) - { - return m_encoder.DecodeComposite(this.ExtractAndCheck(encoded), items); - } - - public virtual FdbKeyRange ToRange(T1 key1, T2 key2, T3 key3) - { - return FdbTuple.ToRange(this.EncodeKey(key1, key2, key3)); - } - - #endregion - - } - -} diff --git a/FoundationDB.Client/Fdb.cs b/FoundationDB.Client/Fdb.cs index bef6b2545..266bc7cf6 100644 --- a/FoundationDB.Client/Fdb.cs +++ b/FoundationDB.Client/Fdb.cs @@ -493,7 +493,7 @@ internal static async Task CreateClusterInternalAsync(string cluster /// Task that will return an FdbDatabase, or an exception /// If the token is 
cancelled /// Since connections are not pooled, so this method can be costly and should NOT be called every time you need to read or write from the database. Instead, you should open a database instance at the start of your process, and use it a singleton. - public static Task OpenAsync(FdbSubspace globalSpace, CancellationToken cancellationToken = default(CancellationToken)) + public static Task OpenAsync(IFdbSubspace globalSpace, CancellationToken cancellationToken = default(CancellationToken)) { return OpenAsync(clusterFile: null, dbName: null, globalSpace: globalSpace, cancellationToken: cancellationToken); } @@ -523,13 +523,13 @@ internal static async Task CreateClusterInternalAsync(string cluster /// If is anything other than 'DB' /// If the token is cancelled /// Since connections are not pooled, so this method can be costly and should NOT be called every time you need to read or write from the database. Instead, you should open a database instance at the start of your process, and use it a singleton. 
- public static async Task OpenAsync(string clusterFile, string dbName, FdbSubspace globalSpace, bool readOnly = false, CancellationToken cancellationToken = default(CancellationToken)) + public static Task OpenAsync(string clusterFile, string dbName, IFdbSubspace globalSpace, bool readOnly = false, CancellationToken cancellationToken = default(CancellationToken)) { - return await OpenInternalAsync(clusterFile, dbName, globalSpace, readOnly, cancellationToken); + return OpenInternalAsync(clusterFile, dbName, globalSpace, readOnly, cancellationToken); } /// Create a new database handler instance using the specificied cluster file, database name, global subspace and read only settings - internal static async Task OpenInternalAsync(string clusterFile, string dbName, FdbSubspace globalSpace, bool readOnly, CancellationToken cancellationToken) + internal static async Task OpenInternalAsync(string clusterFile, string dbName, IFdbSubspace globalSpace, bool readOnly, CancellationToken cancellationToken) { cancellationToken.ThrowIfCancellationRequested(); diff --git a/FoundationDB.Client/FdbCluster.cs b/FoundationDB.Client/FdbCluster.cs index 50b1ae30c..c4a633c44 100644 --- a/FoundationDB.Client/FdbCluster.cs +++ b/FoundationDB.Client/FdbCluster.cs @@ -111,7 +111,7 @@ protected virtual void Dispose(bool disposing) /// If is anything other than 'DB' /// If the token is cancelled /// Any attempt to use a key outside the specified subspace will throw an exception - public async Task OpenDatabaseAsync(string databaseName, FdbSubspace subspace, bool readOnly, CancellationToken cancellationToken) + public async Task OpenDatabaseAsync(string databaseName, IFdbSubspace subspace, bool readOnly, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); return await OpenDatabaseInternalAsync(databaseName, subspace, readOnly: readOnly, ownsCluster: false, cancellationToken: cancellationToken).ConfigureAwait(false); @@ -127,7 +127,7 @@ 
public async Task OpenDatabaseAsync(string databaseName, FdbSubspa /// If is anything other than 'DB' /// If the token is cancelled /// As of Beta2, the only supported database name is 'DB' - internal async Task OpenDatabaseInternalAsync(string databaseName, FdbSubspace subspace, bool readOnly, bool ownsCluster, CancellationToken cancellationToken) + internal async Task OpenDatabaseInternalAsync(string databaseName, IFdbSubspace subspace, bool readOnly, bool ownsCluster, CancellationToken cancellationToken) { ThrowIfDisposed(); if (string.IsNullOrEmpty(databaseName)) throw new ArgumentNullException("databaseName"); diff --git a/FoundationDB.Client/FdbDatabase.cs b/FoundationDB.Client/FdbDatabase.cs index 7220a9351..d2d001755 100644 --- a/FoundationDB.Client/FdbDatabase.cs +++ b/FoundationDB.Client/FdbDatabase.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -26,6 +26,8 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY */ #endregion +using FoundationDB.Filters.Logging; + namespace FoundationDB.Client { using FoundationDB.Async; @@ -36,6 +38,7 @@ namespace FoundationDB.Client using JetBrains.Annotations; using System; using System.Collections.Concurrent; + using System.Collections.Generic; using System.Diagnostics; using System.Threading; using System.Threading.Tasks; @@ -76,9 +79,9 @@ public class FdbDatabase : IFdbDatabase, IFdbRetryable /// Global namespace used to prefix ALL keys and subspaces accessible by this database instance (default is empty) /// This is readonly and is set when creating the database instance - private FdbSubspace m_globalSpace; + private IFdbDynamicSubspace m_globalSpace; /// Copy of the namespace, that is exposed to the outside. 
- private FdbSubspace m_globalSpaceCopy; + private IFdbDynamicSubspace m_globalSpaceCopy; /// Default Timeout value for all transactions private int m_defaultTimeout; @@ -104,7 +107,7 @@ public class FdbDatabase : IFdbDatabase, IFdbRetryable /// Root directory of the database instance /// If true, the database instance will only allow read-only transactions /// If true, the cluster instance lifetime is linked with the database instance - protected FdbDatabase(IFdbCluster cluster, IFdbDatabaseHandler handler, string name, FdbSubspace contentSubspace, IFdbDirectory directory, bool readOnly, bool ownsCluster) + protected FdbDatabase(IFdbCluster cluster, IFdbDatabaseHandler handler, string name, IFdbSubspace contentSubspace, IFdbDirectory directory, bool readOnly, bool ownsCluster) { Contract.Requires(cluster != null && handler != null && name != null && contentSubspace != null); @@ -124,7 +127,7 @@ protected FdbDatabase(IFdbCluster cluster, IFdbDatabaseHandler handler, string n /// Root directory of the database instance /// If true, the database instance will only allow read-only transactions /// If true, the cluster instance lifetime is linked with the database instance - public static FdbDatabase Create(IFdbCluster cluster, IFdbDatabaseHandler handler, string name, FdbSubspace contentSubspace, IFdbDirectory directory, bool readOnly, bool ownsCluster) + public static FdbDatabase Create(IFdbCluster cluster, IFdbDatabaseHandler handler, string name, IFdbSubspace contentSubspace, IFdbDirectory directory, bool readOnly, bool ownsCluster) { if (cluster == null) throw new ArgumentNullException("cluster"); if (handler == null) throw new ArgumentNullException("handler"); @@ -451,21 +454,23 @@ Slice IFdbKey.ToFoundationDbKey() /// Change the current global namespace. /// Do NOT call this, unless you know exactly what you are doing ! 
- internal void ChangeRoot(FdbSubspace subspace, IFdbDirectory directory, bool readOnly) + internal void ChangeRoot(IFdbSubspace subspace, IFdbDirectory directory, bool readOnly) { + //REVIEW: rename to "ChangeRootSubspace" ? subspace = subspace ?? FdbSubspace.Empty; lock (this)//TODO: don't use this for locking { m_readOnly = readOnly; - m_globalSpace = subspace; - m_globalSpaceCopy = subspace.Copy(); + m_globalSpace = FdbSubspace.CopyDynamic(subspace, TypeSystem.Tuples); + m_globalSpaceCopy = FdbSubspace.CopyDynamic(subspace, TypeSystem.Tuples); // keep another copy m_directory = directory == null ? null : new FdbDatabasePartition(this, directory); } } /// Returns the global namespace used by this database instance - public FdbSubspace GlobalSpace + public IFdbDynamicSubspace GlobalSpace { + //REVIEW: rename to just "Subspace" ? [NotNull] get { @@ -474,34 +479,6 @@ public FdbSubspace GlobalSpace } } - /// Create a new subspace prefixed by a binary key - /// Suffix of the subspace - /// New subspace with prefix equal to the database's global prefix followed by - public FdbSubspace this[Slice suffix] - { - //REVIEW: return IFdbSusbspace? - get { return suffix.IsNullOrEmpty ? m_globalSpace : m_globalSpaceCopy[suffix]; } - } - - /// Create a new subspace prefixed by a key - /// Key that will packed - /// New subspace with prefix equal to the database's global prefix followed by the packed representation of - public FdbSubspace this[IFdbKey key] - { - //REVIEW: return IFdbSusbspace? - get { return key == null ? 
m_globalSpace : m_globalSpaceCopy[key]; } - } - - IFdbSubspace IFdbSubspace.this[Slice suffix] - { - get { return this[suffix]; } - } - - IFdbSubspace IFdbSubspace.this[IFdbKey key] - { - get { return this[key]; } - } - /// Checks that a key is valid, and is inside the global key space of this database /// /// Key to verify @@ -558,6 +535,100 @@ public bool Contains(Slice key) return key.HasValue && m_globalSpace.Contains(key); } + public Slice BoundCheck(Slice key, bool allowSystemKeys) + { + return m_globalSpace.BoundCheck(key, allowSystemKeys); + } + + Slice IFdbSubspace.ConcatKey(Slice key) + { + return m_globalSpace.ConcatKey(key); + } + + Slice IFdbSubspace.ConcatKey(TKey key) + { + return m_globalSpace.ConcatKey(key); + } + + Slice[] IFdbSubspace.ConcatKeys(IEnumerable keys) + { + return m_globalSpace.ConcatKeys(keys); + } + + Slice[] IFdbSubspace.ConcatKeys(IEnumerable keys) + { + return m_globalSpace.ConcatKeys(keys); + } + + /// Remove the database global subspace prefix from a binary key, or throw if the key is outside of the global subspace. + Slice IFdbSubspace.ExtractKey(Slice key, bool boundCheck) + { + return m_globalSpace.ExtractKey(key, boundCheck); + } + + /// Remove the database global subspace prefix from a binary key, or throw if the key is outside of the global subspace. 
+ Slice[] IFdbSubspace.ExtractKeys(IEnumerable keys, bool boundCheck) + { + return m_globalSpace.ExtractKeys(keys, boundCheck); + } + + SliceWriter IFdbSubspace.GetWriter(int capacity) + { + return m_globalSpace.GetWriter(capacity); + } + + Slice IFdbSubspace.Key + { + get { return m_globalSpace.Key; } + } + + IFdbSubspace IFdbSubspace.this[Slice suffix] + { + get + { + return m_globalSpace[suffix]; + } + } + + IFdbSubspace IFdbSubspace.this[IFdbKey key] + { + get + { + return m_globalSpace[key]; + } + } + + FdbKeyRange IFdbSubspace.ToRange() + { + return m_globalSpace.ToRange(); + } + + FdbKeyRange IFdbSubspace.ToRange(Slice suffix) + { + return m_globalSpace.ToRange(suffix); + } + + FdbKeyRange IFdbSubspace.ToRange(TKey key) + { + return m_globalSpace.ToRange(key); + } + + public FdbDynamicSubspacePartition Partition + { + //REVIEW: should we hide this on the main db? + get { return m_globalSpace.Partition; } + } + + IDynamicKeyEncoder IFdbDynamicSubspace.Encoder + { + get { return m_globalSpace.Encoder; } + } + + public FdbDynamicSubspaceKeys Keys + { + get { return m_globalSpace.Keys; } + } + /// Returns true if the key is inside the system key space (starts with '\xFF') internal static bool IsSystemKey(ref Slice key) { diff --git a/FoundationDB.Client/FdbDatabaseExtensions.cs b/FoundationDB.Client/FdbDatabaseExtensions.cs index fe65d601a..d07b96cdf 100644 --- a/FoundationDB.Client/FdbDatabaseExtensions.cs +++ b/FoundationDB.Client/FdbDatabaseExtensions.cs @@ -194,36 +194,7 @@ internal static void EnsureKeysAreValid(this IFdbDatabase db, Slice[] keys, bool /// public static Slice Extract(this IFdbDatabase db, Slice keyAbsolute) { - return db.GlobalSpace.Extract(keyAbsolute); - } - - #endregion - - #region Unpack... 
- - /// Unpack a key using the current namespace of the database - /// Database instance - /// Key that should fit inside the current namespace of the database - [CanBeNull] - public static IFdbTuple Unpack(this IFdbDatabase db, Slice key) - { - return db.GlobalSpace.Unpack(key); - } - - /// Unpack a key using the current namespace of the database - /// Database instance - /// Key that should fit inside the current namespace of the database - public static T UnpackLast(this IFdbDatabase db, Slice key) - { - return db.GlobalSpace.UnpackLast(key); - } - - /// Unpack a key using the current namespace of the database - /// Database instance - /// Key that should fit inside the current namespace of the database - public static T UnpackSingle(this IFdbDatabase db, Slice key) - { - return db.GlobalSpace.UnpackSingle(key); + return db.GlobalSpace.ExtractKey(keyAbsolute); } #endregion diff --git a/FoundationDB.Client/FdbError.cs b/FoundationDB.Client/FdbError.cs index ea4cd4d9b..c702fcca3 100644 --- a/FoundationDB.Client/FdbError.cs +++ b/FoundationDB.Client/FdbError.cs @@ -117,7 +117,7 @@ public enum FdbError /// An operation was issued while a commit was outstanding UsedDuringCommit = 2017, /// An invalid atomic mutation type was issued - InvalidMutationType = 2048, + InvalidMutationType = 2018, /// Incompatible protocol version IncompatibleProtocolVersion = 2100, /// Transaction too large diff --git a/FoundationDB.Client/FdbKey.cs b/FoundationDB.Client/FdbKey.cs index b5a532ad0..4478cd9f4 100644 --- a/FoundationDB.Client/FdbKey.cs +++ b/FoundationDB.Client/FdbKey.cs @@ -94,6 +94,8 @@ public static Slice[] Merge(Slice prefix, [NotNull] Slice[] keys) if (prefix == null) throw new ArgumentNullException("prefix"); if (keys == null) throw new ArgumentNullException("keys"); + //REVIEW: merge this code with Slice.ConcatRange! 
+ // we can pre-allocate exactly the buffer by computing the total size of all keys int size = keys.Sum(key => key.Count) + keys.Length * prefix.Count; var writer = new SliceWriter(size); @@ -121,6 +123,8 @@ public static Slice[] Merge(Slice prefix, [NotNull] IEnumerable keys) if (prefix == null) throw new ArgumentNullException("prefix"); if (keys == null) throw new ArgumentNullException("keys"); + //REVIEW: merge this code with Slice.ConcatRange! + // use optimized version for arrays var array = keys as Slice[]; if (array != null) return Merge(prefix, array); diff --git a/FoundationDB.Client/FdbOperationContext.cs b/FoundationDB.Client/FdbOperationContext.cs index a1d555f16..973b7ea92 100644 --- a/FoundationDB.Client/FdbOperationContext.cs +++ b/FoundationDB.Client/FdbOperationContext.cs @@ -38,19 +38,22 @@ namespace FoundationDB.Client using System.Threading.Tasks; /// - /// Represents the context of a retryable transactional function wich accept a read-only or read-write transaction. + /// Represents the context of a retryable transactional function which accepts a read-only or read-write transaction. /// [DebuggerDisplay("Retries={Retries}, Committed={Committed}, Elapsed={Duration.Elapsed}")] public sealed class FdbOperationContext : IDisposable { + //REVIEW: maybe we should find a way to reduce the size of this class? (it's already almost at 100 bytes !) + /// The database used by the operation - public IFdbDatabase Database { [NotNull] get; private set; } + public IFdbDatabase Database { [NotNull] get; private set; /*readonly*/ } /// Result of the operation (or null) public object Result { get; set; } + //REVIEW: should we force using a "SetResult()/TrySetResult()" method for this ? /// Cancellation token associated with the operation - public CancellationToken Cancellation { get; internal set; } + public CancellationToken Cancellation { get; private set; /*readonly*/ } /// If set to true, will abort and not commit the transaction. 
If false, will try to commit the transaction (and retry on failure) public bool Abort { get; set; } @@ -61,8 +64,19 @@ public sealed class FdbOperationContext : IDisposable /// Date at wich the operation was first started public DateTime StartedUtc { get; private set; } - /// Time spent since the start of the first attempt - public Stopwatch Duration { [NotNull] get; private set; } + /// Stopwatch that is started at the creation of the transaction, and stopped when it commits or gets disposed + internal Stopwatch Clock { [NotNull] get; private set; /*readonly*/ } + + /// Duration of all the previous attemps before the current one (starts at 0, and gets updated at each reset/retry) + internal TimeSpan BaseDuration { get; private set; } + + /// Time elapsed since the start of the first attempt + public TimeSpan ElapsedTotal { get { return this.Clock.Elapsed; } } + + /// Time elapsed since the start of the current attempt + /// This value is reset to zero every time the transation fails and is retried. + /// Note that this may not represent the actual lifetime of the transaction with the database itself, which starts at the first read operation. 
+ public TimeSpan Elapsed { get { return this.Clock.Elapsed.Subtract(this.BaseDuration); } } /// If true, the transaction has been committed successfully public bool Committed { get; private set; } @@ -71,22 +85,28 @@ public sealed class FdbOperationContext : IDisposable internal bool Shared { get { return (this.Mode & FdbTransactionMode.InsideRetryLoop) != 0; } } /// Mode of the transaction - public FdbTransactionMode Mode { get; private set; } + public FdbTransactionMode Mode { get; private set; /*readonly*/ } /// Internal source of cancellation, able to abort any pending IO operations attached to this transaction - internal CancellationTokenSource TokenSource { get; private set; } + internal CancellationTokenSource TokenSource { [CanBeNull] get; private set; /*readonly*/ } + /// Create a new retry loop operation context + /// Database that will be used by the retry loop + /// Operation mode of the retry loop + /// Optional cancellation token that will abort the retry loop if triggered. public FdbOperationContext([NotNull] IFdbDatabase db, FdbTransactionMode mode, CancellationToken cancellationToken) { if (db == null) throw new ArgumentNullException("db"); this.Database = db; this.Mode = mode; - this.Duration = new Stopwatch(); + this.Clock = new Stopwatch(); + // note: we don't start the clock yet, only when the context starts executing... - // by default, we hook ourselves on the db's CancellationToken + // by default, we hook ourselves to the db's CancellationToken, but we may need to also + // hook with a different, caller-provided, token and respond to cancellation from both sites. 
var token = db.Cancellation; - if (cancellationToken.CanBeCanceled && cancellationToken != token) + if (cancellationToken.CanBeCanceled && !cancellationToken.Equals(token)) { this.TokenSource = CancellationTokenSource.CreateLinkedTokenSource(token, cancellationToken); token = this.TokenSource.Token; @@ -94,6 +114,7 @@ public FdbOperationContext([NotNull] IFdbDatabase db, FdbTransactionMode mode, C this.Cancellation = token; } + /// Execute a retry loop on this context internal static async Task ExecuteInternal([NotNull] IFdbDatabase db, [NotNull] FdbOperationContext context, [NotNull] Delegate handler, Delegate onDone) { Contract.Requires(db != null && context != null && handler != null); @@ -103,10 +124,15 @@ internal static async Task ExecuteInternal([NotNull] IFdbDatabase db, [NotNull] try { + // make sure to reset everything (in case a context is reused multiple times) context.Committed = false; context.Retries = 0; + context.BaseDuration = TimeSpan.Zero; context.StartedUtc = DateTime.UtcNow; - context.Duration.Start(); + context.Clock.Start(); + //note: we start the clock immediately, but the transaction's 5 seconde max lifetime is actually measured from the first read operation (Get, GetRange, GetReadVersion, etc...) + // => algorithms that monitor the elapsed duration to rate limit themselves may think that the trans is older than it really is... + // => we would need to plug into the transaction handler itself to be notified when exactly a read op starts... using (var trans = db.BeginTransaction(context.Mode, CancellationToken.None, context)) { @@ -172,6 +198,7 @@ internal static async Task ExecuteInternal([NotNull] IFdbDatabase db, [NotNull] } catch (FdbException x) { + //TODO: will be able to await in catch block in C# 6 ! 
e = x; } @@ -182,9 +209,12 @@ internal static async Task ExecuteInternal([NotNull] IFdbDatabase db, [NotNull] if (Logging.On && Logging.IsVerbose) Logging.Verbose(String.Format(CultureInfo.InvariantCulture, "fdb: transaction {0} can be safely retried", trans.Id)); } - if (context.Duration.Elapsed.TotalSeconds >= 1) + // update the base time for the next attempt + context.BaseDuration = context.ElapsedTotal; + if (context.BaseDuration.TotalSeconds >= 10) { - if (Logging.On) Logging.Info(String.Format(CultureInfo.InvariantCulture, "fdb WARNING: long transaction ({0:N1} sec elapsed in transaction lambda function ({1} retries, {2})", context.Duration.Elapsed.TotalSeconds, context.Retries, context.Committed ? "committed" : "not yet committed")); + //REVIEW: this may not be a goot idea to spam the logs with long running transactions?? + if (Logging.On) Logging.Info(String.Format(CultureInfo.InvariantCulture, "fdb WARNING: long transaction ({0:N1} sec elapsed in transaction lambda function ({1} retries, {2})", context.BaseDuration.TotalSeconds, context.Retries, context.Committed ? 
"committed" : "not yet committed")); } context.Retries++; @@ -200,7 +230,7 @@ internal static async Task ExecuteInternal([NotNull] IFdbDatabase db, [NotNull] } finally { - context.Duration.Stop(); + context.Clock.Stop(); context.Dispose(); } } @@ -278,15 +308,14 @@ public static async Task RunWriteWithResultAsync([NotNull] IFdbDatabase db if (asyncHandler == null) throw new ArgumentNullException("asyncHandler"); cancellationToken.ThrowIfCancellationRequested(); - R result = default(R); Func handler = async (tr) => { - result = await asyncHandler(tr).ConfigureAwait(false); + tr.Context.Result = await asyncHandler(tr).ConfigureAwait(false); }; var context = new FdbOperationContext(db, FdbTransactionMode.Default | FdbTransactionMode.InsideRetryLoop, cancellationToken); await ExecuteInternal(db, context, handler, onDone).ConfigureAwait(false); - return result; + return (R)context.Result; } #endregion diff --git a/FoundationDB.Client/FdbRangeChunk.cs b/FoundationDB.Client/FdbRangeChunk.cs index c9a732181..8ead6c0c0 100644 --- a/FoundationDB.Client/FdbRangeChunk.cs +++ b/FoundationDB.Client/FdbRangeChunk.cs @@ -135,7 +135,7 @@ public KeyValuePair[] Decode([NotNull] FdbSubspace s for (int i = 0; i < chunk.Length; i++) { results[i] = new KeyValuePair( - keyEncoder.DecodeKey(subspace.ExtractAndCheck(chunk[i].Key)), + keyEncoder.DecodeKey(subspace.ExtractKey(chunk[i].Key, boundCheck: true)), valueEncoder.DecodeValue(chunk[i].Value) ); } @@ -200,7 +200,7 @@ public T[] DecodeKeys([NotNull] FdbSubspace subspace, [NotNull] IKeyEncoder { /// Async iterator that fetches the results by batch, but return them one by one - [DebuggerDisplay("State={m_state}, Current={m_current}, RemainingInBatch={m_remainingInBatch}, ReadLastBatch={m_lastBatchRead}")] + [DebuggerDisplay("State={m_state}, Current={m_current}, RemainingInChunk={m_itemsRemainingInChunk}, OutOfChunks={m_outOfChunks}")] private sealed class ResultIterator : FdbAsyncIterator { @@ -56,7 +56,6 @@ private sealed class 
ResultIterator : FdbAsyncIterator /// Lambda used to transform pairs of key/value into the expected result private readonly Func, T> m_resultTransform; - /// Iterator used to read chunks from the database private IFdbAsyncEnumerator[]> m_chunkIterator; @@ -124,11 +123,16 @@ private async Task ReadAnotherBatchAsync(CancellationToken cancellationTok { Contract.Requires(m_itemsRemainingInChunk == 0 && m_currentOffsetInChunk == -1 && !m_outOfChunks); + var iterator = m_chunkIterator; + // start reading the next batch - if (await m_chunkIterator.MoveNext(cancellationToken).ConfigureAwait(false)) + if (await iterator.MoveNext(cancellationToken).ConfigureAwait(false)) { // we got a new chunk ! - var chunk = m_chunkIterator.Current; + //note: Dispose() or Cleanup() maybe have been called concurrently! + ThrowInvalidState(); + + var chunk = iterator.Current; //note: if the range is empty, we may have an empty chunk, that is equivalent to no chunk if (chunk != null && chunk.Length > 0) diff --git a/FoundationDB.Client/FdbTransaction.cs b/FoundationDB.Client/FdbTransaction.cs index 22adf2e52..5722a75d8 100644 --- a/FoundationDB.Client/FdbTransaction.cs +++ b/FoundationDB.Client/FdbTransaction.cs @@ -90,6 +90,9 @@ public sealed partial class FdbTransaction : IFdbTransaction, IFdbReadOnlyTransa /// CancellationToken that should be used for all async operations executing inside this transaction private CancellationToken m_cancellation; //PERF: readonly struct + /// Used to cancel the transaction if the parent CTS fires + private CancellationTokenRegistration m_ctr; + #endregion #region Constructors... @@ -108,6 +111,15 @@ internal FdbTransaction(FdbDatabase db, FdbOperationContext context, int id, IFd m_readOnly = (mode & FdbTransactionMode.ReadOnly) != 0; m_handler = handler; + + if (m_cancellation.IsCancellationRequested) + { // already dead? 
+ Cancel(explicitly: false); + } + else + { + m_ctr = m_cancellation.RegisterWithoutEC(CancellationHandler, this); + } } #endregion @@ -263,7 +275,7 @@ public Task GetReadVersionAsync() // can be called after the transaction has been committed EnsureCanRetry(); - return m_handler.GetReadVersionAsync(m_cancellation); + return m_handler.GetReadVersionAsync(CancellationToken.None); } /// Retrieves the database version number at which a given transaction was committed. @@ -310,7 +322,7 @@ public Task GetAsync(Slice key) if (Logging.On && Logging.IsVerbose) Logging.Verbose(this, "GetAsync", String.Format("Getting value for '{0}'", key.ToString())); #endif - return m_handler.GetAsync(key, snapshot: false, cancellationToken: m_cancellation); + return m_handler.GetAsync(key, snapshot: false, cancellationToken: CancellationToken.None); } #endregion @@ -333,7 +345,7 @@ public Task GetValuesAsync(Slice[] keys) if (Logging.On && Logging.IsVerbose) Logging.Verbose(this, "GetValuesAsync", String.Format("Getting batch of {0} values ...", keys.Length)); #endif - return m_handler.GetValuesAsync(keys, snapshot: false, cancellationToken: m_cancellation); + return m_handler.GetValuesAsync(keys, snapshot: false, cancellationToken: CancellationToken.None); } #endregion @@ -363,7 +375,7 @@ public Task GetRangeAsync(FdbKeySelector beginInclusive, FdbKeySe // The iteration value is only needed when in iterator mode, but then it should start from 1 if (iteration == 0) iteration = 1; - return m_handler.GetRangeAsync(beginInclusive, endExclusive, options, iteration, snapshot: false, cancellationToken: m_cancellation); + return m_handler.GetRangeAsync(beginInclusive, endExclusive, options, iteration, snapshot: false, cancellationToken: CancellationToken.None); } #endregion @@ -416,7 +428,7 @@ public async Task GetKeyAsync(FdbKeySelector selector) if (Logging.On && Logging.IsVerbose) Logging.Verbose(this, "GetKeyAsync", String.Format("Getting key '{0}'", selector.ToString())); #endif - var key 
= await m_handler.GetKeyAsync(selector, snapshot: false, cancellationToken: m_cancellation).ConfigureAwait(false); + var key = await m_handler.GetKeyAsync(selector, snapshot: false, cancellationToken: CancellationToken.None).ConfigureAwait(false); // don't forget to truncate keys that would fall outside of the database's globalspace ! return m_database.BoundCheck(key); @@ -444,7 +456,7 @@ public Task GetKeysAsync(FdbKeySelector[] selectors) if (Logging.On && Logging.IsVerbose) Logging.Verbose(this, "GetKeysAsync", String.Format("Getting batch of {0} keys ...", selectors.Length)); #endif - return m_handler.GetKeysAsync(selectors, snapshot: false, cancellationToken: m_cancellation); + return m_handler.GetKeysAsync(selectors, snapshot: false, cancellationToken: CancellationToken.None); } #endregion @@ -634,7 +646,7 @@ public Task GetAddressesForKeyAsync(Slice key) if (Logging.On && Logging.IsVerbose) Logging.Verbose(this, "GetAddressesForKeyAsync", String.Format("Getting addresses for key '{0}'", FdbKey.Dump(key))); #endif - return m_handler.GetAddressesForKeyAsync(key, cancellationToken: m_cancellation); + return m_handler.GetAddressesForKeyAsync(key, CancellationToken.None); } #endregion @@ -657,7 +669,7 @@ public async Task CommitAsync() //TODO: need a STATE_COMMITTING ? 
try { - await m_handler.CommitAsync(m_cancellation).ConfigureAwait(false); + await m_handler.CommitAsync(CancellationToken.None).ConfigureAwait(false); if (Interlocked.CompareExchange(ref m_state, STATE_COMMITTED, STATE_READY) == STATE_READY) { @@ -724,7 +736,7 @@ public async Task OnErrorAsync(FdbError code) { EnsureCanRetry(); - await m_handler.OnErrorAsync(code, cancellationToken: m_cancellation).ConfigureAwait(false); + await m_handler.OnErrorAsync(code, CancellationToken.None).ConfigureAwait(false); // If fdb_transaction_on_error succeeds, that means that the transaction has been reset and is usable again var state = this.State; @@ -777,19 +789,47 @@ public void Reset() /// Rollback this transaction, and dispose it. It should not be used after that. public void Cancel() + { + Cancel(explicitly: true); + } + + private void Cancel(bool explicitly) { var state = Interlocked.CompareExchange(ref m_state, STATE_CANCELED, STATE_READY); if (state != STATE_READY) { - switch(state) + if (explicitly) { - case STATE_CANCELED: return; // already the case ! + switch (state) + { + case STATE_CANCELED: + { + return; // already the case! 
+ } + case STATE_COMMITTED: + { + throw new InvalidOperationException("Cannot cancel transaction that has already been committed"); + } + case STATE_FAILED: + { + throw new InvalidOperationException("Cannot cancel transaction because it is in a failed state"); + } + case STATE_DISPOSED: + { + throw new ObjectDisposedException("FdbTransaction", "Cannot cancel transaction because it already has been disposed"); + } + default: + { + throw new InvalidOperationException(String.Format("Cannot cancel transaction because it is in unknown state {0}", state)); + } + } + } - case STATE_COMMITTED: throw new InvalidOperationException("Cannot cancel transaction that has already been committed"); - case STATE_FAILED: throw new InvalidOperationException("Cannot cancel transaction because it is in a failed state"); - case STATE_DISPOSED: throw new ObjectDisposedException("FdbTransaction", "Cannot cancel transaction because it already has been disposed"); - default: throw new InvalidOperationException(String.Format("Cannot cancel transaction because it is in unknown state {0}", state)); + if (state == STATE_CANCELED || state == STATE_DISPOSED) + { // it's too late + return; } + } if (Logging.On && Logging.IsVerbose) Logging.Verbose(this, "Cancel", "Canceling transaction..."); @@ -799,6 +839,16 @@ public void Cancel() if (Logging.On && Logging.IsVerbose) Logging.Verbose(this, "Cancel", "Transaction has been canceled"); } + private static readonly Action CancellationHandler = CancellationCallback; + + /// Handler called when the cancellation source of the transaction fires + private static void CancellationCallback(object state) + { + Contract.Requires(state != null); + var trans = (FdbTransaction) state; + trans.Cancel(explicitly: false); + } + #endregion #region IDisposable... 
@@ -916,6 +966,7 @@ public void Dispose() { try { + m_ctr.Dispose(); this.Database.UnregisterTransaction(this); m_cts.SafeCancelAndDispose(); diff --git a/FoundationDB.Client/FdbTransactionExtensions.cs b/FoundationDB.Client/FdbTransactionExtensions.cs index 22e53ec18..ecbf5986e 100644 --- a/FoundationDB.Client/FdbTransactionExtensions.cs +++ b/FoundationDB.Client/FdbTransactionExtensions.cs @@ -966,7 +966,7 @@ public static async Task GetValuesAsync(this IFdbReadOnlyTrans { if (decoder == null) throw new ArgumentNullException("decoder"); - return decoder.DecodeRange(await GetValuesAsync(trans, keys).ConfigureAwait(false)); + return decoder.DecodeValues(await GetValuesAsync(trans, keys).ConfigureAwait(false)); } /// diff --git a/FoundationDB.Client/FdbWatch.cs b/FoundationDB.Client/FdbWatch.cs index f447b5b36..4938aed65 100644 --- a/FoundationDB.Client/FdbWatch.cs +++ b/FoundationDB.Client/FdbWatch.cs @@ -88,10 +88,13 @@ public TaskAwaiter GetAwaiter() if (m_future != null) { +#if REFACTORING_IN_PROGRESS if (m_future.HasFlag(FdbFuture.Flags.DISPOSED)) { throw new ObjectDisposedException("Cannot await a watch that has already been disposed"); } + +#endif return m_future.Task.GetAwaiter(); } throw new InvalidOperationException("Cannot await an empty watch"); @@ -111,7 +114,8 @@ public void Dispose() { if (m_future != null) { - m_future.Dispose(); + //TODO: what should be do? (=> cancel the future?) + //m_future.Dispose(); } } diff --git a/FoundationDB.Client/Filters/FdbDatabaseFilter.cs b/FoundationDB.Client/Filters/FdbDatabaseFilter.cs index d29f31f05..0592a017d 100644 --- a/FoundationDB.Client/Filters/FdbDatabaseFilter.cs +++ b/FoundationDB.Client/Filters/FdbDatabaseFilter.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -31,6 +31,7 @@ namespace FoundationDB.Filters using FoundationDB.Client; using JetBrains.Annotations; using System; + using System.Collections.Generic; using System.Diagnostics; using System.Threading; using System.Threading.Tasks; @@ -93,7 +94,7 @@ public string Name } /// Cluster of the database - public IFdbCluster Cluster + public virtual IFdbCluster Cluster { //REVIEW: do we need a Cluster Filter ? [NotNull] @@ -107,7 +108,7 @@ public CancellationToken Cancellation } /// Returns the global namespace used by this database instance - public FdbSubspace GlobalSpace + public virtual IFdbDynamicSubspace GlobalSpace { [NotNull] get { return m_database.GlobalSpace; } @@ -132,14 +133,44 @@ public virtual bool IsReadOnly get { return m_readOnly; } } - public virtual IFdbSubspace this[Slice suffix] + Slice IFdbSubspace.Key { - get { return m_database[suffix]; } + get { return this.GlobalSpace.Key; } } - public virtual IFdbSubspace this[IFdbKey key] + FdbKeyRange IFdbSubspace.ToRange() { - get { return m_database[key]; } + return this.GlobalSpace.ToRange(); + } + + FdbKeyRange IFdbSubspace.ToRange(Slice suffix) + { + return this.GlobalSpace.ToRange(suffix); + } + + FdbKeyRange IFdbSubspace.ToRange(TKey key) + { + return this.GlobalSpace.ToRange(key); + } + + IFdbSubspace IFdbSubspace.this[Slice suffix] + { + get { return this.GlobalSpace[suffix]; } + } + + IFdbSubspace IFdbSubspace.this[IFdbKey key] + { + get { return this.GlobalSpace[key]; } + } + + public virtual FdbDynamicSubspacePartition Partition + { + get { return m_database.Partition; } + } + + public virtual FdbDynamicSubspaceKeys Keys + { + get { return m_database.Keys; } } public virtual bool Contains(Slice key) @@ -147,6 +178,53 @@ public virtual bool Contains(Slice key) return m_database.Contains(key); } + public virtual Slice BoundCheck(Slice key, bool allowSystemKeys) + { + return m_database.BoundCheck(key, allowSystemKeys); + } + + 
public virtual Slice ConcatKey(Slice key) + { + return m_database.ConcatKey(key); + } + + public virtual Slice ConcatKey(TKey key) + where TKey : IFdbKey + { + return m_database.ConcatKey(key); + } + + public virtual Slice[] ConcatKeys(IEnumerable keys) + { + return m_database.ConcatKeys(keys); + } + + public virtual Slice[] ConcatKeys(IEnumerable keys) + where TKey : IFdbKey + { + return m_database.ConcatKeys(keys); + } + + public virtual Slice ExtractKey(Slice key, bool boundCheck = false) + { + return m_database.ExtractKey(key, boundCheck); + } + + public virtual Slice[] ExtractKeys(IEnumerable keys, bool boundCheck = false) + { + return m_database.ExtractKeys(keys, boundCheck); + } + + public virtual SliceWriter GetWriter(int capacity = 0) + { + return m_database.GetWriter(capacity); + } + + public virtual IDynamicKeyEncoder Encoder + { + get { return m_database.Encoder; } + } + #endregion #region Transactionals... diff --git a/FoundationDB.Client/Filters/Logging/FdbLoggedDatabase.cs b/FoundationDB.Client/Filters/Logging/FdbLoggedDatabase.cs index dede51fc6..01ee3a092 100644 --- a/FoundationDB.Client/Filters/Logging/FdbLoggedDatabase.cs +++ b/FoundationDB.Client/Filters/Logging/FdbLoggedDatabase.cs @@ -39,12 +39,18 @@ public sealed class FdbLoggedDatabase : FdbDatabaseFilter /// Handler called everytime a transaction is successfully committed public Action OnCommitted { get; private set; } + /// Wrap a database with a filter that will log the activity of all transactions + /// Wrapped database + /// If true, deny all write operations. + /// If true, also dispose the wrapped database if this instance is disposed. + /// Handler that will be called when a transaction is either committed succesfully, or disposed. The log can be accessed via the property. 
public FdbLoggedDatabase(IFdbDatabase database, bool forceReadOnly, bool ownsDatabase, Action onCommitted) : base(database, forceReadOnly, ownsDatabase) { this.OnCommitted = onCommitted; } + /// Create a new logged transaction public override IFdbTransaction BeginTransaction(FdbTransactionMode mode, CancellationToken cancellationToken = default(CancellationToken), FdbOperationContext context = null) { return new FdbLoggedTransaction( diff --git a/FoundationDB.Client/Filters/Logging/FdbLoggedTransaction.cs b/FoundationDB.Client/Filters/Logging/FdbLoggedTransaction.cs index b49bf19a7..8cc3055b6 100644 --- a/FoundationDB.Client/Filters/Logging/FdbLoggedTransaction.cs +++ b/FoundationDB.Client/Filters/Logging/FdbLoggedTransaction.cs @@ -45,6 +45,7 @@ public sealed class FdbLoggedTransaction : FdbTransactionFilter /// Handler that will be called when this transaction commits successfully public Action Committed { get; private set; } + /// Wrap an existing transaction and log all operations performed public FdbLoggedTransaction(IFdbTransaction trans, bool ownsTransaction, Action onCommitted) : base(trans, false, ownsTransaction) { diff --git a/FoundationDB.Client/Filters/Logging/FdbLoggingExtensions.cs b/FoundationDB.Client/Filters/Logging/FdbLoggingExtensions.cs index cfc6836c7..04e2e1fe0 100644 --- a/FoundationDB.Client/Filters/Logging/FdbLoggingExtensions.cs +++ b/FoundationDB.Client/Filters/Logging/FdbLoggingExtensions.cs @@ -36,8 +36,12 @@ namespace FoundationDB.Filters.Logging public static class FdbLoggingExtensions { + /// Apply the Logging Filter to this database instance + /// Original database instance + /// Handler that will be called everytime a transaction commits successfully, or gets disposed. The log of all operations performed by the transaction can be accessed via the property. + /// Database filter, that will monitor all transactions initiated from it. Disposing this wrapper will NOT dispose the inner database. 
[NotNull] - public static FdbLoggedDatabase Logged(this IFdbDatabase database, [NotNull] Action handler) + public static FdbLoggedDatabase Logged([NotNull] this IFdbDatabase database, [NotNull] Action handler) { if (handler == null) throw new ArgumentNullException("handler"); @@ -50,15 +54,19 @@ public static FdbLoggedDatabase Logged(this IFdbDatabase database, [NotNull] Act /// Strip the logging behaviour of this database. Use this for boilerplate or test code that would pollute the logs otherwise. /// Database instance (that may or may not be logged) /// Either itself if it is not logged, or the inner database if it was. - public static IFdbDatabase WithoutLogging(this IFdbDatabase database) + [NotNull] + public static IFdbDatabase WithoutLogging([NotNull] this IFdbDatabase database) { + if (database == null) throw new ArgumentNullException("database"); + var logged = database as FdbLoggedDatabase; if (logged != null) return logged.GetInnerDatabase(); return database; } - internal static FdbLoggedTransaction GetLogger(IFdbReadOnlyTransaction trans) + [CanBeNull] + internal static FdbLoggedTransaction GetLogger([NotNull] IFdbReadOnlyTransaction trans) { //TODO: the logged transaction could also be wrapped in other filters. // => we need a recursive "FindFilter" method that would unwrap the filter onion looking for a specific one... @@ -67,7 +75,8 @@ internal static FdbLoggedTransaction GetLogger(IFdbReadOnlyTransaction trans) } /// Annotate a logged transaction - public static void Annotate(this IFdbReadOnlyTransaction trans, string message) + /// This method only applies to transactions created from a database instance. Calling this method on regular transaction is a no-op. 
+ public static void Annotate([NotNull] this IFdbReadOnlyTransaction trans, [NotNull] string message) { var logged = GetLogger(trans); if (logged != null) @@ -77,32 +86,36 @@ public static void Annotate(this IFdbReadOnlyTransaction trans, string message) } /// Annotate a logged transaction + /// This method only applies to transactions created from a database instance. Calling this method on regular transaction is a no-op. [StringFormatMethod("format")] - public static void Annotate(this IFdbReadOnlyTransaction trans, string format, object arg0) + public static void Annotate([NotNull] this IFdbReadOnlyTransaction trans, [NotNull] string format, object arg0) { var logged = GetLogger(trans); if (logged != null) logged.Log.AddOperation(new FdbTransactionLog.LogCommand(String.Format(format, arg0)), countAsOperation: false); } /// Annotate a logged transaction + /// This method only applies to transactions created from a database instance. Calling this method on regular transaction is a no-op. [StringFormatMethod("format")] - public static void Annotate(this IFdbReadOnlyTransaction trans, string format, object arg0, object arg1) + public static void Annotate([NotNull] this IFdbReadOnlyTransaction trans, [NotNull] string format, object arg0, object arg1) { var logged = GetLogger(trans); if (logged != null) logged.Log.AddOperation(new FdbTransactionLog.LogCommand(String.Format(format, arg0, arg1)), countAsOperation: false); } /// Annotate a logged transaction + /// This method only applies to transactions created from a database instance. Calling this method on regular transaction is a no-op. 
[StringFormatMethod("format")] - public static void Annotate(this IFdbReadOnlyTransaction trans, string format, object arg0, object arg1, object arg2) + public static void Annotate([NotNull] this IFdbReadOnlyTransaction trans, [NotNull] string format, object arg0, object arg1, object arg2) { var logged = GetLogger(trans); if (logged != null) logged.Log.AddOperation(new FdbTransactionLog.LogCommand(String.Format(format, arg0, arg1, arg2)), countAsOperation: false); } /// Annotate a logged transaction + /// This method only applies to transactions created from a database instance. Calling this method on regular transaction is a no-op. [StringFormatMethod("format")] - public static void Annotate(this IFdbReadOnlyTransaction trans, string format, params object[] args) + public static void Annotate([NotNull] this IFdbReadOnlyTransaction trans, [NotNull] string format, params object[] args) { var logged = GetLogger(trans); if (logged != null) logged.Log.AddOperation(new FdbTransactionLog.LogCommand(String.Format(format, args)), countAsOperation: false); diff --git a/FoundationDB.Client/Filters/Logging/FdbTransactionLog.Commands.cs b/FoundationDB.Client/Filters/Logging/FdbTransactionLog.Commands.cs index c69e8ad7c..d690cbe1d 100644 --- a/FoundationDB.Client/Filters/Logging/FdbTransactionLog.Commands.cs +++ b/FoundationDB.Client/Filters/Logging/FdbTransactionLog.Commands.cs @@ -401,6 +401,14 @@ public override string GetArguments() return String.Concat(FdbKey.Dump(this.Key), " ", this.Mutation.ToString(), " ", this.Param.ToAsciiOrHexaString()); } + public override string ToString() + { + var arg = this.GetArguments(); + var sb = new StringBuilder(); + if (this.Snapshot) sb.Append("Snapshot."); + sb.Append("Atomic_").Append(this.Mutation.ToString()).Append(' ').Append(FdbKey.Dump(this.Key)).Append(", <").Append(this.Param.ToHexaString(' ')).Append('>'); + return sb.ToString(); + } } public sealed class AddConflictRangeCommand : Command diff --git 
a/FoundationDB.Client/Filters/Logging/FdbTransactionLog.cs b/FoundationDB.Client/Filters/Logging/FdbTransactionLog.cs index 99c81bdfb..efd58958e 100644 --- a/FoundationDB.Client/Filters/Logging/FdbTransactionLog.cs +++ b/FoundationDB.Client/Filters/Logging/FdbTransactionLog.cs @@ -29,6 +29,8 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY namespace FoundationDB.Filters.Logging { using FoundationDB.Client; + using FoundationDB.Client.Utils; + using JetBrains.Annotations; using System; using System.Collections.Concurrent; using System.Diagnostics; @@ -36,6 +38,7 @@ namespace FoundationDB.Filters.Logging using System.Text; using System.Threading; + /// Container that logs all operations performed by a transaction public sealed partial class FdbTransactionLog { private int m_step; @@ -44,6 +47,8 @@ public sealed partial class FdbTransactionLog private int m_readSize; private int m_writeSize; + /// Create an empty log for a newly created transaction + /// public FdbTransactionLog(IFdbTransaction trans) { this.Commands = new ConcurrentQueue(); @@ -52,11 +57,14 @@ public FdbTransactionLog(IFdbTransaction trans) /// Id of the logged transaction public int Id { get; private set; } + /// True if the transaction is Read Only + public bool IsReadOnly { get; private set; } + /// Number of operations performed by the transaction public int Operations { get { return m_operations; } } /// List of all commands processed by the transaction - public ConcurrentQueue Commands { get; private set; } + public ConcurrentQueue Commands { [NotNull] get; private set; } /// Timestamp of the start of transaction public long StartTimestamp { get; private set; } @@ -132,28 +140,36 @@ public TimeSpan TotalDuration /// Marks the start of the transaction /// - public void Start(IFdbTransaction trans) + public void Start([NotNull] IFdbTransaction trans) { + Contract.Requires(trans != null); + this.Id = trans.Id; - this.StartedUtc = DateTimeOffset.UtcNow; + this.IsReadOnly = trans.IsReadOnly; + 
this.StartedUtc = DateTimeOffset.UtcNow; //TODO: use a configurable clock? this.StartTimestamp = GetTimestamp(); } /// Marks the end of the transaction /// - public void Stop(IFdbTransaction trans) + public void Stop([NotNull] IFdbTransaction trans) { + Contract.Requires(trans != null); + + //TODO: verify that the trans is the same one that was passed to Start(..)? if (!this.Completed) { this.Completed = true; this.StopTimestamp = GetTimestamp(); - this.StoppedUtc = DateTimeOffset.UtcNow; + this.StoppedUtc = DateTimeOffset.UtcNow; //TODO: use a configurable clock? } } /// Adds a new already completed command to the log - public void AddOperation(Command cmd, bool countAsOperation = true) + public void AddOperation([NotNull] Command cmd, bool countAsOperation = true) { + Contract.Requires(cmd != null); + var ts = GetTimeOffset(); int step = Volatile.Read(ref m_step); @@ -166,8 +182,10 @@ public void AddOperation(Command cmd, bool countAsOperation = true) } /// Start tracking the execution of a new command - public void BeginOperation(Command cmd) + public void BeginOperation([NotNull] Command cmd) { + Contract.Requires(cmd != null); + var ts = GetTimeOffset(); int step = Volatile.Read(ref m_step); @@ -180,8 +198,10 @@ public void BeginOperation(Command cmd) } /// Mark the end of the execution of a command - public void EndOperation(Command cmd, Exception error = null) + public void EndOperation([NotNull] Command cmd, Exception error = null) { + Contract.Requires(cmd != null); + var ts = GetTimeOffset(); var step = Interlocked.Increment(ref m_step); @@ -192,18 +212,46 @@ public void EndOperation(Command cmd, Exception error = null) } /// Generate an ASCII report with all the commands that were executed by the transaction - public string GetCommandsReport() + [NotNull] + public string GetCommandsReport(bool detailed = false) { var culture = CultureInfo.InvariantCulture; var sb = new StringBuilder(); - sb.AppendLine(String.Format(culture, "Transaction #{0} command 
log:", this.Id)); - int reads = 0, writes = 0; + var cmds = this.Commands.ToArray(); + sb.AppendFormat(culture, "Transaction #{0} ({3}, {1} operations, started {2}Z", this.Id, cmds.Length, this.StartedUtc.TimeOfDay, this.IsReadOnly ? "read-only" : "read/write"); + if (this.StoppedUtc.HasValue) + sb.AppendFormat(culture, ", ended {0}Z)", this.StoppedUtc.Value.TimeOfDay); + else + sb.Append(", did not finish)"); + sb.AppendLine(); + + int reads = 0, writes = 0; for (int i = 0; i < cmds.Length; i++) { var cmd = cmds[i]; - sb.AppendFormat(culture, "{0,3}/{1,3} : {2}", i + 1, cmds.Length, cmd); + if (detailed) + { + sb.AppendFormat( + culture, + "{0,3} - T+{1,7:##0.000} ({2,7:##,##0} µs) : {3}", + /* 0 */ cmd.Step, + /* 1 */ cmd.StartOffset.TotalMilliseconds, + /* 2 */ cmd.Duration.Ticks / 10.0, + /* 3 */ cmd.ToString() + ); + } + else + { + sb.AppendFormat( + culture, + "{0,3} : {2}{1}", + /* 0 */ cmd.Step, + /* 1 */ cmd.ToString(), + /* 2 */ cmd.Error != null ? "[FAILED] " : "" + ); + } sb.AppendLine(); switch (cmd.Mode) { @@ -211,12 +259,16 @@ public string GetCommandsReport() case FdbTransactionLog.Mode.Write: ++writes; break; } } - sb.AppendLine(String.Format(culture, "Stats: {0:N0} operations ({1:N0} reads, {2:N0} writes), {3:N0} bytes read, {4:N0} bytes committed", this.Operations, reads, writes, this.ReadSize, this.CommitSize)); + if (this.Completed) + { + sb.AppendLine(String.Format(culture, "Stats: {0:N0} operations, {1:N0} reads ({3:N0} bytes), {2:N0} writes ({4:N0} bytes), {5:N2} ms", this.Operations, reads, writes, this.ReadSize, this.CommitSize, this.TotalDuration.TotalMilliseconds)); + } sb.AppendLine(); return sb.ToString(); } /// Generate a full ASCII report with the detailed timeline of all the commands that were executed by the transaction + [NotNull] public string GetTimingsReport(bool showCommands = false) { var culture = CultureInfo.InvariantCulture; @@ -227,7 +279,8 @@ public string GetTimingsReport(bool showCommands = false) double scale = 
0.0005d; int width; bool flag = false; - while ((width = (int)(duration.TotalSeconds / scale)) > 80) + int maxWidth = showCommands ? 80 : 160; + while ((width = (int)(duration.TotalSeconds / scale)) > maxWidth) { if (flag) scale *= 5d; else scale *= 2d; flag = !flag; @@ -236,17 +289,34 @@ public string GetTimingsReport(bool showCommands = false) var cmds = this.Commands.ToArray(); // Header - sb.AppendFormat(culture, "Transaction #{0} ({1} operations, '#' = {2:N1} ms, started {3}Z", this.Id, cmds.Length, (scale * 1000d), this.StartedUtc.TimeOfDay); + sb.AppendFormat(culture, "Transaction #{0} ({4}, {1} operations, '#' = {2:N1} ms, started {3}Z", this.Id, cmds.Length, (scale * 1000d), this.StartedUtc.TimeOfDay, this.IsReadOnly ? "read-only" : "read/write"); if (this.StoppedUtc.HasValue) - sb.AppendFormat(culture, ", ended {0}Z)", this.StoppedUtc.Value.TimeOfDay); + sb.AppendFormat(culture, ", ended {0}Z)", this.StoppedUtc.Value.TimeOfDay); else - sb.AppendLine(", did not finish"); + sb.Append(", did not finish"); sb.AppendLine(); if (cmds.Length > 0) { var bar = new string('─', width + 2); sb.AppendLine(String.Format(culture, "┌ oper. 
┬{0}┬──── start ──── end ── duration ──┬─ sent recv ┐", bar)); + // look for the timestamps of the first and last commands + var first = TimeSpan.Zero; + var last = duration; + for (int i = 0; i < cmds.Length;i++) + { + if (cmds[i].Op == Operation.Log) continue; + first = cmds[i].StartOffset; + break; + } + for(int i = cmds.Length - 1; i >= 0; i--) + { + if (cmds[i].Op == Operation.Log) continue; + if (cmds[i].EndOffset.HasValue) duration = cmds[i].EndOffset.Value; + break; + } + duration -= first; + int step = -1; bool previousWasOnError = false; int attempts = 1; @@ -259,7 +329,7 @@ public string GetTimingsReport(bool showCommands = false) } long ticks = cmd.Duration.Ticks; - string w = GetFancyGraph(width, cmd.StartOffset.Ticks, ticks, duration.Ticks, charsToSkip); + string w = GetFancyGraph(width, (cmd.StartOffset - first).Ticks, ticks, duration.Ticks, charsToSkip); if (ticks > 0) { @@ -273,7 +343,7 @@ public string GetTimingsReport(bool showCommands = false) /* 4 */ (cmd.EndOffset ?? TimeSpan.Zero).TotalMilliseconds, /* 5 */ ticks / 10.0, /* 6 */ cmd.Step == step ? ":" : " ", - /* 7 */ ticks >= 100000 ? "*" : ticks >= 10000 ? "°" : " ", + /* 7 */ ticks >= TimeSpan.TicksPerMillisecond * 10 ? '*' : ticks >= TimeSpan.TicksPerMillisecond ? '°' : ' ', /* 8 */ cmd.ArgumentBytes, /* 9 */ cmd.ResultBytes, /* 10 */ cmd.Error != null ? "!" : " ", @@ -289,7 +359,7 @@ public string GetTimingsReport(bool showCommands = false) /* 1 */ cmd.Step, /* 2 */ cmd.Error != null ? "!" : " ", /* 3 */ cmd.ShortName, - /* 4 */ ticks >= 100000 ? "*" : ticks >= 10000 ? "°" : " ", + /* 4 */ ticks >= TimeSpan.TicksPerMillisecond * 10 ? '*' : ticks >= TimeSpan.TicksPerMillisecond ? '°' : ' ', /* 5 */ w, /* 6 */ cmd.StartOffset.TotalMilliseconds, /* 7 */ showCommands ? 
cmd.ToString() : String.Empty @@ -325,12 +395,12 @@ public string GetTimingsReport(bool showCommands = false) flag = true; } if (!flag) sb.Append("Completed"); - sb.AppendLine(String.Format(culture, " in {0:N3} ms and {1:N0} attempt(s)", duration.TotalMilliseconds, attempts)); + sb.AppendLine(String.Format(culture, " in {0:N3} ms and {1:N0} attempt(s)", this.TotalDuration.TotalMilliseconds, attempts)); } } else { // empty transaction - sb.AppendLine(String.Format(culture, "> Completed after {0:N3} ms without performing any operation", duration.TotalMilliseconds)); + sb.AppendLine(String.Format(culture, "> Completed after {0:N3} ms without performing any operation", this.TotalDuration.TotalMilliseconds)); } return sb.ToString(); } @@ -364,6 +434,7 @@ private static string GetFancyGraph(int width, long offset, long duration, long return new string(tmp); } + /// List of all operation types supported by a transaction public enum Operation { Invalid = 0, @@ -390,13 +461,19 @@ public enum Operation Log, } + /// Categories of operations supported by a transaction public enum Mode { Invalid = 0, + /// Operation that reads keys and/or values from the database Read, + /// Operation that writes or clears keys from the database Write, + /// Operation that changes the state or behavior of the transaction Meta, + /// Operation that watch changes performed in the database, outside of the transaction Watch, + /// Comments, annotations, debug output attached to the transaction Annotation } diff --git a/FoundationDB.Client/Filters/PrefixRewriterTransaction.cs b/FoundationDB.Client/Filters/PrefixRewriterTransaction.cs index f1284abf4..10b16e438 100644 --- a/FoundationDB.Client/Filters/PrefixRewriterTransaction.cs +++ b/FoundationDB.Client/Filters/PrefixRewriterTransaction.cs @@ -38,31 +38,31 @@ public sealed class PrefixRewriterTransaction : FdbTransactionFilter { // We will add a prefix to all keys sent to the db, and remove it on the way back - private readonly FdbSubspace 
m_prefix; + private readonly IFdbSubspace m_prefix; - public PrefixRewriterTransaction(FdbSubspace prefix, IFdbTransaction trans, bool ownsTransaction) + public PrefixRewriterTransaction(IFdbSubspace prefix, IFdbTransaction trans, bool ownsTransaction) : base(trans, false, ownsTransaction) { if (prefix == null) throw new ArgumentNullException("prefix"); m_prefix = prefix; } - public FdbSubspace Prefix { get { return m_prefix; } } + public IFdbSubspace Prefix { get { return m_prefix; } } private Slice Encode(Slice key) { - return m_prefix.Concat(key); + return m_prefix.ConcatKey(key); } private Slice[] Encode(Slice[] keys) { - return m_prefix.ConcatRange(keys); + return m_prefix.ConcatKeys(keys); } private FdbKeySelector Encode(FdbKeySelector selector) { return new FdbKeySelector( - m_prefix.Concat(selector.Key), + m_prefix.ConcatKey(selector.Key), selector.OrEqual, selector.Offset ); @@ -75,7 +75,7 @@ private FdbKeySelector[] Encode(FdbKeySelector[] selectors) { keys[i] = selectors[i].Key; } - keys = m_prefix.ConcatRange(keys); + keys = m_prefix.ConcatKeys(keys); var res = new FdbKeySelector[selectors.Length]; for (int i = 0; i < selectors.Length; i++) @@ -91,7 +91,7 @@ private FdbKeySelector[] Encode(FdbKeySelector[] selectors) private Slice Decode(Slice key) { - return m_prefix.Extract(key); + return m_prefix.ExtractKey(key); } private Slice[] Decode(Slice[] keys) @@ -99,7 +99,7 @@ private Slice[] Decode(Slice[] keys) var res = new Slice[keys.Length]; for (int i = 0; i < keys.Length;i++) { - res[i] = m_prefix.Extract(keys[i]); + res[i] = m_prefix.ExtractKey(keys[i]); } return res; } diff --git a/FoundationDB.Client/Filters/ReadOnlyTransactionFilter.cs b/FoundationDB.Client/Filters/ReadOnlyTransactionFilter.cs index 5c5b1589c..e74321045 100644 --- a/FoundationDB.Client/Filters/ReadOnlyTransactionFilter.cs +++ b/FoundationDB.Client/Filters/ReadOnlyTransactionFilter.cs @@ -31,6 +31,7 @@ namespace FoundationDB.Filters using FoundationDB.Client; using System; + /// 
Filter that forces a read/write transaction to be read-only public sealed class ReadOnlyTransactionFilter : FdbTransactionFilter { public ReadOnlyTransactionFilter(IFdbTransaction trans, bool ownsTransaction) diff --git a/FoundationDB.Client/FoundationDB.Client.csproj b/FoundationDB.Client/FoundationDB.Client.csproj index 82e12c10f..0fb270277 100644 --- a/FoundationDB.Client/FoundationDB.Client.csproj +++ b/FoundationDB.Client/FoundationDB.Client.csproj @@ -65,19 +65,31 @@ - - - - + + + + + + + + + + + + + + + + @@ -109,8 +121,8 @@ - - + + @@ -159,6 +171,12 @@ + + + + + + @@ -169,7 +187,7 @@ - + @@ -200,13 +218,21 @@ + + + + + + + + + - + - @@ -245,6 +271,9 @@ + + + 99)| var res = await query.Skip(50).ToListAsync(); @@ -394,7 +394,7 @@ public async Task Test_Can_Skip() // from the end using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(location.ToRange()); + var query = tr.GetRange(location.Keys.ToRange()); // |(0 <--------- 49)<<<<<<<<<<<<<| var res = await query.Reverse().Skip(50).ToListAsync(); @@ -421,7 +421,7 @@ public async Task Test_Can_Skip() // from both sides using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(location.ToRange()); + var query = tr.GetRange(location.Keys.ToRange()); // |>>>>>>>>>(25<------------74)<<<<<<<<| var res = await query.Skip(25).Reverse().Skip(25).ToListAsync(); @@ -443,16 +443,16 @@ public async Task Test_Original_Range_Does_Not_Overflow() var location = await GetCleanDirectory(db, "Queries", "Range"); // import test data - var data = Enumerable.Range(0, 30).Select(x => new KeyValuePair(location.Pack(x), Slice.FromFixed32(x))); + var data = Enumerable.Range(0, 30).Select(x => new KeyValuePair(location.Keys.Encode(x), Slice.FromFixed32(x))); await Fdb.Bulk.WriteAsync(db, data, this.Cancellation); using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { var query = tr - .GetRange(location.Pack(10), location.Pack(20)) // 10 -> 19 + 
.GetRange(location.Keys.Encode(10), location.Keys.Encode(20)) // 10 -> 19 .Take(20) // 10 -> 19 (limit 20) .Reverse(); // 19 -> 10 (limit 20) - Console.WriteLine(query); + Log("query: {0}", query); // set a limit that overflows, and then reverse from it var res = await query.ToListAsync(); @@ -462,11 +462,11 @@ public async Task Test_Original_Range_Does_Not_Overflow() using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { var query = tr - .GetRange(location.Pack(10), location.Pack(20)) // 10 -> 19 + .GetRange(location.Keys.Encode(10), location.Keys.Encode(20)) // 10 -> 19 .Reverse() // 19 -> 10 .Take(20) // 19 -> 10 (limit 20) .Reverse(); // 10 -> 19 (limit 20) - Console.WriteLine(query); + Log("query: {0}", query); var res = await query.ToListAsync(); Assert.That(res.Count, Is.EqualTo(10)); @@ -488,7 +488,7 @@ public async Task Test_Can_MergeSort() await db.ClearRangeAsync(location, this.Cancellation); // create K lists - var lists = Enumerable.Range(0, K).Select(i => location.Partition(i)).ToArray(); + var lists = Enumerable.Range(0, K).Select(i => location.Partition.ByKey(i)).ToArray(); // lists[0] contains all multiples of K ([0, 0], [K, 1], [2K, 2], ...) // lists[1] contains all multiples of K, offset by 1 ([1, 0], [K+1, 1], [2K+1, 2], ...) 
@@ -502,7 +502,7 @@ public async Task Test_Can_MergeSort() { for (int i = 0; i < N; i++) { - tr.Set(lists[k].Pack((i * K) + k), FdbTuple.Pack(k, i)); + tr.Set(lists[k].Keys.Encode((i * K) + k), FdbTuple.EncodeKey(k, i)); } await tr.CommitAsync(); } @@ -514,8 +514,8 @@ public async Task Test_Can_MergeSort() using (var tr = db.BeginTransaction(this.Cancellation)) { var merge = tr.MergeSort( - lists.Select(list => list.ToSelectorPair()), - kvp => location.UnpackLast(kvp.Key) + lists.Select(list => FdbKeySelectorPair.Create(list.Keys.ToRange())), + kvp => location.Keys.DecodeLast(kvp.Key) ); Assert.That(merge, Is.Not.Null); @@ -527,8 +527,8 @@ public async Task Test_Can_MergeSort() for (int i = 0; i < K * N; i++) { - Assert.That(location.Extract(results[i].Key), Is.EqualTo(FdbTuple.Pack(i % K, i))); - Assert.That(results[i].Value, Is.EqualTo(FdbTuple.Pack(i % K, i / K))); + Assert.That(location.ExtractKey(results[i].Key), Is.EqualTo(FdbTuple.EncodeKey(i % K, i))); + Assert.That(results[i].Value, Is.EqualTo(FdbTuple.EncodeKey(i % K, i / K))); } } } @@ -545,7 +545,7 @@ public async Task Test_Range_Intersect() var location = await GetCleanDirectory(db, "Queries", "Intersect"); // create K lists - var lists = Enumerable.Range(0, K).Select(i => location.Partition(i)).ToArray(); + var lists = Enumerable.Range(0, K).Select(i => location.Partition.ByKey(i)).ToArray(); // lists[0] contains all multiples of 1 // lists[1] contains all multiples of 2 @@ -566,8 +566,8 @@ public async Task Test_Range_Intersect() { for (int i = 0; i < N; i++) { - var key = lists[k].Pack(series[k][i]); - var value = FdbTuple.Pack(k, i); + var key = lists[k].Keys.Encode(series[k][i]); + var value = FdbTuple.EncodeKey(k, i); //Console.WriteLine("> " + key + " = " + value); tr.Set(key, value); } @@ -579,13 +579,13 @@ public async Task Test_Range_Intersect() IEnumerable xs = series[0]; for (int i = 1; i < K; i++) xs = xs.Intersect(series[i]); var expected = xs.ToArray(); - Console.WriteLine(String.Join(", 
", expected)); + Log("Expected: {0}", String.Join(", ", expected)); using (var tr = db.BeginTransaction(this.Cancellation)) { var merge = tr.Intersect( - lists.Select(list => list.ToSelectorPair()), - kvp => location.UnpackLast(kvp.Key) + lists.Select(list => FdbKeySelectorPair.Create(list.Keys.ToRange())), + kvp => location.Keys.DecodeLast(kvp.Key) ); Assert.That(merge, Is.Not.Null); @@ -598,11 +598,9 @@ public async Task Test_Range_Intersect() for (int i = 0; i < results.Count; i++) { - Assert.That(location.UnpackLast(results[i].Key), Is.EqualTo(expected[i])); + Assert.That(location.Keys.DecodeLast(results[i].Key), Is.EqualTo(expected[i])); } } - - } } @@ -619,7 +617,7 @@ public async Task Test_Range_Except() var location = await GetCleanDirectory(db, "Queries", "Except"); // create K lists - var lists = Enumerable.Range(0, K).Select(i => location.Partition(i)).ToArray(); + var lists = Enumerable.Range(0, K).Select(i => location.Partition.ByKey(i)).ToArray(); // lists[0] contains all multiples of 1 // lists[1] contains all multiples of 2 @@ -640,8 +638,8 @@ public async Task Test_Range_Except() { for (int i = 0; i < N; i++) { - var key = lists[k].Pack(series[k][i]); - var value = FdbTuple.Pack(k, i); + var key = lists[k].Keys.Encode(series[k][i]); + var value = FdbTuple.EncodeKey(k, i); //Console.WriteLine("> " + key + " = " + value); tr.Set(key, value); } @@ -653,13 +651,13 @@ public async Task Test_Range_Except() IEnumerable xs = series[0]; for (int i = 1; i < K; i++) xs = xs.Except(series[i]); var expected = xs.ToArray(); - Console.WriteLine(String.Join(", ", expected)); + Log("Expected: {0}", String.Join(", ", expected)); using (var tr = db.BeginTransaction(this.Cancellation)) { var merge = tr.Except( - lists.Select(list => list.ToSelectorPair()), - kvp => location.UnpackLast(kvp.Key) + lists.Select(list => FdbKeySelectorPair.Create(list.Keys.ToRange())), + kvp => location.Keys.DecodeLast(kvp.Key) ); Assert.That(merge, Is.Not.Null); @@ -672,7 +670,7 @@ public 
async Task Test_Range_Except() for (int i = 0; i < results.Count; i++) { - Assert.That(location.UnpackLast(results[i].Key), Is.EqualTo(expected[i])); + Assert.That(location.Keys.DecodeLast(results[i].Key), Is.EqualTo(expected[i])); } } @@ -699,11 +697,11 @@ public async Task Test_Range_Except_Composite_Key() await db.WriteAsync((tr) => { // Items - tr.Set(locItems.Pack("userA", 10093), Slice.Empty); - tr.Set(locItems.Pack("userA", 19238), Slice.Empty); - tr.Set(locItems.Pack("userB", 20003), Slice.Empty); + tr.Set(locItems.Keys.Encode("userA", 10093), Slice.Empty); + tr.Set(locItems.Keys.Encode("userA", 19238), Slice.Empty); + tr.Set(locItems.Keys.Encode("userB", 20003), Slice.Empty); // Processed - tr.Set(locProcessed.Pack("userA", 19238), Slice.Empty); + tr.Set(locProcessed.Keys.Encode("userA", 19238), Slice.Empty); }, this.Cancellation); // the query (Items ∩ Processed) should return (userA, 10093) and (userB, 20003) @@ -713,14 +711,14 @@ await db.WriteAsync((tr) => var results = await db.QueryAsync((tr) => { var query = tr.Except( - new[] { locItems.ToRange(), locProcessed.ToRange() }, + new[] { locItems.Keys.ToRange(), locProcessed.Keys.ToRange() }, (kv) => FdbTuple.Unpack(kv.Key).Substring(-2), // note: keys come from any of the two ranges, so we must only keep the last 2 elements of the tuple FdbTupleComparisons.Composite() // compares t[0] as a string, and t[1] as an int ); // problem: Except() still returns the original (Slice,Slice) pairs from the first range, // meaning that we still need to unpack agin the key (this time knowing the location) - return query.Select(kv => locItems.Unpack(kv.Key)); + return query.Select(kv => locItems.Keys.Unpack(kv.Key)); }, this.Cancellation); foreach(var r in results) @@ -736,12 +734,12 @@ await db.WriteAsync((tr) => results = await db.QueryAsync((tr) => { var items = tr - .GetRange(locItems.ToRange()) - .Select(kv => locItems.Unpack(kv.Key)); + .GetRange(locItems.Keys.ToRange()) + .Select(kv => 
locItems.Keys.Unpack(kv.Key)); var processed = tr - .GetRange(locProcessed.ToRange()) - .Select(kv => locProcessed.Unpack(kv.Key)); + .GetRange(locProcessed.Keys.ToRange()) + .Select(kv => locProcessed.Keys.Unpack(kv.Key)); // items and processed are lists of (string, int) tuples, we can compare them directly var query = items.Except(processed, FdbTupleComparisons.Composite()); diff --git a/FoundationDB.Tests/Layers/SubspaceFacts.cs b/FoundationDB.Tests/SubspaceFacts.cs similarity index 68% rename from FoundationDB.Tests/Layers/SubspaceFacts.cs rename to FoundationDB.Tests/SubspaceFacts.cs index 0903344d3..fb7b54de9 100644 --- a/FoundationDB.Tests/Layers/SubspaceFacts.cs +++ b/FoundationDB.Tests/SubspaceFacts.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -29,11 +29,9 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY namespace FoundationDB.Layers.Tuples.Tests { using FoundationDB.Client; - using FoundationDB.Client.Tests; using FoundationDB.Layers.Tuples; using NUnit.Framework; using System; - using System.Threading.Tasks; [TestFixture] public class SubspaceFacts @@ -49,31 +47,31 @@ public void Test_Empty_Subspace_Is_Empty() Assert.That(subspace.Key.Count, Is.EqualTo(0), "FdbSubspace.Empty.Key should be equal to Slice.Empty"); Assert.That(subspace.Key.HasValue, Is.True, "FdbSubspace.Empty.Key should be equal to Slice.Empty"); - Assert.That(subspace.Copy(), Is.Not.SameAs(subspace)); + Assert.That(FdbSubspace.Copy(subspace), Is.Not.SameAs(subspace)); } [Test] [Category("LocalCluster")] public void Test_Subspace_With_Binary_Prefix() { - var subspace = new FdbSubspace(Slice.Create(new byte[] { 42, 255, 0, 127 })); + var subspace = FdbSubspace.CreateDynamic(Slice.Create(new byte[] { 42, 255, 0, 127 })); Assert.That(subspace.Key.ToString(), Is.EqualTo("*<00><7F>")); - Assert.That(subspace.Copy(), 
Is.Not.SameAs(subspace)); - Assert.That(subspace.Copy().Key, Is.EqualTo(subspace.Key)); + Assert.That(FdbSubspace.Copy(subspace), Is.Not.SameAs(subspace)); + Assert.That(FdbSubspace.Copy(subspace).Key, Is.EqualTo(subspace.Key)); // concat(Slice) should append the slice to the binary prefix directly - Assert.That(subspace.Concat(Slice.FromInt32(0x01020304)).ToString(), Is.EqualTo("*<00><7F><04><03><02><01>")); - Assert.That(subspace.Concat(Slice.FromAscii("hello")).ToString(), Is.EqualTo("*<00><7F>hello")); + Assert.That(subspace.ConcatKey(Slice.FromInt32(0x01020304)).ToString(), Is.EqualTo("*<00><7F><04><03><02><01>")); + Assert.That(subspace.ConcatKey(Slice.FromAscii("hello")).ToString(), Is.EqualTo("*<00><7F>hello")); // pack(...) should use tuple serialization - Assert.That(subspace.Pack(123).ToString(), Is.EqualTo("*<00><7F><15>{")); - Assert.That(subspace.Pack("hello").ToString(), Is.EqualTo("*<00><7F><02>hello<00>")); - Assert.That(subspace.Pack(Slice.FromAscii("world")).ToString(), Is.EqualTo("*<00><7F><01>world<00>")); - Assert.That(subspace.Pack(FdbTuple.Create("hello", 123)).ToString(), Is.EqualTo("*<00><7F><02>hello<00><15>{")); + Assert.That(subspace.Keys.Encode(123).ToString(), Is.EqualTo("*<00><7F><15>{")); + Assert.That(subspace.Keys.Encode("hello").ToString(), Is.EqualTo("*<00><7F><02>hello<00>")); + Assert.That(subspace.Keys.Encode(Slice.FromAscii("world")).ToString(), Is.EqualTo("*<00><7F><01>world<00>")); + Assert.That(subspace.Keys.Pack(FdbTuple.Create("hello", 123)).ToString(), Is.EqualTo("*<00><7F><02>hello<00><15>{")); // if we derive a tuple from this subspace, it should keep the binary prefix when converted to a key - var t = subspace.Append("world", 123, false); + var t = subspace.Keys.Append("world", 123, false); Assert.That(t, Is.Not.Null); Assert.That(t.Count, Is.EqualTo(3)); Assert.That(t.Get(0), Is.EqualTo("world")); @@ -83,7 +81,7 @@ public void Test_Subspace_With_Binary_Prefix() Assert.That(k.ToString(), 
Is.EqualTo("*<00><7F><02>world<00><15>{<14>")); // if we unpack the key with the binary prefix, we should get a valid tuple - var t2 = subspace.Unpack(k); + var t2 = subspace.Keys.Unpack(k); Assert.That(t2, Is.Not.Null); Assert.That(t2.Count, Is.EqualTo(3)); Assert.That(t2.Get(0), Is.EqualTo("world")); @@ -95,7 +93,7 @@ public void Test_Subspace_With_Binary_Prefix() public void Test_Subspace_Copy_Does_Not_Share_Key_Buffer() { var original = FdbSubspace.Create(Slice.FromString("Hello")); - var copy = original.Copy(); + var copy = FdbSubspace.Copy(original); Assert.That(copy, Is.Not.Null); Assert.That(copy, Is.Not.SameAs(original), "Copy should be a new instance"); Assert.That(copy.Key, Is.EqualTo(original.Key), "Key should be equal"); @@ -111,38 +109,40 @@ public void Test_Cannot_Create_Or_Partition_Subspace_With_Slice_Nil() { Assert.That(() => new FdbSubspace(Slice.Nil), Throws.ArgumentException); Assert.That(() => FdbSubspace.Create(Slice.Nil), Throws.ArgumentException); - Assert.That(() => FdbSubspace.Empty[Slice.Nil], Throws.ArgumentException); - Assert.That(() => FdbSubspace.Create(FdbKey.Directory)[Slice.Nil], Throws.ArgumentException); + //FIXME: typed subspaces refactoring ! + //Assert.That(() => FdbSubspace.Empty.Partition[Slice.Nil], Throws.ArgumentException); + //Assert.That(() => FdbSubspace.Create(FdbKey.Directory).Partition[Slice.Nil], Throws.ArgumentException); } [Test] public void Test_Cannot_Create_Or_Partition_Subspace_With_Null_Tuple() { - Assert.That(() => new FdbSubspace(default(IFdbTuple)), Throws.InstanceOf()); - Assert.That(() => FdbSubspace.Empty[default(IFdbTuple)], Throws.InstanceOf()); - Assert.That(() => FdbSubspace.Create(FdbKey.Directory)[default(IFdbTuple)], Throws.InstanceOf()); + Assert.That(() => FdbSubspace.Create(default(IFdbTuple)), Throws.InstanceOf()); + //FIXME: typed subspaces refactoring ! 
+ //Assert.That(() => FdbSubspace.Empty.Partition[default(IFdbTuple)], Throws.InstanceOf()); + //Assert.That(() => FdbSubspace.Create(FdbKey.Directory).Partition[default(IFdbTuple)], Throws.InstanceOf()); } [Test] [Category("LocalCluster")] public void Test_Subspace_With_Tuple_Prefix() { - var subspace = new FdbSubspace(FdbTuple.Create("hello")); + var subspace = FdbSubspace.CreateDynamic(FdbTuple.Create("hello")); Assert.That(subspace.Key.ToString(), Is.EqualTo("<02>hello<00>")); - Assert.That(subspace.Copy(), Is.Not.SameAs(subspace)); - Assert.That(subspace.Copy().Key, Is.EqualTo(subspace.Key)); + Assert.That(FdbSubspace.Copy(subspace), Is.Not.SameAs(subspace)); + Assert.That(FdbSubspace.Copy(subspace).Key, Is.EqualTo(subspace.Key)); // concat(Slice) should append the slice to the tuple prefix directly - Assert.That(subspace.Concat(Slice.FromInt32(0x01020304)).ToString(), Is.EqualTo("<02>hello<00><04><03><02><01>")); - Assert.That(subspace.Concat(Slice.FromAscii("world")).ToString(), Is.EqualTo("<02>hello<00>world")); + Assert.That(subspace.ConcatKey(Slice.FromInt32(0x01020304)).ToString(), Is.EqualTo("<02>hello<00><04><03><02><01>")); + Assert.That(subspace.ConcatKey(Slice.FromAscii("world")).ToString(), Is.EqualTo("<02>hello<00>world")); // pack(...) 
should use tuple serialization - Assert.That(subspace.Pack(123).ToString(), Is.EqualTo("<02>hello<00><15>{")); - Assert.That(subspace.Pack("world").ToString(), Is.EqualTo("<02>hello<00><02>world<00>")); + Assert.That(subspace.Keys.Encode(123).ToString(), Is.EqualTo("<02>hello<00><15>{")); + Assert.That(subspace.Keys.Encode("world").ToString(), Is.EqualTo("<02>hello<00><02>world<00>")); // even though the subspace prefix is a tuple, appending to it will only return the new items - var t = subspace.Append("world", 123, false); + var t = subspace.Keys.Append("world", 123, false); Assert.That(t, Is.Not.Null); Assert.That(t.Count, Is.EqualTo(3)); Assert.That(t.Get(0), Is.EqualTo("world")); @@ -153,7 +153,7 @@ public void Test_Subspace_With_Tuple_Prefix() Assert.That(k.ToString(), Is.EqualTo("<02>hello<00><02>world<00><15>{<14>")); // if we unpack the key with the binary prefix, we should get a valid tuple - var t2 = subspace.Unpack(k); + var t2 = subspace.Keys.Unpack(k); Assert.That(t2, Is.Not.Null); Assert.That(t2.Count, Is.EqualTo(3)); Assert.That(t2.Get(0), Is.EqualTo("world")); @@ -166,28 +166,28 @@ public void Test_Subspace_With_Tuple_Prefix() public void Test_Subspace_Partitioning_With_Binary_Suffix() { // start from a parent subspace - var parent = FdbSubspace.Empty; + var parent = FdbSubspace.Empty.Using(TypeSystem.Tuples); Assert.That(parent.Key.ToString(), Is.EqualTo("")); // create a child subspace using a tuple - var child = parent[FdbKey.Directory]; + var child = parent.Partition[FdbKey.Directory]; Assert.That(child, Is.Not.Null); Assert.That(child.Key.ToString(), Is.EqualTo("")); // create a key from this child subspace - var key = child.Concat(Slice.FromFixed32(0x01020304)); + var key = child.ConcatKey(Slice.FromFixed32(0x01020304)); Assert.That(key.ToString(), Is.EqualTo("<04><03><02><01>")); // create another child - var grandChild = child[Slice.FromAscii("hello")]; + var grandChild = child.Partition[Slice.FromAscii("hello")]; Assert.That(grandChild, 
Is.Not.Null); Assert.That(grandChild.Key.ToString(), Is.EqualTo("hello")); - key = grandChild.Concat(Slice.FromFixed32(0x01020304)); + key = grandChild.ConcatKey(Slice.FromFixed32(0x01020304)); Assert.That(key.ToString(), Is.EqualTo("hello<04><03><02><01>")); // cornercase - Assert.That(child[Slice.Empty].Key, Is.EqualTo(child.Key)); + Assert.That(child.Partition[Slice.Empty].Key, Is.EqualTo(child.Key)); } [Test] @@ -195,16 +195,16 @@ public void Test_Subspace_Partitioning_With_Binary_Suffix() public void Test_Subspace_Partitioning_With_Tuple_Suffix() { // start from a parent subspace - var parent = new FdbSubspace(Slice.Create(new byte[] { 254 })); + var parent = FdbSubspace.CreateDynamic(Slice.FromByte(254), TypeSystem.Tuples); Assert.That(parent.Key.ToString(), Is.EqualTo("")); // create a child subspace using a tuple - var child = parent.Partition(FdbTuple.Create("hca")); + var child = parent.Partition.ByKey("hca"); Assert.That(child, Is.Not.Null); Assert.That(child.Key.ToString(), Is.EqualTo("<02>hca<00>")); // create a tuple from this child subspace - var tuple = child.Append(123); + var tuple = child.Keys.Append(123); Assert.That(tuple, Is.Not.Null); Assert.That(tuple.ToSlice().ToString(), Is.EqualTo("<02>hca<00><15>{")); @@ -213,11 +213,11 @@ public void Test_Subspace_Partitioning_With_Tuple_Suffix() Assert.That(t1.ToSlice().ToString(), Is.EqualTo("<02>hca<00><15>{<14>")); // check that we could also create the same tuple starting from the parent subspace - var t2 = parent.Append("hca", 123, false); + var t2 = parent.Keys.Append("hca", 123, false); Assert.That(t2.ToSlice(), Is.EqualTo(t1.ToSlice())); // cornercase - Assert.That(child[FdbTuple.Empty].Key, Is.EqualTo(child.Key)); + Assert.That(child.Partition[FdbTuple.Empty].Key, Is.EqualTo(child.Key)); } diff --git a/FoundationDB.Tests/TestHelpers.cs b/FoundationDB.Tests/TestHelpers.cs index 16bba8dd0..fa943156e 100644 --- a/FoundationDB.Tests/TestHelpers.cs +++ b/FoundationDB.Tests/TestHelpers.cs @@ -82,10 
+82,10 @@ public static async Task GetCleanDirectory([NotNull] IFdbD return subspace; } - public static async Task DumpSubspace([NotNull] IFdbDatabase db, [NotNull] FdbSubspace subspace, CancellationToken ct) + public static async Task DumpSubspace([NotNull] IFdbDatabase db, [NotNull] IFdbSubspace subspace, CancellationToken ct) { Assert.That(db, Is.Not.Null); - Assert.That(db.GlobalSpace.Contains(subspace.Key), Is.True, "Using a location outside of the test database partition!!! This is probably a bug in the test..."); + Assert.That(db.GlobalSpace.Contains(subspace.ToFoundationDbKey()), Is.True, "Using a location outside of the test database partition!!! This is probably a bug in the test..."); // do not log db = db.WithoutLogging(); @@ -96,17 +96,17 @@ public static async Task DumpSubspace([NotNull] IFdbDatabase db, [NotNull] FdbSu } } - public static async Task DumpSubspace([NotNull] IFdbReadOnlyTransaction tr, [NotNull] FdbSubspace subspace) + public static async Task DumpSubspace([NotNull] IFdbReadOnlyTransaction tr, [NotNull] IFdbSubspace subspace) { Assert.That(tr, Is.Not.Null); Console.WriteLine("Dumping content of subspace " + subspace.ToString() + " :"); int count = 0; await tr - .GetRange(FdbKeyRange.StartsWith(subspace.Key)) + .GetRange(FdbKeyRange.StartsWith(subspace.ToFoundationDbKey())) .ForEachAsync((kvp) => { - var key = subspace.Extract(kvp.Key); + var key = subspace.ExtractKey(kvp.Key, boundCheck: true); ++count; string keyDump = null; try diff --git a/FoundationDB.Tests/TransactionFacts.cs b/FoundationDB.Tests/TransactionFacts.cs index a8950e5f3..e61e0cc44 100644 --- a/FoundationDB.Tests/TransactionFacts.cs +++ b/FoundationDB.Tests/TransactionFacts.cs @@ -104,16 +104,16 @@ public async Task Test_Creating_A_ReadOnly_Transaction_Throws_When_Writing() Assert.That(tr, Is.Not.Null); // reading should not fail - await tr.GetAsync(db.Pack("Hello")); + await tr.GetAsync(db.Keys.Encode("Hello")); // any attempt to recast into a writeable transaction 
should fail! var tr2 = (IFdbTransaction)tr; Assert.That(tr2.IsReadOnly, Is.True, "Transaction should be marked as readonly"); - var location = db.Partition("ReadOnly"); - Assert.That(() => tr2.Set(location.Pack("Hello"), Slice.Empty), Throws.InvalidOperationException); - Assert.That(() => tr2.Clear(location.Pack("Hello")), Throws.InvalidOperationException); - Assert.That(() => tr2.ClearRange(location.Pack("ABC"), location.Pack("DEF")), Throws.InvalidOperationException); - Assert.That(() => tr2.Atomic(location.Pack("Counter"), Slice.FromFixed32(1), FdbMutationType.Add), Throws.InvalidOperationException); + var location = db.Partition.ByKey("ReadOnly"); + Assert.That(() => tr2.Set(location.Keys.Encode("Hello"), Slice.Empty), Throws.InvalidOperationException); + Assert.That(() => tr2.Clear(location.Keys.Encode("Hello")), Throws.InvalidOperationException); + Assert.That(() => tr2.ClearRange(location.Keys.Encode("ABC"), location.Keys.Encode("DEF")), Throws.InvalidOperationException); + Assert.That(() => tr2.Atomic(location.Keys.Encode("Counter"), Slice.FromFixed32(1), FdbMutationType.Add), Throws.InvalidOperationException); } } } @@ -223,11 +223,11 @@ public async Task Test_Cancelling_Transaction_Before_Commit_Should_Throw_Immedia using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.ByKey("test"); using (var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(location.Pack(1), Slice.FromString("hello")); + tr.Set(location.Keys.Encode(1), Slice.FromString("hello")); tr.Cancel(); await TestHelpers.AssertThrowsFdbErrorAsync( @@ -248,7 +248,7 @@ public async Task Test_Cancelling_Transaction_During_Commit_Should_Abort_Task() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); @@ -259,7 +259,7 @@ public async Task 
Test_Cancelling_Transaction_During_Commit_Should_Abort_Task() // Writes about 5 MB of stuff in 100k chunks for (int i = 0; i < 50; i++) { - tr.Set(location.Pack(i), Slice.Random(rnd, 100 * 1000)); + tr.Set(location.Keys.Encode(i), Slice.Random(rnd, 100 * 1000)); } // start commiting @@ -270,11 +270,7 @@ public async Task Test_Cancelling_Transaction_During_Commit_Should_Abort_Task() Assume.That(t.IsCompleted, Is.False, "Commit task already completed before having a chance to cancel"); tr.Cancel(); - await TestHelpers.AssertThrowsFdbErrorAsync( - () => t, - FdbError.TransactionCancelled, - "Cancelling a transaction that is writing to the server should fail the commit task" - ); + Assert.Throws(async () => await t, "Cancelling a transaction that is writing to the server should fail the commit task"); } } } @@ -288,7 +284,7 @@ public async Task Test_Cancelling_Token_During_Commit_Should_Abort_Task() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); @@ -300,7 +296,7 @@ public async Task Test_Cancelling_Token_During_Commit_Should_Abort_Task() // Writes about 5 MB of stuff in 100k chunks for (int i = 0; i < 50; i++) { - tr.Set(location.Pack(i), Slice.Random(rnd, 100 * 1000)); + tr.Set(location.Keys.Encode(i), Slice.Random(rnd, 100 * 1000)); } // start commiting with a cancellation token @@ -345,14 +341,14 @@ public async Task Test_Write_And_Read_Simple_Keys() long writeVersion; long readVersion; - var location = db.Partition("test"); + var location = db.Partition.ByKey("test"); // write a bunch of keys using (var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(location.Pack("hello"), Slice.FromString("World!")); - tr.Set(location.Pack("timestamp"), Slice.FromInt64(ticks)); - tr.Set(location.Pack("blob"), Slice.Create(new byte[] { 42, 123, 7 })); + tr.Set(location.Keys.Encode("hello"), Slice.FromString("World!")); + 
tr.Set(location.Keys.Encode("timestamp"), Slice.FromInt64(ticks)); + tr.Set(location.Keys.Encode("blob"), Slice.Create(new byte[] { 42, 123, 7 })); await tr.CommitAsync(); @@ -368,15 +364,15 @@ public async Task Test_Write_And_Read_Simple_Keys() readVersion = await tr.GetReadVersionAsync(); Assert.That(readVersion, Is.GreaterThan(0), "Read version should be > 0"); - bytes = await tr.GetAsync(location.Pack("hello")); // => 1007 "past_version" + bytes = await tr.GetAsync(location.Keys.Encode("hello")); // => 1007 "past_version" Assert.That(bytes.Array, Is.Not.Null); Assert.That(Encoding.UTF8.GetString(bytes.Array, bytes.Offset, bytes.Count), Is.EqualTo("World!")); - bytes = await tr.GetAsync(location.Pack("timestamp")); + bytes = await tr.GetAsync(location.Keys.Encode("timestamp")); Assert.That(bytes.Array, Is.Not.Null); Assert.That(bytes.ToInt64(), Is.EqualTo(ticks)); - bytes = await tr.GetAsync(location.Pack("blob")); + bytes = await tr.GetAsync(location.Keys.Encode("blob")); Assert.That(bytes.Array, Is.Not.Null); Assert.That(bytes.Array, Is.EqualTo(new byte[] { 42, 123, 7 })); } @@ -390,7 +386,7 @@ public async Task Test_Can_Resolve_Key_Selector() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("keys"); + var location = db.Partition.ByKey("keys"); await db.ClearRangeAsync(location, this.Cancellation); var minKey = location.Key + FdbKey.MinValue; @@ -406,7 +402,7 @@ public async Task Test_Can_Resolve_Key_Selector() tr.Set(minKey, Slice.FromString("min")); for (int i = 0; i < 20; i++) { - tr.Set(location.Pack(i), Slice.FromString(i.ToString())); + tr.Set(location.Keys.Encode(i), Slice.FromString(i.ToString())); } tr.Set(maxKey, Slice.FromString("max")); await tr.CommitAsync(); @@ -418,43 +414,43 @@ public async Task Test_Can_Resolve_Key_Selector() FdbKeySelector sel; // >= 0 - sel = FdbKeySelector.FirstGreaterOrEqual(location.Pack(0)); - Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Pack(0)), "fGE(0) should return 0"); 
+ sel = FdbKeySelector.FirstGreaterOrEqual(location.Keys.Encode(0)); + Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Keys.Encode(0)), "fGE(0) should return 0"); Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(minKey), "fGE(0)-1 should return minKey"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Pack(1)), "fGE(0)+1 should return 1"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Keys.Encode(1)), "fGE(0)+1 should return 1"); // > 0 - sel = FdbKeySelector.FirstGreaterThan(location.Pack(0)); - Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Pack(1)), "fGT(0) should return 1"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Pack(0)), "fGT(0)-1 should return 0"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Pack(2)), "fGT(0)+1 should return 2"); + sel = FdbKeySelector.FirstGreaterThan(location.Keys.Encode(0)); + Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Keys.Encode(1)), "fGT(0) should return 1"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Keys.Encode(0)), "fGT(0)-1 should return 0"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Keys.Encode(2)), "fGT(0)+1 should return 2"); // <= 10 - sel = FdbKeySelector.LastLessOrEqual(location.Pack(10)); - Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Pack(10)), "lLE(10) should return 10"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Pack(9)), "lLE(10)-1 should return 9"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Pack(11)), "lLE(10)+1 should return 11"); + sel = FdbKeySelector.LastLessOrEqual(location.Keys.Encode(10)); + Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Keys.Encode(10)), "lLE(10) should return 10"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Keys.Encode(9)), "lLE(10)-1 should return 9"); + Assert.That(await tr.GetKeyAsync(sel + 1), 
Is.EqualTo(location.Keys.Encode(11)), "lLE(10)+1 should return 11"); // < 10 - sel = FdbKeySelector.LastLessThan(location.Pack(10)); - Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Pack(9)), "lLT(10) should return 9"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Pack(8)), "lLT(10)-1 should return 8"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Pack(10)), "lLT(10)+1 should return 10"); + sel = FdbKeySelector.LastLessThan(location.Keys.Encode(10)); + Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Keys.Encode(9)), "lLT(10) should return 9"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Keys.Encode(8)), "lLT(10)-1 should return 8"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Keys.Encode(10)), "lLT(10)+1 should return 10"); // < 0 - sel = FdbKeySelector.LastLessThan(location.Pack(0)); + sel = FdbKeySelector.LastLessThan(location.Keys.Encode(0)); Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(minKey), "lLT(0) should return minKey"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Pack(0)), "lLT(0)+1 should return 0"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Keys.Encode(0)), "lLT(0)+1 should return 0"); // >= 20 - sel = FdbKeySelector.FirstGreaterOrEqual(location.Pack(20)); + sel = FdbKeySelector.FirstGreaterOrEqual(location.Keys.Encode(20)); Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(maxKey), "fGE(20) should return maxKey"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Pack(19)), "fGE(20)-1 should return 19"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Keys.Encode(19)), "fGE(20)-1 should return 19"); // > 19 - sel = FdbKeySelector.FirstGreaterThan(location.Pack(19)); + sel = FdbKeySelector.FirstGreaterThan(location.Keys.Encode(19)); Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(maxKey), "fGT(19) should return maxKey"); - Assert.That(await tr.GetKeyAsync(sel - 1), 
Is.EqualTo(location.Pack(19)), "fGT(19)-1 should return 19"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Keys.Encode(19)), "fGT(19)-1 should return 19"); } } } @@ -539,7 +535,7 @@ public async Task Test_Get_Multiple_Values() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("Batch"); + var location = db.Partition.ByKey("Batch"); await db.ClearRangeAsync(location, this.Cancellation); int[] ids = new int[] { 8, 7, 2, 9, 5, 0, 3, 4, 6, 1 }; @@ -548,21 +544,21 @@ public async Task Test_Get_Multiple_Values() { for (int i = 0; i < ids.Length; i++) { - tr.Set(location.Pack(i), Slice.FromString("#" + i.ToString())); + tr.Set(location.Keys.Encode(i), Slice.FromString("#" + i.ToString())); } await tr.CommitAsync(); } using (var tr = db.BeginTransaction(this.Cancellation)) { - var keys = ids.Select(id => location.Pack(id)).ToArray(); + var keys = ids.Select(id => location.Keys.Encode(id)).ToArray(); var results = await tr.GetValuesAsync(keys); Assert.That(results, Is.Not.Null); Assert.That(results.Length, Is.EqualTo(ids.Length)); - Console.WriteLine(String.Join(", ", results)); + Log(String.Join(", ", results)); for (int i = 0; i < ids.Length;i++) { @@ -582,7 +578,7 @@ public async Task Test_Get_Multiple_Keys() using(var db = await OpenTestPartitionAsync()) { - var location = db.Partition("keys"); + var location = db.Partition.ByKey("keys"); await db.ClearRangeAsync(location, this.Cancellation); var minKey = location.Key + FdbKey.MinValue; @@ -598,7 +594,7 @@ public async Task Test_Get_Multiple_Keys() tr.Set(minKey, Slice.FromString("min")); for (int i = 0; i < 20; i++) { - tr.Set(location.Pack(i), Slice.FromString(i.ToString())); + tr.Set(location.Keys.Encode(i), Slice.FromString(i.ToString())); } tr.Set(maxKey, Slice.FromString("max")); await tr.CommitAsync(); @@ -607,7 +603,7 @@ public async Task Test_Get_Multiple_Keys() using (var tr = db.BeginTransaction(this.Cancellation)) { - var selectors = Enumerable.Range(0, 
N).Select((i) => FdbKeySelector.FirstGreaterOrEqual(location.Pack(i))).ToArray(); + var selectors = Enumerable.Range(0, N).Select((i) => FdbKeySelector.FirstGreaterOrEqual(location.Keys.Encode(i))).ToArray(); // GetKeysAsync([]) var results = await tr.GetKeysAsync(selectors); @@ -615,7 +611,7 @@ public async Task Test_Get_Multiple_Keys() Assert.That(results.Length, Is.EqualTo(20)); for (int i = 0; i < N; i++) { - Assert.That(results[i], Is.EqualTo(location.Pack(i))); + Assert.That(results[i], Is.EqualTo(location.Keys.Encode(i))); } // GetKeysAsync(cast to enumerable) @@ -680,7 +676,7 @@ public async Task Test_Can_Perform_Atomic_Operations() Slice key; - key = location.Pack("add"); + key = location.Keys.Encode("add"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Add, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Add, 1); await PerformAtomicOperationAndCheck(db, key, 1, FdbMutationType.Add, 0); @@ -688,21 +684,21 @@ public async Task Test_Can_Perform_Atomic_Operations() await PerformAtomicOperationAndCheck(db, key, -1, FdbMutationType.Add, 1); await PerformAtomicOperationAndCheck(db, key, 123456789, FdbMutationType.Add, 987654321); - key = location.Pack("and"); + key = location.Keys.Encode("and"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitAnd, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitAnd, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, -1, FdbMutationType.BitAnd, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, 0x00FF00FF, FdbMutationType.BitAnd, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, 0x0F0F0F0F, FdbMutationType.BitAnd, 0x018055AA); - key = location.Pack("or"); + key = location.Keys.Encode("or"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitOr, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitOr, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, -1, FdbMutationType.BitOr, 
0x018055AA); await PerformAtomicOperationAndCheck(db, key, 0x00FF00FF, FdbMutationType.BitOr, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, 0x0F0F0F0F, FdbMutationType.BitOr, 0x018055AA); - key = location.Pack("xor"); + key = location.Keys.Encode("xor"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitXor, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitXor, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, -1, FdbMutationType.BitXor, 0x018055AA); @@ -711,14 +707,14 @@ public async Task Test_Can_Perform_Atomic_Operations() if (Fdb.ApiVersion >= 300) { - key = location.Pack("max"); + key = location.Keys.Encode("max"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Max, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Max, 1); await PerformAtomicOperationAndCheck(db, key, 1, FdbMutationType.Max, 0); await PerformAtomicOperationAndCheck(db, key, 2, FdbMutationType.Max, 1); await PerformAtomicOperationAndCheck(db, key, 123456789, FdbMutationType.Max, 987654321); - key = location.Pack("min"); + key = location.Keys.Encode("min"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Min, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Min, 1); await PerformAtomicOperationAndCheck(db, key, 1, FdbMutationType.Min, 0); @@ -730,16 +726,15 @@ public async Task Test_Can_Perform_Atomic_Operations() // calling with an unsupported mutation type should fail using (var tr = db.BeginTransaction(this.Cancellation)) { - key = location.Pack("invalid"); + key = location.Keys.Encode("invalid"); Assert.That(() => tr.Atomic(key, Slice.FromFixed32(42), FdbMutationType.Max), Throws.InstanceOf().With.Property("Code").EqualTo(FdbError.InvalidMutationType)); } - } // calling with an invalid mutation type should fail using (var tr = db.BeginTransaction(this.Cancellation)) { - key = location.Pack("invalid"); + key = location.Keys.Encode("invalid"); Assert.That(() 
=> tr.Atomic(key, Slice.FromFixed32(42), (FdbMutationType)42), Throws.InstanceOf().With.Property("Code").EqualTo(FdbError.InvalidMutationType)); } } @@ -751,15 +746,15 @@ public async Task Test_Can_Snapshot_Read() using(var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); // write a bunch of keys await db.WriteAsync((tr) => { - tr.Set(location.Pack("hello"), Slice.FromString("World!")); - tr.Set(location.Pack("foo"), Slice.FromString("bar")); + tr.Set(location.Keys.Encode("hello"), Slice.FromString("World!")); + tr.Set(location.Keys.Encode("foo"), Slice.FromString("bar")); }, this.Cancellation); // read them using snapshot @@ -767,10 +762,10 @@ await db.WriteAsync((tr) => { Slice bytes; - bytes = await tr.Snapshot.GetAsync(location.Pack("hello")); + bytes = await tr.Snapshot.GetAsync(location.Keys.Encode("hello")); Assert.That(bytes.ToUnicode(), Is.EqualTo("World!")); - bytes = await tr.Snapshot.GetAsync(location.Pack("foo")); + bytes = await tr.Snapshot.GetAsync(location.Keys.Encode("foo")); Assert.That(bytes.ToUnicode(), Is.EqualTo("bar")); } @@ -791,7 +786,7 @@ public async Task Test_CommittedVersion_On_ReadOnly_Transactions() long ver = tr.GetCommittedVersion(); Assert.That(ver, Is.EqualTo(-1), "Initial committed version"); - var _ = await tr.GetAsync(db.Pack("foo")); + var _ = await tr.GetAsync(db.Keys.Encode("foo")); // until the transction commits, the committed version will stay -1 ver = tr.GetCommittedVersion(); @@ -822,7 +817,7 @@ public async Task Test_CommittedVersion_On_Write_Transactions() long ver = tr.GetCommittedVersion(); Assert.That(ver, Is.EqualTo(-1), "Initial committed version"); - tr.Set(db.Pack("foo"), Slice.FromString("bar")); + tr.Set(db.Keys.Encode("foo"), Slice.FromString("bar")); // until the transction commits, the committed version should still be -1 ver = tr.GetCommittedVersion(); @@ -850,10 +845,10 @@ 
public async Task Test_CommittedVersion_After_Reset() // take the read version (to compare with the committed version below) long rv1 = await tr.GetReadVersionAsync(); // do something and commit - tr.Set(db.Pack("foo"), Slice.FromString("bar")); + tr.Set(db.Keys.Encode("foo"), Slice.FromString("bar")); await tr.CommitAsync(); long cv1 = tr.GetCommittedVersion(); - Console.WriteLine("COMMIT: " + rv1 + " / " + cv1); + Log("COMMIT: {0} / {1}", rv1, cv1); Assert.That(cv1, Is.GreaterThanOrEqualTo(rv1), "Committed version of write transaction should be >= the read version"); // reset the transaction @@ -861,15 +856,15 @@ public async Task Test_CommittedVersion_After_Reset() long rv2 = await tr.GetReadVersionAsync(); long cv2 = tr.GetCommittedVersion(); - Console.WriteLine("RESET: " + rv2 + " / " + cv2); + Log("RESET: {0} / {1}", rv2, cv2); //Note: the current fdb_c client does not revert the commited version to -1 ... ? //Assert.That(cv2, Is.EqualTo(-1), "Committed version should go back to -1 after reset"); // read-only + commit - await tr.GetAsync(db.Pack("foo")); + await tr.GetAsync(db.Keys.Encode("foo")); await tr.CommitAsync(); cv2 = tr.GetCommittedVersion(); - Console.WriteLine("COMMIT2: " + rv2 + " / " + cv2); + Log("COMMIT2: {0} / {1}", rv2, cv2); Assert.That(cv2, Is.EqualTo(-1), "Committed version of read-only transaction should be -1 even the transaction was previously used to write something"); } @@ -883,24 +878,24 @@ public async Task Test_Regular_Read_With_Concurrent_Change_Should_Conflict() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); await db.WriteAsync((tr) => { - tr.Set(location.Pack("foo"), Slice.FromString("foo")); + tr.Set(location.Keys.Encode("foo"), Slice.FromString("foo")); }, this.Cancellation); using (var trA = db.BeginTransaction(this.Cancellation)) using (var trB = 
db.BeginTransaction(this.Cancellation)) { // regular read - var foo = await trA.GetAsync(location.Pack("foo")); - trA.Set(location.Pack("foo"), Slice.FromString("bar")); + var foo = await trA.GetAsync(location.Keys.Encode("foo")); + trA.Set(location.Keys.Encode("foo"), Slice.FromString("bar")); // this will conflict with our read - trB.Set(location.Pack("foo"), Slice.FromString("bar")); + trB.Set(location.Keys.Encode("foo"), Slice.FromString("bar")); await trB.CommitAsync(); // should fail with a "not_comitted" error @@ -922,23 +917,23 @@ public async Task Test_Snapshot_Read_With_Concurrent_Change_Should_Not_Conflict( using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); await db.WriteAsync((tr) => { - tr.Set(location.Pack("foo"), Slice.FromString("foo")); + tr.Set(location.Keys.Encode("foo"), Slice.FromString("foo")); }, this.Cancellation); using (var trA = db.BeginTransaction(this.Cancellation)) using (var trB = db.BeginTransaction(this.Cancellation)) { // reading with snapshot mode should not conflict - var foo = await trA.Snapshot.GetAsync(location.Pack("foo")); - trA.Set(location.Pack("foo"), Slice.FromString("bar")); + var foo = await trA.Snapshot.GetAsync(location.Keys.Encode("foo")); + trA.Set(location.Keys.Encode("foo"), Slice.FromString("bar")); // this would normally conflicts with the previous read if it wasn't a snapshot read - trB.Set(location.Pack("foo"), Slice.FromString("bar")); + trB.Set(location.Keys.Encode("foo"), Slice.FromString("bar")); await trB.CommitAsync(); // should succeed @@ -954,12 +949,12 @@ public async Task Test_GetRange_With_Concurrent_Change_Should_Conflict() using(var db = await OpenTestPartitionAsync()) { - var loc = db.Partition("test"); + var loc = db.Partition.ByKey("test"); await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); + 
tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); }, this.Cancellation); // we will read the first key from [0, 100), expected 50 @@ -970,19 +965,19 @@ await db.WriteAsync((tr) => { // [0, 100) limit 1 => 50 var kvp = await tr1 - .GetRange(loc.Pack("foo"), loc.Pack("foo", 100)) + .GetRange(loc.Keys.Encode("foo"), loc.Keys.Encode("foo", 100)) .FirstOrDefaultAsync(); - Assert.That(kvp.Key, Is.EqualTo(loc.Pack("foo", 50))); + Assert.That(kvp.Key, Is.EqualTo(loc.Keys.Encode("foo", 50))); // 42 < 50 > conflict !!! using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Pack("foo", 42), Slice.FromAscii("forty-two")); + tr2.Set(loc.Keys.Encode("foo", 42), Slice.FromAscii("forty-two")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); await TestHelpers.AssertThrowsFdbErrorAsync(() => tr1.CommitAsync(), FdbError.NotCommitted, "The Set(42) in TR2 should have conflicted with the GetRange(0, 100) in TR1"); } @@ -994,26 +989,26 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // [0, 100) limit 1 => 50 var kvp = await tr1 - .GetRange(loc.Pack("foo"), loc.Pack("foo", 100)) + .GetRange(loc.Keys.Encode("foo"), loc.Keys.Encode("foo", 100)) .FirstOrDefaultAsync(); - Assert.That(kvp.Key, Is.EqualTo(loc.Pack("foo", 50))); + Assert.That(kvp.Key, Is.EqualTo(loc.Keys.Encode("foo", 50))); // 77 > 50 => no conflict using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Pack("foo", 77), Slice.FromAscii("docm")); + tr2.Set(loc.Keys.Encode("foo", 77), Slice.FromAscii("docm")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + 
tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); // should not conflict! await tr1.CommitAsync(); @@ -1028,12 +1023,12 @@ public async Task Test_GetKey_With_Concurrent_Change_Should_Conflict() using (var db = await OpenTestPartitionAsync()) { - var loc = db.Partition("test"); + var loc = db.Partition.ByKey("test"); await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); }, this.Cancellation); // we will ask for the first key from >= 0, expecting 50, but if another transaction inserts something BEFORE 50, our key selector would have returned a different result, causing a conflict @@ -1041,18 +1036,18 @@ await db.WriteAsync((tr) => using (var tr1 = db.BeginTransaction(this.Cancellation)) { // fGE{0} => 50 - var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Pack("foo", 0))); - Assert.That(key, Is.EqualTo(loc.Pack("foo", 50))); + var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Keys.Encode("foo", 0))); + Assert.That(key, Is.EqualTo(loc.Keys.Encode("foo", 50))); // 42 < 50 => conflict !!! 
using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Pack("foo", 42), Slice.FromAscii("forty-two")); + tr2.Set(loc.Keys.Encode("foo", 42), Slice.FromAscii("forty-two")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); await TestHelpers.AssertThrowsFdbErrorAsync(() => tr1.CommitAsync(), FdbError.NotCommitted, "The Set(42) in TR2 should have conflicted with the GetKey(fGE{0}) in TR1"); } @@ -1062,24 +1057,24 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // fGE{0} => 50 - var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Pack("foo", 0))); - Assert.That(key, Is.EqualTo(loc.Pack("foo", 50))); + var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Keys.Encode("foo", 0))); + Assert.That(key, Is.EqualTo(loc.Keys.Encode("foo", 50))); // 77 > 50 => no conflict using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Pack("foo", 77), Slice.FromAscii("docm")); + tr2.Set(loc.Keys.Encode("foo", 77), Slice.FromAscii("docm")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); // should not conflict! 
await tr1.CommitAsync(); @@ -1090,25 +1085,25 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); - tr.Set(loc.Pack("foo", 100), Slice.FromAscii("one hundred")); + tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 100), Slice.FromAscii("one hundred")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // fGE{50} + 1 => 100 - var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Pack("foo", 50)) + 1); - Assert.That(key, Is.EqualTo(loc.Pack("foo", 100))); + var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Keys.Encode("foo", 50)) + 1); + Assert.That(key, Is.EqualTo(loc.Keys.Encode("foo", 100))); // 77 between 50 and 100 => conflict !!! using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Pack("foo", 77), Slice.FromAscii("docm")); + tr2.Set(loc.Keys.Encode("foo", 77), Slice.FromAscii("docm")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); // should conflict! 
await TestHelpers.AssertThrowsFdbErrorAsync(() => tr1.CommitAsync(), FdbError.NotCommitted, "The Set(77) in TR2 should have conflicted with the GetKey(fGE{50} + 1) in TR1"); @@ -1119,25 +1114,25 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); - tr.Set(loc.Pack("foo", 100), Slice.FromAscii("one hundred")); + tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 100), Slice.FromAscii("one hundred")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // fGT{50} => 100 - var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterThan(loc.Pack("foo", 50))); - Assert.That(key, Is.EqualTo(loc.Pack("foo", 100))); + var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterThan(loc.Keys.Encode("foo", 50))); + Assert.That(key, Is.EqualTo(loc.Keys.Encode("foo", 100))); // another transaction changes the VALUE of 50 and 100 (but does not change the fact that they exist nor add keys in between) using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Pack("foo", 100), Slice.FromAscii("cent")); + tr2.Set(loc.Keys.Encode("foo", 100), Slice.FromAscii("cent")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); // this causes a conflict in the current version of FDB await TestHelpers.AssertThrowsFdbErrorAsync(() => tr1.CommitAsync(), FdbError.NotCommitted, "The Set(100) in TR2 should have conflicted with the GetKey(fGT{50}) in TR1"); @@ -1148,25 +1143,25 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); - tr.Set(loc.Pack("foo", 100), Slice.FromAscii("one hundred")); + tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 100), Slice.FromAscii("one hundred")); }, 
this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // lLT{100} => 50 - var key = await tr1.GetKeyAsync(FdbKeySelector.LastLessThan(loc.Pack("foo", 100))); - Assert.That(key, Is.EqualTo(loc.Pack("foo", 50))); + var key = await tr1.GetKeyAsync(FdbKeySelector.LastLessThan(loc.Keys.Encode("foo", 100))); + Assert.That(key, Is.EqualTo(loc.Keys.Encode("foo", 50))); // another transaction changes the VALUE of 50 and 100 (but does not change the fact that they exist nor add keys in between) using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Clear(loc.Pack("foo", 100)); + tr2.Clear(loc.Keys.Encode("foo", 100)); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); // this causes a conflict in the current version of FDB await tr1.CommitAsync(); @@ -1191,8 +1186,8 @@ public async Task Test_Read_Isolation() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); - var key = location.Pack("A"); + var location = db.Partition.ByKey("test"); + var key = location.Keys.Encode("A"); await db.ClearRangeAsync(location, this.Cancellation); @@ -1257,13 +1252,13 @@ public async Task Test_Read_Isolation_From_Writes() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); - var a = location.Pack("A"); - var b = location.Pack("B"); - var c = location.Pack("C"); - var d = location.Pack("D"); + var a = location.Keys.Encode("A"); + var b = location.Keys.Encode("B"); + var c = location.Keys.Encode("C"); + var d = location.Keys.Encode("D"); // Reads (before and after): // - A and B will use regular reads @@ -1318,11 +1313,11 @@ public async Task Test_ReadYourWritesDisable_Isolation() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var 
location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); - var a = location.Pack("A"); - var b = location.Partition("B"); + var a = location.Keys.Encode("A"); + var b = location.Partition.ByKey("B"); #region Default behaviour... @@ -1331,23 +1326,23 @@ public async Task Test_ReadYourWritesDisable_Isolation() await db.WriteAsync((tr) => { tr.Set(a, Slice.FromString("a")); - tr.Set(b.Pack(10), Slice.FromString("PRINT \"HELLO\"")); - tr.Set(b.Pack(20), Slice.FromString("GOTO 10")); + tr.Set(b.Keys.Encode(10), Slice.FromString("PRINT \"HELLO\"")); + tr.Set(b.Keys.Encode(20), Slice.FromString("GOTO 10")); }, this.Cancellation); using(var tr = db.BeginTransaction(this.Cancellation)) { var data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("a")); - var res = await tr.GetRange(b.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); + var res = await tr.GetRange(b.Keys.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); Assert.That(res, Is.EqualTo(new [] { "PRINT \"HELLO\"", "GOTO 10" })); tr.Set(a, Slice.FromString("aa")); - tr.Set(b.Pack(15), Slice.FromString("PRINT \"WORLD\"")); + tr.Set(b.Keys.Encode(15), Slice.FromString("PRINT \"WORLD\"")); data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("aa"), "The transaction own writes should be visible by default"); - res = await tr.GetRange(b.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); + res = await tr.GetRange(b.Keys.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); Assert.That(res, Is.EqualTo(new[] { "PRINT \"HELLO\"", "PRINT \"WORLD\"", "GOTO 10" }), "The transaction own writes should be visible by default"); //note: don't commit @@ -1365,15 +1360,15 @@ await db.WriteAsync((tr) => var data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("a")); - var res = await tr.GetRange(b.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); + var res = await 
tr.GetRange(b.Keys.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); Assert.That(res, Is.EqualTo(new[] { "PRINT \"HELLO\"", "GOTO 10" })); tr.Set(a, Slice.FromString("aa")); - tr.Set(b.Pack(15), Slice.FromString("PRINT \"WORLD\"")); + tr.Set(b.Keys.Encode(15), Slice.FromString("PRINT \"WORLD\"")); data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("a"), "The transaction own writes should not be seen with ReadYourWritesDisable option enabled"); - res = await tr.GetRange(b.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); + res = await tr.GetRange(b.Keys.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); Assert.That(res, Is.EqualTo(new[] { "PRINT \"HELLO\"", "GOTO 10" }), "The transaction own writes should not be seen with ReadYourWritesDisable option enabled"); //note: don't commit @@ -1393,14 +1388,14 @@ public async Task Test_Can_Set_Read_Version() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.ByKey("test"); long commitedVersion; // create first version using (var tr1 = db.BeginTransaction(this.Cancellation)) { - tr1.Set(location.Pack("concurrent"), Slice.FromByte(1)); + tr1.Set(location.Keys.Encode("concurrent"), Slice.FromByte(1)); await tr1.CommitAsync(); // get this version @@ -1410,7 +1405,7 @@ public async Task Test_Can_Set_Read_Version() // mutate in another transaction using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(location.Pack("concurrent"), Slice.FromByte(2)); + tr2.Set(location.Keys.Encode("concurrent"), Slice.FromByte(2)); await tr2.CommitAsync(); } @@ -1422,7 +1417,7 @@ public async Task Test_Can_Set_Read_Version() long ver = await tr3.GetReadVersionAsync(); Assert.That(ver, Is.EqualTo(commitedVersion), "GetReadVersion should return the same value as SetReadVersion!"); - var bytes = await tr3.GetAsync(location.Pack("concurrent")); + var bytes = await tr3.GetAsync(location.Keys.Encode("concurrent")); 
Assert.That(bytes.GetBytes(), Is.EqualTo(new byte[] { 1 }), "Should have seen the first version!"); } @@ -1545,7 +1540,7 @@ public async Task Test_Transaction_RetryLoop_Respects_DefaultRetryLimit_Value() var t = db.ReadAsync((tr) => { ++counter; - Console.WriteLine("Called " + counter + " time(s)"); + Log("Called {0} time(s)", counter); if (counter > 4) { go.Cancel(); @@ -1610,12 +1605,12 @@ public async Task Test_Can_Add_Read_Conflict_Range() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("conflict"); + var location = db.Partition.ByKey("conflict"); await db.ClearRangeAsync(location, this.Cancellation); - var key1 = location.Pack(1); - var key2 = location.Pack(2); + var key1 = location.Keys.Encode(1); + var key2 = location.Keys.Encode(2); using (var tr1 = db.BeginTransaction(this.Cancellation)) { @@ -1649,13 +1644,13 @@ public async Task Test_Can_Add_Write_Conflict_Range() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("conflict"); + var location = db.Partition.ByKey("conflict"); await db.ClearRangeAsync(location, this.Cancellation); - var keyConflict = location.Pack(0); - var key1 = location.Pack(1); - var key2 = location.Pack(2); + var keyConflict = location.Keys.Encode(0); + var key1 = location.Keys.Encode(1); + var key2 = location.Keys.Encode(2); using (var tr1 = db.BeginTransaction(this.Cancellation)) { @@ -1691,12 +1686,12 @@ public async Task Test_Can_Setup_And_Cancel_Watches() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test", "bigbrother"); + var location = db.Partition.ByKey("test", "bigbrother"); await db.ClearRangeAsync(location, this.Cancellation); - var key1 = location.Pack("watched"); - var key2 = location.Pack("witness"); + var key1 = location.Keys.Encode("watched"); + var key2 = location.Keys.Encode("witness"); await db.WriteAsync((tr) => { @@ -1750,12 +1745,12 @@ public async Task Test_Can_Get_Addresses_For_Key() { using (var db = await 
OpenTestPartitionAsync()) { - var location = db.Partition("location_api"); + var location = db.Partition.ByKey("location_api"); await db.ClearRangeAsync(location, this.Cancellation); - var key1 = location.Pack(1); - var key404 = location.Pack(404); + var key1 = location.Keys.Encode(1); + var key404 = location.Keys.Encode(404); await db.WriteAsync((tr) => { @@ -1808,12 +1803,12 @@ public async Task Test_Can_Get_Boundary_Keys() using (var db = await Fdb.OpenAsync(TestHelpers.TestClusterFile, TestHelpers.TestDbName, this.Cancellation)) { //var cf = await db.GetCoordinatorsAsync(); - //Console.WriteLine("Connected to " + cf.ToString()); + //Log("Connected to {0}", cf.ToString()); using (var tr = db.BeginReadOnlyTransaction(this.Cancellation).WithReadAccessToSystemKeys()) { // dump nodes - Console.WriteLine("Server List:"); + Log("Server List:"); var servers = await tr.GetRange(Fdb.System.ServerList, Fdb.System.ServerList + Fdb.System.MaxValue) .Select(kvp => new KeyValuePair(kvp.Key.Substring(Fdb.System.ServerList.Count), kvp.Value)) .ToListAsync(); @@ -1826,18 +1821,18 @@ public async Task Test_Can_Get_Boundary_Keys() // the datacenter id seems to be at offset 40 var dataCenterId = key.Value.Substring(40, 16).ToHexaString(); - Console.WriteLine("- " + key.Key.ToHexaString() + ": (" + key.Value.Count + ") " + key.Value.ToAsciiOrHexaString()); - Console.WriteLine(" > node = " + nodeId); - Console.WriteLine(" > machine = " + machineId); - Console.WriteLine(" > datacenter = " + dataCenterId); + Log("- {0} : ({1}) {2}", key.Key.ToHexaString(), key.Value.Count, key.Value.ToAsciiOrHexaString()); + Log(" > node = {0}", nodeId); + Log(" > machine = {0}", machineId); + Log(" > datacenter = {0}", dataCenterId); } - Console.WriteLine(); + Log(); // dump keyServers var shards = await tr.GetRange(Fdb.System.KeyServers, Fdb.System.KeyServers + Fdb.System.MaxValue) .Select(kvp => new KeyValuePair(kvp.Key.Substring(Fdb.System.KeyServers.Count), kvp.Value)) .ToListAsync(); - 
Console.WriteLine("Key Servers: " + shards.Count + " shards"); + Log("Key Servers: {0} shard(s)", shards.Count); HashSet distinctNodes = new HashSet(StringComparer.Ordinal); int replicationFactor = 0; @@ -1865,14 +1860,14 @@ public async Task Test_Can_Get_Boundary_Keys() //Console.WriteLine("- " + key.Value.Substring(0, 12).ToAsciiOrHexaString() + " : " + String.Join(", ", ids) + " = " + key.Key); } - Console.WriteLine(); - Console.WriteLine("Distinct nodes: " + distinctNodes.Count); + Log(); + Log("Distinct nodes: {0}", distinctNodes.Count); foreach(var machine in distinctNodes) { - Console.WriteLine("- " + machine); + Log("- " + machine); } - Console.WriteLine(); - Console.WriteLine("Cluster topology: " + distinctNodes.Count + " processes with " + (replicationFactor == 1 ? "single" : replicationFactor == 2 ? "double" : replicationFactor == 3 ? "triple" : replicationFactor.ToString()) + " replication"); + Log(); + Log("Cluster topology: {0} process(es) with {1} replication", distinctNodes.Count, replicationFactor == 1 ? "single" : replicationFactor == 2 ? "double" : replicationFactor == 3 ? 
"triple" : replicationFactor.ToString()); } } } @@ -1888,10 +1883,10 @@ public async Task Test_Simple_Read_Transaction() { await tr.GetReadVersionAsync(); - var a = location.Concat(Slice.FromString("A")); - var b = location.Concat(Slice.FromString("B")); - var c = location.Concat(Slice.FromString("C")); - var z = location.Concat(Slice.FromString("Z")); + var a = location.ConcatKey(Slice.FromString("A")); + var b = location.ConcatKey(Slice.FromString("B")); + var c = location.ConcatKey(Slice.FromString("C")); + var z = location.ConcatKey(Slice.FromString("Z")); //await tr.GetAsync(location.Concat(Slice.FromString("KEY"))); @@ -1902,7 +1897,7 @@ public async Task Test_Simple_Read_Transaction() tr.Set(b, Slice.FromString("BAZ")); tr.Set(c, Slice.FromString("BAT")); tr.ClearRange(a, c); - + //tr.ClearRange(location.Concat(Slice.FromString("A")), location.Concat(Slice.FromString("Z"))); //tr.Set(location.Concat(Slice.FromString("C")), Slice.Empty); @@ -1927,125 +1922,130 @@ public async Task Test_Simple_Read_Transaction() [Test, Category("LongRunning")] public async Task Test_BadPractice_Future_Fuzzer() { - const int DURATION_SEC = 30; + const int DURATION_SEC = 10; const int R = 100; - using (var db = await OpenTestDatabaseAsync()) + try { - var location = db.Partition("Fuzzer"); - - - var rnd = new Random(); - int seed = rnd.Next(); - Log("Using random seeed {0}", seed); - rnd = new Random(seed); - - await db.WriteAsync((tr) => + using (var db = await OpenTestDatabaseAsync()) { - for (int i = 0; i < R; i++) + var location = db.Partition.ByKey("Fuzzer"); + var rnd = new Random(); + int seed = rnd.Next(); + Log("Using random seeed {0}", seed); + rnd = new Random(seed); + + await db.WriteAsync((tr) => { - tr.Set(location.Pack(i), Slice.FromInt32(i)); - } - }, this.Cancellation); + for (int i = 0; i < R; i++) + { + tr.Set(location.Keys.Encode(i), Slice.FromInt32(i)); + } + }, this.Cancellation); - var start = DateTime.UtcNow; - Log("This test will run for {0} seconds", 
DURATION_SEC); + var start = DateTime.UtcNow; + Log("This test will run for {0} seconds", DURATION_SEC); - int time = 0; + int time = 0; - List m_alive = new List(); - while (DateTime.UtcNow - start < TimeSpan.FromSeconds(DURATION_SEC)) - { - switch (rnd.Next(10)) + var line = new StringBuilder(256); + + var alive = new List(100); + var lastCheck = start; + while (DateTime.UtcNow - start < TimeSpan.FromSeconds(DURATION_SEC)) { - case 0: + int x = rnd.Next(10); + + if (x == 0) { // start a new transaction - Console.Write('T'); + line.Append('T'); var tr = db.BeginTransaction(FdbTransactionMode.Default, this.Cancellation); - m_alive.Add(tr); - break; + alive.Add(tr); } - case 1: + else if (x == 1) { // drop a random transaction - if (m_alive.Count == 0) continue; - Console.Write('L'); - int p = rnd.Next(m_alive.Count); + if (alive.Count == 0) continue; + line.Append('L'); + int p = rnd.Next(alive.Count); - m_alive.RemoveAt(p); - //no dispose - break; + alive.RemoveAt(p); + //no dispose! } - case 2: + else if (x == 2) { // dispose a random transaction - if (m_alive.Count == 0) continue; - Console.Write('D'); - int p = rnd.Next(m_alive.Count); + if (alive.Count == 0) continue; + line.Append('D'); + int p = rnd.Next(alive.Count); - var tr = m_alive[p]; + var tr = alive[p]; + alive.RemoveAt(p); tr.Dispose(); - m_alive.RemoveAt(p); - break; } - case 3: - { // GC! 
- Console.Write('C'); + else if (x == 3) + { // get read version + line.Append('R'); var tr = db.BeginTransaction(FdbTransactionMode.ReadOnly, this.Cancellation); - m_alive.Add(tr); + alive.Add(tr); await tr.GetReadVersionAsync(); - break; } - - case 4: - case 5: - case 6: - { // read a random value from a random transaction - Console.Write('G'); - if (m_alive.Count == 0) break; - int p = rnd.Next(m_alive.Count); - var tr = m_alive[p]; - - int x = rnd.Next(R); - try - { - var res = await tr.GetAsync(location.Pack(x)); + else + { + if (x % 2 == 0) + { // read a random value from a random transaction + if (alive.Count == 0) continue; + line.Append('G'); + int p = rnd.Next(alive.Count); + var tr = alive[p]; + + int k = rnd.Next(R); + try + { + await tr.GetAsync(location.Keys.Encode(x)); + } + catch (FdbException) + { + line.Append('!'); + alive.RemoveAt(p); + tr.Dispose(); + } } - catch (FdbException) - { - Console.Write('!'); + else + { // read a random value, but drop the task + if (alive.Count == 0) continue; + line.Append('g'); + int p = rnd.Next(alive.Count); + var tr = alive[p]; + + int k = rnd.Next(R); + var t = tr.GetAsync(location.Keys.Encode(k)).ContinueWith((_) => { var err = _.Exception; }, TaskContinuationOptions.OnlyOnFaulted); + // => t is not stored } - break; } - case 7: - case 8: - case 9: - { // read a random value, but drop the task - Console.Write('g'); - if (m_alive.Count == 0) break; - int p = rnd.Next(m_alive.Count); - var tr = m_alive[p]; - - int x = rnd.Next(R); - var t = tr.GetAsync(location.Pack(x)).ContinueWith((_) => Console.Write('!'), TaskContinuationOptions.NotOnRanToCompletion); - // => t is not stored - break; + + if ((++time) % 10 == 0 && DateTime.UtcNow - lastCheck >= TimeSpan.FromSeconds(1)) + { + Log(line.ToString()); + line.Clear(); + Log("State: {0}", alive.Count); + //Log("Performing full GC"); + //GC.Collect(2); + //GC.WaitForPendingFinalizers(); + //GC.Collect(2); + lastCheck = DateTime.UtcNow; } - } - if ((time++) % 80 == 
0) - { - Console.WriteLine(); - Log("State: {0}", m_alive.Count); - Console.Write('C'); - GC.Collect(); - GC.WaitForPendingFinalizers(); - GC.Collect(); + //await Task.Delay(1); + } + GC.Collect(); + GC.WaitForPendingFinalizers(); + GC.Collect(); } - - GC.Collect(); - GC.WaitForPendingFinalizers(); - GC.Collect(); + } + finally + { + Log("Test methods completed!"); } } diff --git a/FoundationDB.Tests/TransactionalFacts.cs b/FoundationDB.Tests/TransactionalFacts.cs index 4349145b3..f3a55e4d3 100644 --- a/FoundationDB.Tests/TransactionalFacts.cs +++ b/FoundationDB.Tests/TransactionalFacts.cs @@ -51,7 +51,7 @@ public async Task Test_ReadAsync_Should_Normally_Execute_Only_Once() using(var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(location.Pack("Hello"), Slice.FromString(secret)); + tr.Set(location.Keys.Encode("Hello"), Slice.FromString(secret)); await tr.CommitAsync(); } @@ -64,7 +64,7 @@ public async Task Test_ReadAsync_Should_Normally_Execute_Only_Once() Assert.That(tr.Context.Database, Is.SameAs(db)); Assert.That(tr.Context.Shared, Is.True); - return tr.GetAsync(location.Pack("Hello")); + return tr.GetAsync(location.Keys.Encode("Hello")); }, this.Cancellation); Assert.That(called, Is.EqualTo(1)); // note: if this assert fails, first ensure that you did not get a transient error while running this test! 
@@ -161,7 +161,7 @@ public async Task Test_Transactionals_Retries_Do_Not_Leak_When_Reading_Too_Much( var sw = Stopwatch.StartNew(); Console.WriteLine("Inserting test data (this may take a few minutes)..."); var rnd = new Random(); - await Fdb.Bulk.WriteAsync(db, Enumerable.Range(0, 100 * 1000).Select(i => new KeyValuePair(location.Pack(i), Slice.Random(rnd, 4096))), this.Cancellation); + await Fdb.Bulk.WriteAsync(db, Enumerable.Range(0, 100 * 1000).Select(i => new KeyValuePair(location.Keys.Encode(i), Slice.Random(rnd, 4096))), this.Cancellation); sw.Stop(); Console.WriteLine("> done in " + sw.Elapsed); @@ -171,8 +171,8 @@ public async Task Test_Transactionals_Retries_Do_Not_Leak_When_Reading_Too_Much( { var result = await db.ReadAsync((tr) => { - Console.WriteLine("Retry #" + tr.Context.Retries + " @ " + tr.Context.Duration.Elapsed); - return tr.GetRange(location.ToRange()).ToListAsync(); + Console.WriteLine("Retry #" + tr.Context.Retries + " @ " + tr.Context.ElapsedTotal); + return tr.GetRange(location.Keys.ToRange()).ToListAsync(); }, this.Cancellation); Assert.Fail("Too fast! increase the amount of inserted data, or slow down the system!"); @@ -246,7 +246,7 @@ public async Task Test_Transactionals_ReadOnly_Should_Deny_Write_Attempts() Assume.That(hijack, Is.Not.Null, "This test requires the transaction to implement IFdbTransaction !"); // this call should fail ! 
- hijack.Set(location.Pack("Hello"), Slice.FromString("Hijacked")); + hijack.Set(location.Keys.Encode("Hello"), Slice.FromString("Hijacked")); Assert.Fail("Calling Set() on a read-only transaction should fail"); return Task.FromResult(123); @@ -271,7 +271,7 @@ await db.WriteAsync((tr) => { for (int i = 0; i < 10; i++) { - tr.Set(location.Pack(i), Slice.FromInt32(i)); + tr.Set(location.Keys.Encode(i), Slice.FromInt32(i)); } }, this.Cancellation); @@ -285,25 +285,25 @@ await db.WriteAsync((tr) => // read 0..2 for (int i = 0; i < 3; i++) { - values[i] = (await tr.GetAsync(location.Pack(i))).ToInt32(); + values[i] = (await tr.GetAsync(location.Keys.Encode(i))).ToInt32(); } // another transaction commits a change to 3 before we read it - await db.WriteAsync((tr2) => tr2.Set(location.Pack(3), Slice.FromInt32(42)), this.Cancellation); + await db.WriteAsync((tr2) => tr2.Set(location.Keys.Encode(3), Slice.FromInt32(42)), this.Cancellation); // read 3 to 7 for (int i = 3; i < 7; i++) { - values[i] = (await tr.GetAsync(location.Pack(i))).ToInt32(); + values[i] = (await tr.GetAsync(location.Keys.Encode(i))).ToInt32(); } // another transaction commits a change to 6 after it has been read - await db.WriteAsync((tr2) => tr2.Set(location.Pack(6), Slice.FromInt32(66)), this.Cancellation); + await db.WriteAsync((tr2) => tr2.Set(location.Keys.Encode(6), Slice.FromInt32(66)), this.Cancellation); // read 7 to 9 for (int i = 7; i < 10; i++) { - values[i] = (await tr.GetAsync(location.Pack(i))).ToInt32(); + values[i] = (await tr.GetAsync(location.Keys.Encode(i))).ToInt32(); } return values; diff --git a/FoundationDb.Client.sln.DotSettings b/FoundationDb.Client.sln.DotSettings index 47110b55c..d76791c18 100644 --- a/FoundationDb.Client.sln.DotSettings +++ b/FoundationDb.Client.sln.DotSettings @@ -65,4 +65,6 @@ <Policy Inspect="True" Prefix="s_" Suffix="" Style="aaBb"><ExtraRule Prefix="" Suffix="" Style="AaBb" /></Policy> <Policy Inspect="True" Prefix="" Suffix="" Style="AaBb"><ExtraRule 
Prefix="m_" Suffix="" Style="aaBb" /></Policy> True - True \ No newline at end of file + True + True + False \ No newline at end of file diff --git a/README.md b/README.md index 9d9cc27b0..f36ab34b9 100644 --- a/README.md +++ b/README.md @@ -26,22 +26,44 @@ using (var db = await Fdb.OpenAsync()) { // we will use a "Test" directory to isolate our test data var location = await db.Directory.CreateOrOpenAsync("Test", token); + // this location will remember the allocated prefix, and + // automatically add it as a prefix to all our keys // we need a transaction to be able to make changes to the db // note: production code should use "db.WriteAsync(..., token)" instead using (var trans = db.BeginTransaction(token)) { - // ("Test", "Hello", ) = "World" - trans.Set(location.Pack("Hello"), Slice.FromString("World")); - - // ("Test", "Count", ) = 42 - trans.Set(location.Pack("Count"), Slice.FromInt32(42)); + // For our convenience, we will use the Tuple Encoding format for our keys, + // which is accessible via the "location.Tuples" helper. We could have used + // any other encoding for the keys. Tuples are simple to use and have some + // intereseting ordering properties that make it easy to work with. + // => All our keys will be encoded as the packed tuple ({Test}, "foo"), + // making them very nice and compact. We could also use integers or GUIDs + // for the keys themselves. 
+ + // Set "Hello" key to "World" + trans.Set( + location.Tuples.EncodeKey("Hello"), + Slice.FromString("World") // UTF-8 encoded string + ); + + // Set "Count" key to 42 + trans.Set( + location.Tuples.EncodeKey("Count"), + Slice.FromInt32(42) // 1 byte + ); - // Atomically add 123 to ("Test", "Total") - trans.AtomicAdd(location.Pack("Total"), Slice.FromFixed32(123)); - - // Set bits 3, 9 and 30 in the bitmap stored at ("Test", "Bitmap") - trans.AtomicOr(location.Pack("Bitmap"), Slice.FromFixed32((1 << 3) | (1 << 9) | (1 << 30))); + // Atomically add 123 to "Total" + trans.AtomicAdd( + location.Tuples.EncodeKey("Total"), + Slice.FromFixed32(123) // 4 bytes, Little Endian + ); + + // Set bits 3, 9 and 30 in the bit map stored in the key "Bitmap" + trans.AtomicOr( + location.Tuples.EncodeKey("Bitmap"), + Slice.FromFixed32((1 << 3) | (1 << 9) | (1 << 30)) // 4 bytes, Little Endian + ); // commit the changes to the db await trans.CommitAsync(); @@ -54,16 +76,16 @@ using (var db = await Fdb.OpenAsync()) using (var trans = db.BeginReadOnlyTransaction(token)) { // Read ("Test", "Hello", ) as a string - Slice value = await trans.GetAsync(location.Pack("Hello")); + Slice value = await trans.GetAsync(location.Tuples.EncodeKey("Hello")); Console.WriteLine(value.ToUnicode()); // -> World // Read ("Test", "Count", ) as an int - value = await trans.GetAsync(location.Pack("Count")); + value = await trans.GetAsync(location.Tuples.EncodeKey("Count")); Console.WriteLine(value.ToInt32()); // -> 42 // missing keys give a result of Slice.Nil, which is the equivalent // of "key not found". 
- value = await trans.GetAsync(location.Pack("NotFound")); + value = await trans.GetAsync(location.Tuples.EncodeKey("NotFound")); Console.WriteLine(value.HasValue); // -> false Console.WriteLine(value == Slice.Nil); // -> true // note: there is also Slice.Empty that is returned for existing keys @@ -86,9 +108,9 @@ using (var db = await Fdb.OpenAsync()) await db.WriteAsync((trans) => { // add some data to the list with the format: (..., index) = value - trans.Set(list.Pack(0), Slice.FromString("AAA")); - trans.Set(list.Pack(1), Slice.FromString("BBB")); - trans.Set(list.Pack(2), Slice.FromString("CCC")); + trans.Set(list.Tuples.EncodeKey(0), Slice.FromString("AAA")); + trans.Set(list.Tuples.EncodeKey(1), Slice.FromString("BBB")); + trans.Set(list.Tuples.EncodeKey(2), Slice.FromString("CCC")); // The actual keys will be a concatenation of the prefix of 'list', // and a packed tuple containing the index. Since we are using the // Directory Layer, this should still be fairly small (between 4 @@ -123,18 +145,18 @@ using (var db = await Fdb.OpenAsync()) { // do a range query on the list subspace, which should return all the pairs // in the subspace, one for each entry in the array. - // We exploit the fact that subspace.ToRange() usually does not include the - // subspace prefix itself, because we don't want our counter to be returned + // We exploit the fact that subspace.Tuples.ToRange() usually does not include + // the subspace prefix itself, because we don't want our counter to be returned // with the query itself. 
return trans // ask for all keys that are _inside_ our subspace - .GetRange(list.ToRange()) + .GetRange(list.Tuples.ToRange()) // transform the resultoing KeyValuePair into something // nicer to use, like a typed KeyValuePair .Select((kvp) => new KeyValuePair( // unpack the tuple and returns the last item as an int - list.UnpackLast(kvp.Key), + list.Tuples.DecodeLast(kvp.Key), // convert the value into an unicode string kvp.Value.ToUnicode() )) diff --git a/Tuples.md b/Tuples.md new file mode 100644 index 000000000..d1f9f855d --- /dev/null +++ b/Tuples.md @@ -0,0 +1,360 @@ +Tuples are made of stuff and things...* +== +*: _Working Title_ + +_"A tuple is an ordered list of elements."_ - [Wikipedia](http://en.wikipedia.org/wiki/Tuple) + +
+         0       1                      2
+    +---------+-----+--------------------------------------+
+t = | "Hello" | 123 | 773166b7-de74-4fcc-845c-84080cc89533 |
+    +---------+-----+--------------------------------------+
+
+ +This is a tuple of size 3, which contains 3 elements in a specific order: the first element, the second element and then - you guessed it - the third element. + +The difference with a regular struct is that the elements do not have names, only positions: `t[0]`, `t[1]`, ..., `t[i]` with `0 <= i < N`, like an array. + +The difference with an array is that all the elements can have different types. + +There are various ways to represent a tuple in plain text, and one of them is as a vector: + +
("Hello", 123, {773166b7-de74-4fcc-845c-84080cc89533})
+ +There is a special case, for the tuple of size 1, where we usually add an extra `,` at the end, to distinguish it from an expression: + +
("Hello", )
+ +And of course you can have an empty tuple, of size 0: + +
()
+ +### The Dark Ages + +The absolute minimum implementation of a tuple is an `object[]` array. But this would not be very efficient nor user friendly, especially when you need to encode and decode keys composed of multiple elements with different types. You will probably use a lot of value types (int, Guid, bool, ...) that would need to be boxed, and you will also need to blindly casts items back into their expected type. Now was the 3rd element of this tuple an `int` or a `long`? If you guessed wrong, you will get an `InvalidCastException` at runtime. Uhoh :( + +```CSharp +// in application A that encoded a key... +var items = new object[] { "Hello", 123, Guid.NewGuid() }; +// one allocation for the object[] array, and two allocations to box the int and the guid! +var key = SomeLibrary.Encode(items); + +// in a different application B that decodes the same key +var items = SomeLibrary.Decode(key); +var a = (string)items[0]; +var b = (long)items[1]; // FAIL: it's actually an int ! +var c = (Guid)items[2]; +var d = (int)items[3]; // FAIL: there is no 4th item ! +``` + +The .NET Framework comes with a set of `Tuple<...>` classes, which gives you the ability to specify the types as well as the number of elements. You get type safety and a better intellisense experience. + +```CSharp +// in application A that encoded a key... +Tuple items = Tuple.Create("Hello", 123, Guid.NewGuid()); +// a single allocation for the Tuple instance +var key = SomeLibrary.Encode(items); + +// in a different application B that decodes the same key +Tuple items = SomeLibrary.Decode(key); +string a = items.Item1; +int b = items.Item2; +Guid c = items.Item3; +``` + +This is much better, but unfortunately, the BCL's Tuple classes are relatively barebone and don't offer much in term of feature, if at all. You can't really combine them or split them. They still require you to know that the 2nd element was an `int`, and not a `long` or `uint`. 
+ +And quite frankly, if you have used other languages where tuples are first-class citizens (Python, for example), they seem rather bleak. + +That's why we need a better API, in order to help us be more productive. + +## IFdbTuple + +The `IFdbTuple` interface, defined in `FoundationDB.Layers.Tuples` (TODO: update this if we rename it!), is the base of all the different tuples implementation, all targetting a specific use case. + +This interface has the bare minimum API, thats must be implemented by each variant, and is in turn used by a set of extension methods that add more generic behavior that does NOT need to be replicated in all the variants. + +There is also a static class, called `FdbTuple`, which holds a bunch of methods to create and handle all the different variants of tuples. + +_note: the interface is not called `ITuple` because 1) there is already an `ITuple` interface in the BCL (even though it is internal), and 2) we wouldn't be able to call our static helper class `Tuple` since it would collide with the BCL._ + +### Types of tuples + +Tuples need to adapt to different use case: some tuples should have a fixed size and types (like the BCL Tuples), some should have a variable length (like a vector or list). Some tuples should probably be structs (to reduce the number of allocation in tight loops), while others need to be reference types. And finally, some tuples could be thin wrappers around encoded binary blobs, and defer the decoding of items until they are accessed. + +That's why there is multiple variants of tuples, all implementing the `IFdbTuple` interface: + +- `FdbTuple`, `FdbTuple` (up to T5 right now) are the equivalent of the BCL's `Tuple` except that they are implemented as a struct. They are efficient when used as a temporary step to create bigger tuples, or when you have control of the actual type (in LINQ queries, inside your own private methods, ...). 
They are also ideal if you want type safety and nice intellisense support, since the types are known at compile time. +- `FdbListTuple` wraps an array of object[] and exposes a subset of this array. Getting a substring of this is cheap since it does not have to copy the items. +- `FdbJoinedTuple` is a wrapper that glues together two tuples (of any type). +- `FdbLinkedTuple` is a special case of an FdbJoinedTuple, where we are only adding one value to an existing tuple. +- `FdbSlicedTuple` is a wrapper around a half-parsed binary representation of a tuple, and which will only decode items if they are accessed. In cases where you are only interested in part of a key, you won't waste CPU cycles decoding the other items. +- `FdbMemoizedTuple` will cache its binary representation, which is useful when you have a common tuple prefix which is used every time to construct other tuples. +- `FdbPrefixedTuple` is some sort of hybrid tuple whose binary representation always has a constant binary prefix, which may or may not be a valid binary tuple representation itself (need to use tuples with prefixes generated from a different encoding). + +### Creating a tuple + +The simplest way to create a tuple is from its elements: + +```CSharp +var t = FdbTuple.Create("Hello", 123, Guid.NewGuid()); +``` + +The actual type of the tuple will be `FdbTuple` which is a struct. Since we are using the `var` keyword, then as long as `t` stays inside the method, it will not be boxed. + +We can also create a tuple by adding something to an existing tuple, even starting with the Empty tuple: + +```CSharp +var t = FdbTuple.Empty.Append("Hello").Append(123).Append(Guid.NewGuid()); +``` + +The good news here is that _t_ is still a struct of type `FdbTuple` and we did not produce any allocations: the Empty tuple is a singleton, and all the intermediate Append() returned structs of type `FdbTuple` and `FdbTuple`.
There is of course a limit to the number of elements that can be added, before we have to switch to an array-based tuple variant. + +If we have a variable-size list of items, we can also create a tuple from it: + +```CSharp +IEnumerable xs = ....; +// xs is a sequence of MyFoo objects, with an Id property (of type Guid) +var t = FdbTuple.FromSequence(xs.Select(x => x.Id)); +``` + +When all the elements of a tuple are of the same type, you can use specialized versions: +```CSharp +var xs = new [] { "Bonjour", "le", "Monde!" }; +var t = FdbTuple.FromArray(xs); +``` + +If you were already using the BCL's Tuple, you can easily convert from one to the other, via a set of implicit and explicit cast operators: + +```CSharp +var bcl = Tuple.Create("Hello", 123, Guid.NewGuid()); +FdbTuple t = bcl; // implicit cast + +var t = FdbTuple.Create("Hello", 123, Guid.NewGuid()); +Tuple bcl = (Tuple) t; // explicit cast +``` + +And for the more adventurous, you can of course create a tuple by copying the elements of an object[] array. + +```CSharp +var xs = new object[] { "Hello", 123, Guid.NewGuid() }; +var t1 = FdbTuple.FromObjects(xs); // => ("hello", 123, guid) +var t2 = FdbTuple.FromObjects(xs, 1, 2); // => (123, guid) +xs[1] = 456; // won't change the content of the tuples +// t[1] => 123 +``` + +If you really want to push it, you can skip copying the items by wrapping an existing array, but then you will break the immutability contract of the Tuples API. Don't try this at home! + +```CSharp +var xs = new object[] { "Hello", 123, Guid.NewGuid() }; +var t1 = FdbTuple.Wrap(xs); // no copy! +var t2 = FdbTuple.Wrap(xs, 1, 2); // no copy! +xs[1] = 456; // will change the content of the tuples!! +// t[1] => 456 +``` + +### Using a tuple + +Now that you have a tuple, the first thing you would want to know is its size and if it is empty or not. + +All tuples expose a `Count` property which returns the number of elements in the tuple (0 to N).
+ +To help you verify that a tuple has the correct size before accessing its elements, there is a set of helper extension methods just for that: + +- `t.IsNullOrEmpty()` returns `true` if either `t == null` or `t.Count == 0` +- `t.OfSize(3)` checks that `t` is not null, and that `t.Count` is equal to 3, and then returns the tuple itself, so you can write: `t.OfSize(3).DoSomethingWhichExpectsThreeElements()` +- `t.OfSizeAtLeast(3)` (and `t.OfSizeAtMost(3)`) work the same, except they check that `t.Count >= 3` (or `t.Count <= 3`) + +Of course, if you have one of the `FdbTuple` structs, you can skip this step, since the size is known at compile time. + +To read the content of a tuple, you can simply call `t.Get(index)`, where `index` is the offset _in the tuple_ of the element, and `T` is the type into which the value will be converted. + +```CSharp +var t = FdbTuple.Create("hello", 123, Guid.NewGuid()); +var x = t.Get(0); // => "hello" +var y = t.Get(1); // => 123 +var z = t.Get(2); // => guid +``` + +If `index` is negative, then it is relative to the end of the tuple, where -1 is the last element, -2 is the next-to-last element, and -N is the first element. + +```CSharp +var t = FdbTuple.Create("hello", 123, Guid.NewGuid()); +var x = t.Get(-3); // => "hello" +var y = t.Get(-2); // => 123 +var z = t.Get(-1); // => guid +``` + +### Pretty Printing + +Code that manipulates tuples can get complex pretty fast, so you need a way to display the content of a tuple in a nice and understandable way. + +For that, every tuple overrides `ToString()` to return a nicely formatted string with a standardized format.
+ +```CSharp +var t1 = FdbTuple.Create("hello", 123, Guid.NewGuid()); +Console.WriteLine("t1 = {0}", t1); +// => t1 = ("hello", 123, {773166b7-de74-4fcc-845c-84080cc89533}) +var t2 = FdbTuple.Create("hello"); +Console.WriteLine("t2 = {0}", t2); +// => t2 = ("hello",) +var t3 = FdbTuple.Empty; +Console.WriteLine("t3 = {0}", t3); +// => t3 = () +``` + +There is a special case for tuples of size 1, which have a trailing comma - `(123,)` instead of `(123)` - so that they can be distinguished from a normal expression in parentheses. + +### Tuples all the way down + +Since a tuple is just a vector of elements, you can of course put a tuple inside another tuple. + +This works: + +```CSharp +var t1 = FdbTuple.Create("hello", FdbTuple.Create(123, 456), Guid.NewGuid()); +// t1 = ("hello", (123, 456), {773166b7-de74-4fcc-845c-84080cc89533}) +var t2 = FdbTuple.Create(FdbTuple.Create("a", "b")); +// t2 = ((a, b),) +var t3 = FdbTuple.Create("hello", FdbTuple.Empty, "world"); +// t3 = ("hello", (), "world"); +``` + +_note: The easy mistake is to call `t1.Append(t2)` instead of `t1.Concat(t2)`, which will add t2 as a single element at the end of t1, instead of adding t2's elements at the end of t1._ + +This can be useful when you want to model a fixed-size key: `(product_id, location_id, order_id)` where location_id is a hierarchical key with a variable size, but still keep a fixed size of 3: + +```CSharp +var productId = "B00CS8QSSK"; +var locationId = new [] { "Europe", "France", "Lille" }; +var orderId = Guid.NewGuid(); + +var t = FdbTuple.Create(productId, FdbTuple.FromArray(locationId), orderId); +// t.Count => 3 +// t[0] => "B00CS8QSSK" +// t[1] => ("Europe", "France", "Lille") +// t[2] => {773166b7-de74-4fcc-845c-84080cc89533} +``` + +Your code that wants to parse the key can always read `t[2]` to get the order_id, without caring about the actual size of the location_id. + +### Combining tuples + +Since tuples are immutable, there are no methods to modify the value of an element.
You'd do that by creating a new tuple, with a combination of Substring, Append or Concat. + +You can, though, modify tuples by returning a new tuple, with or without copying the items (depending on the tuple variant being used). + +The most common case is to simply add a value to a tuple via the `t.Append(T value)` method. For example you have a base tuple (cached value), and you want to add a document ID. + +```CSharp +var location = FdbTuple.Create("MyAwesomeApp", "Documents"); + +var documentId = Guid.NewGuid(); +var t = location.Append(documentId); +// t => ("MyAwesomeApp", "Documents", {773166b7-de74-4fcc-845c-84080cc89533}); +``` + +Don't forget that if you Append a tuple, it will be added as a nested tuple! + +If you actually want to merge the elements of two tuples, then you can use the `t1.Concat(t2)` method, which returns a new tuple with the elements of both t1 and t2. + +```CSharp +var location = FdbTuple.Create("MyAwesomeApp", "OrdersByProduct"); + +var productId = "B00CS8QSSK"; +var orderId = Guid.NewGuid(); +var t1 = FdbTuple.Create(productId, orderId); +// t1 => ("B00CS8QSSK", {773166b7-de74-4fcc-845c-84080cc89533}) + +var t2 = location.Concat(t1); +// t2 => ("MyAwesomeApp", "OrdersByProduct", "B00CS8QSSK", {773166b7-de74-4fcc-845c-84080cc89533}); +``` + +### Splitting tuples + +You can also split tuples into smaller chunks. + +First, you can return a subset of a tuple via one of the `t.Substring(...)` methods, or the `t[from, to]` indexer. + +The `Substring()` method works exactly the same way as for regular strings. + +```CSharp +var t = FdbTuple.Create(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); +var u = t.Substring(0, 3); // => (1, 2, 3) +var v = t.Substring(5, 2); // => (6, 7) +var w = t.Substring(7); // => (8, 9, 10) + +// also works with negative indexing! +var w = v.Substring(-3); // => (8, 9, 10) +``` + +The `t[from, to]` indexer takes some getting used to.
It actually returns all the elements in the tuple with position `from <= p < to`, which means that the `to` is excluded. + +```CSharp +var t = FdbTuple.Create(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); +var u = t[0, 3]; // => (1, 2, 3) +var v = t[5, 7]; // => (6, 7) +// remember that 'to' is excluded! +var w = t[7, -1]; // => (8, 9) +// to fix that, you can use 'null' ("up to the end") +var w = t[7, null]; // => (8, 9, 10) + +// also works with negative indexing! +var w = v[-3, null]; // => (8, 9, 10) +``` + +If you are tired of writing `t.Substring(0, 3)` all the time, you can also use `t.Truncate(3)` which does the same thing. + +```CSharp +var t = FdbTuple.Create(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); +var u = t.Truncate(3); +// u => (1, 2, 3); +var v = t.Truncate(-3); +// v => (8, 9, 10); +``` + +### More advanced stuff + +When decoding keys using tuples, you will often find yourself extracting a fixed number of arguments into local variables, and then constructing an instance of a Model class from your application. + +```CSharp +public MyFooBar DecodeFoobar(IFdbTuple tuple) +{ + var x = tuple.Get(0); + var y = tuple.Get(1); + var z = tuple.Get(2); + return new MyFooBar(x, y, z); +} +``` + +The keen eye will see the problems with this method: + +- no null check on tuple. +- what if tuple.Count is 5 ? +- what if tuple.Count is only 2 ? +- you probably copy/pasted `var x = tuple.Get<...>(0)` two more times, and forgot to change the index to 1 and 2! _(even Notch does it!)_ + +One solution is to use the set of `t.As()` helper methods to convert a tuple of type `IFdbTuple` into a more friendly `FdbTuple` introducing type safety and IntelliSense. + +```CSharp +public MyFooBar DecodeFoobar(IFdbTuple tuple) +{ + var t = tuple.As(); + // this throws if tuple is null, or not of size 3 + return new MyFooBar(t.Item1, t.Item2, t.Item3); +} +``` + +That's better, but you can still swap two arguments by mistake, if they have the same type.
+ +To combat this, you can use one of the `t.With(Action)` or `t.With(Func)` which can give names to the elements. + +```CSharp +public MyFooBar DecodeFoobar(IFdbTuple tuple) +{ + return tuple.With((Guid productId, Guid categoryId, Guid orderId) => new MyFooBar(productId, categoryId, orderId)); + // all three elements are GUID, but adding names helps you catch argument inversion errors +} +``` +