From 217494a904f2d3f4fc1ed14f4317e318a0e94730 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Fri, 14 Nov 2014 17:47:57 +0100 Subject: [PATCH 01/63] =?UTF-8?q?The=20Commit=20Who=20Broke=20Your=20Code!?= =?UTF-8?q?=20(=EF=BE=89=EF=BE=9F0=EF=BE=9F)=EF=BE=89~?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Completely refactored the Subspace API to fix most common issues and prevent bugs when dealing with embedded tuples - subspace.Keys.xxx to concat/extract/work with binary keys, ex: subspace.Keys[Slice.FromString("Hello")] - subspace.Partition.xxx to create sub-partitions of subspaces, ex: subspace.Partition.By("Messages") - subspace.Tuples.xxx to deal with everything Tuple related, ex: subspace.Tuples.EncodeKey(123, "abc") - Made the interface "IFdbSubspace" the primary entry point, instead of class FdbSubspace - Added low-level methods on IFdbSubspace, moved most extension methods on Keys/Partition/Tuples helper struct - See #40 for discussion, and how to update your code with the new API ! 
- Don't hate me too much (^_^;) --- FdbShell/Commands/BasicCommands.cs | 14 +- FoundationDB.Client.sln | 8 +- .../Encoders/FdbEncoderSubspace`1.cs | 31 +- .../Encoders/FdbEncoderSubspace`2.cs | 22 +- .../Encoders/FdbEncoderSubspace`3.cs | 10 +- FoundationDB.Client/Fdb.cs | 6 +- FoundationDB.Client/FdbCluster.cs | 2 +- FoundationDB.Client/FdbDatabase.cs | 94 +-- FoundationDB.Client/FdbDatabaseExtensions.cs | 31 +- FoundationDB.Client/FdbKey.cs | 4 + FoundationDB.Client/FdbRangeChunk.cs | 4 +- .../Filters/FdbDatabaseFilter.cs | 47 +- .../Filters/PrefixRewriterTransaction.cs | 18 +- .../FoundationDB.Client.csproj | 3 + FoundationDB.Client/IFdbDatabase.cs | 2 +- .../Layers/Directories/FdbDirectoryLayer.cs | 74 +-- .../Directories/FdbHighContentionAllocator.cs | 22 +- FoundationDB.Client/Layers/Tuples/FdbTuple.cs | 81 +-- .../Subspaces/Fdb.Directory.cs | 2 +- FoundationDB.Client/Subspaces/FdbSubspace.cs | 261 ++++---- .../Subspaces/FdbSubspaceExtensions.cs | 612 +----------------- .../Subspaces/FdbSubspaceKeys.cs | 159 +++++ .../Subspaces/FdbSubspacePartition.cs | 170 +++++ .../Subspaces/FdbSubspaceTuples.cs | 498 ++++++++++++++ FoundationDB.Client/Subspaces/IFdbSubspace.cs | 44 +- FoundationDB.Client/Utils/Slice.cs | 61 ++ FoundationDB.Client/Utils/SliceBuffer.cs | 2 +- FoundationDB.Layers.Common/Blobs/FdbBlob.cs | 12 +- .../Collections/FdbMap`2.cs | 6 +- .../Collections/FdbMultimap`2.cs | 6 +- .../Collections/FdbQueue`1.cs | 31 +- .../Collections/FdbRankedSet.cs | 44 +- .../Collections/FdbVector`1.cs | 6 +- .../Counters/FdbCounterMap.cs | 6 +- .../Counters/FdbHighContentionCounter.cs | 6 +- .../Indexes/FdbIndex`2.cs | 6 +- .../Interning/FdbStringIntern.cs | 8 +- .../Documents/FdbHashSetCollection.cs | 14 +- .../Messaging/FdbWorkerPool.cs | 66 +- .../Messaging/WorkerPoolTest.cs | 2 +- .../Benchmarks/BenchRunner.cs | 4 +- FoundationDB.Samples/Benchmarks/LeakTest.cs | 10 +- .../MessageQueue/MessageQueueRunner.cs | 20 +- .../Tutorials/ClassScheduling.cs | 10 +- 
FoundationDB.Storage.Memory.Test/FdbTest.cs | 37 ++ .../Transactions/Benchmarks.cs | 42 +- .../Transactions/Comparisons.cs | 10 +- .../Transactions/MemoryTransactionFacts.cs | 206 +++--- FoundationDB.Tests.Sandbox/Program.cs | 54 +- FoundationDB.Tests/DatabaseBulkFacts.cs | 34 +- FoundationDB.Tests/DatabaseFacts.cs | 16 +- .../Indexing/CompressedBitmapsFacts.cs | 1 + FoundationDB.Tests/FdbTest.cs | 29 +- .../Filters/LoggingFilterFacts.cs | 63 +- FoundationDB.Tests/Layers/BlobFacts.cs | 6 +- FoundationDB.Tests/Layers/DirectoryFacts.cs | 146 +++-- FoundationDB.Tests/Layers/IndexingFacts.cs | 10 +- FoundationDB.Tests/Layers/MapFacts.cs | 10 +- FoundationDB.Tests/Layers/MultiMapFacts.cs | 6 +- FoundationDB.Tests/Layers/RankedSetFacts.cs | 4 +- .../Layers/StringInternFacts.cs | 24 +- FoundationDB.Tests/Layers/SubspaceFacts.cs | 70 +- FoundationDB.Tests/Layers/TupleFacts.cs | 20 +- FoundationDB.Tests/Layers/VectorFacts.cs | 2 +- .../Linq/FdbAsyncQueryableFacts.cs | 14 +- .../Linq/FdbQueryExpressionFacts.cs | 4 +- FoundationDB.Tests/RangeQueryFacts.cs | 122 ++-- FoundationDB.Tests/TestHelpers.cs | 10 +- FoundationDB.Tests/TransactionFacts.cs | 332 +++++----- FoundationDB.Tests/TransactionalFacts.cs | 20 +- README.md | 56 +- 71 files changed, 2161 insertions(+), 1726 deletions(-) create mode 100644 FoundationDB.Client/Subspaces/FdbSubspaceKeys.cs create mode 100644 FoundationDB.Client/Subspaces/FdbSubspacePartition.cs create mode 100644 FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs diff --git a/FdbShell/Commands/BasicCommands.cs b/FdbShell/Commands/BasicCommands.cs index 4183b91ce..b114b1a7a 100644 --- a/FdbShell/Commands/BasicCommands.cs +++ b/FdbShell/Commands/BasicCommands.cs @@ -69,16 +69,16 @@ public static async Task Dir(string[] path, IFdbTuple extras, DirectoryBrowseOpt if (!(subfolder is FdbDirectoryPartition)) { long count = await Fdb.System.EstimateCountAsync(db, subfolder.ToRange(), ct); - log.WriteLine(" {0,-12} {1,-12} {3,9:N0} {2}", 
FdbKey.Dump(subfolder.Copy().Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name, count); + log.WriteLine(" {0,-12} {1,-12} {3,9:N0} {2}", FdbKey.Dump(FdbSubspace.Copy(subfolder).Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name, count); } else { - log.WriteLine(" {0,-12} {1,-12} {3,9:N0} {2}", FdbKey.Dump(subfolder.Copy().Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name, "-"); + log.WriteLine(" {0,-12} {1,-12} {3,9:N0} {2}", FdbKey.Dump(FdbSubspace.Copy(subfolder).Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name, "-"); } } else { - log.WriteLine(" {0,-12} {1,-12} {2}", FdbKey.Dump(subfolder.Copy().Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name); + log.WriteLine(" {0,-12} {1,-12} {2}", FdbKey.Dump(FdbSubspace.Copy(subfolder).Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name); } } else @@ -216,7 +216,7 @@ public static async Task Count(string[] path, IFdbTuple extras, IFdbDatabase db, return; } - var copy = folder.Copy(); + var copy = FdbSubspace.Copy(folder); log.WriteLine("# Counting keys under {0} ...", FdbKey.Dump(copy.Key)); var progress = new Progress>((state) => @@ -255,7 +255,7 @@ public static async Task Show(string[] path, IFdbTuple extras, bool reverse, IFd if (reverse) keys.Reverse(); foreach (var key in keys.Take(count)) { - log.WriteLine("...{0} = {1}", FdbKey.Dump(folder.Extract(key.Key)), key.Value.ToAsciiOrHexaString()); + log.WriteLine("...{0} = {1}", FdbKey.Dump(folder.ExtractKey(key.Key)), key.Value.ToAsciiOrHexaString()); } if (!reverse && keys.Count == count + 1) { @@ -555,7 +555,7 @@ public static async Task Shards(string[] path, IFdbTuple extras, IFdbDatabase db var folder = (await TryOpenCurrentDirectoryAsync(path, db, ct)) as FdbDirectorySubspace; if (folder != null) { - var r = 
FdbKeyRange.StartsWith(folder.Copy().Key); + var r = FdbKeyRange.StartsWith(FdbSubspace.Copy(folder).Key); Console.WriteLine("Searching for shards that intersect with /{0} ...", String.Join("/", path)); ranges = await Fdb.System.GetChunksAsync(db, r, ct); Console.WriteLine("Found {0} ranges intersecting {1}:", ranges.Count, r); @@ -590,7 +590,7 @@ public static async Task Sampling(string[] path, IFdbTuple extras, IFdbDatabase FdbKeyRange span; if (folder is FdbDirectorySubspace) { - span = FdbKeyRange.StartsWith((folder as FdbDirectorySubspace).Copy()); + span = FdbKeyRange.StartsWith(FdbSubspace.Copy(folder as FdbDirectorySubspace).Key); log.WriteLine("Reading list of shards for /{0} under {1} ...", String.Join("/", path), FdbKey.Dump(span.Begin)); } else diff --git a/FoundationDB.Client.sln b/FoundationDB.Client.sln index 554e7cde2..cdb6ac1e6 100644 --- a/FoundationDB.Client.sln +++ b/FoundationDB.Client.sln @@ -1,7 +1,7 @@  Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio 2013 -VisualStudioVersion = 12.0.30501.0 +VisualStudioVersion = 12.0.30723.0 MinimumVisualStudioVersion = 10.0.40219.1 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "FoundationDB.Client", "FoundationDB.Client\FoundationDB.Client.csproj", "{773166B7-DE74-4FCC-845C-84080CC89533}" EndProject @@ -36,6 +36,12 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "FoundationDB.Storage.Memory EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "FdbShell", "FdbShell\FdbShell.csproj", "{60C39E7E-E6CD-404B-8F9B-9BABF302AABC}" EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{A7D90DEE-38B0-449C-93B5-32D7B845D990}" + ProjectSection(SolutionItems) = preProject + LICENSE.md = LICENSE.md + README.md = README.md + EndProjectSection +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs 
b/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs index 0845dc89d..09c835aae 100644 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs +++ b/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs @@ -38,10 +38,10 @@ namespace FoundationDB.Client public class FdbEncoderSubspace : FdbSubspace, IKeyEncoder { - protected readonly FdbSubspace m_parent; + protected readonly IFdbSubspace m_parent; protected readonly IKeyEncoder m_encoder; - public FdbEncoderSubspace([NotNull] FdbSubspace subspace, [NotNull] IKeyEncoder encoder) + public FdbEncoderSubspace([NotNull] IFdbSubspace subspace, [NotNull] IKeyEncoder encoder) : base(subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); @@ -93,43 +93,38 @@ public Slice EncodeKey(T key) } [NotNull] - public Slice[] EncodeKeyRange([NotNull] T[] keys) + public Slice[] EncodeKeyRange([NotNull] IEnumerable keys) { - return FdbKey.Merge(this.Key, m_encoder.EncodeRange(keys)); + return ConcatKeys(m_encoder.EncodeRange(keys)); } [NotNull] - public Slice[] EncodeKeyRange([NotNull] TElement[] elements, Func selector) + public Slice[] EncodeKeyRange([NotNull] params T[] keys) { - return FdbKey.Merge(this.Key, m_encoder.EncodeRange(elements, selector)); + return ConcatKeys(m_encoder.EncodeRange(keys)); } [NotNull] - public Slice[] EncodeKeyRange([NotNull] IEnumerable keys) + public Slice[] EncodeKeyRange([NotNull] TElement[] elements, Func selector) { - return FdbKey.Merge(this.Key, m_encoder.EncodeRange(keys)); + return ConcatKeys(m_encoder.EncodeRange(elements, selector)); } public T DecodeKey(Slice encoded) { - return m_encoder.DecodeKey(this.ExtractAndCheck(encoded)); + return m_encoder.DecodeKey(ExtractKey(encoded, boundCheck: true)); } [NotNull] - public T[] DecodeKeyRange([NotNull] Slice[] encoded) + public T[] DecodeKeyRange([NotNull] IEnumerable encoded) { - var extracted = new Slice[encoded.Length]; - for (int i = 0; i < encoded.Length; i++) - { - extracted[i] = ExtractAndCheck(encoded[i]); - } - 
return m_encoder.DecodeRange(extracted); + return m_encoder.DecodeRange(ExtractKeys(encoded, boundCheck: true)); } [NotNull] - public IEnumerable DecodeKeys([NotNull] IEnumerable source) + public T[] DecodeKeyRange([NotNull] params Slice[] encoded) { - return source.Select(key => m_encoder.DecodeKey(key)); + return m_encoder.DecodeRange(ExtractKeys(encoded, boundCheck: true)); } public virtual FdbKeyRange ToRange(T key) diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs index c74cf48df..434e2d330 100644 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs +++ b/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs @@ -37,11 +37,11 @@ namespace FoundationDB.Client public class FdbEncoderSubspace : FdbSubspace, ICompositeKeyEncoder { - protected readonly FdbSubspace m_parent; + protected readonly IFdbSubspace m_parent; protected readonly ICompositeKeyEncoder m_encoder; protected volatile FdbEncoderSubspace m_head; - public FdbEncoderSubspace([NotNull] FdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) + public FdbEncoderSubspace([NotNull] IFdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) : base(subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); @@ -97,47 +97,47 @@ public Task GetAsync([NotNull] IFdbReadOnlyTransaction trans, T1 key1, T2 public virtual Slice EncodeKey(FdbTuple key) { - return this.Key + m_encoder.EncodeKey(key); + return ConcatKey(m_encoder.EncodeKey(key)); } public virtual Slice EncodeKey(T1 key1, T2 key2) { - return this.Key + m_encoder.EncodeKey(key1, key2); + return ConcatKey(m_encoder.EncodeKey(key1, key2)); } public virtual Slice EncodeKey(T1 key1) { - return this.Key + m_encoder.EncodeComposite(FdbTuple.Create(key1, default(T2)), 1); + return ConcatKey(m_encoder.EncodeComposite(FdbTuple.Create(key1, default(T2)), 1)); } Slice ICompositeKeyEncoder>.EncodeComposite(FdbTuple key, int items) { - return this.Key + 
m_encoder.EncodeComposite(key, items); + return ConcatKey(m_encoder.EncodeComposite(key, items)); } public virtual FdbTuple DecodeKey(Slice encoded) { - return m_encoder.DecodeKey(this.ExtractAndCheck(encoded)); + return m_encoder.DecodeKey(ExtractKey(encoded, boundCheck: true)); } FdbTuple ICompositeKeyEncoder>.DecodeComposite(Slice encoded, int items) { - return m_encoder.DecodeComposite(this.ExtractAndCheck(encoded), items); + return m_encoder.DecodeComposite(ExtractKey(encoded, boundCheck: true), items); } public virtual FdbKeyRange ToRange(FdbTuple key) { - return FdbTuple.ToRange(this.EncodeKey(key)); + return FdbTuple.ToRange(EncodeKey(key)); } public virtual FdbKeyRange ToRange(T1 key1, T2 key2) { - return FdbTuple.ToRange(this.EncodeKey(key1, key2)); + return FdbTuple.ToRange(EncodeKey(key1, key2)); } public virtual FdbKeyRange ToRange(T1 key1) { - return FdbTuple.ToRange(this.EncodeKey(key1)); + return FdbTuple.ToRange(EncodeKey(key1)); } #endregion diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs index 6920b96cb..62ce21e5c 100644 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs +++ b/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs @@ -37,12 +37,12 @@ namespace FoundationDB.Client public class FdbEncoderSubspace : FdbSubspace, ICompositeKeyEncoder { - protected readonly FdbSubspace m_parent; + protected readonly IFdbSubspace m_parent; protected readonly ICompositeKeyEncoder m_encoder; protected volatile FdbEncoderSubspace m_head; protected volatile FdbEncoderSubspace m_partial; - public FdbEncoderSubspace([NotNull] FdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) + public FdbEncoderSubspace([NotNull] IFdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) : base(subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); @@ -117,17 +117,17 @@ Slice ICompositeKeyEncoder>.EncodeComposite(FdbTuple DecodeKey(Slice encoded) { - return 
m_encoder.DecodeKey(this.ExtractAndCheck(encoded)); + return m_encoder.DecodeKey(ExtractKey(encoded, boundCheck: true)); } FdbTuple ICompositeKeyEncoder>.DecodeComposite(Slice encoded, int items) { - return m_encoder.DecodeComposite(this.ExtractAndCheck(encoded), items); + return m_encoder.DecodeComposite(ExtractKey(encoded, boundCheck: true), items); } public virtual FdbKeyRange ToRange(T1 key1, T2 key2, T3 key3) { - return FdbTuple.ToRange(this.EncodeKey(key1, key2, key3)); + return FdbTuple.ToRange(EncodeKey(key1, key2, key3)); } #endregion diff --git a/FoundationDB.Client/Fdb.cs b/FoundationDB.Client/Fdb.cs index dc39a8df0..f76a7bf43 100644 --- a/FoundationDB.Client/Fdb.cs +++ b/FoundationDB.Client/Fdb.cs @@ -427,7 +427,7 @@ internal static Task CreateClusterInternalAsync(string clusterFile, /// Task that will return an FdbDatabase, or an exception /// If the token is cancelled /// Since connections are not pooled, so this method can be costly and should NOT be called every time you need to read or write from the database. Instead, you should open a database instance at the start of your process, and use it a singleton. - public static Task OpenAsync(FdbSubspace globalSpace, CancellationToken cancellationToken = default(CancellationToken)) + public static Task OpenAsync(IFdbSubspace globalSpace, CancellationToken cancellationToken = default(CancellationToken)) { return OpenAsync(clusterFile: null, dbName: null, globalSpace: globalSpace, cancellationToken: cancellationToken); } @@ -457,13 +457,13 @@ internal static Task CreateClusterInternalAsync(string clusterFile, /// If is anything other than 'DB' /// If the token is cancelled /// Since connections are not pooled, so this method can be costly and should NOT be called every time you need to read or write from the database. Instead, you should open a database instance at the start of your process, and use it a singleton. 
- public static async Task OpenAsync(string clusterFile, string dbName, FdbSubspace globalSpace, bool readOnly = false, CancellationToken cancellationToken = default(CancellationToken)) + public static async Task OpenAsync(string clusterFile, string dbName, IFdbSubspace globalSpace, bool readOnly = false, CancellationToken cancellationToken = default(CancellationToken)) { return await OpenInternalAsync(clusterFile, dbName, globalSpace, readOnly, cancellationToken); } /// Create a new database handler instance using the specificied cluster file, database name, global subspace and read only settings - internal static async Task OpenInternalAsync(string clusterFile, string dbName, FdbSubspace globalSpace, bool readOnly, CancellationToken cancellationToken) + internal static async Task OpenInternalAsync(string clusterFile, string dbName, IFdbSubspace globalSpace, bool readOnly, CancellationToken cancellationToken) { cancellationToken.ThrowIfCancellationRequested(); diff --git a/FoundationDB.Client/FdbCluster.cs b/FoundationDB.Client/FdbCluster.cs index 12d0351e2..f9a235f4b 100644 --- a/FoundationDB.Client/FdbCluster.cs +++ b/FoundationDB.Client/FdbCluster.cs @@ -128,7 +128,7 @@ public async Task OpenDatabaseAsync(string databaseName, FdbSubspa /// If is anything other than 'DB' /// If the token is cancelled /// As of Beta2, the only supported database name is 'DB' - internal async Task OpenDatabaseInternalAsync(string databaseName, FdbSubspace subspace, bool readOnly, bool ownsCluster, CancellationToken cancellationToken) + internal async Task OpenDatabaseInternalAsync(string databaseName, IFdbSubspace subspace, bool readOnly, bool ownsCluster, CancellationToken cancellationToken) { ThrowIfDisposed(); if (string.IsNullOrEmpty(databaseName)) throw new ArgumentNullException("databaseName"); diff --git a/FoundationDB.Client/FdbDatabase.cs b/FoundationDB.Client/FdbDatabase.cs index f49c9185d..dde6bcfe2 100644 --- a/FoundationDB.Client/FdbDatabase.cs +++ 
b/FoundationDB.Client/FdbDatabase.cs @@ -36,6 +36,7 @@ namespace FoundationDB.Client using JetBrains.Annotations; using System; using System.Collections.Concurrent; + using System.Collections.Generic; using System.Diagnostics; using System.Threading; using System.Threading.Tasks; @@ -76,9 +77,9 @@ public class FdbDatabase : IFdbDatabase, IFdbTransactional, IDisposable /// Global namespace used to prefix ALL keys and subspaces accessible by this database instance (default is empty) /// This is readonly and is set when creating the database instance - private FdbSubspace m_globalSpace; + private IFdbSubspace m_globalSpace; /// Copy of the namespace, that is exposed to the outside. - private FdbSubspace m_globalSpaceCopy; + private IFdbSubspace m_globalSpaceCopy; /// Default Timeout value for all transactions private int m_defaultTimeout; @@ -101,7 +102,7 @@ public class FdbDatabase : IFdbDatabase, IFdbTransactional, IDisposable /// Root directory of the database instance /// If true, the database instance will only allow read-only transactions /// If true, the cluster instance lifetime is linked with the database instance - protected FdbDatabase(IFdbCluster cluster, IFdbDatabaseHandler handler, string name, FdbSubspace contentSubspace, IFdbDirectory directory, bool readOnly, bool ownsCluster) + protected FdbDatabase(IFdbCluster cluster, IFdbDatabaseHandler handler, string name, IFdbSubspace contentSubspace, IFdbDirectory directory, bool readOnly, bool ownsCluster) { Contract.Requires(cluster != null && handler != null && name != null && contentSubspace != null); @@ -121,7 +122,7 @@ protected FdbDatabase(IFdbCluster cluster, IFdbDatabaseHandler handler, string n /// Root directory of the database instance /// If true, the database instance will only allow read-only transactions /// If true, the cluster instance lifetime is linked with the database instance - public static FdbDatabase Create(IFdbCluster cluster, IFdbDatabaseHandler handler, string name, FdbSubspace 
contentSubspace, IFdbDirectory directory, bool readOnly, bool ownsCluster) + public static FdbDatabase Create(IFdbCluster cluster, IFdbDatabaseHandler handler, string name, IFdbSubspace contentSubspace, IFdbDirectory directory, bool readOnly, bool ownsCluster) { if (cluster == null) throw new ArgumentNullException("cluster"); if (handler == null) throw new ArgumentNullException("handler"); @@ -447,21 +448,23 @@ Slice IFdbKey.ToFoundationDbKey() /// Change the current global namespace. /// Do NOT call this, unless you know exactly what you are doing ! - internal void ChangeRoot(FdbSubspace subspace, IFdbDirectory directory, bool readOnly) + internal void ChangeRoot(IFdbSubspace subspace, IFdbDirectory directory, bool readOnly) { + //REVIEW: rename to "ChangeRootSubspace" ? subspace = subspace ?? FdbSubspace.Empty; lock (this)//TODO: don't use this for locking { m_readOnly = readOnly; - m_globalSpace = subspace; - m_globalSpaceCopy = subspace.Copy(); + m_globalSpace = FdbSubspace.Copy(subspace); + m_globalSpaceCopy = FdbSubspace.Copy(subspace); // keep another copy m_directory = directory == null ? null : new FdbDatabasePartition(this, directory); } } /// Returns the global namespace used by this database instance - public FdbSubspace GlobalSpace + public IFdbSubspace GlobalSpace { + //REVIEW: rename to just "Subspace" ? [NotNull] get { @@ -470,34 +473,6 @@ public FdbSubspace GlobalSpace } } - /// Create a new subspace prefixed by a binary key - /// Suffix of the subspace - /// New subspace with prefix equal to the database's global prefix followed by - public FdbSubspace this[Slice suffix] - { - //REVIEW: return IFdbSusbspace? - get { return suffix.IsNullOrEmpty ? 
m_globalSpace : m_globalSpaceCopy[suffix]; } - } - - /// Create a new subspace prefixed by a key - /// Key that will packed - /// New subspace with prefix equal to the database's global prefix followed by the packed representation of - public FdbSubspace this[IFdbKey key] - { - //REVIEW: return IFdbSusbspace? - get { return key == null ? m_globalSpace : m_globalSpaceCopy[key]; } - } - - IFdbSubspace IFdbSubspace.this[Slice suffix] - { - get { return this[suffix]; } - } - - IFdbSubspace IFdbSubspace.this[IFdbKey key] - { - get { return this[key]; } - } - /// Checks that a key is valid, and is inside the global key space of this database /// /// Key to verify @@ -554,10 +529,53 @@ public bool Contains(Slice key) return key.HasValue && m_globalSpace.Contains(key); } + public Slice BoundCheck(Slice key, bool allowSystemKeys) + { + return m_globalSpace.BoundCheck(key, allowSystemKeys); + } + + Slice IFdbSubspace.ConcatKey(Slice key) + { + return m_globalSpace.ConcatKey(key); + } + + Slice[] IFdbSubspace.ConcatKeys(IEnumerable keys) + { + return m_globalSpace.ConcatKeys(keys); + } + /// Remove the database global subspace prefix from a binary key, or throw if the key is outside of the global subspace. - public Slice ExtractAndCheck(Slice key) + Slice IFdbSubspace.ExtractKey(Slice key, bool boundCheck) + { + return m_globalSpace.ExtractKey(key, boundCheck); + } + + /// Remove the database global subspace prefix from a binary key, or throw if the key is outside of the global subspace. + Slice[] IFdbSubspace.ExtractKeys(IEnumerable keys, bool boundCheck) + { + return m_globalSpace.ExtractKeys(keys, boundCheck); + } + + Slice IFdbSubspace.Key + { + get { return m_globalSpace.Key; } + } + + public FdbSubspacePartition Partition + { + //REVIEW: should we hide this on the main db? 
+ get { return m_globalSpace.Partition; } + } + + public FdbSubspaceKeys Keys + { + get { return m_globalSpace.Keys; } + } + + public FdbSubspaceTuples Tuples { - return m_globalSpace.ExtractAndCheck(key); + //REVIEW: should we hide this on the main db? + get { return m_globalSpace.Tuples; } } /// Returns a range that contains all the keys that are inside the database global subspace combined with the suffix , but not inside the System subspace. diff --git a/FoundationDB.Client/FdbDatabaseExtensions.cs b/FoundationDB.Client/FdbDatabaseExtensions.cs index 2b4a9393b..b2140369d 100644 --- a/FoundationDB.Client/FdbDatabaseExtensions.cs +++ b/FoundationDB.Client/FdbDatabaseExtensions.cs @@ -194,36 +194,7 @@ internal static void EnsureKeysAreValid(this IFdbDatabase db, Slice[] keys, bool /// public static Slice Extract(this IFdbDatabase db, Slice keyAbsolute) { - return db.GlobalSpace.Extract(keyAbsolute); - } - - #endregion - - #region Unpack... - - /// Unpack a key using the current namespace of the database - /// Database instance - /// Key that should fit inside the current namespace of the database - [CanBeNull] - public static IFdbTuple Unpack(this IFdbDatabase db, Slice key) - { - return db.GlobalSpace.Unpack(key); - } - - /// Unpack a key using the current namespace of the database - /// Database instance - /// Key that should fit inside the current namespace of the database - public static T UnpackLast(this IFdbDatabase db, Slice key) - { - return db.GlobalSpace.UnpackLast(key); - } - - /// Unpack a key using the current namespace of the database - /// Database instance - /// Key that should fit inside the current namespace of the database - public static T UnpackSingle(this IFdbDatabase db, Slice key) - { - return db.GlobalSpace.UnpackSingle(key); + return db.GlobalSpace.ExtractKey(keyAbsolute); } #endregion diff --git a/FoundationDB.Client/FdbKey.cs b/FoundationDB.Client/FdbKey.cs index 4e407bbd3..e3fab9a36 100644 --- a/FoundationDB.Client/FdbKey.cs +++ 
b/FoundationDB.Client/FdbKey.cs @@ -93,6 +93,8 @@ public static Slice[] Merge(Slice prefix, [NotNull] Slice[] keys) if (prefix == null) throw new ArgumentNullException("prefix"); if (keys == null) throw new ArgumentNullException("keys"); + //REVIEW: merge this code with Slice.ConcatRange! + // we can pre-allocate exactly the buffer by computing the total size of all keys int size = keys.Sum(key => key.Count) + keys.Length * prefix.Count; var writer = new SliceWriter(size); @@ -120,6 +122,8 @@ public static Slice[] Merge(Slice prefix, [NotNull] IEnumerable keys) if (prefix == null) throw new ArgumentNullException("prefix"); if (keys == null) throw new ArgumentNullException("keys"); + //REVIEW: merge this code with Slice.ConcatRange! + // use optimized version for arrays var array = keys as Slice[]; if (array != null) return Merge(prefix, array); diff --git a/FoundationDB.Client/FdbRangeChunk.cs b/FoundationDB.Client/FdbRangeChunk.cs index e30571a6f..598a86cc0 100644 --- a/FoundationDB.Client/FdbRangeChunk.cs +++ b/FoundationDB.Client/FdbRangeChunk.cs @@ -135,7 +135,7 @@ public KeyValuePair[] Decode([NotNull] FdbSubspace s for (int i = 0; i < chunk.Length; i++) { results[i] = new KeyValuePair( - keyEncoder.DecodeKey(subspace.ExtractAndCheck(chunk[i].Key)), + keyEncoder.DecodeKey(subspace.ExtractKey(chunk[i].Key, boundCheck: true)), valueEncoder.DecodeValue(chunk[i].Value) ); } @@ -200,7 +200,7 @@ public T[] DecodeKeys([NotNull] FdbSubspace subspace, [NotNull] IKeyEncoderCluster of the database - public IFdbCluster Cluster + public virtual IFdbCluster Cluster { //REVIEW: do we need a Cluster Filter ? 
[NotNull] @@ -107,7 +108,7 @@ public CancellationToken Cancellation } /// Returns the global namespace used by this database instance - public FdbSubspace GlobalSpace + public virtual IFdbSubspace GlobalSpace { [NotNull] get { return m_database.GlobalSpace; } @@ -132,14 +133,24 @@ public virtual bool IsReadOnly get { return m_readOnly; } } - public virtual IFdbSubspace this[Slice suffix] + Slice IFdbSubspace.Key { - get { return m_database[suffix]; } + get { return this.GlobalSpace.Key; } } - public virtual IFdbSubspace this[IFdbKey key] + public virtual FdbSubspacePartition Partition { - get { return m_database[key]; } + get { return m_database.Partition; } + } + + public virtual FdbSubspaceKeys Keys + { + get { return m_database.Keys; } + } + + public virtual FdbSubspaceTuples Tuples + { + get { return m_database.Tuples; } } public virtual bool Contains(Slice key) @@ -147,9 +158,29 @@ public virtual bool Contains(Slice key) return m_database.Contains(key); } - public virtual Slice ExtractAndCheck(Slice key) + public virtual Slice BoundCheck(Slice key, bool allowSystemKeys) + { + return m_database.BoundCheck(key, allowSystemKeys); + } + + public virtual Slice ConcatKey(Slice key) + { + return m_database.ConcatKey(key); + } + + public virtual Slice[] ConcatKeys(IEnumerable keys) + { + return m_database.ConcatKeys(keys); + } + + public virtual Slice ExtractKey(Slice key, bool boundCheck = false) + { + return m_database.ExtractKey(key, boundCheck); + } + + public virtual Slice[] ExtractKeys(IEnumerable keys, bool boundCheck = false) { - return m_database.ExtractAndCheck(key); + return m_database.ExtractKeys(keys, boundCheck); } public virtual FdbKeyRange ToRange(Slice key) diff --git a/FoundationDB.Client/Filters/PrefixRewriterTransaction.cs b/FoundationDB.Client/Filters/PrefixRewriterTransaction.cs index f1284abf4..10b16e438 100644 --- a/FoundationDB.Client/Filters/PrefixRewriterTransaction.cs +++ b/FoundationDB.Client/Filters/PrefixRewriterTransaction.cs @@ -38,31 
+38,31 @@ public sealed class PrefixRewriterTransaction : FdbTransactionFilter { // We will add a prefix to all keys sent to the db, and remove it on the way back - private readonly FdbSubspace m_prefix; + private readonly IFdbSubspace m_prefix; - public PrefixRewriterTransaction(FdbSubspace prefix, IFdbTransaction trans, bool ownsTransaction) + public PrefixRewriterTransaction(IFdbSubspace prefix, IFdbTransaction trans, bool ownsTransaction) : base(trans, false, ownsTransaction) { if (prefix == null) throw new ArgumentNullException("prefix"); m_prefix = prefix; } - public FdbSubspace Prefix { get { return m_prefix; } } + public IFdbSubspace Prefix { get { return m_prefix; } } private Slice Encode(Slice key) { - return m_prefix.Concat(key); + return m_prefix.ConcatKey(key); } private Slice[] Encode(Slice[] keys) { - return m_prefix.ConcatRange(keys); + return m_prefix.ConcatKeys(keys); } private FdbKeySelector Encode(FdbKeySelector selector) { return new FdbKeySelector( - m_prefix.Concat(selector.Key), + m_prefix.ConcatKey(selector.Key), selector.OrEqual, selector.Offset ); @@ -75,7 +75,7 @@ private FdbKeySelector[] Encode(FdbKeySelector[] selectors) { keys[i] = selectors[i].Key; } - keys = m_prefix.ConcatRange(keys); + keys = m_prefix.ConcatKeys(keys); var res = new FdbKeySelector[selectors.Length]; for (int i = 0; i < selectors.Length; i++) @@ -91,7 +91,7 @@ private FdbKeySelector[] Encode(FdbKeySelector[] selectors) private Slice Decode(Slice key) { - return m_prefix.Extract(key); + return m_prefix.ExtractKey(key); } private Slice[] Decode(Slice[] keys) @@ -99,7 +99,7 @@ private Slice[] Decode(Slice[] keys) var res = new Slice[keys.Length]; for (int i = 0; i < keys.Length;i++) { - res[i] = m_prefix.Extract(keys[i]); + res[i] = m_prefix.ExtractKey(keys[i]); } return res; } diff --git a/FoundationDB.Client/FoundationDB.Client.csproj b/FoundationDB.Client/FoundationDB.Client.csproj index 35d6deeb3..4bf9383e7 100644 --- 
a/FoundationDB.Client/FoundationDB.Client.csproj +++ b/FoundationDB.Client/FoundationDB.Client.csproj @@ -146,6 +146,9 @@ + + + diff --git a/FoundationDB.Client/IFdbDatabase.cs b/FoundationDB.Client/IFdbDatabase.cs index 8e635a05e..4459884e5 100644 --- a/FoundationDB.Client/IFdbDatabase.cs +++ b/FoundationDB.Client/IFdbDatabase.cs @@ -47,7 +47,7 @@ public interface IFdbDatabase : IFdbReadOnlyTransactional, IFdbTransactional, IF /// Returns the global namespace used by this database instance /// Makes a copy of the subspace tuple, so you should not call this property a lot. Use any of the Partition(..) methods to create a subspace of the database - FdbSubspace GlobalSpace { [NotNull] get; } + IFdbSubspace GlobalSpace { [NotNull] get; } /// Directory partition of this database instance FdbDatabasePartition Directory { [NotNull] get; } diff --git a/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs b/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs index d13f6eb3c..e2ad404e8 100644 --- a/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs +++ b/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs @@ -54,13 +54,13 @@ public class FdbDirectoryLayer : IFdbDirectory internal static readonly Slice VersionKey = Slice.FromAscii("version"); /// Subspace where the content of each folder will be stored - public FdbSubspace ContentSubspace { get; private set; } + public IFdbSubspace ContentSubspace { get; private set; } /// Subspace where all the metadata nodes for each folder will be stored - public FdbSubspace NodeSubspace { get; private set; } + public IFdbSubspace NodeSubspace { get; private set; } /// Root node of the directory - internal FdbSubspace RootNode { get; private set; } + internal IFdbSubspace RootNode { get; private set; } /// Allocated used to generated prefix for new content internal FdbHighContentionAllocator Allocator { get; private set; } @@ -114,7 +114,7 @@ Task IFdbDirectory.ChangeLayerAsync(IFdbTransaction trans, /// 
Subspace where all the node metadata will be stored ('\xFE' by default) /// Subspace where all automatically allocated directories will be stored (empty by default) /// Location of the root of all the directories managed by this Directory Layer. Ususally empty for the root partition of the database. - internal FdbDirectoryLayer(FdbSubspace nodeSubspace, FdbSubspace contentSubspace, IFdbTuple location) + internal FdbDirectoryLayer(IFdbSubspace nodeSubspace, IFdbSubspace contentSubspace, IFdbTuple location) { Contract.Requires(nodeSubspace != null && contentSubspace != null); @@ -123,8 +123,8 @@ internal FdbDirectoryLayer(FdbSubspace nodeSubspace, FdbSubspace contentSubspace this.NodeSubspace = nodeSubspace; // The root node is the one whose contents are the node subspace - this.RootNode = nodeSubspace.Partition(nodeSubspace.Key); - this.Allocator = new FdbHighContentionAllocator(this.RootNode.Partition(HcaKey)); + this.RootNode = nodeSubspace.Partition.By(nodeSubspace.Key); + this.Allocator = new FdbHighContentionAllocator(this.RootNode.Partition.By(HcaKey)); if (location == null || location.Count == 0) { this.Location = FdbTuple.Empty; @@ -150,24 +150,24 @@ public static FdbDirectoryLayer Create(Slice prefix, IEnumerable path = { var subspace = FdbSubspace.Create(prefix); var location = path != null ? ParsePath(path) : FdbTuple.Empty; - return new FdbDirectoryLayer(subspace[FdbKey.Directory], subspace, location); + return new FdbDirectoryLayer(subspace.Partition[FdbKey.Directory], subspace, location); } /// Create an instance of a Directory Layer located under a specific subspace and path /// Subspace for the content. The nodes will be stored under .Key + <FE> /// Optional path, if the Directory Layer is not located at the root of the database. 
- public static FdbDirectoryLayer Create(FdbSubspace subspace, IEnumerable path = null) + public static FdbDirectoryLayer Create(IFdbSubspace subspace, IEnumerable path = null) { if (subspace == null) throw new ArgumentNullException("subspace"); var location = path != null ? ParsePath(path) : FdbTuple.Empty; - return new FdbDirectoryLayer(subspace[FdbKey.Directory], subspace, location); + return new FdbDirectoryLayer(subspace.Partition[FdbKey.Directory], subspace, location); } /// Create an instance of a Directory Layer located under a specific subpsace and path /// Subspace for the nodes of the Directory Layer. /// Subspace for the content of the Directory Layer. /// Optional path, if the Directory Layer is not located at the root of the database - public static FdbDirectoryLayer Create(FdbSubspace nodeSubspace, FdbSubspace contentSubspace, IEnumerable path = null) + public static FdbDirectoryLayer Create(IFdbSubspace nodeSubspace, IFdbSubspace contentSubspace, IEnumerable path = null) { if (nodeSubspace == null) throw new ArgumentNullException("nodeSubspace"); if (contentSubspace == null) throw new ArgumentNullException("contentSubspace"); @@ -454,7 +454,7 @@ public override string ToString() private struct Node { - public Node(FdbSubspace subspace, IFdbTuple path, IFdbTuple targetPath, Slice layer) + public Node(IFdbSubspace subspace, IFdbTuple path, IFdbTuple targetPath, Slice layer) { this.Subspace = subspace; this.Path = path; @@ -462,7 +462,7 @@ public Node(FdbSubspace subspace, IFdbTuple path, IFdbTuple targetPath, Slice la this.Layer = layer; } - public readonly FdbSubspace Subspace; + public readonly IFdbSubspace Subspace; public readonly IFdbTuple Path; public readonly IFdbTuple TargetPath; public Slice Layer; //PERF: readonly struct @@ -478,10 +478,10 @@ public bool IsInPartition(bool includeEmptySubPath) } - private static void SetLayer(IFdbTransaction trans, FdbSubspace subspace, Slice layer) + private static void SetLayer(IFdbTransaction trans, 
IFdbSubspace subspace, Slice layer) { if (layer.IsNull) layer = Slice.Empty; - trans.Set(subspace.Pack(LayerSuffix), layer); + trans.Set(subspace.Tuples.EncodeKey(LayerSuffix), layer); } internal static IFdbTuple ParsePath(IEnumerable path, string argName = null) @@ -600,7 +600,7 @@ internal async Task CreateOrOpenInternalAsync(IFdbReadOnly if (prefix == null) { // automatically allocate a new prefix inside the ContentSubspace long id = await this.Allocator.AllocateAsync(trans).ConfigureAwait(false); - prefix = this.ContentSubspace.Pack(id); + prefix = this.ContentSubspace.Tuples.EncodeKey(id); // ensure that there is no data already present under this prefix if (await trans.GetRange(FdbKeyRange.StartsWith(prefix)).AnyAsync().ConfigureAwait(false)) @@ -624,7 +624,7 @@ internal async Task CreateOrOpenInternalAsync(IFdbReadOnly } // we need to recursively create any missing parents - FdbSubspace parentNode; + IFdbSubspace parentNode; if (path.Count > 1) { var parentSubspace = await CreateOrOpenInternalAsync(readTrans, trans, path.Substring(0, path.Count - 1), Slice.Nil, Slice.Nil, true, true, true).ConfigureAwait(false); @@ -696,7 +696,7 @@ internal async Task MoveInternalAsync(IFdbTransaction tran return null; } - trans.Set(GetSubDirKey(parentNode.Subspace, newPath.Get(-1)), this.NodeSubspace.UnpackSingle(oldNode.Subspace.Key)); + trans.Set(GetSubDirKey(parentNode.Subspace, newPath.Get(-1)), this.NodeSubspace.Tuples.DecodeKey(oldNode.Subspace.Key)); await RemoveFromParent(trans, oldPath).ConfigureAwait(false); return ContentsOfNode(oldNode.Subspace, newPath, oldNode.Layer); @@ -799,7 +799,7 @@ internal async Task ChangeLayerInternalAsync(IFdbTransaction trans, IFdbTuple pa private async Task CheckReadVersionAsync(IFdbReadOnlyTransaction trans) { - var value = await trans.GetAsync(this.RootNode.Pack(VersionKey)).ConfigureAwait(false); + var value = await trans.GetAsync(this.RootNode.Tuples.EncodeKey(VersionKey)).ConfigureAwait(false); if (!value.IsNullOrEmpty) { 
CheckVersion(value, false); @@ -808,7 +808,7 @@ private async Task CheckReadVersionAsync(IFdbReadOnlyTransaction trans) private async Task CheckWriteVersionAsync(IFdbTransaction trans) { - var value = await trans.GetAsync(this.RootNode.Pack(VersionKey)).ConfigureAwait(false); + var value = await trans.GetAsync(this.RootNode.Tuples.EncodeKey(VersionKey)).ConfigureAwait(false); if (value.IsNullOrEmpty) { InitializeDirectory(trans); @@ -838,10 +838,10 @@ private void InitializeDirectory(IFdbTransaction trans) writer.WriteFixed32((uint)LayerVersion.Major); writer.WriteFixed32((uint)LayerVersion.Minor); writer.WriteFixed32((uint)LayerVersion.Build); - trans.Set(this.RootNode.Pack(VersionKey), writer.ToSlice()); + trans.Set(this.RootNode.Tuples.EncodeKey(VersionKey), writer.ToSlice()); } - private async Task NodeContainingKey(IFdbReadOnlyTransaction tr, Slice key) + private async Task NodeContainingKey(IFdbReadOnlyTransaction tr, Slice key) { Contract.Requires(tr != null); @@ -855,14 +855,14 @@ private async Task NodeContainingKey(IFdbReadOnlyTransaction tr, Sl var kvp = await tr .GetRange( this.NodeSubspace.ToRange().Begin, - this.NodeSubspace.Pack(key) + FdbKey.MinValue + this.NodeSubspace.Tuples.EncodeKey(key) + FdbKey.MinValue ) .LastOrDefaultAsync() .ConfigureAwait(false); if (kvp.Key.HasValue) { - var prevPrefix = this.NodeSubspace.UnpackFirst(kvp.Key); + var prevPrefix = this.NodeSubspace.Tuples.DecodeFirst(kvp.Key); if (key.StartsWith(prevPrefix)) { return NodeWithPrefix(prevPrefix); @@ -873,19 +873,19 @@ private async Task NodeContainingKey(IFdbReadOnlyTransaction tr, Sl } /// Returns the subspace to a node metadata, given its prefix - private FdbSubspace NodeWithPrefix(Slice prefix) + private IFdbSubspace NodeWithPrefix(Slice prefix) { if (prefix.IsNullOrEmpty) return null; - return this.NodeSubspace.Partition(prefix); + return this.NodeSubspace.Partition.By(prefix); } /// Returns a new Directory Subspace given its node subspace, path and layer id - private 
FdbDirectorySubspace ContentsOfNode(FdbSubspace node, IFdbTuple relativePath, Slice layer) + private FdbDirectorySubspace ContentsOfNode(IFdbSubspace node, IFdbTuple relativePath, Slice layer) { Contract.Requires(node != null); var path = this.Location.Concat(relativePath); - var prefix = this.NodeSubspace.UnpackSingle(node.Key); + var prefix = this.NodeSubspace.Tuples.DecodeKey(node.Key); if (layer == FdbDirectoryPartition.LayerId) { return new FdbDirectoryPartition(path, relativePath, prefix, this); @@ -921,7 +921,7 @@ private async Task FindAsync(IFdbReadOnlyTransaction tr, IFdbTuple path) return new Node(null, path.Substring(0, i + 1), path, Slice.Empty); } - layer = await tr.GetAsync(n.Pack(LayerSuffix)).ConfigureAwait(false); + layer = await tr.GetAsync(n.Tuples.EncodeKey(LayerSuffix)).ConfigureAwait(false); if (layer == FdbDirectoryPartition.LayerId) { // stop when reaching a partition return new Node(n, path.Substring(0, i + 1), path, FdbDirectoryPartition.LayerId); @@ -933,15 +933,15 @@ private async Task FindAsync(IFdbReadOnlyTransaction tr, IFdbTuple path) } /// Returns the list of names and nodes of all children of the specified node - private IFdbAsyncEnumerable> SubdirNamesAndNodes(IFdbReadOnlyTransaction tr, FdbSubspace node) + private IFdbAsyncEnumerable> SubdirNamesAndNodes(IFdbReadOnlyTransaction tr, IFdbSubspace node) { Contract.Requires(tr != null && node != null); - var sd = node.Partition(SUBDIRS); + var sd = node.Partition.By(SUBDIRS); return tr .GetRange(sd.ToRange()) - .Select(kvp => new KeyValuePair( - sd.UnpackSingle(kvp.Key), + .Select(kvp => new KeyValuePair( + sd.Tuples.DecodeKey(kvp.Key), NodeWithPrefix(kvp.Value) )); } @@ -962,7 +962,7 @@ private async Task RemoveFromParent(IFdbTransaction tr, IFdbTuple path) } /// Resursively remove a node (including the content), all its children - private async Task RemoveRecursive(IFdbTransaction tr, FdbSubspace node) + private async Task RemoveRecursive(IFdbTransaction tr, IFdbSubspace node) { 
Contract.Requires(tr != null && node != null); @@ -986,20 +986,20 @@ private async Task IsPrefixFree(IFdbReadOnlyTransaction tr, Slice prefix) return await tr .GetRange( - this.NodeSubspace.Pack(prefix), - this.NodeSubspace.Pack(FdbKey.Increment(prefix)) + this.NodeSubspace.Tuples.EncodeKey(prefix), + this.NodeSubspace.Tuples.EncodeKey(FdbKey.Increment(prefix)) ) .NoneAsync() .ConfigureAwait(false); } - private static Slice GetSubDirKey(FdbSubspace parent, string path) + private static Slice GetSubDirKey(IFdbSubspace parent, string path) { Contract.Requires(parent != null && path != null); // for a path equal to ("foo","bar","baz") and index = -1, we need to generate (parent, SUBDIRS, "baz") // but since the last item of path can be of any type, we will use tuple splicing to copy the last item without changing its type - return parent.Pack(SUBDIRS, path); + return parent.Tuples.EncodeKey(SUBDIRS, path); } /// Convert a tuple representing a path, into a string array diff --git a/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs b/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs index b3e9210e2..7813a38aa 100644 --- a/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs +++ b/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs @@ -41,21 +41,21 @@ public sealed class FdbHighContentionAllocator private readonly Random m_rnd = new Random(); - public FdbHighContentionAllocator(FdbSubspace subspace) + public FdbHighContentionAllocator(IFdbSubspace subspace) { if (subspace == null) throw new ArgumentException("subspace"); this.Subspace = subspace; - this.Counters = subspace.Partition(COUNTERS); - this.Recent = subspace.Partition(RECENT); + this.Counters = subspace.Partition.By(COUNTERS); + this.Recent = subspace.Partition.By(RECENT); } /// Location of the allocator - public FdbSubspace Subspace { get; private set; } + public IFdbSubspace Subspace { get; private set; } - private FdbSubspace Counters { 
get; set; } + private IFdbSubspace Counters { get; set; } - private FdbSubspace Recent { get; set; } + private IFdbSubspace Recent { get; set; } /// Returns a 64-bit integer that /// 1) has never and will never be returned by another call to this @@ -73,7 +73,7 @@ public async Task AllocateAsync(IFdbTransaction trans) if (kv.Key.IsPresent) { - start = this.Counters.UnpackSingle(kv.Key); + start = this.Counters.Tuples.DecodeKey(kv.Key); count = kv.Value.ToInt64(); } @@ -81,13 +81,13 @@ public async Task AllocateAsync(IFdbTransaction trans) int window = GetWindowSize(start); if ((count + 1) * 2 >= window) { // advance the window - trans.ClearRange(this.Counters.Key, this.Counters.Pack(start) + FdbKey.MinValue); + trans.ClearRange(this.Counters.Key, this.Counters.Tuples.EncodeKey(start) + FdbKey.MinValue); start += window; - trans.ClearRange(this.Recent.Key, this.Recent.Pack(start)); + trans.ClearRange(this.Recent.Key, this.Recent.Tuples.EncodeKey(start)); } // Increment the allocation count for the current window - trans.AtomicAdd(this.Counters.Pack(start), Slice.FromFixed64(1)); + trans.AtomicAdd(this.Counters.Tuples.EncodeKey(start), Slice.FromFixed64(1)); // As of the snapshot being read from, the window is less than half // full, so this should be expected to take 2 tries. 
Under high @@ -103,7 +103,7 @@ public async Task AllocateAsync(IFdbTransaction trans) } // test if the key is used - var key = this.Recent.Pack(candidate); + var key = this.Recent.Tuples.EncodeKey(candidate); var value = await trans.GetAsync(key).ConfigureAwait(false); if (value.IsNull) diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs index 5a0a0725f..598ce0b10 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs @@ -403,14 +403,14 @@ public static Slice Pack([NotNull] params object[] items) /// Sequence of keys to pack /// Array of slices (for all keys) that share the same underlying buffer [NotNull] - public static Slice[] PackRange(Slice prefix, [NotNull] IEnumerable keys) + public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] IEnumerable keys) { if (prefix == null) throw new ArgumentNullException("prefix"); if (keys == null) throw new ArgumentNullException("keys"); // use optimized version for arrays var array = keys as T[]; - if (array != null) return PackRange(prefix, array); + if (array != null) return PackRangeWithPrefix(prefix, array); var next = new List(); var writer = SliceWriter.Empty; @@ -434,7 +434,7 @@ public static Slice[] PackRange(Slice prefix, [NotNull] IEnumerable keys) /// Sequence of keys to pack /// Array of slices (for all keys) that share the same underlying buffer [NotNull] - public static Slice[] PackRange(Slice prefix, [NotNull] params T[] keys) + public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] params T[] keys) { if (keys == null) throw new ArgumentNullException("keys"); @@ -463,7 +463,7 @@ public static Slice[] PackRange(Slice prefix, [NotNull] params T[] keys) /// Array of slices (for all keys) that share the same underlying buffer public static Slice[] PackRange([NotNull] TElement[] elements, [NotNull] Func selector) { - return PackRange(Slice.Empty, elements, selector); + return 
PackRangeWithPrefix(Slice.Empty, elements, selector); } /// Merge an array of elements with a same prefix, all sharing the same buffer @@ -473,7 +473,7 @@ public static Slice[] PackRange([NotNull] TElement[] elements, [ /// Sequence of elements to pack /// Lambda that extract the key from each element /// Array of slices (for all keys) that share the same underlying buffer - public static Slice[] PackRange(Slice prefix, [NotNull] TElement[] elements, [NotNull] Func selector) + public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] TElement[] elements, [NotNull] Func selector) { if (elements == null) throw new ArgumentNullException("elements"); if (selector == null) throw new ArgumentNullException("selector"); @@ -502,7 +502,7 @@ public static Slice[] PackRange(Slice prefix, [NotNull] TElement [NotNull] public static Slice[] PackRange([NotNull] IEnumerable tuples) { - return PackRange(Slice.Nil, tuples); + return PackRangeWithPrefix(Slice.Nil, tuples); } /// Pack a sequence of N-tuples, all sharing the same buffer @@ -511,13 +511,13 @@ public static Slice[] PackRange([NotNull] IEnumerable tuples) /// Array containing the buffer segment of each packed tuple /// BatchPack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] [NotNull] - public static Slice[] PackRange(Slice prefix, [NotNull] IEnumerable tuples) + public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] IEnumerable tuples) { if (tuples == null) throw new ArgumentNullException("tuples"); // use optimized version for arrays var array = tuples as IFdbTuple[]; - if (array != null) return PackRange(prefix, array); + if (array != null) return PackRangeWithPrefix(prefix, array); var next = new List(); var writer = SliceWriter.Empty; @@ -541,7 +541,7 @@ public static Slice[] PackRange(Slice prefix, [NotNull] IEnumerable t [NotNull] public static Slice[] PackRange([NotNull] IFdbTuple[] tuples) { - return PackRange(Slice.Nil, tuples); + return 
PackRangeWithPrefix(Slice.Nil, tuples); } /// Pack an array of N-tuples, all sharing the same buffer @@ -550,7 +550,7 @@ public static Slice[] PackRange([NotNull] IFdbTuple[] tuples) /// Array containing the buffer segment of each packed tuple /// BatchPack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] [NotNull] - public static Slice[] PackRange(Slice prefix, params IFdbTuple[] tuples) + public static Slice[] PackRangeWithPrefix(Slice prefix, params IFdbTuple[] tuples) { if (tuples == null) throw new ArgumentNullException("tuples"); @@ -576,11 +576,11 @@ public static Slice[] PackRange(Slice prefix, params IFdbTuple[] tuples) /// Sequence of keys to pack /// Array of slices (for all keys) that share the same underlying buffer [NotNull] - public static Slice[] PackRange([NotNull] IFdbTuple prefix, [NotNull] IEnumerable keys) + public static Slice[] PackRangeWithPrefix([NotNull] IFdbTuple prefix, [NotNull] IEnumerable keys) { if (prefix == null) throw new ArgumentNullException("prefix"); - return PackRange(prefix.ToSlice(), keys); + return PackRangeWithPrefix(prefix.ToSlice(), keys); } /// Pack a sequence of keys with a same prefix, all sharing the same buffer @@ -589,57 +589,11 @@ public static Slice[] PackRange([NotNull] IFdbTuple prefix, [NotNull] IEnumer /// Sequence of keys to pack /// Array of slices (for all keys) that share the same underlying buffer [NotNull] - public static Slice[] PackRange([NotNull] IFdbTuple prefix, [NotNull] params T[] keys) + public static Slice[] PackRangeWithPrefix([NotNull] IFdbTuple prefix, [NotNull] params T[] keys) { if (prefix == null) throw new ArgumentNullException("prefix"); - return PackRange(prefix.ToSlice(), keys); - } - - /// Pack a sequence of keys with a same prefix, all sharing the same buffer - /// Prefix shared by all keys - /// Sequence of keys to pack - /// Array of slices (for all keys) that share the same underlying buffer - [NotNull] - public static Slice[] 
PackBoxedRange(Slice prefix, [NotNull] IEnumerable keys) - { - return PackRange(prefix, keys); - } - - /// Pack a sequence of keys with a same prefix, all sharing the same buffer - /// Prefix shared by all keys - /// Sequence of keys to pack - /// Array of slices (for all keys) that share the same underlying buffer - [NotNull] - public static Slice[] PackBoxedRange(Slice prefix, [NotNull] object[] keys) - { - //note: we don't use "params object[] keys" because it can be ambiguous when passing an 'object[]' parameter (because an object[] is also an object) - return PackRange(prefix, keys); - } - - /// Pack a sequence of keys with a same prefix, all sharing the same buffer - /// Prefix shared by all keys - /// Sequence of keys to pack - /// Array of slices (for all keys) that share the same underlying buffer - [NotNull] - public static Slice[] PackBoxedRange([NotNull] IFdbTuple prefix, [NotNull] IEnumerable keys) - { - if (prefix == null) throw new ArgumentNullException("prefix"); - - return PackRange(prefix.ToSlice(), keys); - } - - /// Pack a sequence of keys with a same prefix, all sharing the same buffer - /// Prefix shared by all keys - /// Sequence of keys to pack - /// Array of slices (for all keys) that share the same underlying buffer - [NotNull] - public static Slice[] PackBoxedRange([NotNull] IFdbTuple prefix, [NotNull] object[] keys) - { - //note: we don't use "params object[] keys" because it can be ambiguous when passing an 'object[]' parameter (because an object[] is also an object) - if (prefix == null) throw new ArgumentNullException("prefix"); - - return PackRange(prefix.ToSlice(), keys); + return PackRangeWithPrefix(prefix.ToSlice(), keys); } #endregion @@ -667,7 +621,12 @@ public static IFdbTuple Unpack(Slice packedKey) public static IFdbTuple UnpackWithoutPrefix(Slice packedKey, Slice prefix) { // ensure that the key starts with the prefix - if (!packedKey.StartsWith(prefix)) throw new ArgumentOutOfRangeException("packedKey", "The specifed packed 
tuple does not start with the expected prefix"); + if (!packedKey.StartsWith(prefix)) +#if DEBUG + throw new ArgumentOutOfRangeException("packedKey", String.Format("The specifed packed tuple does not start with the expected prefix '{0}'", prefix.ToString())); +#else + throw new ArgumentOutOfRangeException("packedKey", "The specifed packed tuple does not start with the expected prefix"); +#endif // unpack the key, minus the prefix return FdbTuplePackers.Unpack(packedKey.Substring(prefix.Count)); diff --git a/FoundationDB.Client/Subspaces/Fdb.Directory.cs b/FoundationDB.Client/Subspaces/Fdb.Directory.cs index 5d75dbc08..6592938ed 100644 --- a/FoundationDB.Client/Subspaces/Fdb.Directory.cs +++ b/FoundationDB.Client/Subspaces/Fdb.Directory.cs @@ -84,7 +84,7 @@ public static async Task OpenNamedPartitionAsync(string clusterFil if (Logging.On) Logging.Verbose(typeof(Fdb.Directory), "OpenNamedPartitionAsync", String.Format("Found named partition '{0}' at prefix {1}", descriptor.FullName, descriptor)); // we have to chroot the database to the new prefix, and create a new DirectoryLayer with a new '/' - rootSpace = descriptor.Copy(); //note: create a copy of the key + rootSpace = FdbSubspace.Copy(descriptor); //note: create a copy of the key //TODO: find a nicer way to do that! 
db.ChangeRoot(rootSpace, FdbDirectoryLayer.Create(rootSpace), readOnly); diff --git a/FoundationDB.Client/Subspaces/FdbSubspace.cs b/FoundationDB.Client/Subspaces/FdbSubspace.cs index 7971bce1d..7c477f5b5 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspace.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspace.cs @@ -31,9 +31,13 @@ namespace FoundationDB.Client using FoundationDB.Layers.Tuples; using JetBrains.Annotations; using System; + using System.Linq; + using System.Collections.Generic; + using System.Globalization; + using System.Diagnostics; /// Adds a prefix on every keys, to group them inside a common subspace - public class FdbSubspace : IFdbSubspace, IFdbKey, IEquatable, IComparable + public class FdbSubspace : IFdbSubspace, IFdbKey, IEquatable, IComparable { /// Empty subspace, that does not add any prefix to the keys public static readonly FdbSubspace Empty = new FdbSubspace(Slice.Empty); @@ -41,6 +45,12 @@ public class FdbSubspace : IFdbSubspace, IFdbKey, IEquatable, IComp /// Binary prefix of this subspace private Slice m_rawPrefix; //PERF: readonly struct + /// Helper used to deal with keys in this subspace + private FdbSubspaceKeys m_keys; // cached for perf reasons + + /// Helper used to deal with keys in this subspace + private FdbSubspaceTuples m_tuples; // cached for perf reasons + /// Returns the key of this directory subspace /// This should only be used by methods that can use the key internally, even if it is not supposed to be exposed (as is the case for directory partitions) protected Slice InternalKey @@ -50,12 +60,16 @@ protected Slice InternalKey #region Constructors... - /// Wraps an existing subspace - protected FdbSubspace([NotNull] FdbSubspace copy) + /// Wraps an existing subspace, without copying the prefix (if possible) + protected FdbSubspace([NotNull] IFdbSubspace copy) { if (copy == null) throw new ArgumentNullException("copy"); - if (copy.m_rawPrefix.IsNull) throw new ArgumentException("The subspace key cannot be null. 
Use Slice.Empty if you want a subspace with no prefix.", "copy"); - m_rawPrefix = copy.m_rawPrefix; + var sub = copy as FdbSubspace; + Slice key = sub != null ? sub.m_rawPrefix : copy.ToFoundationDbKey(); + if (key.IsNull) throw new ArgumentException("The subspace key cannot be null. Use Slice.Empty if you want a subspace with no prefix.", "copy"); + m_rawPrefix = key; + m_keys = new FdbSubspaceKeys(this); + m_tuples = new FdbSubspaceTuples(this); } /// Create a new subspace from a binary prefix @@ -66,6 +80,8 @@ protected FdbSubspace(Slice rawPrefix, bool copy) if (rawPrefix.IsNull) throw new ArgumentException("The subspace key cannot be null. Use Slice.Empty if you want a subspace with no prefix.", "rawPrefix"); if (copy) rawPrefix = rawPrefix.Memoize(); m_rawPrefix = rawPrefix.Memoize(); + m_keys = new FdbSubspaceKeys(this); + m_tuples = new FdbSubspaceTuples(this); } /// Create a new subspace from a binary prefix @@ -74,14 +90,6 @@ public FdbSubspace(Slice rawPrefix) : this(rawPrefix, true) { } - /// Create a new subspace from a Tuple prefix - /// Tuple packed to produce the prefix - public FdbSubspace([NotNull] IFdbTuple tuple) - { - if (tuple == null) throw new ArgumentNullException("tuple"); - m_rawPrefix = tuple.ToSlice().Memoize(); - } - #endregion #region Static Prefix Helpers... @@ -101,70 +109,28 @@ public static FdbSubspace Create(Slice slice) [NotNull] public static FdbSubspace Create([NotNull] IFdbTuple tuple) { - return new FdbSubspace(tuple); + if (tuple == null) throw new ArgumentNullException("tuple"); + return new FdbSubspace(tuple.ToSlice(), true); } /// Clone this subspace /// New Subspace that uses the same prefix key /// Hint: Cloning a special Subspace like a or will not keep all the "special abilities" of the parent. [NotNull] - public FdbSubspace Copy() - { - //SPOILER WARNING: You didn't hear it from me, but some say that you can use this to bypass the fact that FdbDirectoryPartition.get_Key and ToRange() throws in v2.x ... 
If you bypass this protection and bork your database, don't come crying! - return new FdbSubspace(this.InternalKey.Memoize()); - } - - #endregion - - #region Partition... - - /// Returns the key to use when creating direct keys that are inside this subspace - /// Prefix that must be added to all keys created by this subspace - /// Subspaces that disallow the creation of keys should override this method and throw an exception - protected virtual Slice GetKeyPrefix() + public static FdbSubspace Copy([NotNull] IFdbSubspace subspace) { - return m_rawPrefix; - } - - /// Create a new subspace by adding a suffix to the key of the current subspace. - /// Binary suffix that will be appended to the current prefix - /// New subspace whose prefix is the concatenation of the parent prefix, and - public FdbSubspace this[Slice suffix] - { - // note: there is a difference with the Pyton layer because here we don't use Tuple encoding, but just concat the slices together. - // the .NET equivalent of the subspace.__getitem__(self, name) method would be subspace.Partition(name) or subspace[FdbTuple.Create(name)] ! - [NotNull] - get + var sub = subspace as FdbSubspace; + if (sub != null) { - if (suffix.IsNull) throw new ArgumentException("The subspace key cannot be null. Use Slice.Empty if you want a subspace with no prefix.", "suffix"); - return FdbSubspace.Create(GetKeyPrefix() + suffix); + //SPOILER WARNING: You didn't hear it from me, but some say that you can use this to bypass the fact that FdbDirectoryPartition.get_Key and ToRange() throws in v2.x ... If you bypass this protection and bork your database, don't come crying! 
+ return new FdbSubspace(sub.InternalKey, true); } - } - - IFdbSubspace IFdbSubspace.this[Slice suffix] - { - get { return this[suffix]; } - } - - /// Create a new subspace by adding a to the current subspace's prefix - /// Key that will be appended to the current prefix - /// New subspace whose prefix is the concatenation of the parent prefix, and the packed representation of - public FdbSubspace this[IFdbKey key] - { - [ContractAnnotation("null => halt; notnull => notnull")] - get + else { - if (key == null) throw new ArgumentNullException("key"); - var packed = key.ToFoundationDbKey(); - return packed.Count == 0 ? this : FdbSubspace.Create(GetKeyPrefix() + packed); + return new FdbSubspace(subspace.Key, true); } } - IFdbSubspace IFdbSubspace.this[IFdbKey key] - { - get { return this[key]; } - } - #endregion #region IFdbKey... @@ -185,6 +151,36 @@ public Slice Key get { return GetKeyPrefix(); } } + /// Returns the key to use when creating direct keys that are inside this subspace + /// Prefix that must be added to all keys created by this subspace + /// Subspaces that disallow the creation of keys should override this method and throw an exception + [DebuggerStepThrough] + protected virtual Slice GetKeyPrefix() + { + return m_rawPrefix; + } + + /// Return a view of all the possible binary keys of this subspace + public FdbSubspaceKeys Keys + { + [DebuggerStepThrough] + get { return m_keys; } + } + + public FdbSubspacePartition Partition + { + //note: not cached, because this is probably not be called frequently (except in the init path) + [DebuggerStepThrough] + get { return new FdbSubspacePartition(this); } + } + + /// Return a view of all the possible tuple-based keys of this subspace + public FdbSubspaceTuples Tuples + { + [DebuggerStepThrough] + get { return m_tuples; } + } + /// Tests whether the specified starts with this Subspace's prefix, indicating that the Subspace logically contains . 
/// The key to be tested /// The key Slice.Nil is not contained by any Subspace, so subspace.Contains(Slice.Nil) will always return false @@ -195,116 +191,127 @@ public virtual bool Contains(Slice key) /// Append a key to the subspace key /// This is the equivalent of calling 'subspace.Key + key' - public Slice Concat(Slice key) + public Slice ConcatKey(Slice key) { - return Slice.Concat(GetKeyPrefix(), key); + //REVIEW: what to do with Slice.Nil? + return GetKeyPrefix().Concat(key); + } + + /// Merge an array of keys with the subspace's prefix, all sharing the same buffer + /// Array of keys to pack + /// Array of slices (for all keys) that share the same underlying buffer + [NotNull] + public Slice[] ConcatKeys([NotNull] IEnumerable keys) + { + if (keys == null) throw new ArgumentNullException("keys"); + //REVIEW: what to do with keys that are Slice.Nil ? + return Slice.ConcatRange(GetKeyPrefix(), keys); } /// Remove the subspace prefix from a binary key, and only return the tail, or Slice.Nil if the key does not fit inside the namespace /// Complete key that contains the current subspace prefix, and a binary suffix + /// If true, verify that is inside the bounds of the subspace /// Binary suffix of the key (or Slice.Empty is the key is exactly equal to the subspace prefix). If the key is outside of the subspace, returns Slice.Nil /// This is the inverse operation of - public Slice Extract(Slice key) + /// If is true and is outside the current subspace. + public Slice ExtractKey(Slice key, bool boundCheck = false) { if (key.IsNull) return Slice.Nil; var prefix = GetKeyPrefix(); if (!key.StartsWith(prefix)) { - // or should we throw ? + if (boundCheck) FailKeyOutOfBound(key); return Slice.Nil; } return key.Substring(prefix.Count); } - //REVIEW: add Extract() where TKey : IFdbKey ? 
- /// Remove the subspace prefix from a batch of binary keys, and only return the tail, or Slice.Nil if a key does not fit inside the namespace - /// Array of complete keys that contains the current subspace prefix, and a binary suffix + /// Sequence of complete keys that contains the current subspace prefix, and a binary suffix + /// If true, verify that each key in is inside the bounds of the subspace /// Array of only the binary suffix of the keys, Slice.Empty for a key that is exactly equal to the subspace prefix, or Slice.Nil for a key that is outside of the subspace + /// If is true and at least one key in is outside the current subspace. [NotNull] - public Slice[] Extract([NotNull] Slice[] keys) - { //REVIEW: rename to ExtractRange ? + public Slice[] ExtractKeys([NotNull] IEnumerable keys, bool boundCheck = false) + { if (keys == null) throw new ArgumentNullException("keys"); var prefix = GetKeyPrefix(); - var results = new Slice[keys.Length]; - for (int i = 0; i < keys.Length; i++) - { - if (keys[i].StartsWith(prefix)) + + var arr = keys as Slice[]; + if (arr != null) + { // fast-path for Sice[] (frequent for range reads) + + var res = new Slice[arr.Length]; + for (int i = 0; i < arr.Length; i++) { - results[i] = keys[i].Substring(prefix.Count); + if (arr[i].StartsWith(prefix)) + { + res[i] = arr[i].Substring(prefix.Count); + } + else if (boundCheck) + { + FailKeyOutOfBound(arr[i]); + } } + return res; } - - return results; - } - - /// Remove the subspace prefix from a binary key, or throw if the key does not belong to this subspace - /// Complete key that contains the current subspace prefix, and a binary suffix. - /// Binary suffix of the key (or Slice.Empty is the key is exactly equal to the subspace prefix). If the key is equal to Slice.Nil, then it will be returned unmodified. If the key is outside of the subspace, the method throws. - /// If key is outside the current subspace. 
- public Slice ExtractAndCheck(Slice key) - { - if (key.IsNull) return Slice.Nil; - - var prefix = GetKeyPrefix(); - - // ensure that the key starts with the prefix - if (!key.StartsWith(prefix)) FailKeyOutOfBound(key); - - return key.Substring(prefix.Count); - } - - [NotNull] - public Slice[] ExtractAndCheck([NotNull] Slice[] keys) - { - if (keys == null) throw new ArgumentNullException("keys"); - - var prefix = GetKeyPrefix(); - var results = new Slice[keys.Length]; - for (int i = 0; i < keys.Length; i++) - { - var key = keys[i]; - if (!key.IsNull) + else + { // slow path for the rest + var coll = keys as ICollection; + var res = coll != null ? new List(coll.Count) : new List(); + foreach(var key in keys) { - if (!key.StartsWith(prefix)) FailKeyOutOfBound(key); - results[i] = key.Substring(prefix.Count); + if (key.StartsWith(prefix)) + { + res.Add(key.Substring(prefix.Count)); + } + else if (boundCheck) + { + FailKeyOutOfBound(key); + } } + return res.ToArray(); } - return results; - } - - /// Gets a key range respresenting all keys strictly within the Subspace. - /// Key range that, when passed to ClearRange() or GetRange(), would clear or return all the keys contained by this subspace, excluding the subspace prefix itself. - public FdbKeyRange ToRange() - { - return ToRange(Slice.Nil); } /// Gets a key range respresenting all keys strictly within a sub-section of this Subspace. /// Suffix added to the subspace prefix /// Key range that, when passed to ClearRange() or GetRange(), would clear or return all the keys contained by this subspace, excluding the subspace prefix itself. - public virtual FdbKeyRange ToRange(Slice suffix) + public virtual FdbKeyRange ToRange(Slice suffix = default(Slice)) { - return FdbTuple.ToRange(GetKeyPrefix().Concat(suffix)); + if (suffix.IsPresent) + return FdbTuple.ToRange(GetKeyPrefix().Concat(suffix)); + else + return FdbTuple.ToRange(GetKeyPrefix()); } #endregion #region IEquatable / IComparable... 
- public int CompareTo(FdbSubspace other) + public int CompareTo(IFdbSubspace other) { if (other == null) return +1; if (object.ReferenceEquals(this, other)) return 0; - return this.InternalKey.CompareTo(other.InternalKey); + var sub = other as FdbSubspace; + if (sub != null) + return this.InternalKey.CompareTo(sub.InternalKey); + else + return this.InternalKey.CompareTo(other.ToFoundationDbKey()); } - public bool Equals(FdbSubspace other) + public bool Equals(IFdbSubspace other) { - return other != null && (object.ReferenceEquals(this, other) || this.InternalKey.Equals(other.InternalKey)); + if (other == null) return false; + if (object.ReferenceEquals(this, other)) return true; + var sub = other as FdbSubspace; + if (sub != null) + return this.InternalKey.Equals(sub.InternalKey); + else + return this.InternalKey.Equals(other.ToFoundationDbKey()); } public override bool Equals(object obj) @@ -339,7 +346,7 @@ public Slice BoundCheck(Slice key, bool allowSystemKeys) // The key is outside the bounds, and must be corrected // > return empty if we are before // > return \xFF if we are after - if (key < GetKeyPrefix()) + if (key < prefix) return Slice.Empty; else return FdbKey.System; @@ -369,7 +376,7 @@ public virtual string DumpKey(Slice key) public override string ToString() { - return String.Format("Subspace({0})", this.InternalKey.ToString()); + return String.Format(CultureInfo.InvariantCulture, "Subspace({0})", this.InternalKey.ToString()); } #endregion diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs b/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs index 17b2d5a2b..c6110cf66 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs @@ -41,18 +41,17 @@ namespace FoundationDB.Client public static class FdbSubspaceExtensions { - #region FDB API... 
- /// Clear the entire content of a subspace - public static void ClearRange(this IFdbTransaction trans, [NotNull] FdbSubspace subspace) + public static void ClearRange(this IFdbTransaction trans, [NotNull] IFdbSubspace subspace) { Contract.Requires(trans != null && subspace != null); - trans.ClearRange(FdbKeyRange.StartsWith(subspace.Key)); + //BUGBUG: should we call subspace.ToRange() ? + trans.ClearRange(FdbKeyRange.StartsWith(subspace.ToFoundationDbKey())); } /// Clear the entire content of a subspace - public static Task ClearRangeAsync(this IFdbTransactional db, [NotNull] FdbSubspace subspace, CancellationToken cancellationToken) + public static Task ClearRangeAsync(this IFdbTransactional db, [NotNull] IFdbSubspace subspace, CancellationToken cancellationToken) { if (db == null) throw new ArgumentNullException("db"); if (subspace == null) throw new ArgumentNullException("subspace"); @@ -62,625 +61,36 @@ public static Task ClearRangeAsync(this IFdbTransactional db, [NotNull] FdbSubsp /// Returns all the keys inside of a subspace [NotNull] - public static FdbRangeQuery> GetRangeStartsWith(this IFdbReadOnlyTransaction trans, [NotNull] FdbSubspace subspace, FdbRangeOptions options = null) + public static FdbRangeQuery> GetRangeStartsWith(this IFdbReadOnlyTransaction trans, [NotNull] IFdbSubspace subspace, FdbRangeOptions options = null) { + //REVIEW: should we remove this method? 
Contract.Requires(trans != null && subspace != null); - return trans.GetRange(FdbKeyRange.StartsWith(subspace.Key), options); + return trans.GetRange(FdbKeyRange.StartsWith(subspace.ToFoundationDbKey()), options); } - /// Read a key inside a subspace - /// - /// Both lines are equivalent: - /// tr.GetAsync(new FdbSubspace("Hello"), FdbTuple.Create("World")); - /// tr.GetAsync(FdbTuple.Create("Hello", "World")); - /// - public static Task GetAsync(this IFdbReadOnlyTransaction trans, [NotNull] FdbSubspace subspace, IFdbTuple key) - { - Contract.Requires(trans != null && subspace != null); - - return trans.GetAsync(subspace.Pack(key)); - } - - /// Write a key inside a subspace - /// - /// Both lines are equivalent: - /// tr.Set(new FdbSubspace("Hello"), FdbTuple.Create("World"), some_value); - /// tr.Set(FdbTuple.Create("Hello", "World"), some_value); - /// - public static void Set(this IFdbTransaction trans, [NotNull] FdbSubspace subspace, [NotNull] IFdbTuple key, Slice value) - { - Contract.Requires(trans != null && subspace != null && key != null); - - trans.Set(subspace.Pack(key), value); - } - - #endregion - - #region Contains... - /// Tests whether the specified starts with this Subspace's prefix, indicating that the Subspace logically contains . /// The key to be tested /// If is null - public static bool Contains(this FdbSubspace subspace, [NotNull] TKey key) + public static bool Contains([NotNull] this IFdbSubspace subspace, [NotNull] TKey key) where TKey : IFdbKey { + if (subspace == null) throw new ArgumentNullException("subspace"); if (key == null) throw new ArgumentNullException("key"); return subspace.Contains(key.ToFoundationDbKey()); } - #endregion - - #region Concat... 
- - /// Append a key to the subspace key - public static Slice Concat(this IFdbSubspace subspace, Slice key) - { - return Slice.Concat(subspace.ToFoundationDbKey(), key); - } - - /// Append a key to the subspace key - /// type of the key, must implements IFdbKey - /// - /// Return Slice : 'subspace.Key + key' - public static Slice Concat(this IFdbSubspace subspace, [NotNull] TKey key) - where TKey : IFdbKey - { - if (key == null) throw new ArgumentNullException("key"); - return Slice.Concat(subspace.ToFoundationDbKey(), key.ToFoundationDbKey()); - } - - /// Merge an array of keys with the subspace's prefix, all sharing the same buffer - /// Array of keys to pack - /// Array of slices (for all keys) that share the same underlying buffer - [NotNull] - public static Slice[] ConcatRange(this IFdbSubspace subspace, params Slice[] keys) - { - if (keys == null) throw new ArgumentNullException("keys"); - return subspace.ToFoundationDbKey().ConcatRange(keys); - } - - /// Merge a sequence of keys with the subspace's prefix, all sharing the same buffer - /// Sequence of keys to pack - /// Array of slices (for all keys) that share the same underlying buffer - [NotNull] - public static Slice[] ConcatRange(this IFdbSubspace subspace, [NotNull] IEnumerable keys) - { - if (keys == null) throw new ArgumentNullException("keys"); - return subspace.ToFoundationDbKey().ConcatRange(keys); - } - - /// Append a sequence of keys with the subspace's prefix, all sharing the same buffer - /// type of the key, must implements IFdbKey - /// - /// Return Slice : 'subspace.Key + key' - [NotNull] - public static Slice[] ConcatRange(this IFdbSubspace subspace, [NotNull] IEnumerable keys) - where TKey : IFdbKey - { - if (keys == null) throw new ArgumentNullException("keys"); - return subspace.ToFoundationDbKey().ConcatRange(keys.Select((key) => key.ToFoundationDbKey())); - } - - #endregion - - #region Partition... 
- - /// Partition this subspace into a child subspace - /// Type of the child subspace key - /// Value of the child subspace - /// New subspace that is logically contained by the current subspace - /// Subspace([Foo, ]).Partition(Bar) is equivalent to Subspace([Foo, Bar, ]) - /// - /// new FdbSubspace(["Users", ]).Partition("Contacts") == new FdbSubspace(["Users", "Contacts", ]) - /// - [NotNull] - public static FdbSubspace Partition(this IFdbSubspace subspace, T value) - { - //TODO: this should go into a FdbTupleSubspace, because it collides with FdbEncoderSubspace ! - return new FdbSubspace(FdbTuple.PackWithPrefix(subspace.ToFoundationDbKey(), value)); - } - - /// Partition this subspace into a child subspace - /// Type of the first subspace key - /// Type of the second subspace key - /// Value of the first subspace key - /// Value of the second subspace key - /// New subspace that is logically contained by the current subspace - /// Subspace([Foo, ]).Partition(Bar, Baz) is equivalent to Subspace([Foo, Bar, Baz]) - /// - /// new FdbSubspace(["Users", ]).Partition("Contacts", "Friends") == new FdbSubspace(["Users", "Contacts", "Friends", ]) - /// - [NotNull] - public static FdbSubspace Partition(this IFdbSubspace subspace, T1 value1, T2 value2) - { - //TODO: this should go into a FdbTupleSubspace, because it collides with FdbEncoderSubspace ! 
- return new FdbSubspace(FdbTuple.PackWithPrefix(subspace.ToFoundationDbKey(), value1, value2)); - } - - /// Partition this subspace into a child subspace - /// Type of the first subspace key - /// Type of the second subspace key - /// Type of the third subspace key - /// Value of the first subspace key - /// Value of the second subspace key - /// Value of the third subspace key - /// New subspace that is logically contained by the current subspace - /// - /// new FdbSubspace(["Users", ]).Partition("John Smith", "Contacts", "Friends") == new FdbSubspace(["Users", "John Smith", "Contacts", "Friends", ]) - /// - [NotNull] - public static FdbSubspace Partition(this IFdbSubspace subspace, T1 value1, T2 value2, T3 value3) - { - //TODO: this should go into a FdbTupleSubspace, because it collides with FdbEncoderSubspace ! - return new FdbSubspace(FdbTuple.PackWithPrefix(subspace.ToFoundationDbKey(), value1, value2, value3)); - } - - /// Partition this subspace into a child subspace - /// Type of the first subspace key - /// Type of the second subspace key - /// Type of the third subspace key - /// Type of the fourth subspace key - /// Value of the first subspace key - /// Value of the second subspace key - /// Value of the third subspace key - /// Value of the fourth subspace key - /// New subspace that is logically contained by the current subspace - /// - /// new FdbSubspace(["Users", ]).Partition("John Smith", "Contacts", "Friends", "Messages") == new FdbSubspace(["Users", "John Smith", "Contacts", "Friends", "Messages", ]) - /// - [NotNull] - public static FdbSubspace Partition(this IFdbSubspace subspace, T1 value1, T2 value2, T3 value3, T4 value4) - { - //TODO: this should go into a FdbTupleSubspace, because it collides with FdbEncoderSubspace ! 
- return new FdbSubspace(FdbTuple.PackWithPrefix(subspace.ToFoundationDbKey(), value1, value2, value3, value4)); - } - - /// Parition this subspace by appending a tuple - /// Tuple that will be used for this partition - /// New subspace that is creating by combining the namespace prefix and - /// Subspace([Foo, ]).Partition([Bar, Baz, ]) is equivalent to Subspace([Foo, Bar, Baz,]) - /// - /// new FdbSubspace(["Users", ]).Partition(["Contacts", "Friends", ]) => new FdbSubspace(["Users", "Contacts", "Friends", ]) - /// - [NotNull] - public static FdbSubspace Partition(this IFdbSubspace subspace, [NotNull] IFdbTuple tuple) - { - if (tuple == null) throw new ArgumentNullException("tuple"); - if (tuple.Count == 0) - return new FdbSubspace(subspace.ToFoundationDbKey()); - else - return new FdbSubspace(FdbTuple.PackWithPrefix(subspace.ToFoundationDbKey(), tuple)); - } - - /// Partition this subspace into a child subspace - /// a ITupleFormattable, .ToTuple() will be used for this partition - /// New subspace that is creating by combining the namespace prefix and - /// Subspace([Foo, ]).Partition(Bar) is equivalent to Subspace([Foo, Bar, ]) - /// - /// new FdbSubspace(["Users", ]).Partition("Contacts") == new FdbSubspace(["Users", "Contacts", ]) - /// - [NotNull] - public static FdbSubspace Partition(this IFdbSubspace subspace, [NotNull] ITupleFormattable formattable) - { - if (formattable == null) throw new ArgumentNullException("formattable"); - var tuple = formattable.ToTuple(); - if (tuple == null) throw new InvalidOperationException("Formattable item returned an empty tuple"); - return Partition(subspace, tuple); - } - - #endregion - - #region Tuples... 
- - /// Return an empty tuple that is attached to this subspace - /// Empty tuple that can be extended, and whose packed representation will always be prefixed by the subspace key - [NotNull] - public static IFdbTuple ToTuple(this IFdbSubspace subspace) - { - return new FdbPrefixedTuple(subspace.ToFoundationDbKey(), FdbTuple.Empty); - } - - /// Attach a tuple to an existing subspace. - /// Tuple whose items will be appended at the end of the current subspace - /// Tuple that wraps the items of and whose packed representation will always be prefixed by the subspace key. - [NotNull] - public static IFdbTuple Append(this IFdbSubspace subspace, IFdbTuple tuple) - { - return new FdbPrefixedTuple(subspace.ToFoundationDbKey(), tuple); - } - - [NotNull] - public static IFdbTuple AppendBoxed(this IFdbSubspace subspace, object value) - { - return new FdbPrefixedTuple(subspace.ToFoundationDbKey(), FdbTuple.CreateBoxed(value)); - } - - /// Convert a formattable item into a tuple that is attached to this subspace. - /// Item that can be converted into a tuple - /// Tuple that is the logical representation of the item, and whose packed representation will always be prefixed by the subspace key. - /// This is the equivalent of calling 'subspace.Create(formattable.ToTuple())' - [NotNull] - public static IFdbTuple Append(this IFdbSubspace subspace, [NotNull] ITupleFormattable formattable) - { - if (formattable == null) throw new ArgumentNullException("formattable"); - var tuple = formattable.ToTuple(); - if (tuple == null) throw new InvalidOperationException("Formattable item cannot return an empty tuple"); - return new FdbPrefixedTuple(subspace.ToFoundationDbKey(), tuple); - } - - /// Create a new 1-tuple that is attached to this subspace - /// Type of the value to append - /// Value that will be appended - /// Tuple of size 1 that contains , and whose packed representation will always be prefixed by the subspace key. 
- /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T>(value))' - [NotNull] - public static IFdbTuple Append(this IFdbSubspace subspace, T value) - { - return new FdbPrefixedTuple(subspace.ToFoundationDbKey(), FdbTuple.Create(value)); - } - - /// Create a new 2-tuple that is attached to this subspace - /// Type of the first value to append - /// Type of the second value to append - /// First value that will be appended - /// Second value that will be appended - /// Tuple of size 2 that contains and , and whose packed representation will always be prefixed by the subspace key. - /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2>(value1, value2))' - [NotNull] - public static IFdbTuple Append(this IFdbSubspace subspace, T1 value1, T2 value2) - { - return new FdbPrefixedTuple(subspace.ToFoundationDbKey(), FdbTuple.Create(value1, value2)); - } - - /// Create a new 3-tuple that is attached to this subspace - /// Type of the first value to append - /// Type of the second value to append - /// Type of the third value to append - /// First value that will be appended - /// Second value that will be appended - /// Third value that will be appended - /// Tuple of size 3 that contains , and , and whose packed representation will always be prefixed by the subspace key. 
- /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2, T3>(value1, value2, value3))' - [NotNull] - public static IFdbTuple Append(this IFdbSubspace subspace, T1 value1, T2 value2, T3 value3) - { - return new FdbPrefixedTuple(subspace.ToFoundationDbKey(), FdbTuple.Create(value1, value2, value3)); - } - - /// Create a new 4-tuple that is attached to this subspace - /// Type of the first value to append - /// Type of the second value to append - /// Type of the third value to append - /// Type of the fourth value to append - /// First value that will be appended - /// Second value that will be appended - /// Third value that will be appended - /// Fourth value that will be appended - /// Tuple of size 4 that contains , , and , and whose packed representation will always be prefixed by the subspace key. - /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2, T3, T4>(value1, value2, value3, value4))' - [NotNull] - public static IFdbTuple Append(this IFdbSubspace subspace, T1 value1, T2 value2, T3 value3, T4 value4) - { - return new FdbPrefixedTuple(subspace.ToFoundationDbKey(), FdbTuple.Create(value1, value2, value3, value4)); - } - - /// Create a new N-tuple that is attached to this subspace - /// Array of items of the new tuple - /// Tuple of size .Length, and whose packed representation will always be prefixed by the subspace key. 
- /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create(items))' - [NotNull] - public static IFdbTuple AppendBoxed(this IFdbSubspace subspace, params object[] items) - { //REVIEW: Append(arrayOfObjects) is ambiguous with Append(new object[] { arrayOfObjects }) because an object[] is also an object - return Append(subspace, FdbTuple.Create(items)); - } - - /// Create a new key by appending a formattable object to the current subspace - /// Tuple to pack (can be empty) - /// Key the correspond to the concatenation of the current subspace's prefix and the packed representation of - public static Slice Pack(this IFdbSubspace subspace, IFdbTuple tuple) - { - //REVIEW: this class should have a different name, because subspace.Pack(IFdbTuple ...) has a different behavior than subspace.Pack(...) ! - return FdbTuple.PackWithPrefix(subspace.ToFoundationDbKey(), tuple); - } - - /// Create a new key by appending a value to the current subspace - /// Value that will be appended at the end of the key - /// Key the correspond to the concatenation of the current subspace's prefix and - /// tuple.PackBoxed(x) is the non-generic equivalent of tuple.Pack<object>(tuple) - public static Slice PackBoxed(this IFdbSubspace subspace, object item) - { - return FdbTuple.PackBoxedWithPrefix(subspace.ToFoundationDbKey(), item); - } - - /// Create a new key by appending a value to the current subspace - /// Type of the value - /// Value that will be appended at the end of the key - /// Key the correspond to the concatenation of the current subspace's prefix and - /// tuple.Pack(x) is equivalent to tuple.Append(x).ToSlice() - public static Slice Pack(this IFdbSubspace subspace, T key) - { - return FdbTuple.PackWithPrefix(subspace.ToFoundationDbKey(), key); - } - - /// Create a new key by appending two values to the current subspace - /// Type of the next to last value - /// Type of the last value - /// Value that will be in the next to last position - /// Value that will be in 
the last position - /// Key the correspond to the concatenation of the current subspace's prefix, and - /// (...,).Pack(x, y) is equivalent to (...,).Append(x).Append(y).ToSlice() - public static Slice Pack(this IFdbSubspace subspace, T1 key1, T2 key2) - { - return FdbTuple.PackWithPrefix(subspace.ToFoundationDbKey(), key1, key2); - } - - /// Create a new key by appending three values to the current subspace - /// Type of the first value - /// Type of the second value - /// Type of the thrid value - /// Value that will be appended first - /// Value that will be appended second - /// Value that will be appended third - /// Key the correspond to the concatenation of the current subspace's prefix, , and - /// tuple.Pack(x, y, z) is equivalent to tuple.Append(x).Append(y).Append(z).ToSlice() - public static Slice Pack(this IFdbSubspace subspace, T1 key1, T2 key2, T3 key3) - { - return FdbTuple.PackWithPrefix(subspace.ToFoundationDbKey(), key1, key2, key3); - } - - /// Create a new key by appending three values to the current subspace - /// Type of the first value - /// Type of the second value - /// Type of the third value - /// Type of the fourth value - /// Value that will be appended first - /// Value that will be appended second - /// Value that will be appended third - /// Value that will be appended fourth - /// Key the correspond to the concatenation of the current subspace's prefix, , , and - /// tuple.Pack(w, x, y, z) is equivalent to tuple.Append(w).Append(x).Append(y).Append(z).ToSlice() - public static Slice Pack(this IFdbSubspace subspace, T1 key1, T2 key2, T3 key3, T4 key4) - { - return FdbTuple.PackWithPrefix(subspace.ToFoundationDbKey(), key1, key2, key3, key4); - } - - /// Pack a sequence of tuples, all sharing the same buffer - /// Sequence of N-tuples to pack - /// Array containing the buffer segment of each packed tuple - /// BatchPack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] - [NotNull] - public 
static Slice[] PackRange(this IFdbSubspace subspace, params IFdbTuple[] tuples) - { - return FdbTuple.PackRange(subspace.ToFoundationDbKey(), tuples); - } - - /// Pack a sequence of tuples, all sharing the same buffer - /// Sequence of N-tuples to pack - /// Array containing the buffer segment of each packed tuple - /// BatchPack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] - [NotNull] - public static Slice[] PackRange(this IFdbSubspace subspace, [NotNull] IEnumerable tuples) - { - return FdbTuple.PackRange(subspace.ToFoundationDbKey(), tuples); - } - - /// Merge a sequence of keys with the subspace's prefix, all sharing the same buffer - /// Type of the keys - /// Sequence of keys to pack - /// Array of slices (for all keys) that share the same underlying buffer - [NotNull] - public static Slice[] PackRange(this IFdbSubspace subspace, [NotNull] IEnumerable keys) - { - return FdbTuple.PackRange(subspace.ToFoundationDbKey(), keys); - } - - /// Merge a sequence of keys with the subspace's prefix, all sharing the same buffer - /// Type of the keys - /// Sequence of keys to pack - /// Array of slices (for all keys) that share the same underlying buffer - [NotNull] - public static Slice[] PackRange(this IFdbSubspace subspace, [NotNull] T[] keys) - { - return FdbTuple.PackRange(subspace.ToFoundationDbKey(), keys); - } - - /// Merge a sequence of elements with the subspace's prefix, all sharing the same buffer - /// Type of the elements - /// Type of the keys extracted from the elements - /// Sequence of elements to pack - /// Lambda that extract the key from each element - /// Array of slices (for all keys) that share the same underlying buffer - [NotNull] - public static Slice[] PackRange(this IFdbSubspace subspace, [NotNull] TElement[] elements, Func selector) - { - return FdbTuple.PackRange(subspace.ToFoundationDbKey(), elements, selector); - } - - /// Pack a sequence of keys with the subspace's prefix, all sharing the 
same buffer - /// Sequence of keys to pack - /// Array of slices (for all keys) that share the same underlying buffer - [NotNull] - public static Slice[] PackBoxedRange(this IFdbSubspace subspace, [NotNull] IEnumerable keys) - { - return FdbTuple.PackBoxedRange(subspace.ToFoundationDbKey(), keys); - } - - /// Pack a sequence of keys with the subspace's prefix, all sharing the same buffer - /// Sequence of keys to pack - /// Array of slices (for all keys) that share the same underlying buffer - [NotNull] - public static Slice[] PackBoxedRange(this IFdbSubspace subspace, [NotNull] object[] keys) - { - //note: cannot use "params object[]" because it may conflict with PackRange(IEnumerable) - return FdbTuple.PackBoxedRange(subspace.ToFoundationDbKey(), keys); - } - - #endregion - - #region Unpack... - - //REVIEW: right now we can't hook these methods to the IFdbKey interface because we need "ExtractAndCheck" that is on defined on FdbSubspace - - /// Unpack a key into a tuple, with the subspace prefix removed - /// Packed version of a key that should fit inside this subspace. - /// Unpacked tuple that is relative to the current subspace, or null if the key is equal to Slice.Nil - /// new Subspace([FE]).Unpack([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => ("hello", 1,) - /// If the unpacked tuple is not contained in this subspace - [CanBeNull] - public static IFdbTuple Unpack(this FdbSubspace subspace, Slice key) - { - // We special case 'Slice.Nil' because it is returned by GetAsync(..) 
when the key does not exist - // This is to simplifiy decoding logic where the caller could do "var foo = FdbTuple.Unpack(await tr.GetAsync(...))" and then only have to test "if (foo != null)" - if (key.IsNull) return null; - - return new FdbPrefixedTuple(subspace.Key, FdbTuple.Unpack(subspace.ExtractAndCheck(key))); - } - - /// Unpack a key into a tuple, and return only the first element - /// Expected type of the last element - /// Packed version of a key that should fit inside this subspace - /// Converted value of the last element of the tuple - /// new Subspace([FE]).UnpackLast<int>([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => (string) "Hello" - public static T UnpackFirst(this FdbSubspace subspace, Slice key) - { - return FdbTuple.UnpackFirst(subspace.ExtractAndCheck(key)); - } - - /// Unpack a key into a tuple, and return only the last element - /// Expected type of the last element - /// Packed version of a key that should fit inside this subspace - /// Converted value of the last element of the tuple - /// new Subspace([FE]).UnpackLast<int>([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => (int) 1 - public static T UnpackLast(this FdbSubspace subspace, Slice key) - { - return FdbTuple.UnpackLast(subspace.ExtractAndCheck(key)); - } - - /// Unpack a key into a singleton tuple, and return the single element - /// Expected type of the only element - /// Packed version of a key that should fit inside this subspace - /// Converted value of the only element in the tuple. 
Throws an exception if the tuple is empty or contains more than one element - /// new Subspace([FE]).UnpackSingle<int>([FE 02 'H' 'e' 'l' 'l' 'o' 00]) => (string) "Hello" - public static T UnpackSingle(this FdbSubspace subspace, Slice key) - { - return FdbTuple.UnpackSingle(subspace.ExtractAndCheck(key)); - } - - /// Unpack an array of keys in tuples, with the subspace prefix removed - /// Packed version of keys inside this subspace - /// Unpacked tuples that are relative to the current subspace - [NotNull] - public static IFdbTuple[] Unpack(this FdbSubspace subspace, [NotNull] Slice[] keys) - { - if (keys == null) throw new ArgumentNullException("keys"); - - var prefix = subspace.Key; - var tuples = new IFdbTuple[keys.Length]; - - if (keys.Length > 0) - { - for (int i = 0; i < keys.Length; i++) - { - if (keys[i].HasValue) - { - tuples[i] = new FdbPrefixedTuple(prefix, FdbTuple.Unpack(subspace.ExtractAndCheck(keys[i]))); - } - } - } - - return tuples; - } - - /// Unpack an array of key into tuples, and return an array with only the first elements of each tuple - /// Expected type of the first element of all the keys - /// Array of packed keys that should all fit inside this subspace - /// Array containing the converted values of the first elements of each tuples - [NotNull] - public static T[] UnpackFirst(this FdbSubspace subspace, [NotNull] Slice[] keys) - { - if (keys == null) throw new ArgumentNullException("keys"); - - var values = new T[keys.Length]; - - if (keys.Length > 0) - { - for (int i = 0; i < keys.Length; i++) - { - values[i] = FdbTuple.UnpackFirst(subspace.ExtractAndCheck(keys[i])); - } - } - - return values; - } - - /// Unpack an array of key into tuples, and return an array with only the last elements of each tuple - /// Expected type of the last element of all the keys - /// Array of packed keys that should all fit inside this subspace - /// Array containing the converted values of the last elements of each tuples - [NotNull] - public static T[] 
UnpackLast(this FdbSubspace subspace, [NotNull] Slice[] keys) - { - if (keys == null) throw new ArgumentNullException("keys"); - - var values = new T[keys.Length]; - - if (keys.Length > 0) - { - for (int i = 0; i < keys.Length; i++) - { - values[i] = FdbTuple.UnpackLast(subspace.ExtractAndCheck(keys[i])); - } - } - - return values; - } - - /// Unpack an array of key into singleton tuples, and return an array with value of each tuple - /// Expected type of the only element of all the keys - /// Array of packed keys that should all fit inside this subspace - /// Array containing the converted values of the only elements of each tuples. Throws an exception if one key contains more than one element - [NotNull] - public static T[] UnpackSingle(this FdbSubspace subspace, [NotNull] Slice[] keys) - { - if (keys == null) throw new ArgumentNullException("keys"); - - var values = new T[keys.Length]; - - if (keys.Length > 0) - { - for (int i = 0; i < keys.Length; i++) - { - values[i] = FdbTuple.UnpackSingle(subspace.ExtractAndCheck(keys[i])); - } - } - - return values; - } - - #endregion - - #region ToRange... - - public static FdbKeyRange ToRange(this FdbSubspace subspace, [NotNull] TKey key) + public static FdbKeyRange ToRange([NotNull] this IFdbSubspace subspace, [NotNull] TKey key) where TKey : IFdbKey { if (key == null) throw new ArgumentNullException("key"); return subspace.ToRange(key.ToFoundationDbKey()); } - /// Gets a key range representing all keys in the Subspace strictly starting with the specified Tuple. 
- public static FdbKeyRange ToRange(this FdbSubspace subspace, [NotNull] IFdbTuple tuple) - { - if (tuple == null) throw new ArgumentNullException("tuple"); - return subspace.ToRange(tuple.ToSlice()); - } - - public static FdbKeySelectorPair ToSelectorPair(this FdbSubspace subspace) + public static FdbKeySelectorPair ToSelectorPair([NotNull] this IFdbSubspace subspace) { return FdbKeySelectorPair.Create(subspace.ToRange()); } - #endregion } } diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceKeys.cs b/FoundationDB.Client/Subspaces/FdbSubspaceKeys.cs new file mode 100644 index 000000000..23fe4f542 --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbSubspaceKeys.cs @@ -0,0 +1,159 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +namespace FoundationDB.Client +{ + using FoundationDB.Client.Utils; + using JetBrains.Annotations; + using System; + using System.Linq; + using System.Collections.Generic; + + public struct FdbSubspaceKeys + { + private readonly IFdbSubspace m_subspace; + + public FdbSubspaceKeys(IFdbSubspace subspace) + { + Contract.Requires(subspace != null); + m_subspace = subspace; + } + + public IFdbSubspace Subspace + { + [NotNull] //note: except for corner cases like default(FdbTupleSubspace) or unallocated value + get + { return m_subspace; } + } + + /// + /// + /// + /// + /// + /// This is an alias to calling subspace.Keys.Concat(key) + public Slice this[Slice key] + { + get { return m_subspace.ConcatKey(key); } + } + + /// Append a serializable key to the subspace key + /// Instance that can serialize itself into a binary key + /// Return Slice : 'subspace.Key + key.ToFoundationDbKey()' + /// This is an alias to calling subspace.Keys.Concat<IFdbKey>(key) + public Slice this[[NotNull] IFdbKey key] + { + get + { + if (key == null) throw new ArgumentNullException("key"); + return m_subspace.ConcatKey(key.ToFoundationDbKey()); + } + } + + public Slice Concat(Slice key) + { + return m_subspace.ConcatKey(key); + } + + + public Slice Concat([NotNull] TKey key) + where TKey : IFdbKey + { + if (key == null) throw new ArgumentNullException("key"); + return m_subspace.ConcatKey(key.ToFoundationDbKey()); + } + + [NotNull] + public Slice[] Concat([NotNull] IEnumerable keys) + { + return 
m_subspace.ConcatKeys(keys); + } + + [NotNull] + public Slice[] Concat([NotNull] params Slice[] keys) + { + return m_subspace.ConcatKeys(keys); + } + + [NotNull] + public Slice[] Concat([NotNull] IEnumerable keys) + where TKey : IFdbKey + { + if (keys == null) throw new ArgumentNullException("keys"); + return m_subspace.ConcatKeys(keys.Select((key) => key.ToFoundationDbKey())); + } + + [NotNull] + public Slice[] Concat([NotNull] params TKey[] keys) + where TKey : IFdbKey + { + return Concat((IEnumerable)keys); + } + + public Slice BoundCheck(Slice key) + { + return m_subspace.BoundCheck(key, allowSystemKeys: true); + } + + public Slice Extract(Slice key) + { + return m_subspace.ExtractKey(key, boundCheck: true); + } + + [NotNull] + public Slice[] Extract([NotNull] params Slice[] keys) + { + return m_subspace.ExtractKeys(keys, boundCheck: true); + } + + [NotNull] + public Slice[] Extract([NotNull] IEnumerable keys) + { + return m_subspace.ExtractKeys(keys, boundCheck: true); + } + + public FdbKeyRange ToRange() + { + return m_subspace.ToRange(); + } + + public FdbKeyRange ToRange(Slice key) + { + return m_subspace.ToRange(key); + } + + public FdbKeyRange ToRange([NotNull] IFdbKey key) + where TKey : IFdbKey + { + if (key == null) throw new ArgumentNullException("key"); + return m_subspace.ToRange(key.ToFoundationDbKey()); + } + + } + +} diff --git a/FoundationDB.Client/Subspaces/FdbSubspacePartition.cs b/FoundationDB.Client/Subspaces/FdbSubspacePartition.cs new file mode 100644 index 000000000..458f72c16 --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbSubspacePartition.cs @@ -0,0 +1,170 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +namespace FoundationDB.Client +{ + using FoundationDB.Layers.Tuples; + using JetBrains.Annotations; + using System; + using System.Linq; + using System.Collections.Generic; + + public struct FdbSubspacePartition + { + private readonly IFdbSubspace m_subspace; + + public FdbSubspacePartition(IFdbSubspace subspace) + { + if (subspace == null) throw new ArgumentNullException("subspace"); + m_subspace = subspace; + } + + public IFdbSubspace Subspace + { + get { return m_subspace; } + } + + /// Create a new subspace by appdending a suffix to the current subspace + /// Suffix of the new subspace + /// New subspace with prefix equal to the current subspace's prefix, followed by + public IFdbSubspace this[Slice suffix] + { + [NotNull] + get + { + if (suffix.IsNull) throw new ArgumentException("Partition suffix cannot be null", "suffix"); + //TODO: find a way to limit the number of copies of the key? + return new FdbSubspace(m_subspace.ConcatKey(suffix)); + } + } + + /// Create a new subspace by adding a to the current subspace's prefix + /// Key that will be appended to the current prefix + /// New subspace whose prefix is the concatenation of the parent prefix, and the packed representation of + public IFdbSubspace this[IFdbKey key] + { + [ContractAnnotation("null => halt; notnull => notnull")] + get + { + if (key == null) throw new ArgumentNullException("key"); + var packed = key.ToFoundationDbKey(); + return this[packed]; + } + } + + public IFdbSubspace this[IFdbTuple tuple] + { + [ContractAnnotation("null => halt; notnull => notnull")] + get + { + if (tuple == null) throw new ArgumentNullException("tuple"); + //TODO: find a way to limit the number of copies of the packed tuple? 
+ return new FdbSubspace(m_subspace.Tuples.Pack(tuple)); + } + } + + public IFdbSubspace this[ITupleFormattable item] + { + [ContractAnnotation("null => halt; notnull => notnull")] + get + { + if (item == null) throw new ArgumentNullException("item"); + var tuple = item.ToTuple(); + if (tuple == null) throw new InvalidOperationException("Formattable item returned an empty tuple"); + return this[tuple]; + } + } + + /// Partition this subspace into a child subspace + /// Type of the child subspace key + /// Value of the child subspace + /// New subspace that is logically contained by the current subspace + /// Subspace([Foo, ]).Partition(Bar) is equivalent to Subspace([Foo, Bar, ]) + /// + /// new FdbSubspace(["Users", ]).Partition("Contacts") == new FdbSubspace(["Users", "Contacts", ]) + /// + [NotNull] + public IFdbSubspace By(T value) + { + return this[FdbTuple.Create(value)]; + } + + /// Partition this subspace into a child subspace + /// Type of the first subspace key + /// Type of the second subspace key + /// Value of the first subspace key + /// Value of the second subspace key + /// New subspace that is logically contained by the current subspace + /// Subspace([Foo, ]).Partition(Bar, Baz) is equivalent to Subspace([Foo, Bar, Baz]) + /// + /// new FdbSubspace(["Users", ]).Partition("Contacts", "Friends") == new FdbSubspace(["Users", "Contacts", "Friends", ]) + /// + [NotNull] + public IFdbSubspace By(T1 value1, T2 value2) + { + return this[FdbTuple.Create(value1, value2)]; + } + + /// Partition this subspace into a child subspace + /// Type of the first subspace key + /// Type of the second subspace key + /// Type of the third subspace key + /// Value of the first subspace key + /// Value of the second subspace key + /// Value of the third subspace key + /// New subspace that is logically contained by the current subspace + /// + /// new FdbSubspace(["Users", ]).Partition("John Smith", "Contacts", "Friends") == new FdbSubspace(["Users", "John Smith", 
"Contacts", "Friends", ]) + /// + [NotNull] + public IFdbSubspace By(T1 value1, T2 value2, T3 value3) + { + return this[FdbTuple.Create(value1, value2, value3)]; + } + + /// Partition this subspace into a child subspace + /// Type of the first subspace key + /// Type of the second subspace key + /// Type of the third subspace key + /// Type of the fourth subspace key + /// Value of the first subspace key + /// Value of the second subspace key + /// Value of the third subspace key + /// Value of the fourth subspace key + /// New subspace that is logically contained by the current subspace + /// + /// new FdbSubspace(["Users", ]).Partition("John Smith", "Contacts", "Friends", "Messages") == new FdbSubspace(["Users", "John Smith", "Contacts", "Friends", "Messages", ]) + /// + [NotNull] + public IFdbSubspace By(T1 value1, T2 value2, T3 value3, T4 value4) + { + return this[FdbTuple.Create(value1, value2, value3, value4)]; + } + + } +} diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs new file mode 100644 index 000000000..f5439ade4 --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs @@ -0,0 +1,498 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +namespace FoundationDB.Client +{ + using FoundationDB.Layers.Tuples; + using JetBrains.Annotations; + using System; + using System.Linq; + using System.Collections.Generic; + using FoundationDB.Client.Utils; + + + /// Provides of methods to encode and decodes keys using the Tuple Encoding format + public struct FdbSubspaceTuples + { + + /// Ref to the parent subspace + private readonly IFdbSubspace m_subspace; + + /// Wraps an existing subspace + /// + public FdbSubspaceTuples(IFdbSubspace subspace) + { + Contract.Requires(subspace != null); + m_subspace = subspace; + } + + public IFdbSubspace Subspace + { + [NotNull] //note: except for corner cases like default(FdbTupleSubspace) or unallocated value + get { return m_subspace; } + } + + public Slice this[[NotNull] IFdbTuple tuple] + { + get { return Pack(tuple); } + } + + public Slice this[[NotNull] ITupleFormattable item] + { + get { return Pack(item); } + } + + /// Return a key that is composed of the subspace prefix, and the packed representation of a tuple. + /// Tuple to pack (can be null or empty) + /// Key which starts with the subspace prefix, followed by the packed representation of . This key can be parsed back to an equivalent tuple by calling . 
+ /// If is null or empty, then the prefix of the subspace is returned. + public Slice Pack([NotNull] IFdbTuple tuple) + { + if (tuple == null) throw new ArgumentNullException("tuple"); + return FdbTuple.PackWithPrefix(m_subspace.Key, tuple); + } + + /// Pack a sequence of tuples, all sharing the same buffer + /// Sequence of N-tuples to pack + /// Array containing the buffer segment of each packed tuple + /// tuple.Pack(new [] { "abc", [ ("Foo", 1), ("Foo", 2) ] }) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] + [NotNull] + public Slice[] Pack([NotNull] IEnumerable tuples) + { + if (tuples == null) throw new ArgumentNullException("tuples"); + + return FdbTuple.PackRangeWithPrefix(m_subspace.Key, tuples); + } + + /// Pack a sequence of tuples, all sharing the same buffer + /// Sequence of N-tuples to pack + /// Array containing the buffer segment of each packed tuple + /// BatchPack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] + [NotNull] + public Slice[] Pack([NotNull] params IFdbTuple[] tuples) + { + return Pack((IEnumerable)tuples); + } + + /// Return a key that is composed of the subspace prefix, and the packed representation of a tuple. + /// Tuple to pack (can be null or empty) + /// Key which starts with the subspace prefix, followed by the packed representation of . This key can be parsed back to an equivalent tuple by calling . + /// If is null or empty, then the prefix of the subspace is returned. 
+ public Slice Pack([NotNull] ITupleFormattable item) + { + if (item == null) throw new ArgumentNullException("item"); + return Pack(item.ToTuple()); + } + + /// Pack a sequence of keys, all sharing the same buffer + /// Sequence of N-tuples to pack + /// Array containing the buffer segment of each packed tuple + /// Pack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] + [NotNull] + public Slice[] Pack([NotNull] IEnumerable items) + { + if (items == null) throw new ArgumentNullException("items"); + + return FdbTuple.PackRangeWithPrefix(m_subspace.Key, items.Select((item) => item.ToTuple())); + } + + /// Pack a sequence of keys, all sharing the same buffer + /// Sequence of N-tuples to pack + /// Array containing the buffer segment of each packed tuple + /// Pack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] + [NotNull] + public Slice[] Pack([NotNull] params ITupleFormattable[] items) + { + return Pack((IEnumerable)items); + } + + /// Unpack a key into a tuple, with the subspace prefix removed + /// Packed version of a key that should fit inside this subspace. + /// Unpacked tuple that is relative to the current subspace, or null if the key is equal to Slice.Nil + /// new Subspace([FE]).Unpack([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => ("hello", 1,) + /// If is equal to the subspace prefix, then an empty tuple is returned. + /// If the unpacked tuple is not contained in this subspace + [CanBeNull] + public IFdbTuple Unpack(Slice key) + { + // We special case 'Slice.Nil' because it is returned by GetAsync(..) 
when the key does not exist + // This is to simplifiy decoding logic where the caller could do "var foo = FdbTuple.Unpack(await tr.GetAsync(...))" and then only have to test "if (foo != null)" + if (key.IsNull) return null; + + return FdbTuple.Unpack(m_subspace.ExtractKey(key, boundCheck: true)); + } + + /// Unpack an sequence of keys into tuples, with the subspace prefix removed + /// Packed version of keys inside this subspace + /// Unpacked tuples that are relative to the current subspace + [NotNull] + public IFdbTuple[] Unpack([NotNull] IEnumerable keys) + { + // return an array with the keys minus the subspace's prefix + var extracted = m_subspace.ExtractKeys(keys, boundCheck: true); + + // unpack everything + var prefix = m_subspace.Key; + var tuples = new IFdbTuple[extracted.Length]; + for(int i = 0; i < extracted.Length; i++) + { + if (extracted[i].HasValue) tuples[i] = new FdbPrefixedTuple(prefix, FdbTuple.Unpack(extracted[i])); + } + return tuples; + } + + /// Unpack an array of keys into tuples, with the subspace prefix removed + /// Packed version of keys inside this subspace + /// Unpacked tuples that are relative to the current subspace + [NotNull] + public IFdbTuple[] Unpack([NotNull] params Slice[] keys) + { + return Unpack((IEnumerable)keys); + } + + public FdbKeyRange ToRange([NotNull] IFdbTuple tuple) + { + if (tuple == null) throw new ArgumentNullException("tuple"); + return m_subspace.ToRange(tuple.ToSlice()); + } + + public FdbKeyRange ToRange([NotNull] ITupleFormattable item) + { + if (item == null) throw new ArgumentNullException("item"); + return ToRange(item.ToTuple()); + } + + #region EncodeKey... 
+ + /// Create a new key by appending a value to the current subspace + /// Type of the value + /// Value that will be appended at the end of the key + /// Key the correspond to the concatenation of the current subspace's prefix and + /// tuple.Pack(x) is equivalent to tuple.Append(x).ToSlice() + public Slice EncodeKey(T key) + { + return FdbTuple.PackWithPrefix(m_subspace.Key, key); + } + + /// Create a new key by appending two values to the current subspace + /// Type of the next to last value + /// Type of the last value + /// Value that will be in the next to last position + /// Value that will be in the last position + /// Key the correspond to the concatenation of the current subspace's prefix, and + /// (...,).Pack(x, y) is equivalent to (...,).Append(x).Append(y).ToSlice() + public Slice EncodeKey(T1 key1, T2 key2) + { + return FdbTuple.PackWithPrefix(m_subspace.Key, key1, key2); + } + + /// Create a new key by appending three values to the current subspace + /// Type of the first value + /// Type of the second value + /// Type of the thrid value + /// Value that will be appended first + /// Value that will be appended second + /// Value that will be appended third + /// Key the correspond to the concatenation of the current subspace's prefix, , and + /// tuple.Pack(x, y, z) is equivalent to tuple.Append(x).Append(y).Append(z).ToSlice() + public Slice EncodeKey(T1 key1, T2 key2, T3 key3) + { + return FdbTuple.PackWithPrefix(m_subspace.Key, key1, key2, key3); + } + + /// Create a new key by appending three values to the current subspace + /// Type of the first value + /// Type of the second value + /// Type of the third value + /// Type of the fourth value + /// Value that will be appended first + /// Value that will be appended second + /// Value that will be appended third + /// Value that will be appended fourth + /// Key the correspond to the concatenation of the current subspace's prefix, , , and + /// tuple.Pack(w, x, y, z) is equivalent to 
tuple.Append(w).Append(x).Append(y).Append(z).ToSlice() + public Slice EncodeKey(T1 key1, T2 key2, T3 key3, T4 key4) + { + return FdbTuple.PackWithPrefix(m_subspace.Key, key1, key2, key3, key4); + } + + /// Merge a sequence of keys with the subspace's prefix, all sharing the same buffer + /// Type of the keys + /// Sequence of keys to pack + /// Array of slices (for all keys) that share the same underlying buffer + [NotNull] + public Slice[] EncodeKeys([NotNull] IEnumerable keys) + { + return FdbTuple.PackRangeWithPrefix(m_subspace.Key, keys); + } + + /// Merge a sequence of keys with the subspace's prefix, all sharing the same buffer + /// Type of the keys + /// Sequence of keys to pack + /// Array of slices (for all keys) that share the same underlying buffer + [NotNull] + public Slice[] EncodeKeys([NotNull] T[] keys) + { + return FdbTuple.PackRangeWithPrefix(m_subspace.Key, keys); + } + + /// Merge a sequence of elements with the subspace's prefix, all sharing the same buffer + /// Type of the elements + /// Type of the keys extracted from the elements + /// Sequence of elements to pack + /// Lambda that extract the key from each element + /// Array of slices (for all keys) that share the same underlying buffer + [NotNull] + public Slice[] EncodeKeys([NotNull] TElement[] elements, [NotNull] Func selector) + { + return FdbTuple.PackRangeWithPrefix(m_subspace.Key, elements, selector); + } + + #endregion + + #region DecodeKey... + + /// Unpack a key into a singleton tuple, and return the single element + /// Expected type of the only element + /// Packed version of a key that should fit inside this subspace + /// Converted value of the only element in the tuple. 
Throws an exception if the tuple is empty or contains more than one element + /// new Subspace([FE]).UnpackSingle<int>([FE 02 'H' 'e' 'l' 'l' 'o' 00]) => (string) "Hello" + public T DecodeKey(Slice key) + { + return FdbTuple.UnpackSingle(m_subspace.ExtractKey(key, boundCheck: true)); + } + + + public FdbTuple DecodeKey(Slice key) + { + var tuple = Unpack(key); + if (tuple == null) throw new FormatException("The specified key does not contain any items"); + if (tuple.Count != 2) throw new FormatException("The specified key is not a tuple with 2 items"); + + return FdbTuple.Create( + tuple.Get(0), + tuple.Get(1) + ); + } + + public FdbTuple DecodeKey(Slice key) + { + var tuple = Unpack(key); + if (tuple == null) throw new FormatException("The specified key does not contain any items"); + if (tuple.Count != 3) throw new FormatException("The specified key is not a tuple with 3 items"); + + return FdbTuple.Create( + tuple.Get(0), + tuple.Get(1), + tuple.Get(2) + ); + } + + public FdbTuple DecodeKey(Slice key) + { + var tuple = Unpack(key); + if (tuple == null) throw new FormatException("The specified key does not contain any items"); + if (tuple.Count != 4) throw new FormatException("The specified key is not a tuple with 4 items"); + + return FdbTuple.Create( + tuple.Get(0), + tuple.Get(1), + tuple.Get(2), + tuple.Get(3) + ); + } + + /// Unpack a key into a tuple, and return only the first element + /// Expected type of the last element + /// Packed version of a key that should fit inside this subspace + /// Converted value of the last element of the tuple + /// new Subspace([FE]).UnpackLast<int>([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => (string) "Hello" + public T DecodeFirst(Slice key) + { + return FdbTuple.UnpackFirst(m_subspace.ExtractKey(key, boundCheck: true)); + } + + /// Unpack a key into a tuple, and return only the last element + /// Expected type of the last element + /// Packed version of a key that should fit inside this subspace + /// Converted value of the 
last element of the tuple + /// new Subspace([FE]).UnpackLast<int>([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => (int) 1 + public T DecodeLast(Slice key) + { + return FdbTuple.UnpackLast(m_subspace.ExtractKey(key, boundCheck: true)); + } + + /// Unpack an array of key into tuples, and return an array with only the first elements of each tuple + /// Expected type of the first element of all the keys + /// Array of packed keys that should all fit inside this subspace + /// Array containing the converted values of the first elements of each tuples + [NotNull] + public T[] DecodeKeysFirst([NotNull] Slice[] keys) + { + if (keys == null) throw new ArgumentNullException("keys"); + + var values = new T[keys.Length]; + for (int i = 0; i < keys.Length; i++) + { + //REVIEW: what should we do if we encounter Slice.Nil keys ?? + values[i] = FdbTuple.UnpackFirst(m_subspace.ExtractKey(keys[i], boundCheck: true)); + } + return values; + } + + /// Unpack an array of key into tuples, and return an array with only the last elements of each tuple + /// Expected type of the last element of all the keys + /// Array of packed keys that should all fit inside this subspace + /// Array containing the converted values of the last elements of each tuples + [NotNull] + public T[] DecodeKeysLast([NotNull] Slice[] keys) + { + if (keys == null) throw new ArgumentNullException("keys"); + + var values = new T[keys.Length]; + for (int i = 0; i < keys.Length; i++) + { + //REVIEW: what should we do if we encounter Slice.Nil keys ?? + values[i] = FdbTuple.UnpackLast(m_subspace.ExtractKey(keys[i], boundCheck: true)); + } + return values; + } + + /// Unpack an array of key into singleton tuples, and return an array with value of each tuple + /// Expected type of the only element of all the keys + /// Array of packed keys that should all fit inside this subspace + /// Array containing the converted values of the only elements of each tuples. 
Throws an exception if one key contains more than one element + [NotNull] + public T[] DecodeKeys([NotNull] Slice[] keys) + { + if (keys == null) throw new ArgumentNullException("keys"); + + var values = new T[keys.Length]; + for (int i = 0; i < keys.Length; i++) + { + //REVIEW: what should we do if we encounter Slice.Nil keys ?? + values[i] = FdbTuple.UnpackSingle(m_subspace.ExtractKey(keys[i], boundCheck: true)); + } + return values; + } + + #endregion + + #region Append... + + /// Return an empty tuple that is attached to this subspace + /// Empty tuple that can be extended, and whose packed representation will always be prefixed by the subspace key + [NotNull] + public IFdbTuple ToTuple() + { + return new FdbPrefixedTuple(m_subspace.Key, FdbTuple.Empty); + } + + /// Attach a tuple to an existing subspace. + /// Tuple whose items will be appended at the end of the current subspace + /// Tuple that wraps the items of and whose packed representation will always be prefixed by the subspace key. + [NotNull] + public IFdbTuple Concat([NotNull] IFdbTuple tuple) + { + return new FdbPrefixedTuple(m_subspace.Key, tuple); + } + + /// Convert a formattable item into a tuple that is attached to this subspace. + /// Item that can be converted into a tuple + /// Tuple that is the logical representation of the item, and whose packed representation will always be prefixed by the subspace key. 
+ /// This is the equivalent of calling 'subspace.Create(formattable.ToTuple())' + [NotNull] + public IFdbTuple Concat([NotNull] ITupleFormattable formattable) + { + if (formattable == null) throw new ArgumentNullException("formattable"); + var tuple = formattable.ToTuple(); + if (tuple == null) throw new InvalidOperationException("Formattable item cannot return an empty tuple"); + return new FdbPrefixedTuple(m_subspace.Key, tuple); + } + + /// Create a new 1-tuple that is attached to this subspace + /// Type of the value to append + /// Value that will be appended + /// Tuple of size 1 that contains , and whose packed representation will always be prefixed by the subspace key. + /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T>(value))' + [NotNull] + public IFdbTuple Append(T value) + { + return new FdbPrefixedTuple(m_subspace.Key, FdbTuple.Create(value)); + } + + /// Create a new 2-tuple that is attached to this subspace + /// Type of the first value to append + /// Type of the second value to append + /// First value that will be appended + /// Second value that will be appended + /// Tuple of size 2 that contains and , and whose packed representation will always be prefixed by the subspace key. + /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2>(value1, value2))' + [NotNull] + public IFdbTuple Append(T1 value1, T2 value2) + { + return new FdbPrefixedTuple(m_subspace.Key, FdbTuple.Create(value1, value2)); + } + + /// Create a new 3-tuple that is attached to this subspace + /// Type of the first value to append + /// Type of the second value to append + /// Type of the third value to append + /// First value that will be appended + /// Second value that will be appended + /// Third value that will be appended + /// Tuple of size 3 that contains , and , and whose packed representation will always be prefixed by the subspace key. 
+ /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2, T3>(value1, value2, value3))' + [NotNull] + public IFdbTuple Append(T1 value1, T2 value2, T3 value3) + { + return new FdbPrefixedTuple(m_subspace.Key, FdbTuple.Create(value1, value2, value3)); + } + + /// Create a new 4-tuple that is attached to this subspace + /// Type of the first value to append + /// Type of the second value to append + /// Type of the third value to append + /// Type of the fourth value to append + /// First value that will be appended + /// Second value that will be appended + /// Third value that will be appended + /// Fourth value that will be appended + /// Tuple of size 4 that contains , , and , and whose packed representation will always be prefixed by the subspace key. + /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2, T3, T4>(value1, value2, value3, value4))' + [NotNull] + public IFdbTuple Append(T1 value1, T2 value2, T3 value3, T4 value4) + { + return new FdbPrefixedTuple(m_subspace.Key, FdbTuple.Create(value1, value2, value3, value4)); + } + + #endregion + + } + +} diff --git a/FoundationDB.Client/Subspaces/IFdbSubspace.cs b/FoundationDB.Client/Subspaces/IFdbSubspace.cs index 8c69ec31d..7ce64e8c5 100644 --- a/FoundationDB.Client/Subspaces/IFdbSubspace.cs +++ b/FoundationDB.Client/Subspaces/IFdbSubspace.cs @@ -30,17 +30,29 @@ namespace FoundationDB.Client { using JetBrains.Annotations; using System; + using System.Collections.Generic; + public interface IFdbSubspace : IFdbKey { // This interface helps solve some type resolution ambiguities at compile time between types that all implement IFdbKey but have different semantics for partitionning and concatenation - /// Create a new subspace by adding a suffix to the key of the current subspace. 
- /// Binary suffix that will be appended to the current prefix - /// New subspace whose prefix is the concatenation of the parent prefix, and - IFdbSubspace this[Slice suffix] { [NotNull] get; } + /// Returns the prefix of this subspace + Slice Key { get; } + + /// Return a view of all the possible binary keys of this subspace + FdbSubspaceKeys Keys { get; } + + /// Helper that can be used to partition this subspace into smaller subspaces + FdbSubspacePartition Partition { get; } + + /// Return a view of all the possible tuple-based keys of this subspace + FdbSubspaceTuples Tuples { get; } - IFdbSubspace this[IFdbKey key] { [NotNull] get; } + ///// Create a new subspace by adding a suffix to the key of the current subspace. + ///// Binary suffix that will be appended to the current prefix + ///// New subspace whose prefix is the concatenation of the parent prefix, and + //IFdbSubspace this[Slice suffix] { [NotNull] get; } /// Test if a key is inside the range of keys logically contained by this subspace /// Key to test @@ -48,16 +60,30 @@ public interface IFdbSubspace : IFdbKey /// Please note that this method does not test if the key *actually* exists in the database, only if the key is not ouside the range of keys defined by the subspace. bool Contains(Slice key); + /// Check that a key fits inside this subspace, and return '' or '\xFF' if it is outside the bounds + /// Key that needs to be checked + /// If true, allow keys that starts with \xFF even if this subspace is not the Empty subspace or System subspace itself. + /// The unchanged if it is contained in the namespace, Slice.Empty if it was before the subspace, or FdbKey.MaxValue if it was after. 
+ Slice BoundCheck(Slice key, bool allowSystemKeys); + + Slice ConcatKey(Slice suffix); + + [NotNull] + Slice[] ConcatKeys([NotNull] IEnumerable suffixes); + + /// Remove the subspace prefix from a binary key, or throw if the key does not belong to this subspace /// Complete key that contains the current subspace prefix, and a binary suffix. /// Binary suffix of the key (or Slice.Empty is the key is exactly equal to the subspace prefix). If the key is equal to Slice.Nil, then it will be returned unmodified. If the key is outside of the subspace, the method throws. /// If key is outside the current subspace. - Slice ExtractAndCheck(Slice key); - //REVIEW: what about Extract(..) ? Merge both with an optional "bool throwIfOutside = false" ? + Slice ExtractKey(Slice key, bool boundCheck = false); + + [NotNull] + Slice[] ExtractKeys([NotNull] IEnumerable keys, bool boundCheck = false); - FdbKeyRange ToRange(Slice suffix); + /// Return a pair of keys that contain all the keys inside this subspace + FdbKeyRange ToRange(Slice suffix = default(Slice)); + //REVIEW: this is not exactly true if ToRange() uses the Tuple ToRange() which adds <00> and to the prefix! - //REVIEW: Consider adding IEquatable and maybe IComparable ? 
} } diff --git a/FoundationDB.Client/Utils/Slice.cs b/FoundationDB.Client/Utils/Slice.cs index 43f70156b..ebd4c0bb6 100644 --- a/FoundationDB.Client/Utils/Slice.cs +++ b/FoundationDB.Client/Utils/Slice.cs @@ -1952,6 +1952,67 @@ public static Slice Concat(params Slice[] args) return writer.ToSlice(); } + /// Adds a prefix to a list of slices + /// Prefix to add to all the slices + /// List of slices to process + /// Array of slice that all start with and followed by the corresponding entry in + /// This method is optimized to reduce the amount of memory allocated + public static Slice[] ConcatRange(Slice prefix, IEnumerable slices) + { + if (slices == null) throw new ArgumentNullException("slices"); + + if (prefix.IsNullOrEmpty) + { // nothing to do, but we still need to copy the array + return slices.ToArray(); + } + + Slice[] res; + Slice[] arr; + ICollection coll; + + if ((arr = slices as Slice[]) != null) + { // fast-path for arrays (most frequent with range reads) + + // we will use a SliceBuffer to store all the keys produced in as few byte[] arrays as needed + + // precompute the exact size needed + int totalSize = prefix.Count * arr.Length; + for (int i = 0; i < arr.Length; i++) totalSize += arr[i].Count; + var buf = new SliceBuffer(Math.Min(totalSize, 64 * 1024)); + + res = new Slice[arr.Length]; + for (int i = 0; i < arr.Length; i++) + { + res[i] = buf.Intern(prefix, arr[i], aligned: false); + } + } + else if ((coll = slices as ICollection) != null) + { // collection (size known) + + //TODO: also use a SliceBuffer since we could precompute the total size... 
+ + res = new Slice[coll.Count]; + int p = 0; + foreach (var suffix in coll) + { + res[p++] = prefix.Concat(suffix); + } + } + else + { // streaming sequence (size unknown) + + //note: we can only scan the list once, so would be no way to get a sensible value for the buffer's page size + var list = new List(); + foreach (var suffix in slices) + { + list.Add(prefix.Concat(suffix)); + } + res = list.ToArray(); + } + + return res; + } + /// Implicitly converts a Slice into an ArraySegment<byte> public static implicit operator ArraySegment(Slice value) { diff --git a/FoundationDB.Client/Utils/SliceBuffer.cs b/FoundationDB.Client/Utils/SliceBuffer.cs index aa2b3d46f..d2d31d336 100644 --- a/FoundationDB.Client/Utils/SliceBuffer.cs +++ b/FoundationDB.Client/Utils/SliceBuffer.cs @@ -42,7 +42,7 @@ namespace FoundationDB.Client.Utils public sealed class SliceBuffer { private const int DefaultPageSize = 256; - private const int MaxPageSize = 64 * 1024; // 64KB (small enough to no go into the LOH) + private const int MaxPageSize = 64 * 1024; // 64KB (small enough to not go into the LOH) /// Default initial size of pages (doubled every time until it reached the max page size) private int m_pageSize; diff --git a/FoundationDB.Layers.Common/Blobs/FdbBlob.cs b/FoundationDB.Layers.Common/Blobs/FdbBlob.cs index 0d7e07e8c..377b7b6a8 100644 --- a/FoundationDB.Layers.Common/Blobs/FdbBlob.cs +++ b/FoundationDB.Layers.Common/Blobs/FdbBlob.cs @@ -53,7 +53,7 @@ public class FdbBlob /// Only keys within the subspace will be used by the object. /// Other clients of the database should refrain from modifying the subspace. 
/// Subspace to be used for storing the blob data and metadata - public FdbBlob(FdbSubspace subspace) + public FdbBlob(IFdbSubspace subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); @@ -61,7 +61,7 @@ public FdbBlob(FdbSubspace subspace) } /// Subspace used as a prefix for all items in this table - public FdbSubspace Subspace { get; private set; } + public IFdbSubspace Subspace { get; private set; } /// Returns the key for data chunk at the specified offset /// @@ -69,24 +69,24 @@ public FdbBlob(FdbSubspace subspace) protected virtual Slice DataKey(long offset) { //note: python code uses "%16d" % offset, which pads the value with spaces.. Not sure why ? - return this.Subspace.Pack(DataSuffix, offset.ToString("D16", CultureInfo.InvariantCulture)); + return this.Subspace.Tuples.EncodeKey(DataSuffix, offset.ToString("D16", CultureInfo.InvariantCulture)); } protected virtual long DataKeyOffset(Slice key) { - long offset = Int64.Parse(this.Subspace.UnpackLast(key), CultureInfo.InvariantCulture); + long offset = Int64.Parse(this.Subspace.Tuples.DecodeLast(key), CultureInfo.InvariantCulture); if (offset < 0) throw new InvalidOperationException("Chunk offset value cannot be less than zero"); return offset; } protected virtual Slice SizeKey() { - return this.Subspace.Pack(SizeSuffix); + return this.Subspace.Tuples.EncodeKey(SizeSuffix); } protected virtual Slice AttributeKey(string name) { - return this.Subspace.Pack(AttributesSuffix, name); + return this.Subspace.Tuples.EncodeKey(AttributesSuffix, name); } #region Internal Helpers... 
diff --git a/FoundationDB.Layers.Common/Collections/FdbMap`2.cs b/FoundationDB.Layers.Common/Collections/FdbMap`2.cs index faeb10feb..8483a6921 100644 --- a/FoundationDB.Layers.Common/Collections/FdbMap`2.cs +++ b/FoundationDB.Layers.Common/Collections/FdbMap`2.cs @@ -43,11 +43,11 @@ namespace FoundationDB.Layers.Collections public class FdbMap { - public FdbMap([NotNull] string name, [NotNull] FdbSubspace subspace, [NotNull] IValueEncoder valueEncoder) + public FdbMap([NotNull] string name, [NotNull] IFdbSubspace subspace, [NotNull] IValueEncoder valueEncoder) : this(name, subspace, KeyValueEncoders.Tuples.Key(), valueEncoder) { } - public FdbMap([NotNull] string name, [NotNull] FdbSubspace subspace, [NotNull] IKeyEncoder keyEncoder, [NotNull] IValueEncoder valueEncoder) + public FdbMap([NotNull] string name, [NotNull] IFdbSubspace subspace, [NotNull] IKeyEncoder keyEncoder, [NotNull] IValueEncoder valueEncoder) { if (name == null) throw new ArgumentNullException("name"); if (subspace == null) throw new ArgumentNullException("subspace"); @@ -67,7 +67,7 @@ public FdbMap([NotNull] string name, [NotNull] FdbSubspace subspace, [NotNull] I public string Name { [NotNull] get; private set; } /// Subspace used as a prefix for all items in this map - public FdbSubspace Subspace { [NotNull] get; private set; } + public IFdbSubspace Subspace { [NotNull] get; private set; } /// Subspace used to encoded the keys for the items protected FdbEncoderSubspace Location { [NotNull] get; private set; } diff --git a/FoundationDB.Layers.Common/Collections/FdbMultimap`2.cs b/FoundationDB.Layers.Common/Collections/FdbMultimap`2.cs index 3a913a8e3..2085bdcf3 100644 --- a/FoundationDB.Layers.Common/Collections/FdbMultimap`2.cs +++ b/FoundationDB.Layers.Common/Collections/FdbMultimap`2.cs @@ -54,7 +54,7 @@ public class FdbMultiMap /// Create a new multimap /// Location where the map will be stored in the database /// If true, allow negative or zero values to stay in the map. 
- public FdbMultiMap(FdbSubspace subspace, bool allowNegativeValues) + public FdbMultiMap(IFdbSubspace subspace, bool allowNegativeValues) : this(subspace, allowNegativeValues, KeyValueEncoders.Tuples.CompositeKey()) { } @@ -62,7 +62,7 @@ public FdbMultiMap(FdbSubspace subspace, bool allowNegativeValues) /// Location where the map will be stored in the database /// If true, allow negative or zero values to stay in the map. /// Encoder for the key/value pairs - public FdbMultiMap(FdbSubspace subspace, bool allowNegativeValues, ICompositeKeyEncoder encoder) + public FdbMultiMap(IFdbSubspace subspace, bool allowNegativeValues, ICompositeKeyEncoder encoder) { if (subspace == null) throw new ArgumentNullException("subspace"); if (encoder == null) throw new ArgumentNullException("encoder"); @@ -75,7 +75,7 @@ public FdbMultiMap(FdbSubspace subspace, bool allowNegativeValues, ICompositeKey #region Public Properties... /// Subspace used as a prefix for all items in this map - public FdbSubspace Subspace { [NotNull] get; private set; } + public IFdbSubspace Subspace { [NotNull] get; private set; } /// If true, allow negative or zero values to stay in the map. public bool AllowNegativeValues { get; private set; } diff --git a/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs b/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs index 47221cff9..713e3cf67 100644 --- a/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs +++ b/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs @@ -67,7 +67,7 @@ public FdbQueue([NotNull] FdbSubspace subspace, bool highContention) /// Create a new queue using either High Contention mode or Simple mode /// Subspace where the queue will be stored /// If true, uses High Contention Mode (lots of popping clients). If true, uses the Simple Mode (a few popping clients). 
- public FdbQueue([NotNull] FdbSubspace subspace, bool highContention, [NotNull] IValueEncoder encoder) + public FdbQueue([NotNull] IFdbSubspace subspace, bool highContention, [NotNull] IValueEncoder encoder) { if (subspace == null) throw new ArgumentNullException("subspace"); if (encoder == null) throw new ArgumentNullException("encocer"); @@ -76,13 +76,14 @@ public FdbQueue([NotNull] FdbSubspace subspace, bool highContention, [NotNull] I this.HighContention = highContention; this.Encoder = encoder; - this.ConflictedPop = subspace.Partition(Slice.FromAscii("pop")); - this.ConflictedItem = subspace.Partition(Slice.FromAscii("conflict")); - this.QueueItem = subspace.Partition(Slice.FromAscii("item")); + //TODO: rewrite this, using FdbEncoderSubpsace<..> ! + this.ConflictedPop = subspace.Partition.By(Slice.FromAscii("pop")); + this.ConflictedItem = subspace.Partition.By(Slice.FromAscii("conflict")); + this.QueueItem = subspace.Partition.By(Slice.FromAscii("item")); } /// Subspace used as a prefix for all items in this table - public FdbSubspace Subspace { [NotNull] get; private set; } + public IFdbSubspace Subspace { [NotNull] get; private set; } /// If true, the queue is operating in High Contention mode that will scale better with a lot of popping clients. public bool HighContention { get; private set; } @@ -90,11 +91,11 @@ public FdbQueue([NotNull] FdbSubspace subspace, bool highContention, [NotNull] I /// Serializer for the elements of the queue public IValueEncoder Encoder { [NotNull] get; private set; } - internal FdbSubspace ConflictedPop { get; private set; } + internal IFdbSubspace ConflictedPop { get; private set; } - internal FdbSubspace ConflictedItem { get; private set; } + internal IFdbSubspace ConflictedItem { get; private set; } - internal FdbSubspace QueueItem { get; private set; } + internal IFdbSubspace QueueItem { get; private set; } /// Remove all items from the queue. 
public void ClearAsync([NotNull] IFdbTransaction trans) @@ -253,7 +254,7 @@ public Task ExportAsync(IFdbDatabase db, Func handler, Cancella private Slice ConflictedItemKey(object subKey) { - return this.ConflictedItem.Pack(subKey); + return this.ConflictedItem.Tuples.EncodeKey(subKey); } private Slice RandId() @@ -271,12 +272,12 @@ private async Task PushAtAsync([NotNull] IFdbTransaction tr, T value, long index // This makes pushes fast and usually conflict free (unless the queue becomes empty // during the push) - Slice key = this.QueueItem.Pack(index, this.RandId()); + Slice key = this.QueueItem.Tuples.EncodeKey(index, this.RandId()); await tr.GetAsync(key).ConfigureAwait(false); tr.Set(key, this.Encoder.EncodeValue(value)); } - private async Task GetNextIndexAsync([NotNull] IFdbReadOnlyTransaction tr, FdbSubspace subspace) + private async Task GetNextIndexAsync([NotNull] IFdbReadOnlyTransaction tr, IFdbSubspace subspace) { var range = subspace.ToRange(); @@ -287,7 +288,7 @@ private async Task GetNextIndexAsync([NotNull] IFdbReadOnlyTransaction tr, return 0; } - return subspace.Unpack(lastKey).Get(0) + 1; + return subspace.Tuples.DecodeFirst(lastKey) + 1; } private Task> GetFirstItemAsync([NotNull] IFdbReadOnlyTransaction tr) @@ -323,7 +324,7 @@ private async Task AddConflictedPopAsync([NotNull] IFdbTransaction tr, bo return Slice.Nil; } - Slice waitKey = this.ConflictedPop.Pack(index, this.RandId()); + Slice waitKey = this.ConflictedPop.Tuples.EncodeKey(index, this.RandId()); await tr.GetAsync(waitKey).ConfigureAwait(false); tr.Set(waitKey, Slice.Empty); return waitKey; @@ -371,7 +372,7 @@ private async Task FulfillConflictedPops([NotNull] IFdbDatabase db, Cancel var pop = pops[i]; var kvp = items[i]; - var key = this.ConflictedPop.Unpack(pop.Key); + var key = this.ConflictedPop.Tuples.Unpack(pop.Key); var storageKey = this.ConflictedItemKey(key[1]); tr.Set(storageKey, kvp.Value); @@ -446,7 +447,7 @@ private async Task> PopHighContentionAsync([NotNull] 
IFdbDatabase db } // The result of the pop will be stored at this key once it has been fulfilled - var resultKey = ConflictedItemKey(this.ConflictedPop.UnpackLast(waitKey)); + var resultKey = ConflictedItemKey(this.ConflictedPop.Tuples.DecodeLast(waitKey)); tr.Reset(); diff --git a/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs b/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs index 568dc3d36..4352dce1f 100644 --- a/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs +++ b/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs @@ -54,7 +54,7 @@ public class FdbRankedSet /// Initializes a new ranked set at a given location /// Subspace where the set will be stored - public FdbRankedSet([NotNull] FdbSubspace subspace) + public FdbRankedSet([NotNull] IFdbSubspace subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); @@ -68,7 +68,7 @@ public Task OpenAsync([NotNull] IFdbTransaction trans) } /// Subspace used as a prefix for all items in this table - public FdbSubspace Subspace { [NotNull] get; private set; } + public IFdbSubspace Subspace { [NotNull] get; private set; } /// Returns the number of items in the set. 
/// @@ -78,7 +78,7 @@ public Task SizeAsync([NotNull] IFdbReadOnlyTransaction trans) if (trans == null) throw new ArgumentNullException("trans"); return trans - .GetRange(this.Subspace.Partition(MAX_LEVELS - 1).ToRange()) + .GetRange(this.Subspace.Partition.By(MAX_LEVELS - 1).ToRange()) .Select(kv => DecodeCount(kv.Value)) .SumAsync(); } @@ -101,7 +101,7 @@ public async Task InsertAsync([NotNull] IFdbTransaction trans, Slice key) if ((keyHash & ((1 << (level * LEVEL_FAN_POW)) - 1)) != 0) { //Console.WriteLine("> [" + level + "] Incrementing previous key: " + FdbKey.Dump(prevKey)); - trans.AtomicAdd(this.Subspace.Partition(level, prevKey), EncodeCount(1)); + trans.AtomicAdd(this.Subspace.Partition.By(level, prevKey), EncodeCount(1)); } else { @@ -109,14 +109,14 @@ public async Task InsertAsync([NotNull] IFdbTransaction trans, Slice key) // Insert into this level by looking at the count of the previous // key in the level and recounting the next lower level to correct // the counts - var prevCount = DecodeCount(await trans.GetAsync(this.Subspace.Pack(level, prevKey)).ConfigureAwait(false)); + var prevCount = DecodeCount(await trans.GetAsync(this.Subspace.Tuples.EncodeKey(level, prevKey)).ConfigureAwait(false)); var newPrevCount = await SlowCountAsync(trans, level - 1, prevKey, key); var count = checked((prevCount - newPrevCount) + 1); // print "insert", key, "level", level, "count", count, // "splits", prevKey, "oldC", prevCount, "newC", newPrevCount - trans.Set(this.Subspace.Pack(level, prevKey), EncodeCount(newPrevCount)); - trans.Set(this.Subspace.Pack(level, key), EncodeCount(count)); + trans.Set(this.Subspace.Tuples.EncodeKey(level, prevKey), EncodeCount(newPrevCount)); + trans.Set(this.Subspace.Tuples.EncodeKey(level, key), EncodeCount(count)); } } } @@ -126,7 +126,7 @@ public async Task ContainsAsync([NotNull] IFdbReadOnlyTransaction trans, S if (trans == null) throw new ArgumentNullException("trans"); if (key.IsNull) throw new ArgumentException("Empty key not 
allowed in set", "key"); - return (await trans.GetAsync(this.Subspace.Pack(0, key)).ConfigureAwait(false)).HasValue; + return (await trans.GetAsync(this.Subspace.Tuples.EncodeKey(0, key)).ConfigureAwait(false)).HasValue; } public async Task EraseAsync([NotNull] IFdbTransaction trans, Slice key) @@ -141,7 +141,7 @@ public async Task EraseAsync([NotNull] IFdbTransaction trans, Slice key) for (int level = 0; level < MAX_LEVELS; level++) { // This could be optimized with hash - var k = this.Subspace.Partition(level, key); + var k = this.Subspace.Partition.By(level, key); var c = await trans.GetAsync(k).ConfigureAwait(false); if (c.HasValue) trans.Clear(k); if (level == 0) continue; @@ -151,7 +151,7 @@ public async Task EraseAsync([NotNull] IFdbTransaction trans, Slice key) long countChange = -1; if (c.HasValue) countChange += DecodeCount(c); - trans.AtomicAdd(this.Subspace.Pack(level, prevKey), EncodeCount(countChange)); + trans.AtomicAdd(this.Subspace.Tuples.EncodeKey(level, prevKey), EncodeCount(countChange)); } } @@ -169,15 +169,15 @@ public async Task EraseAsync([NotNull] IFdbTransaction trans, Slice key) var rankKey = Slice.Empty; for(int level = MAX_LEVELS - 1; level >= 0; level--) { - var lss = this.Subspace.Partition(level); + var lss = this.Subspace.Partition.By(level); long lastCount = 0; var kcs = await trans.GetRange( - FdbKeySelector.FirstGreaterOrEqual(lss.Pack(rankKey)), - FdbKeySelector.FirstGreaterThan(lss.Pack(key)) + FdbKeySelector.FirstGreaterOrEqual(lss.Tuples.EncodeKey(rankKey)), + FdbKeySelector.FirstGreaterThan(lss.Tuples.EncodeKey(key)) ).ToListAsync().ConfigureAwait(false); foreach (var kc in kcs) { - rankKey = lss.UnpackSingle(kc.Key); + rankKey = lss.Tuples.DecodeKey(kc.Key); lastCount = DecodeCount(kc.Value); r += lastCount; } @@ -198,14 +198,14 @@ public async Task GetNthAsync([NotNull] IFdbReadOnlyTransaction trans, lo var key = Slice.Empty; for (int level = MAX_LEVELS - 1; level >= 0; level--) { - var lss = 
this.Subspace.Partition(level); - var kcs = await trans.GetRange(lss.Pack(key), lss.ToRange().End).ToListAsync().ConfigureAwait(false); + var lss = this.Subspace.Partition.By(level); + var kcs = await trans.GetRange(lss.Tuples.EncodeKey(key), lss.ToRange().End).ToListAsync().ConfigureAwait(false); if (kcs.Count == 0) break; foreach(var kc in kcs) { - key = lss.UnpackSingle(kc.Key); + key = lss.Tuples.DecodeKey(kc.Key); long count = DecodeCount(kc.Value); if (key.IsPresent && r == 0) { @@ -250,7 +250,7 @@ private Task SlowCountAsync(IFdbReadOnlyTransaction trans, int level, Slic } return trans - .GetRange(this.Subspace.Pack(level, beginKey), this.Subspace.Pack(level, endKey)) + .GetRange(this.Subspace.Tuples.EncodeKey(level, beginKey), this.Subspace.Tuples.EncodeKey(level, endKey)) .Select(kv => DecodeCount(kv.Value)) .SumAsync(); } @@ -258,7 +258,7 @@ private Task SlowCountAsync(IFdbReadOnlyTransaction trans, int level, Slic private async Task SetupLevelsAsync(IFdbTransaction trans) { var ks = Enumerable.Range(0, MAX_LEVELS) - .Select((l) => this.Subspace.Pack(l, Slice.Empty)) + .Select((l) => this.Subspace.Tuples.EncodeKey(l, Slice.Empty)) .ToList(); var res = await trans.GetValuesAsync(ks).ConfigureAwait(false); @@ -278,7 +278,7 @@ private async Task GetPreviousNodeAsync(IFdbTransaction trans, int level, // a transaction conflict. We also add a conflict key on the found previous // key in level 0. This allows detection of erasures. 
- var k = this.Subspace.Pack(level, key); + var k = this.Subspace.Tuples.EncodeKey(level, key); //Console.WriteLine(k); //Console.WriteLine("GetPreviousNode(" + level + ", " + key + ")"); //Console.WriteLine(FdbKeySelector.LastLessThan(k) + " <= x < " + FdbKeySelector.FirstGreaterOrEqual(k)); @@ -292,9 +292,9 @@ private async Task GetPreviousNodeAsync(IFdbTransaction trans, int level, .ConfigureAwait(false); //Console.WriteLine("Found " + FdbKey.Dump(kv.Key)); - var prevKey = this.Subspace.UnpackLast(kv.Key); + var prevKey = this.Subspace.Tuples.DecodeLast(kv.Key); trans.AddReadConflictRange(kv.Key + FdbKey.MinValue, k); - trans.AddReadConflictKey(this.Subspace.Pack(0, prevKey)); + trans.AddReadConflictKey(this.Subspace.Tuples.EncodeKey(0, (Slice)prevKey)); return prevKey; } diff --git a/FoundationDB.Layers.Common/Collections/FdbVector`1.cs b/FoundationDB.Layers.Common/Collections/FdbVector`1.cs index c5c98db6b..77e6c433b 100644 --- a/FoundationDB.Layers.Common/Collections/FdbVector`1.cs +++ b/FoundationDB.Layers.Common/Collections/FdbVector`1.cs @@ -142,7 +142,7 @@ public async Task> PopAsync([NotNull] IFdbTransaction tr) if (lastTwo.Count == 0) return default(Optional); //note: keys are reversed so indices[0] = last, indices[1] = second to last - var indices = lastTwo.Select(kvp => this.Subspace.Unpack(kvp.Key).Get(0)).ToList(); + var indices = lastTwo.Select(kvp => this.Subspace.Tuples.DecodeFirst(kvp.Key)).ToList(); if (indices[0] == 0) { // Vector has size one @@ -296,12 +296,12 @@ private async Task ComputeSizeAsync(IFdbReadOnlyTransaction tr) return 0; } - return this.Subspace.Unpack(lastKey).Get(0) + 1; + return this.Subspace.Tuples.DecodeFirst(lastKey) + 1; } private Slice GetKeyAt(long index) { - return this.Subspace.Pack(index); + return this.Subspace.Tuples.EncodeKey(index); } #endregion diff --git a/FoundationDB.Layers.Common/Counters/FdbCounterMap.cs b/FoundationDB.Layers.Common/Counters/FdbCounterMap.cs index 3ccf24c6c..b88d36fb0 100644 --- 
a/FoundationDB.Layers.Common/Counters/FdbCounterMap.cs +++ b/FoundationDB.Layers.Common/Counters/FdbCounterMap.cs @@ -41,12 +41,12 @@ public sealed class FdbCounterMap private static readonly Slice MinusOne = Slice.FromFixed64(-1); /// Create a new counter map. - public FdbCounterMap(FdbSubspace subspace) + public FdbCounterMap(IFdbSubspace subspace) : this(subspace, KeyValueEncoders.Tuples.Key()) { } /// Create a new counter map, using a specific key encoder. - public FdbCounterMap(FdbSubspace subspace, IKeyEncoder keyEncoder) + public FdbCounterMap(IFdbSubspace subspace, IKeyEncoder keyEncoder) { if (subspace == null) throw new ArgumentNullException("subspace"); if (keyEncoder == null) throw new ArgumentNullException("keyEncoder"); @@ -57,7 +57,7 @@ public FdbCounterMap(FdbSubspace subspace, IKeyEncoder keyEncoder) } /// Subspace used as a prefix for all items in this counter list - public FdbSubspace Subspace { [NotNull] get; private set; } + public IFdbSubspace Subspace { [NotNull] get; private set; } /// Encoder for the keys of the counter map public IKeyEncoder KeyEncoder { [NotNull] get; private set; } diff --git a/FoundationDB.Layers.Common/Counters/FdbHighContentionCounter.cs b/FoundationDB.Layers.Common/Counters/FdbHighContentionCounter.cs index 1436d6133..61c9e5155 100644 --- a/FoundationDB.Layers.Common/Counters/FdbHighContentionCounter.cs +++ b/FoundationDB.Layers.Common/Counters/FdbHighContentionCounter.cs @@ -100,7 +100,7 @@ private async Task Coalesce(int N, CancellationToken ct) try { // read N writes from a random place in ID space - var loc = this.Subspace.Pack(RandomId()); + var loc = this.Subspace.Tuples.EncodeKey(RandomId()); bool right; lock(this.Rng) { right = this.Rng.NextDouble() < 0.5; } @@ -119,7 +119,7 @@ private async Task Coalesce(int N, CancellationToken ct) tr.Clear(shard.Key); } - tr.Set(this.Subspace.Pack(RandomId()), this.Encoder.EncodeValue(total)); + tr.Set(this.Subspace.Tuples.EncodeKey(RandomId()), 
this.Encoder.EncodeValue(total)); // note: contrary to the python impl, we will await the commit, and rely on the caller to not wait to the Coalesce task itself to complete. // That way, the transaction will live as long as the task, and we ensure that it gets disposed at some time @@ -196,7 +196,7 @@ public void Add(IFdbTransaction trans, long x) { if (trans == null) throw new ArgumentNullException("trans"); - trans.Set(this.Subspace.Pack(RandomId()), this.Encoder.EncodeValue(x)); + trans.Set(this.Subspace.Tuples.EncodeKey(RandomId()), this.Encoder.EncodeValue(x)); // decide if we must coalesce //note: Random() is not thread-safe so we must lock diff --git a/FoundationDB.Layers.Common/Indexes/FdbIndex`2.cs b/FoundationDB.Layers.Common/Indexes/FdbIndex`2.cs index 381deac43..5d955aa0e 100644 --- a/FoundationDB.Layers.Common/Indexes/FdbIndex`2.cs +++ b/FoundationDB.Layers.Common/Indexes/FdbIndex`2.cs @@ -44,11 +44,11 @@ namespace FoundationDB.Layers.Indexing public class FdbIndex { - public FdbIndex([NotNull] string name, [NotNull] FdbSubspace subspace, IEqualityComparer valueComparer = null, bool indexNullValues = false) + public FdbIndex([NotNull] string name, [NotNull] IFdbSubspace subspace, IEqualityComparer valueComparer = null, bool indexNullValues = false) : this(name, subspace, valueComparer, indexNullValues, KeyValueEncoders.Tuples.CompositeKey()) { } - public FdbIndex([NotNull] string name, [NotNull] FdbSubspace subspace, IEqualityComparer valueComparer, bool indexNullValues, [NotNull] ICompositeKeyEncoder encoder) + public FdbIndex([NotNull] string name, [NotNull] IFdbSubspace subspace, IEqualityComparer valueComparer, bool indexNullValues, [NotNull] ICompositeKeyEncoder encoder) { if (name == null) throw new ArgumentNullException("name"); if (subspace == null) throw new ArgumentNullException("subspace"); @@ -63,7 +63,7 @@ public FdbIndex([NotNull] string name, [NotNull] FdbSubspace subspace, IEquality public string Name { [NotNull] get; private set; } - 
public FdbSubspace Subspace { [NotNull] get; private set; } + public IFdbSubspace Subspace { [NotNull] get; private set; } protected FdbEncoderSubspace Location { [NotNull] get; private set; } diff --git a/FoundationDB.Layers.Common/Interning/FdbStringIntern.cs b/FoundationDB.Layers.Common/Interning/FdbStringIntern.cs index 51f845d48..5dc4186ed 100644 --- a/FoundationDB.Layers.Common/Interning/FdbStringIntern.cs +++ b/FoundationDB.Layers.Common/Interning/FdbStringIntern.cs @@ -91,7 +91,7 @@ public override int GetHashCode() private readonly RandomNumberGenerator m_prng = RandomNumberGenerator.Create(); private readonly ReaderWriterLockSlim m_lock = new ReaderWriterLockSlim(); - public FdbStringIntern(FdbSubspace subspace) + public FdbStringIntern(IFdbSubspace subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); @@ -99,18 +99,18 @@ public FdbStringIntern(FdbSubspace subspace) } - public FdbSubspace Subspace { get; private set; } + public IFdbSubspace Subspace { get; private set; } #region Private Helpers... 
protected virtual Slice UidKey(Slice uid) { - return this.Subspace.Pack(Uid2StringKey, uid); + return this.Subspace.Tuples.EncodeKey(Uid2StringKey, uid); } protected virtual Slice StringKey(string value) { - return this.Subspace.Pack(String2UidKey, value); + return this.Subspace.Tuples.EncodeKey(String2UidKey, value); } /// Evict a random value from the cache diff --git a/FoundationDB.Layers.Experimental/Documents/FdbHashSetCollection.cs b/FoundationDB.Layers.Experimental/Documents/FdbHashSetCollection.cs index 1d8633145..c7b94ec49 100644 --- a/FoundationDB.Layers.Experimental/Documents/FdbHashSetCollection.cs +++ b/FoundationDB.Layers.Experimental/Documents/FdbHashSetCollection.cs @@ -46,7 +46,7 @@ namespace FoundationDB.Layers.Blobs public class FdbHashSetCollection { - public FdbHashSetCollection(FdbSubspace subspace) + public FdbHashSetCollection(IFdbSubspace subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); @@ -54,14 +54,15 @@ public FdbHashSetCollection(FdbSubspace subspace) } /// Subspace used as a prefix for all hashsets in this collection - public FdbSubspace Subspace { get; private set; } + public IFdbSubspace Subspace { get; private set; } /// Returns the key prefix of an HashSet: (subspace, id, ) /// /// protected virtual Slice GetKey(IFdbTuple id) { - return this.Subspace.Pack(id); + //REVIEW: should the id be encoded as a an embedded tuple or not? + return this.Subspace.Tuples.Pack(id); } /// Returns the key of a specific field of an HashSet: (subspace, id, field, ) @@ -70,7 +71,8 @@ protected virtual Slice GetKey(IFdbTuple id) /// protected virtual Slice GetFieldKey(IFdbTuple id, string field) { - return this.Subspace.Pack(id, field); + //REVIEW: should the id be encoded as a an embedded tuple or not? 
+ return this.Subspace.Tuples.Pack(id.Append(field)); } protected virtual string ParseFieldKey(IFdbTuple key) @@ -110,7 +112,7 @@ await trans .GetRange(FdbKeyRange.StartsWith(prefix)) .ForEachAsync((kvp) => { - string field = this.Subspace.UnpackLast(kvp.Key); + string field = this.Subspace.Tuples.DecodeLast(kvp.Key); results[field] = kvp.Value; }) .ConfigureAwait(false); @@ -129,7 +131,7 @@ public async Task> GetAsync(IFdbReadOnlyTransaction t if (id == null) throw new ArgumentNullException("id"); if (fields == null) throw new ArgumentNullException("fields"); - var keys = FdbTuple.PackRange(GetKey(id), fields); + var keys = FdbTuple.PackRangeWithPrefix(GetKey(id), fields); var values = await trans.GetValuesAsync(keys).ConfigureAwait(false); Contract.Assert(values != null && values.Length == fields.Length); diff --git a/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs b/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs index 34c90aebd..87155d526 100644 --- a/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs +++ b/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs @@ -64,15 +64,15 @@ public class FdbWorkerPool private readonly RandomNumberGenerator m_rng = RandomNumberGenerator.Create(); - public FdbSubspace Subspace { get; private set; } + public IFdbSubspace Subspace { get; private set; } - internal FdbSubspace TaskStore { get; private set; } + internal IFdbSubspace TaskStore { get; private set; } - internal FdbSubspace IdleRing { get; private set; } + internal IFdbSubspace IdleRing { get; private set; } - internal FdbSubspace BusyRing { get; private set; } + internal IFdbSubspace BusyRing { get; private set; } - internal FdbSubspace UnassignedTaskRing { get; private set; } + internal IFdbSubspace UnassignedTaskRing { get; private set; } internal FdbCounterMap Counters { get; private set; } @@ -108,26 +108,26 @@ public class FdbWorkerPool #endregion - public FdbWorkerPool(FdbSubspace subspace) + public 
FdbWorkerPool(IFdbSubspace subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); this.Subspace = subspace; - this.TaskStore = subspace.Partition(Slice.FromChar('T')); - this.IdleRing = subspace.Partition(Slice.FromChar('I')); - this.BusyRing = subspace.Partition(Slice.FromChar('B')); - this.UnassignedTaskRing = subspace.Partition(Slice.FromChar('U')); + this.TaskStore = subspace.Partition.By(Slice.FromChar('T')); + this.IdleRing = subspace.Partition.By(Slice.FromChar('I')); + this.BusyRing = subspace.Partition.By(Slice.FromChar('B')); + this.UnassignedTaskRing = subspace.Partition.By(Slice.FromChar('U')); - this.Counters = new FdbCounterMap(subspace.Partition(Slice.FromChar('C'))); + this.Counters = new FdbCounterMap(subspace.Partition.By(Slice.FromChar('C'))); } - private async Task> FindRandomItem(IFdbTransaction tr, FdbSubspace ring) + private async Task> FindRandomItem(IFdbTransaction tr, IFdbSubspace ring) { var range = ring.ToRange(); // start from a random position around the ring - Slice key = ring.Pack(GetRandomId()); + Slice key = ring.Tuples.EncodeKey(GetRandomId()); // We want to find the next item in the clockwise direction. If we reach the end of the ring, we "wrap around" by starting again from the start // => So we do find_next(key <= x < MAX) and if that does not produce any result, we do a find_next(MIN <= x < key) @@ -154,7 +154,7 @@ private Slice GetRandomId() } } - private async Task PushQueueAsync(IFdbTransaction tr, FdbSubspace queue, Slice taskId) + private async Task PushQueueAsync(IFdbTransaction tr, IFdbSubspace queue, Slice taskId) { //TODO: use a high contention algo ? // - must support Push and Pop @@ -163,21 +163,21 @@ private async Task PushQueueAsync(IFdbTransaction tr, FdbSubspace queue, Slice t // get the current size of the queue var range = queue.ToRange(); var lastKey = await tr.Snapshot.GetKeyAsync(FdbKeySelector.LastLessThan(range.End)).ConfigureAwait(false); - int count = lastKey < range.Begin ? 
0 : queue.Unpack(lastKey).Get(0) + 1; + int count = lastKey < range.Begin ? 0 : queue.Tuples.DecodeFirst(lastKey) + 1; // set the value - tr.Set(queue.Pack(count, GetRandomId()), taskId); + tr.Set(queue.Tuples.EncodeKey(count, GetRandomId()), taskId); } private void StoreTask(IFdbTransaction tr, Slice taskId, DateTime scheduledUtc, Slice taskBody) { tr.Annotate("Writing task {0}", taskId.ToAsciiOrHexaString()); - var prefix = this.TaskStore.Partition(taskId); + var prefix = this.TaskStore.Partition.By(taskId); // store task body and timestamp tr.Set(prefix.Key, taskBody); - tr.Set(prefix.Pack(TASK_META_SCHEDULED), Slice.FromInt64(scheduledUtc.Ticks)); + tr.Set(prefix.Tuples.EncodeKey(TASK_META_SCHEDULED), Slice.FromInt64(scheduledUtc.Ticks)); // increment total and pending number of tasks this.Counters.Increment(tr, COUNTER_TOTAL_TASKS); this.Counters.Increment(tr, COUNTER_PENDING_TASKS); @@ -188,7 +188,7 @@ private void ClearTask(IFdbTransaction tr, Slice taskId) tr.Annotate("Deleting task {0}", taskId.ToAsciiOrHexaString()); // clear all metadata about the task - tr.ClearRange(FdbKeyRange.StartsWith(this.TaskStore.Pack(taskId))); + tr.ClearRange(FdbKeyRange.StartsWith(this.TaskStore.Tuples.EncodeKey(taskId))); // decrement pending number of tasks this.Counters.Decrement(tr, COUNTER_PENDING_TASKS); } @@ -217,16 +217,16 @@ await db.ReadWriteAsync(async (tr) => if (randomWorkerKey.Key != null) { - Slice workerId = this.IdleRing.UnpackSingle(randomWorkerKey.Key); + Slice workerId = this.IdleRing.Tuples.DecodeKey(randomWorkerKey.Key); tr.Annotate("Assigning {0} to {1}", taskId.ToAsciiOrHexaString(), workerId.ToAsciiOrHexaString()); // remove worker from the idle ring - tr.Clear(this.IdleRing.Pack(workerId)); + tr.Clear(this.IdleRing.Tuples.EncodeKey(workerId)); this.Counters.Decrement(tr, COUNTER_IDLE); // assign task to the worker - tr.Set(this.BusyRing.Pack(workerId), taskId); + tr.Set(this.BusyRing.Tuples.EncodeKey(workerId), taskId); this.Counters.Increment(tr, 
COUNTER_BUSY); } else @@ -283,7 +283,7 @@ await db.ReadWriteAsync( else if (myId.IsPresent) { // look for an already assigned task tr.Annotate("Look for already assigned task"); - msg.Id = await tr.GetAsync(this.BusyRing.Pack(myId)).ConfigureAwait(false); + msg.Id = await tr.GetAsync(this.BusyRing.Tuples.EncodeKey(myId)).ConfigureAwait(false); } if (!msg.Id.IsPresent) @@ -305,7 +305,7 @@ await db.ReadWriteAsync( // note: we need a random id so generate one if it is the first time... if (!myId.IsPresent) myId = GetRandomId(); tr.Annotate("Found {0}, switch to busy with id {1}", msg.Id.ToAsciiOrHexaString(), myId.ToAsciiOrHexaString()); - tr.Set(this.BusyRing.Pack(myId), msg.Id); + tr.Set(this.BusyRing.Tuples.EncodeKey(myId), msg.Id); this.Counters.Increment(tr, COUNTER_BUSY); } else if (myId.IsPresent) @@ -319,11 +319,11 @@ await db.ReadWriteAsync( { // get the task body tr.Annotate("Fetching body for task {0}", msg.Id.ToAsciiOrHexaString()); - var prefix = this.TaskStore.Partition(msg.Id); + var prefix = this.TaskStore.Partition.By(msg.Id); //TODO: replace this with a get_range ? 
var data = await tr.GetValuesAsync(new [] { - prefix.Key, - prefix.Pack(TASK_META_SCHEDULED) + prefix.ToFoundationDbKey(), + prefix.Tuples.EncodeKey(TASK_META_SCHEDULED) }).ConfigureAwait(false); msg.Body = data[0]; @@ -336,7 +336,7 @@ await db.ReadWriteAsync( // remove us from the busy ring if (myId.IsPresent) { - tr.Clear(this.BusyRing.Pack(myId)); + tr.Clear(this.BusyRing.Tuples.EncodeKey(myId)); this.Counters.Decrement(tr, COUNTER_BUSY); } @@ -344,7 +344,7 @@ await db.ReadWriteAsync( myId = GetRandomId(); // the idle key will also be used as the watch key to wake us up - var watchKey = this.IdleRing.Pack(myId); + var watchKey = this.IdleRing.Tuples.EncodeKey(myId); tr.Annotate("Will start watching on key {0} with id {1}", watchKey.ToAsciiOrHexaString(), myId.ToAsciiOrHexaString()); tr.Set(watchKey, Slice.Empty); this.Counters.Increment(tr, COUNTER_IDLE); @@ -380,9 +380,11 @@ await db.ReadWriteAsync( previousTaskId = msg.Id; if (msg.Body.IsNull) - { // the task has been dropped? - // TODO: loggin? + { // the task has been dropped? + // TODO: loggin? +#if DEBUG Console.WriteLine("[####] Task[" + msg.Id.ToAsciiOrHexaString() + "] has vanished?"); +#endif } else { @@ -393,7 +395,9 @@ await db.ReadWriteAsync( catch (Exception e) { //TODO: logging? 
+#if DEBUG Console.Error.WriteLine("Task[" + msg.Id.ToAsciiOrHexaString() + "] failed: " + e.ToString()); +#endif } } } diff --git a/FoundationDB.Layers.Experimental/Messaging/WorkerPoolTest.cs b/FoundationDB.Layers.Experimental/Messaging/WorkerPoolTest.cs index e16a92bd2..877135106 100644 --- a/FoundationDB.Layers.Experimental/Messaging/WorkerPoolTest.cs +++ b/FoundationDB.Layers.Experimental/Messaging/WorkerPoolTest.cs @@ -142,7 +142,7 @@ await tr.Snapshot .GetRange(FdbKeyRange.StartsWith(location.Key)) .ForEachAsync((kvp) => { - Console.WriteLine(" - " + FdbTuple.Unpack(location.Extract(kvp.Key)) + " = " + kvp.Value.ToAsciiOrHexaString()); + Console.WriteLine(" - " + FdbTuple.Unpack(location.Keys.Extract(kvp.Key)) + " = " + kvp.Value.ToAsciiOrHexaString()); }).ConfigureAwait(false); } Console.WriteLine(""); diff --git a/FoundationDB.Samples/Benchmarks/BenchRunner.cs b/FoundationDB.Samples/Benchmarks/BenchRunner.cs index 4116f1cce..fd5546c08 100644 --- a/FoundationDB.Samples/Benchmarks/BenchRunner.cs +++ b/FoundationDB.Samples/Benchmarks/BenchRunner.cs @@ -86,7 +86,7 @@ public async Task Run(IFdbDatabase db, TextWriter log, CancellationToken ct) var duration = Stopwatch.StartNew(); - var foo = this.Subspace.Pack("foo"); + var foo = this.Subspace.Tuples.EncodeKey("foo"); var bar = Slice.FromString("bar"); var barf = Slice.FromString("barf"); @@ -117,7 +117,7 @@ public async Task Run(IFdbDatabase db, TextWriter log, CancellationToken ct) } else { - var foos = FdbTuple.PackRange(foo, Enumerable.Range(1, this.Value).ToArray()); + var foos = FdbTuple.PackRangeWithPrefix(foo, Enumerable.Range(1, this.Value).ToArray()); await db.ReadAsync(tr => tr.GetValuesAsync(foos), ct); } break; diff --git a/FoundationDB.Samples/Benchmarks/LeakTest.cs b/FoundationDB.Samples/Benchmarks/LeakTest.cs index d4e5977a1..1fdf25caf 100644 --- a/FoundationDB.Samples/Benchmarks/LeakTest.cs +++ b/FoundationDB.Samples/Benchmarks/LeakTest.cs @@ -27,7 +27,7 @@ public LeakTest(int k, int m, int n, 
TimeSpan delay) public int N { get; private set; } public TimeSpan Delay { get; private set; } - public FdbSubspace Subspace { get; private set; } + public IFdbSubspace Subspace { get; private set; } /// /// Setup the initial state of the database @@ -43,8 +43,8 @@ public async Task Init(IFdbDatabase db, CancellationToken ct) // insert all the classes await db.WriteAsync((tr) => { - tr.Set(this.Subspace.Concat(FdbKey.MinValue), Slice.FromString("BEGIN")); - tr.Set(this.Subspace.Concat(FdbKey.MaxValue), Slice.FromString("END")); + tr.Set(this.Subspace.Keys[FdbKey.MinValue], Slice.FromString("BEGIN")); + tr.Set(this.Subspace.Keys[FdbKey.MaxValue], Slice.FromString("END")); }, ct); } @@ -62,7 +62,7 @@ public async Task RunWorker(IFdbDatabase db, int id, CancellationToken ct) values[i] = "initial_value_" + rnd.Next(); } - var prefix = this.Subspace.Partition(student); + var location = this.Subspace.Partition.By(student); for (int i = 0; i < 1/*this.N*/ && !ct.IsCancellationRequested; i++) { @@ -81,7 +81,7 @@ await db.WriteAsync((tr) => if (tr.Context.Retries > 0) Console.Write("!"); for (int j = 0; j < values.Length; j++) { - tr.Set(prefix.Pack(j, now), Slice.FromString(values[j] + new string('A', 100))); + tr.Set(location.Tuples.EncodeKey(j, now), Slice.FromString(values[j] + new string('A', 100))); } }, ct); Console.Write("."); diff --git a/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs b/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs index 5a12f3af5..7526457fc 100644 --- a/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs +++ b/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs @@ -141,15 +141,15 @@ public async Task RunClear(IFdbDatabase db, CancellationToken ct) public async Task RunStatus(IFdbDatabase db, CancellationToken ct) { - var countersLocation = this.WorkerPool.Subspace.Partition(Slice.FromChar('C')); - var idleLocation = this.WorkerPool.Subspace.Partition(Slice.FromChar('I')); - var busyLocation = 
this.WorkerPool.Subspace.Partition(Slice.FromChar('B')); - var tasksLocation = this.WorkerPool.Subspace.Partition(Slice.FromChar('T')); - var unassignedLocation = this.WorkerPool.Subspace.Partition(Slice.FromChar('U')); + var countersLocation = this.WorkerPool.Subspace.Partition.By(Slice.FromChar('C')); + var idleLocation = this.WorkerPool.Subspace.Partition.By(Slice.FromChar('I')); + var busyLocation = this.WorkerPool.Subspace.Partition.By(Slice.FromChar('B')); + var tasksLocation = this.WorkerPool.Subspace.Partition.By(Slice.FromChar('T')); + var unassignedLocation = this.WorkerPool.Subspace.Partition.By(Slice.FromChar('U')); using(var tr = db.BeginTransaction(ct)) { - var counters = await tr.Snapshot.GetRange(countersLocation.ToRange()).Select(kvp => new KeyValuePair(countersLocation.UnpackLast(kvp.Key), kvp.Value.ToInt64())).ToListAsync().ConfigureAwait(false); + var counters = await tr.Snapshot.GetRange(countersLocation.ToRange()).Select(kvp => new KeyValuePair(countersLocation.Tuples.DecodeLast(kvp.Key), kvp.Value.ToInt64())).ToListAsync().ConfigureAwait(false); Console.WriteLine("Status at " + DateTimeOffset.Now.ToString("O")); foreach(var counter in counters) @@ -161,22 +161,22 @@ public async Task RunStatus(IFdbDatabase db, CancellationToken ct) Console.WriteLine("> Idle"); await tr.Snapshot.GetRange(idleLocation.ToRange()).ForEachAsync((kvp) => { - Console.WriteLine("- Idle." + idleLocation.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); + Console.WriteLine("- Idle." + idleLocation.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("> Busy"); await tr.Snapshot.GetRange(busyLocation.ToRange()).ForEachAsync((kvp) => { - Console.WriteLine("- Busy." + busyLocation.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); + Console.WriteLine("- Busy." 
+ busyLocation.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("> Unassigned"); await tr.Snapshot.GetRange(unassignedLocation.ToRange()).ForEachAsync((kvp) => { - Console.WriteLine("- Unassigned." + unassignedLocation.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); + Console.WriteLine("- Unassigned." + unassignedLocation.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("> Tasks"); await tr.Snapshot.GetRange(tasksLocation.ToRange()).ForEachAsync((kvp) => { - Console.WriteLine("- Tasks." + tasksLocation.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); + Console.WriteLine("- Tasks." + tasksLocation.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("<"); } diff --git a/FoundationDB.Samples/Tutorials/ClassScheduling.cs b/FoundationDB.Samples/Tutorials/ClassScheduling.cs index 3b8102fcd..0b08a7987 100644 --- a/FoundationDB.Samples/Tutorials/ClassScheduling.cs +++ b/FoundationDB.Samples/Tutorials/ClassScheduling.cs @@ -38,17 +38,17 @@ public ClassScheduling() protected Slice ClassKey(string c) { - return this.Subspace.Pack("class", c); + return this.Subspace.Tuples.EncodeKey("class", c); } protected Slice AttendsKey(string s, string c) { - return this.Subspace.Pack("attends", s, c); + return this.Subspace.Tuples.EncodeKey("attends", s, c); } protected FdbKeyRange AttendsKeys(string s) { - return this.Subspace.ToRange(FdbTuple.Create("attends", s)); + return this.Subspace.ToRange(FdbTuple.Pack("attends", s)); } /// @@ -78,9 +78,9 @@ await db.WriteAsync((tr) => /// public Task> AvailableClasses(IFdbReadOnlyTransaction tr) { - return tr.GetRange(this.Subspace.ToRange(FdbTuple.Create("class"))) + return tr.GetRange(this.Subspace.ToRange(FdbTuple.Pack("class"))) .Where(kvp => { int _; return Int32.TryParse(kvp.Value.ToAscii(), out _); }) // (step 3) - .Select(kvp => this.Subspace.UnpackSingle(kvp.Key)) + .Select(kvp => 
this.Subspace.Tuples.DecodeKey(kvp.Key)) .ToListAsync(); } diff --git a/FoundationDB.Storage.Memory.Test/FdbTest.cs b/FoundationDB.Storage.Memory.Test/FdbTest.cs index 97a11c058..48b38ddcc 100644 --- a/FoundationDB.Storage.Memory.Test/FdbTest.cs +++ b/FoundationDB.Storage.Memory.Test/FdbTest.cs @@ -81,6 +81,7 @@ protected void AfterAllTests() /// Cancellation token usable by any test protected CancellationToken Cancellation { + [DebuggerStepThrough] get { if (m_cts == null) SetupCancellation(); @@ -100,5 +101,41 @@ private void SetupCancellation() } } + #region Logging... + + // These methods are just there to help with the problem of culture-aware string formatting + + [DebuggerStepThrough] + protected static void Log() + { + Console.WriteLine(); + } + + [DebuggerStepThrough] + protected static void Log(string text) + { + Console.WriteLine(text); + } + + [DebuggerStepThrough] + protected static void Log(string format, object arg0) + { + Console.WriteLine(String.Format(CultureInfo.InvariantCulture, format, arg0)); + } + + [DebuggerStepThrough] + protected static void Log(string format, object arg0, object arg1) + { + Console.WriteLine(String.Format(CultureInfo.InvariantCulture, format, arg0, arg1)); + } + + [DebuggerStepThrough] + protected static void Log(string format, params object[] args) + { + Console.WriteLine(String.Format(CultureInfo.InvariantCulture, format, args)); + } + + #endregion + } } diff --git a/FoundationDB.Storage.Memory.Test/Transactions/Benchmarks.cs b/FoundationDB.Storage.Memory.Test/Transactions/Benchmarks.cs index b07f0272d..e0b8cdf37 100644 --- a/FoundationDB.Storage.Memory.Test/Transactions/Benchmarks.cs +++ b/FoundationDB.Storage.Memory.Test/Transactions/Benchmarks.cs @@ -24,7 +24,7 @@ public class Benchmarks : FdbTest private static void DumpResult(string label, long total, long trans, TimeSpan elapsed) { - Console.WriteLine( + Log( "{0,-12}: {1,10:N0} keys in {2,4:N3} sec => {3,9:N0} kps, {4,7:N0} tps", label, total, @@ -42,7 +42,7 @@ 
private static void DumpMemory(bool collect = false) GC.WaitForPendingFinalizers(); GC.Collect(); } - Console.WriteLine("Total memory: Managed={0:N1} KiB, WorkingSet={1:N1} KiB", GC.GetTotalMemory(false) / 1024.0, Environment.WorkingSet / 1024.0); + Log("Total memory: Managed={0:N1} KiB, WorkingSet={1:N1} KiB", GC.GetTotalMemory(false) / 1024.0, Environment.WorkingSet / 1024.0); } [Test] @@ -62,12 +62,12 @@ public async Task MiniBench() //WARMUP using (var db = MemoryDatabase.CreateNew("FOO")) { - await db.WriteAsync((tr) => tr.Set(db.Pack("hello"), Slice.FromString("world")), this.Cancellation); + await db.WriteAsync((tr) => tr.Set(db.Tuples.EncodeKey("hello"), Slice.FromString("world")), this.Cancellation); Slice.Random(rnd, KEYSIZE); Slice.Random(rnd, VALUESIZE); } - Console.WriteLine("Inserting " + KEYSIZE + "-bytes " + (RANDOM ? "random" : "ordered") + " keys / " + VALUESIZE + "-bytes values, in " + T.ToString("N0") + " transactions"); + Log("Inserting {0}-bytes {1} keys / {2}-bytes values, in {3:N0} transactions", KEYSIZE, RANDOM ? 
"random" : "ordered", VALUESIZE, T); bool random = RANDOM; string fmt = "D" + KEYSIZE; @@ -117,12 +117,12 @@ public async Task MiniBench() } sw.Stop(); - Console.WriteLine("done"); - Console.WriteLine("* Inserted: {0:N0} keys", total); - Console.WriteLine("* Elapsed : {0:N3} sec", sw.Elapsed.TotalSeconds); - Console.WriteLine("* TPS: {0:N0} transactions/sec", T / sw.Elapsed.TotalSeconds); - Console.WriteLine("* KPS: {0:N0} keys/sec", total / sw.Elapsed.TotalSeconds); - Console.WriteLine("* BPS: {0:N0} bytes/sec", (total * (KEYSIZE + VALUESIZE)) / sw.Elapsed.TotalSeconds); + Log("done"); + Log("* Inserted: {0:N0} keys", total); + Log("* Elapsed : {0:N3} sec", sw.Elapsed.TotalSeconds); + Log("* TPS: {0:N0} transactions/sec", T / sw.Elapsed.TotalSeconds); + Log("* KPS: {0:N0} keys/sec", total / sw.Elapsed.TotalSeconds); + Log("* BPS: {0:N0} bytes/sec", (total * (KEYSIZE + VALUESIZE)) / sw.Elapsed.TotalSeconds); DumpMemory(collect: true); @@ -131,16 +131,16 @@ public async Task MiniBench() DumpResult("WriteSeq" + B, total, total / B, sw.Elapsed); string path = @".\\minibench.pndb"; - Console.WriteLine("Saving {0} ...", path); + Log("Saving {0} ...", path); sw.Restart(); await db.SaveSnapshotAsync(path); sw.Stop(); - Console.WriteLine("* Saved {0:N0} bytes in {1:N3} sec", new System.IO.FileInfo(path).Length, sw.Elapsed.TotalSeconds); + Log("* Saved {0:N0} bytes in {1:N3} sec", new System.IO.FileInfo(path).Length, sw.Elapsed.TotalSeconds); - Console.WriteLine("Warming up reads..."); + Log("Warming up reads..."); var data = await db.GetValuesAsync(Enumerable.Range(0, 100).Select(i => Slice.FromString(i.ToString(fmt))), this.Cancellation); - Console.WriteLine("Starting read tests..."); + Log("Starting read tests..."); #region sequential reads @@ -263,7 +263,7 @@ await tr.Snapshot.GetRangeAsync( // } //} //sw.Stop(); - //Console.WriteLine("RndRead1 : " + total.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (total / 
sw.Elapsed.TotalSeconds).ToString("N0") + " kps"); + //Log("RndRead1 : " + total.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (total / sw.Elapsed.TotalSeconds).ToString("N0") + " kps"); sw.Restart(); for (int i = 0; i < total; i += 10) @@ -275,7 +275,7 @@ await tr.Snapshot.GetRangeAsync( } sw.Stop(); - //Console.WriteLine("RndRead10 : " + total.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (total / sw.Elapsed.TotalSeconds).ToString("N0") + " kps, " + (total / (10 * sw.Elapsed.TotalSeconds)).ToString("N0") + " tps"); + //Log("RndRead10 : " + total.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (total / sw.Elapsed.TotalSeconds).ToString("N0") + " kps, " + (total / (10 * sw.Elapsed.TotalSeconds)).ToString("N0") + " tps"); DumpResult("RndRead10", total, total / 10, sw.Elapsed); sw.Restart(); @@ -288,7 +288,7 @@ await tr.Snapshot.GetRangeAsync( } sw.Stop(); - //Console.WriteLine("RndRead10S : " + total.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (total / sw.Elapsed.TotalSeconds).ToString("N0") + " kps, " + (total / (10 * sw.Elapsed.TotalSeconds)).ToString("N0") + " tps"); + //Log("RndRead10S : " + total.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (total / sw.Elapsed.TotalSeconds).ToString("N0") + " kps, " + (total / (10 * sw.Elapsed.TotalSeconds)).ToString("N0") + " tps"); DumpResult("RndRead10S", total, total / 10, sw.Elapsed); sw.Restart(); @@ -306,7 +306,7 @@ await tr.GetRangeAsync( } sw.Stop(); - //Console.WriteLine("RndRead10R : " + total.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (total / sw.Elapsed.TotalSeconds).ToString("N0") + " kps, " + (total / (10 * sw.Elapsed.TotalSeconds)).ToString("N0") + " tps"); + //Log("RndRead10R : " + total.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (total 
/ sw.Elapsed.TotalSeconds).ToString("N0") + " kps, " + (total / (10 * sw.Elapsed.TotalSeconds)).ToString("N0") + " tps"); DumpResult("RndRead10R", total, total / 10, sw.Elapsed); sw.Restart(); @@ -319,7 +319,7 @@ await tr.GetRangeAsync( } sw.Stop(); - //Console.WriteLine("RndRead100 : " + total.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (total / sw.Elapsed.TotalSeconds).ToString("N0") + " kps, " + (total / (100 * sw.Elapsed.TotalSeconds)).ToString("N0") + " tps"); + //Log("RndRead100 : " + total.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (total / sw.Elapsed.TotalSeconds).ToString("N0") + " kps, " + (total / (100 * sw.Elapsed.TotalSeconds)).ToString("N0") + " tps"); DumpResult("RndRead100", total, total / 100, sw.Elapsed); sw.Restart(); @@ -332,7 +332,7 @@ await tr.GetRangeAsync( } sw.Stop(); - //Console.WriteLine("RndRead1k : " + total.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (total / sw.Elapsed.TotalSeconds).ToString("N0") + " kps, " + (total / (1000 * sw.Elapsed.TotalSeconds)).ToString("N0") + " tps"); + //Log("RndRead1k : " + total.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (total / sw.Elapsed.TotalSeconds).ToString("N0") + " kps, " + (total / (1000 * sw.Elapsed.TotalSeconds)).ToString("N0") + " tps"); DumpResult("RndRead1k", total, total / 1000, sw.Elapsed); #endregion @@ -374,7 +374,7 @@ await tr.GetRangeAsync( await Task.WhenAll(tasks); sw.Stop(); mre.Dispose(); - //Console.WriteLine("ParaSeqRead: " + read.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (read / sw.Elapsed.TotalSeconds).ToString("N0") + " kps"); + //Log("ParaSeqRead: " + read.ToString("N0") + " keys in " + sw.Elapsed.TotalSeconds.ToString("N3") + " sec => " + (read / sw.Elapsed.TotalSeconds).ToString("N0") + " kps"); DumpResult("ParaSeqRead", read, read / 100, sw.Elapsed); read = 0; diff 
--git a/FoundationDB.Storage.Memory.Test/Transactions/Comparisons.cs b/FoundationDB.Storage.Memory.Test/Transactions/Comparisons.cs index 69df5f7d1..573257d99 100644 --- a/FoundationDB.Storage.Memory.Test/Transactions/Comparisons.cs +++ b/FoundationDB.Storage.Memory.Test/Transactions/Comparisons.cs @@ -28,7 +28,7 @@ private Task Scenario2(IFdbTransaction tr) tr.ClearRange(FdbKeyRange.StartsWith(location.Key)); for (int i = 0; i < 10; i++) { - tr.Set(location.Pack(i), Slice.FromString("value of " + i)); + tr.Set(location.Tuples.EncodeKey(i), Slice.FromString("value of " + i)); } return Task.FromResult(null); } @@ -65,10 +65,10 @@ private async Task Scenario5(IFdbTransaction tr) //tr.Set(location.Pack(50), Slice.FromString("50")); //tr.Set(location.Pack(60), Slice.FromString("60")); - var x = await tr.GetKeyAsync(FdbKeySelector.LastLessThan(location.Pack(49))); + var x = await tr.GetKeyAsync(FdbKeySelector.LastLessThan(location.Tuples.EncodeKey(49))); Console.WriteLine(x); - tr.Set(location.Pack("FOO"), Slice.FromString("BAR")); + tr.Set(location.Tuples.EncodeKey("FOO"), Slice.FromString("BAR")); } @@ -76,9 +76,9 @@ private async Task Scenario6(IFdbTransaction tr) { var location = FdbSubspace.Create(Slice.FromAscii("TEST")); - tr.AtomicAdd(location.Pack("ATOMIC"), Slice.FromFixed32(0x55555555)); + tr.AtomicAdd(location.Tuples.EncodeKey("ATOMIC"), Slice.FromFixed32(0x55555555)); - var x = await tr.GetAsync(location.Pack("ATOMIC")); + var x = await tr.GetAsync(location.Tuples.EncodeKey("ATOMIC")); Console.WriteLine(x.ToInt32().ToString("x")); } diff --git a/FoundationDB.Storage.Memory.Test/Transactions/MemoryTransactionFacts.cs b/FoundationDB.Storage.Memory.Test/Transactions/MemoryTransactionFacts.cs index 737db3926..c15b8d8e6 100644 --- a/FoundationDB.Storage.Memory.Test/Transactions/MemoryTransactionFacts.cs +++ b/FoundationDB.Storage.Memory.Test/Transactions/MemoryTransactionFacts.cs @@ -26,7 +26,7 @@ public async Task Test_Hello_World() { using (var db = 
MemoryDatabase.CreateNew("DB", FdbSubspace.Empty, false)) { - var key = db.Pack("hello"); + var key = db.Tuples.EncodeKey("hello"); // v1 await db.WriteAsync((tr) => tr.Set(key, Slice.FromString("World!")), this.Cancellation); @@ -70,14 +70,15 @@ public async Task Test_GetKey() using (var db = MemoryDatabase.CreateNew("DB")) { + var location = db.Tuples; using (var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(db.Pack(0), Slice.FromString("first")); - tr.Set(db.Pack(10), Slice.FromString("ten")); - tr.Set(db.Pack(20), Slice.FromString("ten ten")); - tr.Set(db.Pack(42), Slice.FromString("narf!")); - tr.Set(db.Pack(100), Slice.FromString("a hundred missipis")); + tr.Set(location.EncodeKey(0), Slice.FromString("first")); + tr.Set(location.EncodeKey(10), Slice.FromString("ten")); + tr.Set(location.EncodeKey(20), Slice.FromString("ten ten")); + tr.Set(location.EncodeKey(42), Slice.FromString("narf!")); + tr.Set(location.EncodeKey(100), Slice.FromString("a hundred missipis")); await tr.CommitAsync(); } @@ -86,35 +87,35 @@ public async Task Test_GetKey() using (var tr = db.BeginTransaction(this.Cancellation)) { - value = await tr.GetAsync(db.Pack(42)); + value = await tr.GetAsync(location.EncodeKey(42)); Console.WriteLine(value); Assert.That(value.ToString(), Is.EqualTo("narf!")); - key = await tr.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(db.Pack(42))); - Assert.That(key, Is.EqualTo(db.Pack(42))); + key = await tr.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(42))); + Assert.That(key, Is.EqualTo(location.EncodeKey(42))); - key = await tr.GetKeyAsync(FdbKeySelector.FirstGreaterThan(db.Pack(42))); - Assert.That(key, Is.EqualTo(db.Pack(100))); + key = await tr.GetKeyAsync(FdbKeySelector.FirstGreaterThan(location.EncodeKey(42))); + Assert.That(key, Is.EqualTo(location.EncodeKey(100))); - key = await tr.GetKeyAsync(FdbKeySelector.LastLessOrEqual(db.Pack(42))); - Assert.That(key, Is.EqualTo(db.Pack(42))); + key = await 
tr.GetKeyAsync(FdbKeySelector.LastLessOrEqual(location.EncodeKey(42))); + Assert.That(key, Is.EqualTo(location.EncodeKey(42))); - key = await tr.GetKeyAsync(FdbKeySelector.LastLessThan(db.Pack(42))); - Assert.That(key, Is.EqualTo(db.Pack(20))); + key = await tr.GetKeyAsync(FdbKeySelector.LastLessThan(location.EncodeKey(42))); + Assert.That(key, Is.EqualTo(location.EncodeKey(20))); var keys = await tr.GetKeysAsync(new[] { - FdbKeySelector.FirstGreaterOrEqual(db.Pack(42)), - FdbKeySelector.FirstGreaterThan(db.Pack(42)), - FdbKeySelector.LastLessOrEqual(db.Pack(42)), - FdbKeySelector.LastLessThan(db.Pack(42)) + FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(42)), + FdbKeySelector.FirstGreaterThan(location.EncodeKey(42)), + FdbKeySelector.LastLessOrEqual(location.EncodeKey(42)), + FdbKeySelector.LastLessThan(location.EncodeKey(42)) }); Assert.That(keys.Length, Is.EqualTo(4)); - Assert.That(keys[0], Is.EqualTo(db.Pack(42))); - Assert.That(keys[1], Is.EqualTo(db.Pack(100))); - Assert.That(keys[2], Is.EqualTo(db.Pack(42))); - Assert.That(keys[3], Is.EqualTo(db.Pack(20))); + Assert.That(keys[0], Is.EqualTo(location.EncodeKey(42))); + Assert.That(keys[1], Is.EqualTo(location.EncodeKey(100))); + Assert.That(keys[2], Is.EqualTo(location.EncodeKey(42))); + Assert.That(keys[3], Is.EqualTo(location.EncodeKey(20))); await tr.CommitAsync(); } @@ -130,11 +131,13 @@ public async Task Test_GetKey_ReadConflicts() using (var db = MemoryDatabase.CreateNew("FOO")) { - using(var tr = db.BeginTransaction(this.Cancellation)) + var location = db.Tuples; + + using (var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(db.Pack(42), Slice.FromString("42")); - tr.Set(db.Pack(50), Slice.FromString("50")); - tr.Set(db.Pack(60), Slice.FromString("60")); + tr.Set(location.EncodeKey(42), Slice.FromString("42")); + tr.Set(location.EncodeKey(50), Slice.FromString("50")); + tr.Set(location.EncodeKey(60), Slice.FromString("60")); await tr.CommitAsync(); } db.Debug_Dump(); @@ -150,39 +153,39 
@@ public async Task Test_GetKey_ReadConflicts() }; await check( - FdbKeySelector.FirstGreaterOrEqual(db.Pack(50)), - db.Pack(50) + FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(50)), + location.EncodeKey(50) ); await check( - FdbKeySelector.FirstGreaterThan(db.Pack(50)), - db.Pack(60) + FdbKeySelector.FirstGreaterThan(location.EncodeKey(50)), + location.EncodeKey(60) ); await check( - FdbKeySelector.FirstGreaterOrEqual(db.Pack(49)), - db.Pack(50) + FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(49)), + location.EncodeKey(50) ); await check( - FdbKeySelector.FirstGreaterThan(db.Pack(49)), - db.Pack(50) + FdbKeySelector.FirstGreaterThan(location.EncodeKey(49)), + location.EncodeKey(50) ); await check( - FdbKeySelector.FirstGreaterOrEqual(db.Pack(49)) + 1, - db.Pack(60) + FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(49)) + 1, + location.EncodeKey(60) ); await check( - FdbKeySelector.FirstGreaterThan(db.Pack(49)) + 1, - db.Pack(60) + FdbKeySelector.FirstGreaterThan(location.EncodeKey(49)) + 1, + location.EncodeKey(60) ); await check( - FdbKeySelector.LastLessOrEqual(db.Pack(49)), - db.Pack(42) + FdbKeySelector.LastLessOrEqual(location.EncodeKey(49)), + location.EncodeKey(42) ); await check( - FdbKeySelector.LastLessThan(db.Pack(49)), - db.Pack(42) + FdbKeySelector.LastLessThan(location.EncodeKey(49)), + location.EncodeKey(42) ); } } @@ -194,12 +197,13 @@ public async Task Test_GetRangeAsync() using (var db = MemoryDatabase.CreateNew("DB")) { + var location = db.Tuples; using (var tr = db.BeginTransaction(this.Cancellation)) { for (int i = 0; i <= 100; i++) { - tr.Set(db.Pack(i), Slice.FromString("value of " + i)); + tr.Set(location.EncodeKey(i), Slice.FromString("value of " + i)); } await tr.CommitAsync(); } @@ -214,15 +218,15 @@ public async Task Test_GetRangeAsync() key = await tr.GetKeyAsync(FdbKeySelector.LastLessOrEqual(FdbKey.MaxValue)); if (key != FdbKey.MaxValue) Assert.Inconclusive("Key selectors are buggy: lLE(max)"); key = await 
tr.GetKeyAsync(FdbKeySelector.LastLessThan(FdbKey.MaxValue)); - if (key != db.Pack(100)) Assert.Inconclusive("Key selectors are buggy: lLT(max)"); + if (key != location.EncodeKey(100)) Assert.Inconclusive("Key selectors are buggy: lLT(max)"); } using (var tr = db.BeginTransaction(this.Cancellation)) { var chunk = await tr.GetRangeAsync( - FdbKeySelector.FirstGreaterOrEqual(db.Pack(0)), - FdbKeySelector.FirstGreaterOrEqual(db.Pack(50)) + FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(0)), + FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(50)) ); #if DEBUG for (int i = 0; i < chunk.Count; i++) @@ -238,7 +242,7 @@ public async Task Test_GetRangeAsync() for (int i = 0; i < 50; i++) { - Assert.That(chunk.Chunk[i].Key, Is.EqualTo(db.Pack(i)), "[{0}].Key", i); + Assert.That(chunk.Chunk[i].Key, Is.EqualTo(location.EncodeKey(i)), "[{0}].Key", i); Assert.That(chunk.Chunk[i].Value.ToString(), Is.EqualTo("value of " + i), "[{0}].Value", i); } @@ -249,8 +253,8 @@ public async Task Test_GetRangeAsync() { var chunk = await tr.GetRangeAsync( - FdbKeySelector.FirstGreaterOrEqual(db.Pack(0)), - FdbKeySelector.FirstGreaterOrEqual(db.Pack(50)), + FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(0)), + FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(50)), new FdbRangeOptions { Reverse = true } ); #if DEBUG @@ -267,7 +271,7 @@ public async Task Test_GetRangeAsync() for (int i = 0; i < 50; i++) { - Assert.That(chunk.Chunk[i].Key, Is.EqualTo(db.Pack(49 - i)), "[{0}].Key", i); + Assert.That(chunk.Chunk[i].Key, Is.EqualTo(location.EncodeKey(49 - i)), "[{0}].Key", i); Assert.That(chunk.Chunk[i].Value.ToString(), Is.EqualTo("value of " + (49 - i)), "[{0}].Value", i); } @@ -278,7 +282,7 @@ public async Task Test_GetRangeAsync() { var chunk = await tr.GetRangeAsync( - FdbKeySelector.FirstGreaterOrEqual(db.Pack(0)), + FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(0)), FdbKeySelector.FirstGreaterOrEqual(FdbKey.MaxValue), new FdbRangeOptions { Reverse = true, Limit 
= 1 } ); @@ -307,13 +311,13 @@ public async Task Test_GetRange() using (var db = MemoryDatabase.CreateNew("DB")) { - db.Debug_Dump(); + var location = db.Tuples; using (var tr = db.BeginTransaction(this.Cancellation)) { for (int i = 0; i <= 100; i++) { - tr.Set(db.Pack(i), Slice.FromString("value of " + i)); + tr.Set(location.EncodeKey(i), Slice.FromString("value of " + i)); } await tr.CommitAsync(); } @@ -324,7 +328,7 @@ public async Task Test_GetRange() { var results = await tr - .GetRange(db.Pack(0), db.Pack(50)) + .GetRange(location.EncodeKey(0), location.EncodeKey(50)) .ToListAsync(); Assert.That(results, Is.Not.Null); @@ -338,7 +342,7 @@ public async Task Test_GetRange() Assert.That(results.Count, Is.EqualTo(50)); for (int i = 0; i < 50; i++) { - Assert.That(results[i].Key, Is.EqualTo(db.Pack(i)), "[{0}].Key", i); + Assert.That(results[i].Key, Is.EqualTo(location.EncodeKey(i)), "[{0}].Key", i); Assert.That(results[i].Value.ToString(), Is.EqualTo("value of " + i), "[{0}].Value", i); } @@ -349,7 +353,7 @@ public async Task Test_GetRange() { var results = await tr - .GetRange(db.Pack(0), db.Pack(50), new FdbRangeOptions { Reverse = true }) + .GetRange(location.EncodeKey(0), location.EncodeKey(50), new FdbRangeOptions { Reverse = true }) .ToListAsync(); Assert.That(results, Is.Not.Null); #if DEBUG @@ -362,7 +366,7 @@ public async Task Test_GetRange() Assert.That(results.Count, Is.EqualTo(50)); for (int i = 0; i < 50; i++) { - Assert.That(results[i].Key, Is.EqualTo(db.Pack(49 - i)), "[{0}].Key", i); + Assert.That(results[i].Key, Is.EqualTo(location.EncodeKey(49 - i)), "[{0}].Key", i); Assert.That(results[i].Value.ToString(), Is.EqualTo("value of " + (49 - i)), "[{0}].Value", i); } @@ -372,13 +376,13 @@ public async Task Test_GetRange() using (var tr = db.BeginTransaction(this.Cancellation)) { var result = await tr - .GetRange(db.Pack(0), FdbKey.MaxValue, new FdbRangeOptions { Reverse = true }) + .GetRange(location.EncodeKey(0), FdbKey.MaxValue, new FdbRangeOptions 
{ Reverse = true }) .FirstOrDefaultAsync(); #if DEBUG Console.WriteLine(result.Key + " = " + result.Value); #endif - Assert.That(result.Key, Is.EqualTo(db.Pack(100))); + Assert.That(result.Key, Is.EqualTo(location.EncodeKey(100))); Assert.That(result.Value.ToString(), Is.EqualTo("value of 100")); await tr.CommitAsync(); @@ -395,12 +399,14 @@ public async Task Test_CommittedVersion_On_ReadOnly_Transactions() using (var db = MemoryDatabase.CreateNew("DB")) { + var location = db.Tuples; + using (var tr = db.BeginTransaction(this.Cancellation)) { long ver = tr.GetCommittedVersion(); Assert.That(ver, Is.EqualTo(-1), "Initial committed version"); - var _ = await tr.GetAsync(db.Pack("foo")); + var _ = await tr.GetAsync(location.EncodeKey("foo")); // until the transction commits, the committed version will stay -1 ver = tr.GetCommittedVersion(); @@ -425,6 +431,8 @@ public async Task Test_CommittedVersion_On_Write_Transactions() using (var db = MemoryDatabase.CreateNew("DB")) { + var location = db.Tuples; + using (var tr = db.BeginTransaction(this.Cancellation)) { // take the read version (to compare with the committed version below) @@ -433,7 +441,7 @@ public async Task Test_CommittedVersion_On_Write_Transactions() long ver = tr.GetCommittedVersion(); Assert.That(ver, Is.EqualTo(-1), "Initial committed version"); - tr.Set(db.Pack("foo"), Slice.FromString("bar")); + tr.Set(location.EncodeKey("foo"), Slice.FromString("bar")); // until the transction commits, the committed version should still be -1 ver = tr.GetCommittedVersion(); @@ -458,12 +466,14 @@ public async Task Test_CommittedVersion_After_Reset() using (var db = MemoryDatabase.CreateNew("DB")) { + var location = db.Tuples; + using (var tr = db.BeginTransaction(this.Cancellation)) { // take the read version (to compare with the committed version below) long rv1 = await tr.GetReadVersionAsync(); // do something and commit - tr.Set(db.Pack("foo"), Slice.FromString("bar")); + tr.Set(location.EncodeKey("foo"), 
Slice.FromString("bar")); await tr.CommitAsync(); long cv1 = tr.GetCommittedVersion(); Console.WriteLine("COMMIT: " + rv1 + " / " + cv1); @@ -479,7 +489,7 @@ public async Task Test_CommittedVersion_After_Reset() //Assert.That(cv2, Is.EqualTo(-1), "Committed version should go back to -1 after reset"); // read-only + commit - await tr.GetAsync(db.Pack("foo")); + await tr.GetAsync(location.EncodeKey("foo")); await tr.CommitAsync(); cv2 = tr.GetCommittedVersion(); Console.WriteLine("COMMIT2: " + rv2 + " / " + cv2); @@ -496,17 +506,18 @@ public async Task Test_Conflicts() // this SHOULD NOT conflict using (var db = MemoryDatabase.CreateNew("DB")) { + var location = db.Tuples; using (var tr1 = db.BeginTransaction(this.Cancellation)) { using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(db.Pack("foo"), Slice.FromString("changed")); + tr2.Set(location.EncodeKey("foo"), Slice.FromString("changed")); await tr2.CommitAsync(); } - var x = await tr1.GetAsync(db.Pack("foo")); - tr1.Set(db.Pack("bar"), Slice.FromString("other")); + var x = await tr1.GetAsync(location.EncodeKey("foo")); + tr1.Set(location.EncodeKey("bar"), Slice.FromString("other")); await tr1.CommitAsync(); } @@ -516,18 +527,19 @@ public async Task Test_Conflicts() // this SHOULD conflict using (var db = MemoryDatabase.CreateNew("DB")) { + var location = db.Tuples; using (var tr1 = db.BeginTransaction(this.Cancellation)) { - var x = await tr1.GetAsync(db.Pack("foo")); + var x = await tr1.GetAsync(location.EncodeKey("foo")); using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(db.Pack("foo"), Slice.FromString("changed")); + tr2.Set(location.EncodeKey("foo"), Slice.FromString("changed")); await tr2.CommitAsync(); } - tr1.Set(db.Pack("bar"), Slice.FromString("other")); + tr1.Set(location.EncodeKey("bar"), Slice.FromString("other")); Assert.That(async () => await tr1.CommitAsync(), Throws.InstanceOf().With.Property("Code").EqualTo(FdbError.NotCommitted)); } @@ -537,6 +549,7 @@ public 
async Task Test_Conflicts() // this SHOULD conflict using (var db = MemoryDatabase.CreateNew("DB")) { + var location = db.Tuples; using (var tr1 = db.BeginTransaction(this.Cancellation)) { @@ -544,12 +557,12 @@ public async Task Test_Conflicts() using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(db.Pack("foo"), Slice.FromString("changed")); + tr2.Set(location.EncodeKey("foo"), Slice.FromString("changed")); await tr2.CommitAsync(); } - var x = await tr1.GetAsync(db.Pack("foo")); - tr1.Set(db.Pack("bar"), Slice.FromString("other")); + var x = await tr1.GetAsync(location.EncodeKey("foo")); + tr1.Set(location.EncodeKey("bar"), Slice.FromString("other")); Assert.That(async () => await tr1.CommitAsync(), Throws.InstanceOf().With.Property("Code").EqualTo(FdbError.NotCommitted)); } @@ -559,18 +572,19 @@ public async Task Test_Conflicts() // this SHOULD NOT conflict using (var db = MemoryDatabase.CreateNew("DB")) { + var location = db.Tuples; using (var tr1 = db.BeginTransaction(this.Cancellation)) { - var x = await tr1.Snapshot.GetAsync(db.Pack("foo")); + var x = await tr1.Snapshot.GetAsync(location.EncodeKey("foo")); using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(db.Pack("foo"), Slice.FromString("changed")); + tr2.Set(location.EncodeKey("foo"), Slice.FromString("changed")); await tr2.CommitAsync(); } - tr1.Set(db.Pack("bar"), Slice.FromString("other")); + tr1.Set(location.EncodeKey("bar"), Slice.FromString("other")); await tr1.CommitAsync(); } @@ -583,6 +597,8 @@ public async Task Test_Write_Then_Read() { using (var db = MemoryDatabase.CreateNew("FOO")) { + var location = db.Tuples; + using (var tr = db.BeginTransaction(this.Cancellation)) { tr.Set(Slice.FromString("hello"), Slice.FromString("World!")); @@ -649,9 +665,11 @@ public async Task Test_Atomic() { using (var db = MemoryDatabase.CreateNew("DB")) { - var key1 = db.Pack(1); - var key2 = db.Pack(2); - var key16 = db.Pack(16); + var location = db.Tuples; + + var key1 = 
location.EncodeKey(1); + var key2 = location.EncodeKey(2); + var key16 = location.EncodeKey(16); for (int i = 0; i < 10; i++) { @@ -679,9 +697,10 @@ public async Task Test_Use_Simple_Layer() { using (var db = MemoryDatabase.CreateNew("FOO")) { + var location = db.GlobalSpace; - var map = new FdbMap("Foos", db.GlobalSpace.Partition("Foos"), KeyValueEncoders.Values.StringEncoder); - var index = new FdbIndex("Foos.ByColor", db.GlobalSpace.Partition("Foos", "Color")); + var map = new FdbMap("Foos", db.GlobalSpace.Partition.By("Foos"), KeyValueEncoders.Values.StringEncoder); + var index = new FdbIndex("Foos.ByColor", db.GlobalSpace.Partition.By("Foos", "Color")); using (var tr = db.BeginTransaction(this.Cancellation)) { @@ -710,6 +729,7 @@ public async Task Test_Use_Directory_Layer() { using (var db = MemoryDatabase.CreateNew("DB")) { + var location = db.GlobalSpace; var foos = await db.Directory.CreateOrOpenAsync("Foos", this.Cancellation); var bars = await db.Directory.CreateOrOpenAsync("Bars", this.Cancellation); @@ -819,17 +839,19 @@ public async Task Test_Can_BulkLoad_Data_Ordered() Console.WriteLine("Warmup..."); using (var db = MemoryDatabase.CreateNew("WARMUP")) { - await db.BulkLoadAsync(Enumerable.Range(0, 100).Select(i => new KeyValuePair(db.Pack(i), Slice.FromFixed32(i))).ToList(), ordered: true); + await db.BulkLoadAsync(Enumerable.Range(0, 100).Select(i => new KeyValuePair(db.Tuples.EncodeKey(i), Slice.FromFixed32(i))).ToList(), ordered: true); } using(var db = MemoryDatabase.CreateNew("FOO")) { + var location = db.Tuples; + Console.WriteLine("Generating " + N.ToString("N0") + " keys..."); var data = new KeyValuePair[N]; for (int i = 0; i < N; i++) { data[i] = new KeyValuePair( - db.Pack(i), + location.EncodeKey(i), Slice.FromFixed32(i) ); } @@ -848,7 +870,7 @@ public async Task Test_Can_BulkLoad_Data_Ordered() int x = rnd.Next(N); using (var tx = db.BeginReadOnlyTransaction(this.Cancellation)) { - var res = await 
tx.GetAsync(db.Pack(x)).ConfigureAwait(false); + var res = await tx.GetAsync(location.EncodeKey(x)).ConfigureAwait(false); Assert.That(res.ToInt32(), Is.EqualTo(x)); } } @@ -866,18 +888,20 @@ public async Task Test_Can_BulkLoad_Data_Sequential_Unordered() Console.WriteLine("Warmup..."); using(var db = MemoryDatabase.CreateNew("WARMUP")) { - await db.BulkLoadAsync(Enumerable.Range(0, 100).Select(i => new KeyValuePair(db.Pack(i), Slice.FromFixed32(i))).ToList(), ordered: false); + await db.BulkLoadAsync(Enumerable.Range(0, 100).Select(i => new KeyValuePair(db.Tuples.EncodeKey(i), Slice.FromFixed32(i))).ToList(), ordered: false); } using (var db = MemoryDatabase.CreateNew("FOO")) { + var location = db.Tuples; + Console.WriteLine("Generating " + N.ToString("N0") + " keys..."); var data = new KeyValuePair[N]; var rnd = new Random(); for (int i = 0; i < N; i++) { data[i] = new KeyValuePair( - db.Pack(i), + location.EncodeKey(i), Slice.FromFixed32(i) ); } @@ -895,7 +919,7 @@ public async Task Test_Can_BulkLoad_Data_Sequential_Unordered() int x = rnd.Next(N); using (var tx = db.BeginReadOnlyTransaction(this.Cancellation)) { - var res = await tx.GetAsync(db.Pack(x)).ConfigureAwait(false); + var res = await tx.GetAsync(location.EncodeKey(x)).ConfigureAwait(false); Assert.That(res.ToInt32(), Is.EqualTo(x)); } } @@ -913,11 +937,13 @@ public async Task Test_Can_BulkLoad_Data_Random_Unordered() Console.WriteLine("Warmup..."); using (var db = MemoryDatabase.CreateNew("WARMUP")) { - await db.BulkLoadAsync(Enumerable.Range(0, 100).Select(i => new KeyValuePair(db.Pack(i), Slice.FromFixed32(i))).ToList(), ordered: false); + await db.BulkLoadAsync(Enumerable.Range(0, 100).Select(i => new KeyValuePair(db.Tuples.EncodeKey(i), Slice.FromFixed32(i))).ToList(), ordered: false); } using (var db = MemoryDatabase.CreateNew("FOO")) { + var location = db.Tuples; + Console.WriteLine("Generating " + N.ToString("N0") + " keys..."); var data = new KeyValuePair[N]; var ints = new int[N]; @@ -925,7 
+951,7 @@ public async Task Test_Can_BulkLoad_Data_Random_Unordered() for (int i = 0; i < N; i++) { data[i] = new KeyValuePair( - db.Pack(i), + location.EncodeKey(i), Slice.FromFixed32(i) ); ints[i] = rnd.Next(int.MaxValue); @@ -947,7 +973,7 @@ public async Task Test_Can_BulkLoad_Data_Random_Unordered() int x = rnd.Next(N); using (var tx = db.BeginReadOnlyTransaction(this.Cancellation)) { - var res = await tx.GetAsync(db.Pack(x)).ConfigureAwait(false); + var res = await tx.GetAsync(location.EncodeKey(x)).ConfigureAwait(false); Assert.That(res.ToInt32(), Is.EqualTo(x)); } } diff --git a/FoundationDB.Tests.Sandbox/Program.cs b/FoundationDB.Tests.Sandbox/Program.cs index 9f01a5181..35c0da673 100644 --- a/FoundationDB.Tests.Sandbox/Program.cs +++ b/FoundationDB.Tests.Sandbox/Program.cs @@ -180,7 +180,7 @@ private static async Task MainAsync(CancellationToken ct) Console.WriteLine("> Connected!"); Console.WriteLine("Opening database 'DB'..."); - using (var db = await cluster.OpenDatabaseAsync(DB_NAME, new FdbSubspace(FdbTuple.Create(SUBSPACE)), false, ct)) + using (var db = await cluster.OpenDatabaseAsync(DB_NAME, FdbSubspace.Create(FdbTuple.Create(SUBSPACE)), false, ct)) { Console.WriteLine("> Connected to db '{0}'", db.Name); @@ -289,19 +289,19 @@ private static async Task TestSimpleTransactionAsync(IFdbDatabase db, Cancellati Console.WriteLine("> Read Version = " + readVersion); Console.WriteLine("Getting 'hello'..."); - var result = await trans.GetAsync(location.Pack("hello")); + var result = await trans.GetAsync(location.Tuples.EncodeKey("hello")); if (result.IsNull) Console.WriteLine("> hello NOT FOUND"); else Console.WriteLine("> hello = " + result.ToString()); Console.WriteLine("Setting 'Foo' = 'Bar'"); - trans.Set(location.Pack("Foo"), Slice.FromString("Bar")); + trans.Set(location.Tuples.EncodeKey("Foo"), Slice.FromString("Bar")); Console.WriteLine("Setting 'TopSecret' = rnd(512)"); var data = new byte[512]; new Random(1234).NextBytes(data); - 
trans.Set(location.Pack("TopSecret"), Slice.Create(data)); + trans.Set(location.Tuples.EncodeKey("TopSecret"), Slice.Create(data)); Console.WriteLine("Committing transaction..."); await trans.CommitAsync(); @@ -320,7 +320,7 @@ private static async Task BenchInsertSmallKeysAsync(IFdbDatabase db, int N, int var rnd = new Random(); var tmp = new byte[size]; - var subspace = db.Partition("Batch"); + var subspace = db.Partition.By("Batch"); var times = new List(); for (int k = 0; k <= 4; k++) @@ -335,7 +335,7 @@ private static async Task BenchInsertSmallKeysAsync(IFdbDatabase db, int N, int tmp[1] = (byte)(i >> 8); // (Batch, 1) = [......] // (Batch, 2) = [......] - trans.Set(subspace.Pack(k * N + i), Slice.Create(tmp)); + trans.Set(subspace.Tuples.EncodeKey(k * N + i), Slice.Create(tmp)); } await trans.CommitAsync(); } @@ -361,7 +361,7 @@ private static async Task BenchConcurrentInsert(IFdbDatabase db, int k, int N, i Console.WriteLine("Inserting " + N + " keys in " + k + " batches of " + n + " with " + size + "-bytes values..."); // store every key under ("Batch", i) - var subspace = db.Partition("Batch"); + var subspace = db.Partition.By("Batch"); // total estimated size of all transactions long totalPayloadSize = 0; @@ -395,7 +395,7 @@ private static async Task BenchConcurrentInsert(IFdbDatabase db, int k, int N, i tmp[1] = (byte)(i >> 8); // ("Batch", batch_index, i) = [..random..] 
- trans.Set(subspace.Pack(i), Slice.Create(tmp)); + trans.Set(subspace.Tuples.EncodeKey(i), Slice.Create(tmp)); } x.Stop(); Console.WriteLine("> [" + offset + "] packaged " + n + " keys (" + trans.Size.ToString("N0", CultureInfo.InvariantCulture) + " bytes) in " + FormatTimeMilli(x.Elapsed.TotalMilliseconds)); @@ -429,7 +429,7 @@ private static async Task BenchSerialWriteAsync(IFdbDatabase db, int N, Cancella { // read a lot of small keys, one by one - var location = db.Partition("hello"); + var location = db.Partition.By("hello"); var sw = Stopwatch.StartNew(); IFdbTransaction trans = null; @@ -438,7 +438,7 @@ private static async Task BenchSerialWriteAsync(IFdbDatabase db, int N, Cancella for (int i = 0; i < N; i++) { if (trans == null) trans = db.BeginTransaction(ct); - trans.Set(location.Pack(i), Slice.FromInt32(i)); + trans.Set(location.Tuples.EncodeKey(i), Slice.FromInt32(i)); if (trans.Size > 100 * 1024) { await trans.CommitAsync(); @@ -464,7 +464,7 @@ private static async Task BenchSerialReadAsync(IFdbDatabase db, int N, Cancellat // read a lot of small keys, one by one - var location = db.Partition("hello"); + var location = db.Partition.By("hello"); var sw = Stopwatch.StartNew(); for (int k = 0; k < N; k += 1000) @@ -473,7 +473,7 @@ private static async Task BenchSerialReadAsync(IFdbDatabase db, int N, Cancellat { for (int i = k; i < N && i < k + 1000; i++) { - var result = await trans.GetAsync(location.Pack(i)); + var result = await trans.GetAsync(location.Tuples.EncodeKey(i)); } } Console.Write("."); @@ -489,9 +489,9 @@ private static async Task BenchConcurrentReadAsync(IFdbDatabase db, int N, Cance Console.WriteLine("Reading " + N + " keys (concurrent)"); - var location = db.Partition("hello"); + var location = db.Partition.By("hello"); - var keys = Enumerable.Range(0, N).Select(i => location.Pack(i)).ToArray(); + var keys = Enumerable.Range(0, N).Select(i => location.Tuples.EncodeKey(i)).ToArray(); var sw = Stopwatch.StartNew(); using (var trans = 
db.BeginTransaction(ct)) @@ -517,14 +517,14 @@ private static async Task BenchClearAsync(IFdbDatabase db, int N, CancellationTo { // clear a lot of small keys, in a single transaction - var location = db.Partition(Slice.FromAscii("hello")); + var location = db.Partition.By(Slice.FromAscii("hello")); var sw = Stopwatch.StartNew(); using (var trans = db.BeginTransaction(ct)) { for (int i = 0; i < N; i++) { - trans.Clear(location.Pack(i)); + trans.Clear(location.Tuples.EncodeKey(i)); } await trans.CommitAsync(); @@ -541,7 +541,7 @@ private static async Task BenchUpdateSameKeyLotsOfTimesAsync(IFdbDatabase db, in var list = new byte[N]; var update = Stopwatch.StartNew(); - var key = db.GlobalSpace.Pack("list"); + var key = db.GlobalSpace.Tuples.EncodeKey("list"); for (int i = 0; i < N; i++) { list[i] = (byte)i; @@ -561,10 +561,10 @@ private static async Task BenchUpdateLotsOfKeysAsync(IFdbDatabase db, int N, Can { // change one byte in a large number of keys - var location = db.Partition("lists"); + var location = db.Partition.By("lists"); var rnd = new Random(); - var keys = Enumerable.Range(0, N).Select(x => location.Pack(x)).ToArray(); + var keys = Enumerable.Range(0, N).Select(x => location.Tuples.EncodeKey(x)).ToArray(); Console.WriteLine("> creating " + N + " half filled keys"); var segment = new byte[60]; @@ -616,7 +616,7 @@ private static async Task BenchBulkInsertThenBulkReadAsync(IFdbDatabase db, int var timings = instrumented ? 
new List>() : null; // put test values inside a namespace - var subspace = db.Partition("BulkInsert"); + var subspace = db.Partition.By("BulkInsert"); // cleanup everything using (var tr = db.BeginTransaction(ct)) @@ -646,7 +646,7 @@ private static async Task BenchBulkInsertThenBulkReadAsync(IFdbDatabase db, int int z = 0; foreach (int i in Enumerable.Range(chunk.Key, chunk.Value)) { - tr.Set(subspace.Pack(i), Slice.Create(new byte[256])); + tr.Set(subspace.Tuples.EncodeKey(i), Slice.Create(new byte[256])); z++; } @@ -702,7 +702,7 @@ private static async Task BenchBulkInsertThenBulkReadAsync(IFdbDatabase db, int private static async Task BenchMergeSortAsync(IFdbDatabase db, int N, int K, int B, CancellationToken ct) { // create multiple lists - var location = db.Partition("MergeSort"); + var location = db.Partition.By("MergeSort"); await db.ClearRangeAsync(location, ct); var sources = Enumerable.Range(0, K).Select(i => 'A' + i).ToArray(); @@ -714,10 +714,10 @@ private static async Task BenchMergeSortAsync(IFdbDatabase db, int N, int K, int { using (var tr = db.BeginTransaction(ct)) { - var list = location.Partition(source); + var list = location.Partition.By(source); for (int i = 0; i < N; i++) { - tr.Set(list.Pack(rnd.Next()), Slice.FromInt32(i)); + tr.Set(list.Tuples.EncodeKey(rnd.Next()), Slice.FromInt32(i)); } await tr.CommitAsync(); } @@ -730,11 +730,11 @@ private static async Task BenchMergeSortAsync(IFdbDatabase db, int N, int K, int { var mergesort = tr .MergeSort( - sources.Select(source => FdbKeySelectorPair.StartsWith(location.Pack(source))), - (kvp) => location.UnpackLast(kvp.Key) + sources.Select(source => FdbKeySelectorPair.StartsWith(location.Tuples.EncodeKey(source))), + (kvp) => location.Tuples.DecodeLast(kvp.Key) ) .Take(B) - .Select(kvp => location.Unpack(kvp.Key)); + .Select(kvp => location.Tuples.Unpack(kvp.Key)); Console.Write("> MergeSort with limit " + B + "... 
"); var sw = Stopwatch.StartNew(); diff --git a/FoundationDB.Tests/DatabaseBulkFacts.cs b/FoundationDB.Tests/DatabaseBulkFacts.cs index 069df3b2b..b43fc28e6 100644 --- a/FoundationDB.Tests/DatabaseBulkFacts.cs +++ b/FoundationDB.Tests/DatabaseBulkFacts.cs @@ -60,7 +60,7 @@ public async Task Test_Can_Bulk_Insert_Raw_Data() var rnd = new Random(2403); var data = Enumerable.Range(0, N) - .Select((x) => new KeyValuePair(location.Pack(x.ToString("x8")), Slice.Random(rnd, 16 + rnd.Next(240)))) + .Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x.ToString("x8")), Slice.Random(rnd, 16 + rnd.Next(240)))) .ToArray(); Log("Total data size is {0:N0} bytes", data.Sum(x => x.Key.Count + x.Value.Count)); @@ -143,7 +143,7 @@ public async Task Test_Can_Bulk_Insert_Items() ++called; uniqueKeys.Add(kv.Key); tr.Set( - location.Pack(kv.Key), + location.Tuples.EncodeKey(kv.Key), Slice.FromString(new string('A', kv.Value)) ); }, @@ -174,7 +174,7 @@ public async Task Test_Can_Bulk_Insert_Items() Assert.That(stored.Length, Is.EqualTo(N), "DB contains less or more items than expected"); for (int i = 0; i < stored.Length;i++) { - Assert.That(stored[i].Key, Is.EqualTo(location.Pack(data[i].Key)), "Key #{0}", i); + Assert.That(stored[i].Key, Is.EqualTo(location.Tuples.EncodeKey(data[i].Key)), "Key #{0}", i); Assert.That(stored[i].Value.Count, Is.EqualTo(data[i].Value), "Value #{0}", i); } @@ -198,7 +198,7 @@ public async Task Test_Can_Batch_ForEach_AsyncWithContextAndState() await Fdb.Bulk.WriteAsync( db, - Enumerable.Range(1, N).Select((x) => new KeyValuePair(location.Pack(x), Slice.FromInt32(x))), + Enumerable.Range(1, N).Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x), Slice.FromInt32(x))), this.Cancellation ); @@ -210,7 +210,7 @@ await Fdb.Bulk.WriteAsync( var sw = Stopwatch.StartNew(); await Fdb.Bulk.ForEachAsync( db, - Enumerable.Range(1, N).Select(x => location.Pack(x)), + Enumerable.Range(1, N).Select(x => location.Tuples.EncodeKey(x)), () => FdbTuple.Create(0L, 
0L), async (xs, ctx, state) => { @@ -284,7 +284,7 @@ public async Task Test_Can_Bulk_Batched_Insert_Items() { uniqueKeys.Add(kv.Key); tr.Set( - location.Pack(kv.Key), + location.Tuples.EncodeKey(kv.Key), Slice.FromString(new string('A', kv.Value)) ); } @@ -322,7 +322,7 @@ public async Task Test_Can_Bulk_Batched_Insert_Items() Assert.That(stored.Length, Is.EqualTo(N), "DB contains less or more items than expected"); for (int i = 0; i < stored.Length; i++) { - Assert.That(stored[i].Key, Is.EqualTo(location.Pack(data[i].Key)), "Key #{0}", i); + Assert.That(stored[i].Key, Is.EqualTo(location.Tuples.EncodeKey(data[i].Key)), "Key #{0}", i); Assert.That(stored[i].Value.Count, Is.EqualTo(data[i].Value), "Value #{0}", i); } @@ -346,7 +346,7 @@ public async Task Test_Can_Batch_ForEach_WithContextAndState() await Fdb.Bulk.WriteAsync( db, - Enumerable.Range(1, N).Select((x) => new KeyValuePair(location.Pack(x), Slice.FromInt32(x))), + Enumerable.Range(1, N).Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x), Slice.FromInt32(x))), this.Cancellation ); @@ -358,7 +358,7 @@ await Fdb.Bulk.WriteAsync( var sw = Stopwatch.StartNew(); await Fdb.Bulk.ForEachAsync( db, - Enumerable.Range(1, N).Select(x => location.Pack(x)), + Enumerable.Range(1, N).Select(x => location.Tuples.EncodeKey(x)), () => FdbTuple.Create(0L, 0L), // (sum, count) (xs, ctx, state) => { @@ -411,7 +411,7 @@ public async Task Test_Can_Batch_ForEach_AsyncWithContext() await Fdb.Bulk.WriteAsync( db, - Enumerable.Range(1, N).Select((x) => new KeyValuePair(location.Pack(x), Slice.FromInt32(x))), + Enumerable.Range(1, N).Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x), Slice.FromInt32(x))), this.Cancellation ); @@ -423,7 +423,7 @@ await Fdb.Bulk.WriteAsync( var sw = Stopwatch.StartNew(); await Fdb.Bulk.ForEachAsync( db, - Enumerable.Range(1, N).Select(x => location.Pack(x)), + Enumerable.Range(1, N).Select(x => location.Tuples.EncodeKey(x)), async (xs, ctx) => { Interlocked.Increment(ref chunks); @@ 
-471,7 +471,7 @@ public async Task Test_Can_Batch_Aggregate() await Fdb.Bulk.WriteAsync( db, - source.Select((x) => new KeyValuePair(location.Pack(x.Key), Slice.FromInt32(x.Value))), + source.Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x.Key), Slice.FromInt32(x.Value))), this.Cancellation ); @@ -481,7 +481,7 @@ await Fdb.Bulk.WriteAsync( var sw = Stopwatch.StartNew(); long total = await Fdb.Bulk.AggregateAsync( db, - source.Select(x => location.Pack(x.Key)), + source.Select(x => location.Tuples.EncodeKey(x.Key)), () => 0L, async (xs, ctx, sum) => { @@ -532,7 +532,7 @@ public async Task Test_Can_Batch_Aggregate_With_Transformed_Result() await Fdb.Bulk.WriteAsync( db, - source.Select((x) => new KeyValuePair(location.Pack(x.Key), Slice.FromInt32(x.Value))), + source.Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x.Key), Slice.FromInt32(x.Value))), this.Cancellation ); @@ -542,7 +542,7 @@ await Fdb.Bulk.WriteAsync( var sw = Stopwatch.StartNew(); double average = await Fdb.Bulk.AggregateAsync( db, - source.Select(x => location.Pack(x.Key)), + source.Select(x => location.Tuples.EncodeKey(x.Key)), () => FdbTuple.Create(0L, 0L), async (xs, ctx, state) => { @@ -601,7 +601,7 @@ public async Task Test_Can_Export_To_Disk() await Fdb.Bulk.WriteAsync( db.WithoutLogging(), - source.Select((x) => new KeyValuePair(location.Pack(x.Key), x.Value)), + source.Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x.Key), x.Value)), this.Cancellation ); @@ -628,7 +628,7 @@ await Fdb.Bulk.WriteAsync( var sb = new StringBuilder(4096); foreach(var x in xs) { - sb.AppendFormat("{0} = {1}\r\n", location.UnpackSingle(x.Key), x.Value.ToBase64()); + sb.AppendFormat("{0} = {1}\r\n", location.Tuples.DecodeKey(x.Key), x.Value.ToBase64()); } await file.WriteAsync(sb.ToString()); }, diff --git a/FoundationDB.Tests/DatabaseFacts.cs b/FoundationDB.Tests/DatabaseFacts.cs index d2708b2c5..e62eec84c 100644 --- a/FoundationDB.Tests/DatabaseFacts.cs +++ 
b/FoundationDB.Tests/DatabaseFacts.cs @@ -212,7 +212,7 @@ public async Task Test_Can_Get_Coordinators() Assert.That(coordinators.Coordinators[0].Port, Is.GreaterThanOrEqualTo(4500).And.LessThanOrEqualTo(4510)); //HACKHACK: may not work everywhere ! //TODO: how can we check that it is correct? - Console.WriteLine("Coordinators: " + coordinators.ToString()); + Log("Coordinators: {0}", coordinators); } } @@ -222,7 +222,7 @@ public async Task Test_Can_Get_Storage_Engine() using (var db = await OpenTestDatabaseAsync()) { string mode = await Fdb.System.GetStorageEngineModeAsync(db, this.Cancellation); - Console.WriteLine("Storage engine: " + mode); + Log("Storage engine: {0}", mode); Assert.That(mode, Is.Not.Null); Assert.That(mode, Is.EqualTo("ssd").Or.EqualTo("memory")); @@ -248,13 +248,13 @@ public async Task Test_Can_Get_Storage_Engine() public async Task Test_Can_Open_Database_With_Non_Empty_GlobalSpace() { // using a tuple prefix - using (var db = await Fdb.OpenAsync(null, "DB", new FdbSubspace(FdbTuple.Create("test")), false, this.Cancellation)) + using (var db = await Fdb.OpenAsync(null, "DB", FdbSubspace.Create(FdbTuple.Create("test")), false, this.Cancellation)) { Assert.That(db, Is.Not.Null); Assert.That(db.GlobalSpace, Is.Not.Null); Assert.That(db.GlobalSpace.Key.ToString(), Is.EqualTo("<02>test<00>")); - var subspace = db.Partition("hello"); + var subspace = db.Partition.By("hello"); Assert.That(subspace.Key.ToString(), Is.EqualTo("<02>test<00><02>hello<00>")); // keys inside the global space are invlaid @@ -271,7 +271,7 @@ public async Task Test_Can_Open_Database_With_Non_Empty_GlobalSpace() Assert.That(db.GlobalSpace, Is.Not.Null); Assert.That(db.GlobalSpace.Key.ToString(), Is.EqualTo("*<00>Z")); - var subspace = db.Partition("hello"); + var subspace = db.Partition.By("hello"); Assert.That(subspace.Key.ToString(), Is.EqualTo("*<00>Z<02>hello<00>")); // keys inside the global space are invlaid @@ -323,7 +323,7 @@ public async Task 
Test_Database_Instance_Should_Have_Default_Root_Directory() Assert.That(dl.ContentSubspace, Is.Not.Null); Assert.That(dl.ContentSubspace.Key, Is.EqualTo(db.GlobalSpace.Key)); Assert.That(dl.NodeSubspace, Is.Not.Null); - Assert.That(dl.NodeSubspace.Key, Is.EqualTo(db.GlobalSpace.Concat(Slice.FromByte(254)))); + Assert.That(dl.NodeSubspace.Key, Is.EqualTo(db.GlobalSpace.ConcatKey(Slice.FromByte(254)))); Assert.That(db.GlobalSpace.Contains(dl.ContentSubspace.Key), Is.True); Assert.That(db.GlobalSpace.Contains(dl.NodeSubspace.Key), Is.True); @@ -346,9 +346,9 @@ public async Task Test_Check_Timeout_On_Non_Existing_Database() using(var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { tr.Timeout = 250; // ms - Console.WriteLine("check ..."); + Log("check ..."); await tr.GetAsync(db.GlobalSpace.Key); - Console.WriteLine("Uhoh ...?"); + Log("Uhoh ...?"); exists = true; } } diff --git a/FoundationDB.Tests/Experimental/Indexing/CompressedBitmapsFacts.cs b/FoundationDB.Tests/Experimental/Indexing/CompressedBitmapsFacts.cs index 38577f19c..a4e7331e3 100644 --- a/FoundationDB.Tests/Experimental/Indexing/CompressedBitmapsFacts.cs +++ b/FoundationDB.Tests/Experimental/Indexing/CompressedBitmapsFacts.cs @@ -41,6 +41,7 @@ namespace FoundationDB.Layers.Experimental.Indexing.Tests using System.IO; [TestFixture] + [Category("LongRunning")] // disabled for now (takes too much time!) 
public class CompressedBitmapsFacts { diff --git a/FoundationDB.Tests/FdbTest.cs b/FoundationDB.Tests/FdbTest.cs index 97d47900c..d2963e0a9 100644 --- a/FoundationDB.Tests/FdbTest.cs +++ b/FoundationDB.Tests/FdbTest.cs @@ -82,12 +82,14 @@ protected void AfterEachTest() /// Time elapsed since the start of the current test protected TimeSpan TestElapsed { + [DebuggerStepThrough] get { return m_timer.Elapsed; } } /// Cancellation token usable by any test protected CancellationToken Cancellation { + [DebuggerStepThrough] get { if (m_cts == null) SetupCancellation(); @@ -108,28 +110,33 @@ private void SetupCancellation() } /// Connect to the local test database + [DebuggerStepThrough] protected Task OpenTestDatabaseAsync() { return TestHelpers.OpenTestDatabaseAsync(this.Cancellation); } /// Connect to the local test database + [DebuggerStepThrough] protected Task OpenTestPartitionAsync() { return TestHelpers.OpenTestPartitionAsync(this.Cancellation); } + [DebuggerStepThrough] protected Task GetCleanDirectory(IFdbDatabase db, params string[] path) { return TestHelpers.GetCleanDirectory(db, path, this.Cancellation); } - protected Task DumpSubspace(IFdbDatabase db, FdbSubspace subspace) + [DebuggerStepThrough] + protected Task DumpSubspace(IFdbDatabase db, IFdbSubspace subspace) { return TestHelpers.DumpSubspace(db, subspace, this.Cancellation); } - protected async Task DeleteSubspace(IFdbDatabase db, FdbSubspace subspace) + [DebuggerStepThrough] + protected async Task DeleteSubspace(IFdbDatabase db, IFdbSubspace subspace) { using (var tr = db.BeginTransaction(this.Cancellation)) { @@ -142,22 +149,32 @@ protected async Task DeleteSubspace(IFdbDatabase db, FdbSubspace subspace) // These methods are just there to help with the problem of culture-aware string formatting - protected void Log(string text) + [DebuggerStepThrough] + protected static void Log() + { + Console.WriteLine(); + } + + [DebuggerStepThrough] + protected static void Log(string text) { 
Console.WriteLine(text); } - protected void Log(string format, object arg0) + [DebuggerStepThrough] + protected static void Log(string format, object arg0) { Console.WriteLine(String.Format(CultureInfo.InvariantCulture, format, arg0)); } - protected void Log(string format, object arg0, object arg1) + [DebuggerStepThrough] + protected static void Log(string format, object arg0, object arg1) { Console.WriteLine(String.Format(CultureInfo.InvariantCulture, format, arg0, arg1)); } - protected void Log(string format, params object[] args) + [DebuggerStepThrough] + protected static void Log(string format, params object[] args) { Console.WriteLine(String.Format(CultureInfo.InvariantCulture, format, args)); } diff --git a/FoundationDB.Tests/Filters/LoggingFilterFacts.cs b/FoundationDB.Tests/Filters/LoggingFilterFacts.cs index 5a1515c76..6fd0f5f3c 100644 --- a/FoundationDB.Tests/Filters/LoggingFilterFacts.cs +++ b/FoundationDB.Tests/Filters/LoggingFilterFacts.cs @@ -47,37 +47,38 @@ public async Task Test_Can_Log_A_Transaction() using (var db = await OpenTestPartitionAsync()) { - var location = await GetCleanDirectory(db, "Logging"); + // get a tuple view of the directory + var location = (await GetCleanDirectory(db, "Logging")).Tuples; // note: ensure that all methods are JITed await db.ReadWriteAsync(async (tr) => { await tr.GetReadVersionAsync(); - tr.Set(location.Pack("Warmup", 0), Slice.FromInt32(1)); - tr.Clear(location.Pack("Warmup", 1)); - await tr.GetAsync(location.Pack("Warmup", 2)); - await tr.GetRange(FdbKeyRange.StartsWith(location.Pack("Warmup", 3))).ToListAsync(); - tr.ClearRange(location.Pack("Warmup", 4), location.Pack("Warmup", 5)); + tr.Set(location.EncodeKey("Warmup", 0), Slice.FromInt32(1)); + tr.Clear(location.EncodeKey("Warmup", 1)); + await tr.GetAsync(location.EncodeKey("Warmup", 2)); + await tr.GetRange(FdbKeyRange.StartsWith(location.EncodeKey("Warmup", 3))).ToListAsync(); + tr.ClearRange(location.EncodeKey("Warmup", 4), location.EncodeKey("Warmup", 
5)); }, this.Cancellation); await db.WriteAsync((tr) => { var rnd = new Random(); - tr.Set(location.Pack("One"), Slice.FromString("111111")); - tr.Set(location.Pack("Two"), Slice.FromString("222222")); + tr.Set(location.EncodeKey("One"), Slice.FromString("111111")); + tr.Set(location.EncodeKey("Two"), Slice.FromString("222222")); for (int j = 0; j < 4; j++) { for (int i = 0; i < 100; i++) { - tr.Set(location.Pack("Range", j, rnd.Next(1000)), Slice.Empty); + tr.Set(location.EncodeKey("Range", j, rnd.Next(1000)), Slice.Empty); } } for (int j = 0; j < N; j++) { - tr.Set(location.Pack("X", j), Slice.FromInt32(j)); - tr.Set(location.Pack("Y", j), Slice.FromInt32(j)); - tr.Set(location.Pack("Z", j), Slice.FromInt32(j)); - tr.Set(location.Pack("W", j), Slice.FromInt32(j)); + tr.Set(location.EncodeKey("X", j), Slice.FromInt32(j)); + tr.Set(location.EncodeKey("Y", j), Slice.FromInt32(j)); + tr.Set(location.EncodeKey("Z", j), Slice.FromInt32(j)); + tr.Set(location.EncodeKey("W", j), Slice.FromInt32(j)); } }, this.Cancellation); @@ -109,10 +110,10 @@ await logged.ReadWriteAsync(async (tr) => long ver = await tr.GetReadVersionAsync().ConfigureAwait(false); - await tr.GetAsync(location.Pack("One")).ConfigureAwait(false); - await tr.GetAsync(location.Pack("NotFound")).ConfigureAwait(false); + await tr.GetAsync(location.EncodeKey("One")).ConfigureAwait(false); + await tr.GetAsync(location.EncodeKey("NotFound")).ConfigureAwait(false); - tr.Set(location.Pack("Write"), Slice.FromString("abcdef" + k.ToString())); + tr.Set(location.EncodeKey("Write"), Slice.FromString("abcdef" + k.ToString())); //tr.Annotate("BEFORE"); //await Task.Delay(TimeSpan.FromMilliseconds(10)); @@ -125,33 +126,33 @@ await logged.ReadWriteAsync(async (tr) => //await tr.GetRangeAsync(FdbKeySelector.LastLessOrEqual(location.Pack("A")), FdbKeySelector.FirstGreaterThan(location.Pack("Z"))).ConfigureAwait(false); await Task.WhenAll( - tr.GetRange(FdbKeyRange.StartsWith(location.Pack("Range", 0))).ToListAsync(), - 
tr.GetRange(location.Pack("Range", 1, 0), location.Pack("Range", 1, 200)).ToListAsync(), - tr.GetRange(location.Pack("Range", 2, 400), location.Pack("Range", 2, 600)).ToListAsync(), - tr.GetRange(location.Pack("Range", 3, 800), location.Pack("Range", 3, 1000)).ToListAsync() + tr.GetRange(FdbKeyRange.StartsWith(location.EncodeKey("Range", 0))).ToListAsync(), + tr.GetRange(location.EncodeKey("Range", 1, 0), location.EncodeKey("Range", 1, 200)).ToListAsync(), + tr.GetRange(location.EncodeKey("Range", 2, 400), location.EncodeKey("Range", 2, 600)).ToListAsync(), + tr.GetRange(location.EncodeKey("Range", 3, 800), location.EncodeKey("Range", 3, 1000)).ToListAsync() ).ConfigureAwait(false); - await tr.GetAsync(location.Pack("Two")).ConfigureAwait(false); + await tr.GetAsync(location.EncodeKey("Two")).ConfigureAwait(false); - await tr.GetValuesAsync(Enumerable.Range(0, N).Select(x => location.Pack("X", x))).ConfigureAwait(false); + await tr.GetValuesAsync(Enumerable.Range(0, N).Select(x => location.EncodeKey("X", x))).ConfigureAwait(false); for (int i = 0; i < N; i++) { - await tr.GetAsync(location.Pack("Z", i)).ConfigureAwait(false); + await tr.GetAsync(location.EncodeKey("Z", i)).ConfigureAwait(false); } - await Task.WhenAll(Enumerable.Range(0, N / 2).Select(x => tr.GetAsync(location.Pack("Y", x)))).ConfigureAwait(false); - await Task.WhenAll(Enumerable.Range(N / 2, N / 2).Select(x => tr.GetAsync(location.Pack("Y", x)))).ConfigureAwait(false); + await Task.WhenAll(Enumerable.Range(0, N / 2).Select(x => tr.GetAsync(location.EncodeKey("Y", x)))).ConfigureAwait(false); + await Task.WhenAll(Enumerable.Range(N / 2, N / 2).Select(x => tr.GetAsync(location.EncodeKey("Y", x)))).ConfigureAwait(false); await Task.WhenAll( - tr.GetAsync(location.Pack("W", 1)), - tr.GetAsync(location.Pack("W", 2)), - tr.GetAsync(location.Pack("W", 3)) + tr.GetAsync(location.EncodeKey("W", 1)), + tr.GetAsync(location.EncodeKey("W", 2)), + tr.GetAsync(location.EncodeKey("W", 3)) 
).ConfigureAwait(false); - tr.Set(location.Pack("Write2"), Slice.FromString("ghijkl" + k.ToString())); - tr.Clear(location.Pack("Clear", "0")); - tr.ClearRange(location.Pack("Clear", "A"), location.Pack("Clear", "Z")); + tr.Set(location.EncodeKey("Write2"), Slice.FromString("ghijkl" + k.ToString())); + tr.Clear(location.EncodeKey("Clear", "0")); + tr.ClearRange(location.EncodeKey("Clear", "A"), location.EncodeKey("Clear", "Z")); if (tr.Context.Retries == 0) { diff --git a/FoundationDB.Tests/Layers/BlobFacts.cs b/FoundationDB.Tests/Layers/BlobFacts.cs index 952c43247..efa64781a 100644 --- a/FoundationDB.Tests/Layers/BlobFacts.cs +++ b/FoundationDB.Tests/Layers/BlobFacts.cs @@ -49,7 +49,7 @@ public async Task Test_FdbBlob_NotFound_Blob_Is_Empty() // clear previous values await DeleteSubspace(db, location); - var blob = new FdbBlob(location.Partition("Empty")); + var blob = new FdbBlob(location.Partition.By("Empty")); long? size; @@ -75,7 +75,7 @@ public async Task Test_FdbBlob_Can_AppendToBlob() // clear previous values await DeleteSubspace(db, location); - var blob = new FdbBlob(location.Partition("BobTheBlob")); + var blob = new FdbBlob(location.Partition.By("BobTheBlob")); using (var tr = db.BeginTransaction(this.Cancellation)) { @@ -112,7 +112,7 @@ public async Task Test_FdbBlob_CanAppendLargeChunks() // clear previous values await DeleteSubspace(db, location); - var blob = new FdbBlob(location.Partition("BigBlob")); + var blob = new FdbBlob(location.Partition.By("BigBlob")); var data = new byte[100 * 1000]; for (int i = 0; i < data.Length; i++) data[i] = (byte)i; diff --git a/FoundationDB.Tests/Layers/DirectoryFacts.cs b/FoundationDB.Tests/Layers/DirectoryFacts.cs index cbeb25ee1..cfbb66778 100644 --- a/FoundationDB.Tests/Layers/DirectoryFacts.cs +++ b/FoundationDB.Tests/Layers/DirectoryFacts.cs @@ -51,7 +51,7 @@ public async Task Test_Allocator() using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition(Slice.FromString("hca")); + var 
location = db.Partition.By(Slice.FromString("hca")); await db.ClearRangeAsync(location, this.Cancellation); #if ENABLE_LOGGING @@ -110,7 +110,7 @@ public async Task Test_CreateOrOpen_Simple() using (var db = await OpenTestDatabaseAsync()) { // we will put everything under a custom namespace - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); #if ENABLE_LOGGING @@ -175,7 +175,7 @@ public async Task Test_CreateOrOpen_With_Layer() using (var db = await OpenTestDatabaseAsync()) { // we will put everything under a custom namespace - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); #if ENABLE_LOGGING @@ -189,9 +189,9 @@ public async Task Test_CreateOrOpen_With_Layer() var directory = FdbDirectoryLayer.Create(location); Assert.That(directory.ContentSubspace, Is.Not.Null); - Assert.That(directory.ContentSubspace.Key, Is.EqualTo(location.Key)); + Assert.That(directory.ContentSubspace, Is.EqualTo(location)); Assert.That(directory.NodeSubspace, Is.Not.Null); - Assert.That(directory.NodeSubspace.Key, Is.EqualTo(location.Key + Slice.FromByte(254))); + Assert.That(directory.NodeSubspace.Key, Is.EqualTo(location.Keys[Slice.FromByte(254)])); // first call should create a new subspace (with a random prefix) var foo = await directory.CreateOrOpenAsync(logged, new[] { "Foo" }, Slice.FromString("AcmeLayer"), this.Cancellation); @@ -248,7 +248,7 @@ public async Task Test_CreateOrOpen_SubFolder() { // we will put everything under a custom namespace - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); #if ENABLE_LOGGING @@ -296,7 +296,7 @@ public async Task Test_List_SubFolders() using (var db = await OpenTestDatabaseAsync()) { // we will put everything under a custom namespace - var location = db.Partition("DL"); + var location = 
db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -349,7 +349,7 @@ public async Task Test_List_Folders_Should_Be_Sorted_By_Name() using (var db = await OpenTestDatabaseAsync()) { // we will put everything under a custom namespace - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -388,7 +388,7 @@ public async Task Test_Move_Folder() using (var db = await OpenTestDatabaseAsync()) { // we will put everything under a custom namespace - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); #if ENABLE_LOGGING @@ -443,7 +443,7 @@ public async Task Test_Remove_Folder() { using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -503,7 +503,7 @@ public async Task Test_Can_Change_Layer_Of_Existing_Directory() { using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -552,7 +552,7 @@ public async Task Test_Directory_Partitions() { using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -560,7 +560,7 @@ public async Task Test_Directory_Partitions() var partition = await directory.CreateAsync(db, "Foo", Slice.FromAscii("partition"), this.Cancellation); // we can't get the partition key directory (because it's a root directory) so we need to cheat a little bit 
- var partitionKey = partition.Copy().Key; + var partitionKey = FdbSubspace.Copy(partition).Key; Console.WriteLine(partition); Assert.That(partition, Is.InstanceOf()); Assert.That(partition.Layer, Is.EqualTo(Slice.FromAscii("partition"))); @@ -599,7 +599,7 @@ public async Task Test_Directory_Cannot_Move_To_Another_Partition() { using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -627,7 +627,7 @@ public async Task Test_Directory_Cannot_Move_To_A_Sub_Partition() { using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -683,7 +683,7 @@ public async Task Test_Renaming_Partition_Uses_Parent_DirectoryLayer() using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -709,7 +709,7 @@ public async Task Test_Renaming_Partition_Uses_Parent_DirectoryLayer() // should have kept the same prefix //note: we need to cheat to get the key of the partition - Assert.That(bar.Copy().Key, Is.EqualTo(foo.Copy().Key)); + Assert.That(FdbSubspace.Copy(bar).Key, Is.EqualTo(FdbSubspace.Copy(foo).Key)); // verify list again folders = await directory.ListAsync(tr); @@ -731,7 +731,7 @@ public async Task Test_Removing_Partition_Uses_Parent_DirectoryLayer() using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -772,7 +772,7 @@ public async Task 
Test_Directory_Methods_Should_Fail_With_Empty_Paths() { using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -819,7 +819,7 @@ public async Task Test_Directory_Partitions_Should_Disallow_Creation_Of_Direct_K using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition("DL"); + var location = db.Partition.By("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -843,7 +843,7 @@ public async Task Test_Directory_Partitions_Should_Disallow_Creation_Of_Direct_K // === PASS === // these methods are allowed to succeed on directory partitions, because we need them for the rest to work - shouldPass(() => { var _ = partition.Copy().Key; }); // EXCEPTION: we need this to work, because that's the only way that the unit tests above can see the partition key! + shouldPass(() => { var _ = FdbSubspace.Copy(partition).Key; }); // EXCEPTION: we need this to work, because that's the only way that the unit tests above can see the partition key! shouldPass(() => partition.ToString()); // EXCEPTION: this should never fail! shouldPass(() => partition.DumpKey(barKey)); // EXCEPTION: this should always work, because this can be used for debugging and logging... shouldPass(() => partition.BoundCheck(barKey, true)); // EXCEPTION: needs to work because it is used by GetRange() and GetKey() @@ -861,56 +861,64 @@ public async Task Test_Directory_Partitions_Should_Disallow_Creation_Of_Direct_K shouldFail(() => partition.Contains(barKey)); // Extract / ExtractAndCheck / BoundCheck - shouldFail(() => partition.Extract(barKey)); - shouldFail(() => partition.Extract(new[] { barKey, barKey + FdbKey.MinValue })); - shouldFail(() => partition.ExtractAndCheck(barKey)); - //TODO: add missing overrides ? 
+ shouldFail(() => partition.ExtractKey(barKey, boundCheck: false)); + shouldFail(() => partition.ExtractKey(barKey, boundCheck: true)); + shouldFail(() => partition.ExtractKeys(new[] { barKey, barKey + FdbKey.MinValue })); + shouldFail(() => partition.Keys.Extract(barKey)); + shouldFail(() => partition.Keys.Extract(barKey, barKey + FdbKey.MinValue)); // Partition - shouldFail(() => partition.Partition(123)); - shouldFail(() => partition.Partition(123, "hello")); - shouldFail(() => partition.Partition(123, "hello", false)); - shouldFail(() => partition.Partition(123, "hello", false, "world")); - - // Concat - shouldFail(() => partition.Concat(Slice.FromString("hello"))); - shouldFail(() => partition.Concat(location)); - - // ConcatRange - shouldFail(() => partition.ConcatRange(new[] { Slice.FromString("hello"), Slice.FromString("world"), Slice.FromString("!") })); - shouldFail(() => partition.ConcatRange(new[] { location, location })); - - // ToTuple - shouldFail(() => partition.ToTuple()); - - // Append - shouldFail(() => partition.Append(123)); - shouldFail(() => partition.Append(123, "hello")); - shouldFail(() => partition.Append(123, "hello", false)); - shouldFail(() => partition.Append(123, "hello", false, "world")); - shouldFail(() => partition.Append(FdbTuple.Create(123, "hello", false, "world"))); - shouldFail(() => partition.AppendBoxed(new object[] { 123, "hello", false, "world" })); - - // Pack - shouldFail(() => partition.Pack(123)); - shouldFail(() => partition.Pack(123, "hello")); - shouldFail(() => partition.Pack(123, "hello", false)); - shouldFail(() => partition.Pack(123, "hello", false, "world")); - shouldFail(() => partition.PackBoxed(123)); - - // PackRange - shouldFail(() => partition.PackRange(new[] { 123, 456, 789 })); - shouldFail(() => partition.PackRange((IEnumerable)new[] { 123, 456, 789 })); - shouldFail(() => partition.PackBoxedRange(new object[] { 123, "hello", true })); - shouldFail(() => partition.PackBoxedRange((IEnumerable)new 
object[] { 123, "hello", true })); - - // Unpack - shouldFail(() => partition.Unpack(barKey)); - shouldFail(() => partition.Unpack(new[] { barKey, barKey + FdbTuple.Pack(123) })); - shouldFail(() => partition.UnpackLast(barKey)); - shouldFail(() => partition.UnpackLast(new[] { barKey, barKey + FdbTuple.Pack(123) })); - shouldFail(() => partition.UnpackSingle(barKey)); - shouldFail(() => partition.UnpackSingle(new[] { barKey, barKey })); + shouldFail(() => partition.Partition.By(123)); + shouldFail(() => partition.Partition.By(123, "hello")); + shouldFail(() => partition.Partition.By(123, "hello", false)); + shouldFail(() => partition.Partition.By(123, "hello", false, "world")); + + // Keys + + shouldFail(() => partition.ConcatKey(Slice.FromString("hello"))); + shouldFail(() => partition.ConcatKey(location.Key)); + shouldFail(() => partition.ConcatKeys(new[] { Slice.FromString("hello"), Slice.FromString("world"), Slice.FromString("!") })); + + shouldFail(() => partition.Keys.Concat(Slice.FromString("hello"))); + shouldFail(() => partition.Keys.Concat(location.Key)); + shouldFail(() => partition.Keys.Concat(location)); + shouldFail(() => partition.Keys.Concat(new[] { Slice.FromString("hello"), Slice.FromString("world"), Slice.FromString("!") })); + shouldFail(() => partition.Keys.Concat(new[] { location, location })); + + shouldFail(() => { var _ = partition.Keys[Slice.FromString("hello")]; }); + shouldFail(() => { var _ = partition.Keys[location.Key]; }); + shouldFail(() => { var _ = partition.Keys[location]; }); + + // Tuples + + shouldFail(() => partition.Tuples.EncodeKey(123)); + shouldFail(() => partition.Tuples.EncodeKey(123, "hello")); + shouldFail(() => partition.Tuples.EncodeKey(123, "hello", false)); + shouldFail(() => partition.Tuples.EncodeKey(123, "hello", false, "world")); + shouldFail(() => partition.Tuples.EncodeKey(123)); + + shouldFail(() => partition.Tuples.EncodeKeys(new[] { 123, 456, 789 })); + shouldFail(() => 
partition.Tuples.EncodeKeys((IEnumerable)new[] { 123, 456, 789 })); + shouldFail(() => partition.Tuples.EncodeKeys(new object[] { 123, "hello", true })); + shouldFail(() => partition.Tuples.EncodeKeys((IEnumerable)new object[] { 123, "hello", true })); + + shouldFail(() => partition.Tuples.Unpack(barKey)); + shouldFail(() => partition.Tuples.Unpack(new[] { barKey, barKey + FdbTuple.Pack(123) })); + shouldFail(() => partition.Tuples.DecodeKey(barKey)); + shouldFail(() => partition.Tuples.DecodeKeys(new[] { barKey, barKey })); + shouldFail(() => partition.Tuples.DecodeLast(barKey)); + shouldFail(() => partition.Tuples.DecodeKeysLast(new[] { barKey, barKey + FdbTuple.Pack(123) })); + shouldFail(() => partition.Tuples.DecodeFirst(barKey)); + shouldFail(() => partition.Tuples.DecodeKeysFirst(new[] { barKey, barKey + FdbTuple.Pack(123) })); + + shouldFail(() => partition.Tuples.ToTuple()); + + shouldFail(() => partition.Tuples.Append(123)); + shouldFail(() => partition.Tuples.Append(123, "hello")); + shouldFail(() => partition.Tuples.Append(123, "hello", false)); + shouldFail(() => partition.Tuples.Append(123, "hello", false, "world")); + shouldFail(() => partition.Tuples.Concat(FdbTuple.Create(123, "hello", false, "world"))); + shouldFail(() => partition.Tuples.Append(new object[] { 123, "hello", false, "world" })); // ToRange shouldFail(() => partition.ToRange()); diff --git a/FoundationDB.Tests/Layers/IndexingFacts.cs b/FoundationDB.Tests/Layers/IndexingFacts.cs index 51450bc2a..6f879276e 100644 --- a/FoundationDB.Tests/Layers/IndexingFacts.cs +++ b/FoundationDB.Tests/Layers/IndexingFacts.cs @@ -49,13 +49,13 @@ public async Task Task_Can_Add_Update_Remove_From_Index() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("Indexing"); + var location = db.Partition.By("Indexing"); // clear previous values await DeleteSubspace(db, location); - var subspace = location.Partition("FoosByColor"); + var subspace = 
location.Partition.By("FoosByColor"); var index = new FdbIndex("Foos.ByColor", subspace); // add items to the index @@ -148,9 +148,9 @@ public async Task Test_Can_Combine_Indexes() new Character { Id = 6, Name = "Catwoman", Brand="DC", IsVilain = default(bool?) }, }; - var indexBrand = new FdbIndex("Heroes.ByBrand", location.Partition("CharactersByBrand")); - var indexSuperHero = new FdbIndex("Heroes.BySuper", location.Partition("SuperHeros")); - var indexAlignment = new FdbIndex("Heros.ByAlignment", location.Partition("FriendsOrFoe")); + var indexBrand = new FdbIndex("Heroes.ByBrand", location.Partition.By("CharactersByBrand")); + var indexSuperHero = new FdbIndex("Heroes.BySuper", location.Partition.By("SuperHeros")); + var indexAlignment = new FdbIndex("Heros.ByAlignment", location.Partition.By("FriendsOrFoe")); // index everything await db.WriteAsync((tr) => diff --git a/FoundationDB.Tests/Layers/MapFacts.cs b/FoundationDB.Tests/Layers/MapFacts.cs index ef1712cc8..517b157c3 100644 --- a/FoundationDB.Tests/Layers/MapFacts.cs +++ b/FoundationDB.Tests/Layers/MapFacts.cs @@ -49,7 +49,7 @@ public async Task Test_FdbMap_Read_Write_Delete() { var location = await GetCleanDirectory(db, "Collections", "Maps"); - var map = new FdbMap("Foos", location.Partition("Foos"), KeyValueEncoders.Values.StringEncoder); + var map = new FdbMap("Foos", location.Partition.By("Foos"), KeyValueEncoders.Values.StringEncoder); string secret = "world:" + Guid.NewGuid().ToString(); @@ -88,7 +88,7 @@ public async Task Test_FdbMap_Read_Write_Delete() // directly read the value, behind the table's back using (var tr = db.BeginTransaction(this.Cancellation)) { - var value = await tr.GetAsync(location.Pack("Foos", "hello")); + var value = await tr.GetAsync(location.Tuples.EncodeKey("Foos", "hello")); Assert.That(value, Is.Not.EqualTo(Slice.Nil)); Assert.That(value.ToString(), Is.EqualTo(secret)); } @@ -113,7 +113,7 @@ public async Task Test_FdbMap_Read_Write_Delete() Assert.That(value.HasValue, 
Is.False); // also check directly - var data = await tr.GetAsync(location.Pack("Foos", "hello")); + var data = await tr.GetAsync(location.Tuples.EncodeKey("Foos", "hello")); Assert.That(data, Is.EqualTo(Slice.Nil)); } @@ -128,7 +128,7 @@ public async Task Test_FdbMap_List() { var location = await GetCleanDirectory(db, "Collections", "Maps"); - var map = new FdbMap("Foos", location.Partition("Foos"), KeyValueEncoders.Values.StringEncoder); + var map = new FdbMap("Foos", location.Partition.By("Foos"), KeyValueEncoders.Values.StringEncoder); // write a bunch of keys await db.WriteAsync((tr) => @@ -188,7 +188,7 @@ public async Task Test_FdbMap_With_Custom_Key_Encoder() { var location = await GetCleanDirectory(db, "Collections", "Maps"); - var map = new FdbMap("Firewall", location.Partition("Hosts"), keyEncoder, KeyValueEncoders.Values.StringEncoder); + var map = new FdbMap("Firewall", location.Partition.By("Hosts"), keyEncoder, KeyValueEncoders.Values.StringEncoder); // import all the rules await db.WriteAsync((tr) => diff --git a/FoundationDB.Tests/Layers/MultiMapFacts.cs b/FoundationDB.Tests/Layers/MultiMapFacts.cs index 3686c4dec..be3019db0 100644 --- a/FoundationDB.Tests/Layers/MultiMapFacts.cs +++ b/FoundationDB.Tests/Layers/MultiMapFacts.cs @@ -50,7 +50,7 @@ public async Task Test_FdbMultiMap_Read_Write_Delete() var location = await GetCleanDirectory(db, "Collections", "MultiMaps"); - var map = new FdbMultiMap(location.Partition("Foos"), allowNegativeValues: false); + var map = new FdbMultiMap(location.Partition.By("Foos"), allowNegativeValues: false); // read non existing value using (var tr = db.BeginTransaction(this.Cancellation)) @@ -89,7 +89,7 @@ public async Task Test_FdbMultiMap_Read_Write_Delete() // directly read the value, behind the table's back using (var tr = db.BeginTransaction(this.Cancellation)) { - var value = await tr.GetAsync(map.Subspace.Pack("hello", "world")); + var value = await tr.GetAsync(map.Subspace.Tuples.EncodeKey("hello", "world")); 
Assert.That(value, Is.Not.EqualTo(Slice.Nil)); Assert.That(value.ToInt64(), Is.EqualTo(1)); } @@ -112,7 +112,7 @@ public async Task Test_FdbMultiMap_Read_Write_Delete() Assert.That(count, Is.Null); // also check directly - var data = await tr.GetAsync(map.Subspace.Pack("hello", "world")); + var data = await tr.GetAsync(map.Subspace.Tuples.EncodeKey("hello", "world")); Assert.That(data, Is.EqualTo(Slice.Nil)); } diff --git a/FoundationDB.Tests/Layers/RankedSetFacts.cs b/FoundationDB.Tests/Layers/RankedSetFacts.cs index 3cb3fbbde..fc98e667f 100644 --- a/FoundationDB.Tests/Layers/RankedSetFacts.cs +++ b/FoundationDB.Tests/Layers/RankedSetFacts.cs @@ -75,9 +75,9 @@ private static async Task PrintRankedSet(FdbRankedSet rs, IFdbReadOnlyTransactio for (int l = 0; l < 6; l++) { sb.AppendFormat("Level {0}:\r\n", l); - await tr.GetRange(rs.Subspace.Partition(l).ToRange()).ForEachAsync((kvp) => + await tr.GetRange(rs.Subspace.Partition.By(l).ToRange()).ForEachAsync((kvp) => { - sb.AppendFormat("\t{0} = {1}\r\n", rs.Subspace.Unpack(kvp.Key), kvp.Value.ToInt64()); + sb.AppendFormat("\t{0} = {1}\r\n", rs.Subspace.Tuples.Unpack(kvp.Key), kvp.Value.ToInt64()); }); } Console.WriteLine(sb.ToString()); diff --git a/FoundationDB.Tests/Layers/StringInternFacts.cs b/FoundationDB.Tests/Layers/StringInternFacts.cs index 1673a923f..1c3ca0023 100644 --- a/FoundationDB.Tests/Layers/StringInternFacts.cs +++ b/FoundationDB.Tests/Layers/StringInternFacts.cs @@ -43,8 +43,8 @@ public async Task Test_StringIntern_Example() { using (var db = await OpenTestPartitionAsync()) { - var stringSpace = db.Partition("Strings"); - var dataSpace = db.Partition("Data"); + var stringSpace = db.Partition.By("Strings"); + var dataSpace = db.Partition.By("Data"); // clear all previous data await DeleteSubspace(db, stringSpace); @@ -55,11 +55,11 @@ public async Task Test_StringIntern_Example() // insert a bunch of strings using (var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(dataSpace.Pack("a"), await 
stringTable.InternAsync(tr, "testing 123456789")); - tr.Set(dataSpace.Pack("b"), await stringTable.InternAsync(tr, "dog")); - tr.Set(dataSpace.Pack("c"), await stringTable.InternAsync(tr, "testing 123456789")); - tr.Set(dataSpace.Pack("d"), await stringTable.InternAsync(tr, "cat")); - tr.Set(dataSpace.Pack("e"), await stringTable.InternAsync(tr, "cat")); + tr.Set(dataSpace.Tuples.EncodeKey("a"), await stringTable.InternAsync(tr, "testing 123456789")); + tr.Set(dataSpace.Tuples.EncodeKey("b"), await stringTable.InternAsync(tr, "dog")); + tr.Set(dataSpace.Tuples.EncodeKey("c"), await stringTable.InternAsync(tr, "testing 123456789")); + tr.Set(dataSpace.Tuples.EncodeKey("d"), await stringTable.InternAsync(tr, "cat")); + tr.Set(dataSpace.Tuples.EncodeKey("e"), await stringTable.InternAsync(tr, "cat")); await tr.CommitAsync(); } @@ -72,11 +72,11 @@ public async Task Test_StringIntern_Example() // check the contents of the data using (var tr = db.BeginTransaction(this.Cancellation)) { - var uid_a = await tr.GetAsync(dataSpace.Pack("a")); - var uid_b = await tr.GetAsync(dataSpace.Pack("b")); - var uid_c = await tr.GetAsync(dataSpace.Pack("c")); - var uid_d = await tr.GetAsync(dataSpace.Pack("d")); - var uid_e = await tr.GetAsync(dataSpace.Pack("e")); + var uid_a = await tr.GetAsync(dataSpace.Tuples.EncodeKey("a")); + var uid_b = await tr.GetAsync(dataSpace.Tuples.EncodeKey("b")); + var uid_c = await tr.GetAsync(dataSpace.Tuples.EncodeKey("c")); + var uid_d = await tr.GetAsync(dataSpace.Tuples.EncodeKey("d")); + var uid_e = await tr.GetAsync(dataSpace.Tuples.EncodeKey("e")); // a, b, d should be different Assert.That(uid_b, Is.Not.EqualTo(uid_a)); diff --git a/FoundationDB.Tests/Layers/SubspaceFacts.cs b/FoundationDB.Tests/Layers/SubspaceFacts.cs index 0903344d3..937cc3a65 100644 --- a/FoundationDB.Tests/Layers/SubspaceFacts.cs +++ b/FoundationDB.Tests/Layers/SubspaceFacts.cs @@ -49,7 +49,7 @@ public void Test_Empty_Subspace_Is_Empty() Assert.That(subspace.Key.Count, 
Is.EqualTo(0), "FdbSubspace.Empty.Key should be equal to Slice.Empty"); Assert.That(subspace.Key.HasValue, Is.True, "FdbSubspace.Empty.Key should be equal to Slice.Empty"); - Assert.That(subspace.Copy(), Is.Not.SameAs(subspace)); + Assert.That(FdbSubspace.Copy(subspace), Is.Not.SameAs(subspace)); } [Test] @@ -59,21 +59,21 @@ public void Test_Subspace_With_Binary_Prefix() var subspace = new FdbSubspace(Slice.Create(new byte[] { 42, 255, 0, 127 })); Assert.That(subspace.Key.ToString(), Is.EqualTo("*<00><7F>")); - Assert.That(subspace.Copy(), Is.Not.SameAs(subspace)); - Assert.That(subspace.Copy().Key, Is.EqualTo(subspace.Key)); + Assert.That(FdbSubspace.Copy(subspace), Is.Not.SameAs(subspace)); + Assert.That(FdbSubspace.Copy(subspace).Key, Is.EqualTo(subspace.Key)); // concat(Slice) should append the slice to the binary prefix directly - Assert.That(subspace.Concat(Slice.FromInt32(0x01020304)).ToString(), Is.EqualTo("*<00><7F><04><03><02><01>")); - Assert.That(subspace.Concat(Slice.FromAscii("hello")).ToString(), Is.EqualTo("*<00><7F>hello")); + Assert.That(subspace.ConcatKey(Slice.FromInt32(0x01020304)).ToString(), Is.EqualTo("*<00><7F><04><03><02><01>")); + Assert.That(subspace.ConcatKey(Slice.FromAscii("hello")).ToString(), Is.EqualTo("*<00><7F>hello")); // pack(...) 
should use tuple serialization - Assert.That(subspace.Pack(123).ToString(), Is.EqualTo("*<00><7F><15>{")); - Assert.That(subspace.Pack("hello").ToString(), Is.EqualTo("*<00><7F><02>hello<00>")); - Assert.That(subspace.Pack(Slice.FromAscii("world")).ToString(), Is.EqualTo("*<00><7F><01>world<00>")); - Assert.That(subspace.Pack(FdbTuple.Create("hello", 123)).ToString(), Is.EqualTo("*<00><7F><02>hello<00><15>{")); + Assert.That(subspace.Tuples.EncodeKey(123).ToString(), Is.EqualTo("*<00><7F><15>{")); + Assert.That(subspace.Tuples.EncodeKey("hello").ToString(), Is.EqualTo("*<00><7F><02>hello<00>")); + Assert.That(subspace.Tuples.EncodeKey(Slice.FromAscii("world")).ToString(), Is.EqualTo("*<00><7F><01>world<00>")); + Assert.That(subspace.Tuples.Pack(FdbTuple.Create("hello", 123)).ToString(), Is.EqualTo("*<00><7F><02>hello<00><15>{")); // if we derive a tuple from this subspace, it should keep the binary prefix when converted to a key - var t = subspace.Append("world", 123, false); + var t = subspace.Tuples.Append("world", 123, false); Assert.That(t, Is.Not.Null); Assert.That(t.Count, Is.EqualTo(3)); Assert.That(t.Get(0), Is.EqualTo("world")); @@ -83,7 +83,7 @@ public void Test_Subspace_With_Binary_Prefix() Assert.That(k.ToString(), Is.EqualTo("*<00><7F><02>world<00><15>{<14>")); // if we unpack the key with the binary prefix, we should get a valid tuple - var t2 = subspace.Unpack(k); + var t2 = subspace.Tuples.Unpack(k); Assert.That(t2, Is.Not.Null); Assert.That(t2.Count, Is.EqualTo(3)); Assert.That(t2.Get(0), Is.EqualTo("world")); @@ -95,7 +95,7 @@ public void Test_Subspace_With_Binary_Prefix() public void Test_Subspace_Copy_Does_Not_Share_Key_Buffer() { var original = FdbSubspace.Create(Slice.FromString("Hello")); - var copy = original.Copy(); + var copy = FdbSubspace.Copy(original); Assert.That(copy, Is.Not.Null); Assert.That(copy, Is.Not.SameAs(original), "Copy should be a new instance"); Assert.That(copy.Key, Is.EqualTo(original.Key), "Key should be equal"); @@ 
-111,38 +111,38 @@ public void Test_Cannot_Create_Or_Partition_Subspace_With_Slice_Nil() { Assert.That(() => new FdbSubspace(Slice.Nil), Throws.ArgumentException); Assert.That(() => FdbSubspace.Create(Slice.Nil), Throws.ArgumentException); - Assert.That(() => FdbSubspace.Empty[Slice.Nil], Throws.ArgumentException); - Assert.That(() => FdbSubspace.Create(FdbKey.Directory)[Slice.Nil], Throws.ArgumentException); + Assert.That(() => FdbSubspace.Empty.Partition[Slice.Nil], Throws.ArgumentException); + Assert.That(() => FdbSubspace.Create(FdbKey.Directory).Partition[Slice.Nil], Throws.ArgumentException); } [Test] public void Test_Cannot_Create_Or_Partition_Subspace_With_Null_Tuple() { - Assert.That(() => new FdbSubspace(default(IFdbTuple)), Throws.InstanceOf()); - Assert.That(() => FdbSubspace.Empty[default(IFdbTuple)], Throws.InstanceOf()); - Assert.That(() => FdbSubspace.Create(FdbKey.Directory)[default(IFdbTuple)], Throws.InstanceOf()); + Assert.That(() => FdbSubspace.Create(default(IFdbTuple)), Throws.InstanceOf()); + Assert.That(() => FdbSubspace.Empty.Partition[default(IFdbTuple)], Throws.InstanceOf()); + Assert.That(() => FdbSubspace.Create(FdbKey.Directory).Partition[default(IFdbTuple)], Throws.InstanceOf()); } [Test] [Category("LocalCluster")] public void Test_Subspace_With_Tuple_Prefix() { - var subspace = new FdbSubspace(FdbTuple.Create("hello")); + var subspace = FdbSubspace.Create(FdbTuple.Create("hello")); Assert.That(subspace.Key.ToString(), Is.EqualTo("<02>hello<00>")); - Assert.That(subspace.Copy(), Is.Not.SameAs(subspace)); - Assert.That(subspace.Copy().Key, Is.EqualTo(subspace.Key)); + Assert.That(FdbSubspace.Copy(subspace), Is.Not.SameAs(subspace)); + Assert.That(FdbSubspace.Copy(subspace).Key, Is.EqualTo(subspace.Key)); // concat(Slice) should append the slice to the tuple prefix directly - Assert.That(subspace.Concat(Slice.FromInt32(0x01020304)).ToString(), Is.EqualTo("<02>hello<00><04><03><02><01>")); - 
Assert.That(subspace.Concat(Slice.FromAscii("world")).ToString(), Is.EqualTo("<02>hello<00>world")); + Assert.That(subspace.ConcatKey(Slice.FromInt32(0x01020304)).ToString(), Is.EqualTo("<02>hello<00><04><03><02><01>")); + Assert.That(subspace.ConcatKey(Slice.FromAscii("world")).ToString(), Is.EqualTo("<02>hello<00>world")); // pack(...) should use tuple serialization - Assert.That(subspace.Pack(123).ToString(), Is.EqualTo("<02>hello<00><15>{")); - Assert.That(subspace.Pack("world").ToString(), Is.EqualTo("<02>hello<00><02>world<00>")); + Assert.That(subspace.Tuples.EncodeKey(123).ToString(), Is.EqualTo("<02>hello<00><15>{")); + Assert.That(subspace.Tuples.EncodeKey("world").ToString(), Is.EqualTo("<02>hello<00><02>world<00>")); // even though the subspace prefix is a tuple, appending to it will only return the new items - var t = subspace.Append("world", 123, false); + var t = subspace.Tuples.Append("world", 123, false); Assert.That(t, Is.Not.Null); Assert.That(t.Count, Is.EqualTo(3)); Assert.That(t.Get(0), Is.EqualTo("world")); @@ -153,7 +153,7 @@ public void Test_Subspace_With_Tuple_Prefix() Assert.That(k.ToString(), Is.EqualTo("<02>hello<00><02>world<00><15>{<14>")); // if we unpack the key with the binary prefix, we should get a valid tuple - var t2 = subspace.Unpack(k); + var t2 = subspace.Tuples.Unpack(k); Assert.That(t2, Is.Not.Null); Assert.That(t2.Count, Is.EqualTo(3)); Assert.That(t2.Get(0), Is.EqualTo("world")); @@ -170,24 +170,24 @@ public void Test_Subspace_Partitioning_With_Binary_Suffix() Assert.That(parent.Key.ToString(), Is.EqualTo("")); // create a child subspace using a tuple - var child = parent[FdbKey.Directory]; + var child = parent.Partition[FdbKey.Directory]; Assert.That(child, Is.Not.Null); Assert.That(child.Key.ToString(), Is.EqualTo("")); // create a key from this child subspace - var key = child.Concat(Slice.FromFixed32(0x01020304)); + var key = child.ConcatKey(Slice.FromFixed32(0x01020304)); Assert.That(key.ToString(), 
Is.EqualTo("<04><03><02><01>")); // create another child - var grandChild = child[Slice.FromAscii("hello")]; + var grandChild = child.Partition[Slice.FromAscii("hello")]; Assert.That(grandChild, Is.Not.Null); Assert.That(grandChild.Key.ToString(), Is.EqualTo("hello")); - key = grandChild.Concat(Slice.FromFixed32(0x01020304)); + key = grandChild.ConcatKey(Slice.FromFixed32(0x01020304)); Assert.That(key.ToString(), Is.EqualTo("hello<04><03><02><01>")); // cornercase - Assert.That(child[Slice.Empty].Key, Is.EqualTo(child.Key)); + Assert.That(child.Partition[Slice.Empty].Key, Is.EqualTo(child.Key)); } [Test] @@ -199,12 +199,12 @@ public void Test_Subspace_Partitioning_With_Tuple_Suffix() Assert.That(parent.Key.ToString(), Is.EqualTo("")); // create a child subspace using a tuple - var child = parent.Partition(FdbTuple.Create("hca")); + var child = parent.Partition[FdbTuple.Create("hca")]; Assert.That(child, Is.Not.Null); Assert.That(child.Key.ToString(), Is.EqualTo("<02>hca<00>")); // create a tuple from this child subspace - var tuple = child.Append(123); + var tuple = child.Tuples.Append(123); Assert.That(tuple, Is.Not.Null); Assert.That(tuple.ToSlice().ToString(), Is.EqualTo("<02>hca<00><15>{")); @@ -213,11 +213,11 @@ public void Test_Subspace_Partitioning_With_Tuple_Suffix() Assert.That(t1.ToSlice().ToString(), Is.EqualTo("<02>hca<00><15>{<14>")); // check that we could also create the same tuple starting from the parent subspace - var t2 = parent.Append("hca", 123, false); + var t2 = parent.Tuples.Append("hca", 123, false); Assert.That(t2.ToSlice(), Is.EqualTo(t1.ToSlice())); // cornercase - Assert.That(child[FdbTuple.Empty].Key, Is.EqualTo(child.Key)); + Assert.That(child.Partition[FdbTuple.Empty].Key, Is.EqualTo(child.Key)); } diff --git a/FoundationDB.Tests/Layers/TupleFacts.cs b/FoundationDB.Tests/Layers/TupleFacts.cs index 2489ae4e7..d78e00b29 100644 --- a/FoundationDB.Tests/Layers/TupleFacts.cs +++ b/FoundationDB.Tests/Layers/TupleFacts.cs @@ -1721,19 
+1721,19 @@ public void Test_FdbTuple_PackRange_Of_T() int[] items = new int[] { 1, 2, 3, 123, -1, int.MaxValue }; // array version - slices = FdbTuple.PackRange(tuple, items); + slices = FdbTuple.PackRangeWithPrefix(tuple, items); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(items.Length)); Assert.That(slices, Is.EqualTo(items.Select(x => tuple.Append(x).ToSlice()).ToArray())); // IEnumerable version that is passed an array - slices = FdbTuple.PackRange(tuple, (IEnumerable)items); + slices = FdbTuple.PackRangeWithPrefix(tuple, (IEnumerable)items); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(items.Length)); Assert.That(slices, Is.EqualTo(items.Select(x => tuple.Append(x).ToSlice()).ToArray())); // IEnumerable version but with a "real" enumerable - slices = FdbTuple.PackRange(tuple, items.Select(t => t)); + slices = FdbTuple.PackRangeWithPrefix(tuple, items.Select(t => t)); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(items.Length)); Assert.That(slices, Is.EqualTo(items.Select(x => tuple.Append(x).ToSlice()).ToArray())); @@ -1744,7 +1744,7 @@ public void Test_FdbTuple_PackRange_Of_T() string[] words = new string[] { "hello", "world", "très bien", "断トツ", "abc\0def", null, String.Empty }; - var merged = FdbTuple.PackRange(Slice.FromByte(42), words); + var merged = FdbTuple.PackRangeWithPrefix(Slice.FromByte(42), words); Assert.That(merged, Is.Not.Null); Assert.That(merged.Length, Is.EqualTo(words.Length)); @@ -1758,33 +1758,33 @@ public void Test_FdbTuple_PackRange_Of_T() } // corner cases - Assert.That(() => FdbTuple.PackRange(Slice.Empty, default(int[])), Throws.InstanceOf().With.Property("ParamName").EqualTo("keys")); - Assert.That(() => FdbTuple.PackRange(Slice.Empty, default(IEnumerable)), Throws.InstanceOf().With.Property("ParamName").EqualTo("keys")); + Assert.That(() => FdbTuple.PackRangeWithPrefix(Slice.Empty, default(int[])), 
Throws.InstanceOf().With.Property("ParamName").EqualTo("keys")); + Assert.That(() => FdbTuple.PackRangeWithPrefix(Slice.Empty, default(IEnumerable)), Throws.InstanceOf().With.Property("ParamName").EqualTo("keys")); #endregion } [Test] - public void Test_FdbTuple_PackBoxedRange() + public void Test_FdbTuple_PackRange_Boxed() { Slice[] slices; var tuple = FdbTuple.Create("hello"); object[] items = new object[] { "world", 123, false, Guid.NewGuid(), long.MinValue }; // array version - slices = FdbTuple.PackBoxedRange(tuple, items); + slices = FdbTuple.PackRangeWithPrefix(tuple, items); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(items.Length)); Assert.That(slices, Is.EqualTo(items.Select(x => tuple.Append(x).ToSlice()).ToArray())); // IEnumerable version that is passed an array - slices = FdbTuple.PackBoxedRange(tuple, (IEnumerable)items); + slices = FdbTuple.PackRangeWithPrefix(tuple, (IEnumerable)items); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(items.Length)); Assert.That(slices, Is.EqualTo(items.Select(x => tuple.Append(x).ToSlice()).ToArray())); // IEnumerable version but with a "real" enumerable - slices = FdbTuple.PackBoxedRange(tuple, items.Select(t => t)); + slices = FdbTuple.PackRangeWithPrefix(tuple, items.Select(t => t)); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(items.Length)); Assert.That(slices, Is.EqualTo(items.Select(x => tuple.Append(x).ToSlice()).ToArray())); diff --git a/FoundationDB.Tests/Layers/VectorFacts.cs b/FoundationDB.Tests/Layers/VectorFacts.cs index 754dc3878..57f10c910 100644 --- a/FoundationDB.Tests/Layers/VectorFacts.cs +++ b/FoundationDB.Tests/Layers/VectorFacts.cs @@ -175,7 +175,7 @@ private static async Task PrintVector(FdbVector vector, IFdbReadOnlyTransa await tr.GetRange(vector.Subspace.ToRange()).ForEachAsync((kvp) => { if (!first) sb.Append(", "); else first = false; - sb.Append(vector.Subspace.UnpackLast(kvp.Key) + ":" + 
kvp.Value.ToAsciiOrHexaString()); + sb.Append(vector.Subspace.Tuples.DecodeLast(kvp.Key) + ":" + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("> Vector: (" + sb.ToString() + ")"); diff --git a/FoundationDB.Tests/Linq/FdbAsyncQueryableFacts.cs b/FoundationDB.Tests/Linq/FdbAsyncQueryableFacts.cs index 9e389314a..0ec5d96a3 100644 --- a/FoundationDB.Tests/Linq/FdbAsyncQueryableFacts.cs +++ b/FoundationDB.Tests/Linq/FdbAsyncQueryableFacts.cs @@ -52,14 +52,14 @@ public async Task Test_AsyncQueryable_Basics() using(var db = await OpenTestPartitionAsync()) { - var location = db.Partition("Linq"); + var location = db.Partition.By("Linq"); await db.ClearRangeAsync(location, this.Cancellation); await db.WriteAsync((tr) => { - tr.Set(location.Pack("Hello"), Slice.FromString("World!")); - tr.Set(location.Pack("Narf"), Slice.FromString("Zort")); + tr.Set(location.Tuples.EncodeKey("Hello"), Slice.FromString("World!")); + tr.Set(location.Tuples.EncodeKey("Narf"), Slice.FromString("Zort")); }, this.Cancellation); var range = db.Query().RangeStartsWith(location.Key); @@ -91,11 +91,11 @@ public async Task Test_Query_Index_Single() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("Linq"); + var location = db.Partition.By("Linq"); await db.ClearRangeAsync(location, this.Cancellation); - var index = new FdbIndex("Foos.ByColor", location.Partition("Foos", "ByColor")); + var index = new FdbIndex("Foos.ByColor", location.Partition.By("Foos", "ByColor")); await db.WriteAsync((tr) => { @@ -125,11 +125,11 @@ public async Task Test_Query_Index_Range() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("Linq"); + var location = db.Partition.By("Linq"); await db.ClearRangeAsync(location, this.Cancellation); - var index = new FdbIndex("Bars.ByScore", location.Partition("Foos", "ByScore")); + var index = new FdbIndex("Bars.ByScore", location.Partition.By("Foos", "ByScore")); await db.WriteAsync((tr) => { diff --git 
a/FoundationDB.Tests/Linq/FdbQueryExpressionFacts.cs b/FoundationDB.Tests/Linq/FdbQueryExpressionFacts.cs index 31e70ddef..93e38037d 100644 --- a/FoundationDB.Tests/Linq/FdbQueryExpressionFacts.cs +++ b/FoundationDB.Tests/Linq/FdbQueryExpressionFacts.cs @@ -40,8 +40,8 @@ namespace FoundationDB.Linq.Expressions.Tests public class FdbQueryExpressionFacts { - private FdbIndex FooBarIndex = new FdbIndex("Foos.ByBar", new FdbSubspace(FdbTuple.Create("Foos", 1))); - private FdbIndex FooBazIndex = new FdbIndex("Foos.ByBaz", new FdbSubspace(FdbTuple.Create("Foos", 2))); + private FdbIndex FooBarIndex = new FdbIndex("Foos.ByBar", FdbSubspace.Create(FdbTuple.Create("Foos", 1))); + private FdbIndex FooBazIndex = new FdbIndex("Foos.ByBaz", FdbSubspace.Create(FdbTuple.Create("Foos", 2))); [Test] public void Test_FdbQueryIndexLookupExpression() diff --git a/FoundationDB.Tests/RangeQueryFacts.cs b/FoundationDB.Tests/RangeQueryFacts.cs index 3fc146e56..cc3d262b2 100644 --- a/FoundationDB.Tests/RangeQueryFacts.cs +++ b/FoundationDB.Tests/RangeQueryFacts.cs @@ -57,31 +57,31 @@ public async Task Test_Can_Get_Range() var location = await GetCleanDirectory(db, "Queries", "Range"); // insert all values (batched) - Console.WriteLine("Inserting " + N.ToString("N0") + " keys..."); + Log("Inserting {0:N0} keys...", N); var insert = Stopwatch.StartNew(); using (var tr = db.BeginTransaction(this.Cancellation)) { foreach (int i in Enumerable.Range(0, N)) { - tr.Set(location.Pack(i), Slice.FromInt32(i)); + tr.Set(location.Tuples.EncodeKey(i), Slice.FromInt32(i)); } await tr.CommitAsync(); } insert.Stop(); - Console.WriteLine("Committed " + N + " keys in " + insert.Elapsed.TotalMilliseconds.ToString("N1") + " ms"); + Log("Committed {0:N0} keys in {1:N1} ms", N, insert.Elapsed.TotalMilliseconds); // GetRange values using (var tr = db.BeginTransaction(this.Cancellation)) { - var query = tr.GetRange(location.Pack(0), location.Pack(N)); + var query = tr.GetRange(location.Tuples.EncodeKey(0), 
location.Tuples.EncodeKey(N)); Assert.That(query, Is.Not.Null); Assert.That(query.Transaction, Is.SameAs(tr)); - Assert.That(query.Begin.Key, Is.EqualTo(location.Pack(0))); - Assert.That(query.End.Key, Is.EqualTo(location.Pack(N))); + Assert.That(query.Begin.Key, Is.EqualTo(location.Tuples.EncodeKey(0))); + Assert.That(query.End.Key, Is.EqualTo(location.Tuples.EncodeKey(N))); Assert.That(query.Limit, Is.Null); Assert.That(query.TargetBytes, Is.Null); Assert.That(query.Reversed, Is.False); @@ -90,7 +90,7 @@ public async Task Test_Can_Get_Range() Assert.That(query.Range.Begin, Is.EqualTo(query.Begin)); Assert.That(query.Range.End, Is.EqualTo(query.End)); - Console.WriteLine("Getting range " + query.Range.ToString() + " ..."); + Log("Getting range {0} ...", query.Range); var ts = Stopwatch.StartNew(); var items = await query.ToListAsync(); @@ -98,16 +98,16 @@ public async Task Test_Can_Get_Range() Assert.That(items, Is.Not.Null); Assert.That(items.Count, Is.EqualTo(N)); - Console.WriteLine("Took " + ts.Elapsed.TotalMilliseconds.ToString("N1") + " ms to get " + items.Count.ToString("N0") + " results"); + Log("Took {0:N1} ms to get {1:N0} results", ts.Elapsed.TotalMilliseconds, items.Count); for (int i = 0; i < N; i++) { var kvp = items[i]; // key should be a tuple in the correct order - var key = location.Unpack(kvp.Key); + var key = location.Tuples.Unpack(kvp.Key); - if (i % 128 == 0) Console.WriteLine("... " + key.ToString() + " = " + kvp.Value.ToString()); + if (i % 128 == 0) Log("... 
{0} = {1}", key, kvp.Value); Assert.That(key.Count, Is.EqualTo(1)); Assert.That(key.Get(-1), Is.EqualTo(i)); @@ -128,18 +128,18 @@ public async Task Test_Can_Get_Range_First_Single_And_Last() // put test values in a namespace var location = await GetCleanDirectory(db, "Queries", "Range"); - var a = location.Partition("a"); - var b = location.Partition("b"); - var c = location.Partition("c"); + var a = location.Partition.By("a"); + var b = location.Partition.By("b"); + var c = location.Partition.By("c"); // insert a bunch of keys under 'a', only one under 'b', and nothing under 'c' await db.WriteAsync((tr) => { for (int i = 0; i < 10; i++) { - tr.Set(a.Pack(i), Slice.FromInt32(i)); + tr.Set(a.Tuples.EncodeKey(i), Slice.FromInt32(i)); } - tr.Set(b.Pack(0), Slice.FromInt32(42)); + tr.Set(b.Tuples.EncodeKey(0), Slice.FromInt32(42)); }, this.Cancellation); KeyValuePair res; @@ -151,22 +151,22 @@ await db.WriteAsync((tr) => // should return the first one res = await query.FirstOrDefaultAsync(); - Assert.That(res.Key, Is.EqualTo(a.Pack(0))); + Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(0))); // should return the first one res = await query.FirstAsync(); - Assert.That(res.Key, Is.EqualTo(a.Pack(0))); + Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(0))); // should return the last one res = await query.LastOrDefaultAsync(); - Assert.That(res.Key, Is.EqualTo(a.Pack(9))); + Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(9))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(9))); // should return the last one res = await query.LastAsync(); - Assert.That(res.Key, Is.EqualTo(a.Pack(9))); + Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(9))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(9))); // should fail because there is more than one @@ -183,32 +183,32 @@ await db.WriteAsync((tr) => // should return the first one res = await 
query.FirstOrDefaultAsync(); - Assert.That(res.Key, Is.EqualTo(b.Pack(0))); + Assert.That(res.Key, Is.EqualTo(b.Tuples.EncodeKey(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(42))); // should return the first one res = await query.FirstAsync(); - Assert.That(res.Key, Is.EqualTo(b.Pack(0))); + Assert.That(res.Key, Is.EqualTo(b.Tuples.EncodeKey(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(42))); // should return the last one res = await query.LastOrDefaultAsync(); - Assert.That(res.Key, Is.EqualTo(b.Pack(0))); + Assert.That(res.Key, Is.EqualTo(b.Tuples.EncodeKey(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(42))); // should return the last one res = await query.LastAsync(); - Assert.That(res.Key, Is.EqualTo(b.Pack(0))); + Assert.That(res.Key, Is.EqualTo(b.Tuples.EncodeKey(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(42))); // should return the first one res = await query.SingleOrDefaultAsync(); - Assert.That(res.Key, Is.EqualTo(b.Pack(0))); + Assert.That(res.Key, Is.EqualTo(b.Tuples.EncodeKey(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(42))); // should return the first one res = await query.SingleAsync(); - Assert.That(res.Key, Is.EqualTo(b.Pack(0))); + Assert.That(res.Key, Is.EqualTo(b.Tuples.EncodeKey(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(42))); } @@ -249,12 +249,12 @@ await db.WriteAsync((tr) => // should return the fifth one res = await query.LastOrDefaultAsync(); - Assert.That(res.Key, Is.EqualTo(a.Pack(4))); + Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(4))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(4))); // should return the fifth one res = await query.LastAsync(); - Assert.That(res.Key, Is.EqualTo(a.Pack(4))); + Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(4))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(4))); } @@ -265,12 +265,12 @@ await db.WriteAsync((tr) => // should return the fifth one res = await query.FirstOrDefaultAsync(); - Assert.That(res.Key, 
Is.EqualTo(a.Pack(5))); + Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(5))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(5))); // should return the fifth one res = await query.FirstAsync(); - Assert.That(res.Key, Is.EqualTo(a.Pack(5))); + Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(5))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(5))); } @@ -286,14 +286,14 @@ public async Task Test_Can_Get_Range_With_Limit() // put test values in a namespace var location = await GetCleanDirectory(db, "Queries", "Range"); - var a = location.Partition("a"); + var a = location.Partition.By("a"); // insert a bunch of keys under 'a' await db.WriteAsync((tr) => { for (int i = 0; i < 10; i++) { - tr.Set(a.Pack(i), Slice.FromInt32(i)); + tr.Set(a.Tuples.EncodeKey(i), Slice.FromInt32(i)); } // add guard keys tr.Set(location.Key, Slice.FromInt32(-1)); @@ -313,7 +313,7 @@ await db.WriteAsync((tr) => Assert.That(elements.Count, Is.EqualTo(5)); for (int i = 0; i < 5; i++) { - Assert.That(elements[i].Key, Is.EqualTo(a.Pack(i))); + Assert.That(elements[i].Key, Is.EqualTo(a.Tuples.EncodeKey(i))); Assert.That(elements[i].Value, Is.EqualTo(Slice.FromInt32(i))); } } @@ -331,7 +331,7 @@ await db.WriteAsync((tr) => Assert.That(elements.Count, Is.EqualTo(10)); for (int i = 0; i < 10; i++) { - Assert.That(elements[i].Key, Is.EqualTo(a.Pack(i))); + Assert.That(elements[i].Key, Is.EqualTo(a.Tuples.EncodeKey(i))); Assert.That(elements[i].Value, Is.EqualTo(Slice.FromInt32(i))); } } @@ -361,7 +361,7 @@ public async Task Test_Can_Skip() var location = await GetCleanDirectory(db, "Queries", "Range"); // import test data - var data = Enumerable.Range(0, 100).Select(x => new KeyValuePair(location.Pack(x), Slice.FromFixed32(x))); + var data = Enumerable.Range(0, 100).Select(x => new KeyValuePair(location.Tuples.EncodeKey(x), Slice.FromFixed32(x))); await Fdb.Bulk.WriteAsync(db, data, this.Cancellation); // from the start @@ -443,16 +443,16 @@ public async Task 
Test_Original_Range_Does_Not_Overflow() var location = await GetCleanDirectory(db, "Queries", "Range"); // import test data - var data = Enumerable.Range(0, 30).Select(x => new KeyValuePair(location.Pack(x), Slice.FromFixed32(x))); + var data = Enumerable.Range(0, 30).Select(x => new KeyValuePair(location.Tuples.EncodeKey(x), Slice.FromFixed32(x))); await Fdb.Bulk.WriteAsync(db, data, this.Cancellation); using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { var query = tr - .GetRange(location.Pack(10), location.Pack(20)) // 10 -> 19 + .GetRange(location.Tuples.EncodeKey(10), location.Tuples.EncodeKey(20)) // 10 -> 19 .Take(20) // 10 -> 19 (limit 20) .Reverse(); // 19 -> 10 (limit 20) - Console.WriteLine(query); + Log("query: {0}", query); // set a limit that overflows, and then reverse from it var res = await query.ToListAsync(); @@ -462,11 +462,11 @@ public async Task Test_Original_Range_Does_Not_Overflow() using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { var query = tr - .GetRange(location.Pack(10), location.Pack(20)) // 10 -> 19 + .GetRange(location.Tuples.EncodeKey(10), location.Tuples.EncodeKey(20)) // 10 -> 19 .Reverse() // 19 -> 10 .Take(20) // 19 -> 10 (limit 20) .Reverse(); // 10 -> 19 (limit 20) - Console.WriteLine(query); + Log("query: {0}", query); var res = await query.ToListAsync(); Assert.That(res.Count, Is.EqualTo(10)); @@ -488,7 +488,7 @@ public async Task Test_Can_MergeSort() await db.ClearRangeAsync(location, this.Cancellation); // create K lists - var lists = Enumerable.Range(0, K).Select(i => location.Partition(i)).ToArray(); + var lists = Enumerable.Range(0, K).Select(i => location.Partition.By(i)).ToArray(); // lists[0] contains all multiples of K ([0, 0], [K, 1], [2K, 2], ...) // lists[1] contains all multiples of K, offset by 1 ([1, 0], [K+1, 1], [2K+1, 2], ...) 
@@ -502,7 +502,7 @@ public async Task Test_Can_MergeSort() { for (int i = 0; i < N; i++) { - tr.Set(lists[k].Pack((i * K) + k), FdbTuple.Pack(k, i)); + tr.Set(lists[k].Tuples.EncodeKey((i * K) + k), FdbTuple.Pack(k, i)); } await tr.CommitAsync(); } @@ -515,7 +515,7 @@ public async Task Test_Can_MergeSort() { var merge = tr.MergeSort( lists.Select(list => list.ToSelectorPair()), - kvp => location.UnpackLast(kvp.Key) + kvp => location.Tuples.DecodeLast(kvp.Key) ); Assert.That(merge, Is.Not.Null); @@ -527,7 +527,7 @@ public async Task Test_Can_MergeSort() for(int i=0;i location.Partition(i)).ToArray(); + var lists = Enumerable.Range(0, K).Select(i => location.Partition.By(i)).ToArray(); // lists[0] contains all multiples of 1 // lists[1] contains all multiples of 2 @@ -569,7 +569,7 @@ public async Task Test_Range_Intersect() { for (int i = 0; i < N; i++) { - var key = lists[k].Pack(series[k][i]); + var key = lists[k].Tuples.EncodeKey(series[k][i]); var value = FdbTuple.Pack(k, i); //Console.WriteLine("> " + key + " = " + value); tr.Set(key, value); @@ -582,13 +582,13 @@ public async Task Test_Range_Intersect() IEnumerable xs = series[0]; for (int i = 1; i < K; i++) xs = xs.Intersect(series[i]); var expected = xs.ToArray(); - Console.WriteLine(String.Join(", ", expected)); + Log("Expected: {0}", String.Join(", ", expected)); using (var tr = db.BeginTransaction(this.Cancellation)) { var merge = tr.Intersect( lists.Select(list => list.ToSelectorPair()), - kvp => location.UnpackLast(kvp.Key) + kvp => location.Tuples.DecodeLast(kvp.Key) ); Assert.That(merge, Is.Not.Null); @@ -601,11 +601,9 @@ public async Task Test_Range_Intersect() for (int i = 0; i < results.Count; i++) { - Assert.That(location.UnpackLast(results[i].Key), Is.EqualTo(expected[i])); + Assert.That(location.Tuples.DecodeLast(results[i].Key), Is.EqualTo(expected[i])); } } - - } } @@ -622,7 +620,7 @@ public async Task Test_Range_Except() var location = await GetCleanDirectory(db, "Queries", "Except"); // 
create K lists - var lists = Enumerable.Range(0, K).Select(i => location.Partition(i)).ToArray(); + var lists = Enumerable.Range(0, K).Select(i => location.Partition.By(i)).ToArray(); // lists[0] contains all multiples of 1 // lists[1] contains all multiples of 2 @@ -643,7 +641,7 @@ public async Task Test_Range_Except() { for (int i = 0; i < N; i++) { - var key = lists[k].Pack(series[k][i]); + var key = lists[k].Tuples.EncodeKey(series[k][i]); var value = FdbTuple.Pack(k, i); //Console.WriteLine("> " + key + " = " + value); tr.Set(key, value); @@ -656,13 +654,13 @@ public async Task Test_Range_Except() IEnumerable xs = series[0]; for (int i = 1; i < K; i++) xs = xs.Except(series[i]); var expected = xs.ToArray(); - Console.WriteLine(String.Join(", ", expected)); + Log("Expected: {0}", String.Join(", ", expected)); using (var tr = db.BeginTransaction(this.Cancellation)) { var merge = tr.Except( lists.Select(list => list.ToSelectorPair()), - kvp => location.UnpackLast(kvp.Key) + kvp => location.Tuples.DecodeLast(kvp.Key) ); Assert.That(merge, Is.Not.Null); @@ -675,7 +673,7 @@ public async Task Test_Range_Except() for (int i = 0; i < results.Count; i++) { - Assert.That(location.UnpackLast(results[i].Key), Is.EqualTo(expected[i])); + Assert.That(location.Tuples.DecodeLast(results[i].Key), Is.EqualTo(expected[i])); } } @@ -702,11 +700,11 @@ public async Task Test_Range_Except_Composite_Key() await db.WriteAsync((tr) => { // Items - tr.Set(locItems.Pack("userA", 10093), Slice.Empty); - tr.Set(locItems.Pack("userA", 19238), Slice.Empty); - tr.Set(locItems.Pack("userB", 20003), Slice.Empty); + tr.Set(locItems.Tuples.EncodeKey("userA", 10093), Slice.Empty); + tr.Set(locItems.Tuples.EncodeKey("userA", 19238), Slice.Empty); + tr.Set(locItems.Tuples.EncodeKey("userB", 20003), Slice.Empty); // Processed - tr.Set(locProcessed.Pack("userA", 19238), Slice.Empty); + tr.Set(locProcessed.Tuples.EncodeKey("userA", 19238), Slice.Empty); }, this.Cancellation); // the query (Items ∩ 
Processed) should return (userA, 10093) and (userB, 20003) @@ -723,7 +721,7 @@ await db.WriteAsync((tr) => // problem: Except() still returns the original (Slice,Slice) pairs from the first range, // meaning that we still need to unpack agin the key (this time knowing the location) - return query.Select(kv => locItems.Unpack(kv.Key)); + return query.Select(kv => locItems.Tuples.Unpack(kv.Key)); }, this.Cancellation); foreach(var r in results) @@ -740,11 +738,11 @@ await db.WriteAsync((tr) => { var items = tr .GetRange(locItems.ToRange()) - .Select(kv => locItems.Unpack(kv.Key)); + .Select(kv => locItems.Tuples.Unpack(kv.Key)); var processed = tr .GetRange(locProcessed.ToRange()) - .Select(kv => locProcessed.Unpack(kv.Key)); + .Select(kv => locProcessed.Tuples.Unpack(kv.Key)); // items and processed are lists of (string, int) tuples, we can compare them directly var query = items.Except(processed, FdbTupleComparisons.Composite()); diff --git a/FoundationDB.Tests/TestHelpers.cs b/FoundationDB.Tests/TestHelpers.cs index 16bba8dd0..fa943156e 100644 --- a/FoundationDB.Tests/TestHelpers.cs +++ b/FoundationDB.Tests/TestHelpers.cs @@ -82,10 +82,10 @@ public static async Task GetCleanDirectory([NotNull] IFdbD return subspace; } - public static async Task DumpSubspace([NotNull] IFdbDatabase db, [NotNull] FdbSubspace subspace, CancellationToken ct) + public static async Task DumpSubspace([NotNull] IFdbDatabase db, [NotNull] IFdbSubspace subspace, CancellationToken ct) { Assert.That(db, Is.Not.Null); - Assert.That(db.GlobalSpace.Contains(subspace.Key), Is.True, "Using a location outside of the test database partition!!! This is probably a bug in the test..."); + Assert.That(db.GlobalSpace.Contains(subspace.ToFoundationDbKey()), Is.True, "Using a location outside of the test database partition!!! 
This is probably a bug in the test..."); // do not log db = db.WithoutLogging(); @@ -96,17 +96,17 @@ public static async Task DumpSubspace([NotNull] IFdbDatabase db, [NotNull] FdbSu } } - public static async Task DumpSubspace([NotNull] IFdbReadOnlyTransaction tr, [NotNull] FdbSubspace subspace) + public static async Task DumpSubspace([NotNull] IFdbReadOnlyTransaction tr, [NotNull] IFdbSubspace subspace) { Assert.That(tr, Is.Not.Null); Console.WriteLine("Dumping content of subspace " + subspace.ToString() + " :"); int count = 0; await tr - .GetRange(FdbKeyRange.StartsWith(subspace.Key)) + .GetRange(FdbKeyRange.StartsWith(subspace.ToFoundationDbKey())) .ForEachAsync((kvp) => { - var key = subspace.Extract(kvp.Key); + var key = subspace.ExtractKey(kvp.Key, boundCheck: true); ++count; string keyDump = null; try diff --git a/FoundationDB.Tests/TransactionFacts.cs b/FoundationDB.Tests/TransactionFacts.cs index 132986dab..c367d9c19 100644 --- a/FoundationDB.Tests/TransactionFacts.cs +++ b/FoundationDB.Tests/TransactionFacts.cs @@ -104,16 +104,16 @@ public async Task Test_Creating_A_ReadOnly_Transaction_Throws_When_Writing() Assert.That(tr, Is.Not.Null); // reading should not fail - await tr.GetAsync(db.Pack("Hello")); + await tr.GetAsync(db.Tuples.EncodeKey("Hello")); // any attempt to recast into a writeable transaction should fail! 
var tr2 = (IFdbTransaction)tr; Assert.That(tr2.IsReadOnly, Is.True, "Transaction should be marked as readonly"); - var location = db.Partition("ReadOnly"); - Assert.That(() => tr2.Set(location.Pack("Hello"), Slice.Empty), Throws.InvalidOperationException); - Assert.That(() => tr2.Clear(location.Pack("Hello")), Throws.InvalidOperationException); - Assert.That(() => tr2.ClearRange(location.Pack("ABC"), location.Pack("DEF")), Throws.InvalidOperationException); - Assert.That(() => tr2.Atomic(location.Pack("Counter"), Slice.FromFixed32(1), FdbMutationType.Add), Throws.InvalidOperationException); + var location = db.Partition.By("ReadOnly"); + Assert.That(() => tr2.Set(location.Tuples.EncodeKey("Hello"), Slice.Empty), Throws.InvalidOperationException); + Assert.That(() => tr2.Clear(location.Tuples.EncodeKey("Hello")), Throws.InvalidOperationException); + Assert.That(() => tr2.ClearRange(location.Tuples.EncodeKey("ABC"), location.Tuples.EncodeKey("DEF")), Throws.InvalidOperationException); + Assert.That(() => tr2.Atomic(location.Tuples.EncodeKey("Counter"), Slice.FromFixed32(1), FdbMutationType.Add), Throws.InvalidOperationException); } } } @@ -223,11 +223,11 @@ public async Task Test_Cancelling_Transaction_Before_Commit_Should_Throw_Immedia using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.By("test"); using (var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(location.Pack(1), Slice.FromString("hello")); + tr.Set(location.Tuples.EncodeKey(1), Slice.FromString("hello")); tr.Cancel(); await TestHelpers.AssertThrowsFdbErrorAsync( @@ -248,7 +248,7 @@ public async Task Test_Cancelling_Transaction_During_Commit_Should_Abort_Task() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.By("test"); await db.ClearRangeAsync(location, this.Cancellation); @@ -259,7 +259,7 @@ public async Task 
Test_Cancelling_Transaction_During_Commit_Should_Abort_Task() // Writes about 5 MB of stuff in 100k chunks for (int i = 0; i < 50; i++) { - tr.Set(location.Pack(i), Slice.Random(rnd, 100 * 1000)); + tr.Set(location.Tuples.EncodeKey(i), Slice.Random(rnd, 100 * 1000)); } // start commiting @@ -288,7 +288,7 @@ public async Task Test_Cancelling_Token_During_Commit_Should_Abort_Task() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.By("test"); await db.ClearRangeAsync(location, this.Cancellation); @@ -300,7 +300,7 @@ public async Task Test_Cancelling_Token_During_Commit_Should_Abort_Task() // Writes about 5 MB of stuff in 100k chunks for (int i = 0; i < 50; i++) { - tr.Set(location.Pack(i), Slice.Random(rnd, 100 * 1000)); + tr.Set(location.Tuples.EncodeKey(i), Slice.Random(rnd, 100 * 1000)); } // start commiting with a cancellation token @@ -345,14 +345,14 @@ public async Task Test_Write_And_Read_Simple_Keys() long writeVersion; long readVersion; - var location = db.Partition("test"); + var location = db.Partition.By("test"); // write a bunch of keys using (var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(location.Pack("hello"), Slice.FromString("World!")); - tr.Set(location.Pack("timestamp"), Slice.FromInt64(ticks)); - tr.Set(location.Pack("blob"), Slice.Create(new byte[] { 42, 123, 7 })); + tr.Set(location.Tuples.EncodeKey("hello"), Slice.FromString("World!")); + tr.Set(location.Tuples.EncodeKey("timestamp"), Slice.FromInt64(ticks)); + tr.Set(location.Tuples.EncodeKey("blob"), Slice.Create(new byte[] { 42, 123, 7 })); await tr.CommitAsync(); @@ -368,15 +368,15 @@ public async Task Test_Write_And_Read_Simple_Keys() readVersion = await tr.GetReadVersionAsync(); Assert.That(readVersion, Is.GreaterThan(0), "Read version should be > 0"); - bytes = await tr.GetAsync(location.Pack("hello")); // => 1007 "past_version" + bytes = await tr.GetAsync(location.Tuples.EncodeKey("hello")); // => 1007 
"past_version" Assert.That(bytes.Array, Is.Not.Null); Assert.That(Encoding.UTF8.GetString(bytes.Array, bytes.Offset, bytes.Count), Is.EqualTo("World!")); - bytes = await tr.GetAsync(location.Pack("timestamp")); + bytes = await tr.GetAsync(location.Tuples.EncodeKey("timestamp")); Assert.That(bytes.Array, Is.Not.Null); Assert.That(bytes.ToInt64(), Is.EqualTo(ticks)); - bytes = await tr.GetAsync(location.Pack("blob")); + bytes = await tr.GetAsync(location.Tuples.EncodeKey("blob")); Assert.That(bytes.Array, Is.Not.Null); Assert.That(bytes.Array, Is.EqualTo(new byte[] { 42, 123, 7 })); } @@ -390,7 +390,7 @@ public async Task Test_Can_Resolve_Key_Selector() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("keys"); + var location = db.Partition.By("keys"); await db.ClearRangeAsync(location, this.Cancellation); var minKey = location.Key + FdbKey.MinValue; @@ -406,7 +406,7 @@ public async Task Test_Can_Resolve_Key_Selector() tr.Set(minKey, Slice.FromString("min")); for (int i = 0; i < 20; i++) { - tr.Set(location.Pack(i), Slice.FromString(i.ToString())); + tr.Set(location.Tuples.EncodeKey(i), Slice.FromString(i.ToString())); } tr.Set(maxKey, Slice.FromString("max")); await tr.CommitAsync(); @@ -418,43 +418,43 @@ public async Task Test_Can_Resolve_Key_Selector() FdbKeySelector sel; // >= 0 - sel = FdbKeySelector.FirstGreaterOrEqual(location.Pack(0)); - Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Pack(0)), "fGE(0) should return 0"); + sel = FdbKeySelector.FirstGreaterOrEqual(location.Tuples.EncodeKey(0)); + Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Tuples.EncodeKey(0)), "fGE(0) should return 0"); Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(minKey), "fGE(0)-1 should return minKey"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Pack(1)), "fGE(0)+1 should return 1"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Tuples.EncodeKey(1)), "fGE(0)+1 should return 1"); // > 
0 - sel = FdbKeySelector.FirstGreaterThan(location.Pack(0)); - Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Pack(1)), "fGT(0) should return 1"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Pack(0)), "fGT(0)-1 should return 0"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Pack(2)), "fGT(0)+1 should return 2"); + sel = FdbKeySelector.FirstGreaterThan(location.Tuples.EncodeKey(0)); + Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Tuples.EncodeKey(1)), "fGT(0) should return 1"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Tuples.EncodeKey(0)), "fGT(0)-1 should return 0"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Tuples.EncodeKey(2)), "fGT(0)+1 should return 2"); // <= 10 - sel = FdbKeySelector.LastLessOrEqual(location.Pack(10)); - Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Pack(10)), "lLE(10) should return 10"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Pack(9)), "lLE(10)-1 should return 9"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Pack(11)), "lLE(10)+1 should return 11"); + sel = FdbKeySelector.LastLessOrEqual(location.Tuples.EncodeKey(10)); + Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Tuples.EncodeKey(10)), "lLE(10) should return 10"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Tuples.EncodeKey(9)), "lLE(10)-1 should return 9"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Tuples.EncodeKey(11)), "lLE(10)+1 should return 11"); // < 10 - sel = FdbKeySelector.LastLessThan(location.Pack(10)); - Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Pack(9)), "lLT(10) should return 9"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Pack(8)), "lLT(10)-1 should return 8"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Pack(10)), "lLT(10)+1 should return 10"); + sel = 
FdbKeySelector.LastLessThan(location.Tuples.EncodeKey(10)); + Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Tuples.EncodeKey(9)), "lLT(10) should return 9"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Tuples.EncodeKey(8)), "lLT(10)-1 should return 8"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Tuples.EncodeKey(10)), "lLT(10)+1 should return 10"); // < 0 - sel = FdbKeySelector.LastLessThan(location.Pack(0)); + sel = FdbKeySelector.LastLessThan(location.Tuples.EncodeKey(0)); Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(minKey), "lLT(0) should return minKey"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Pack(0)), "lLT(0)+1 should return 0"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Tuples.EncodeKey(0)), "lLT(0)+1 should return 0"); // >= 20 - sel = FdbKeySelector.FirstGreaterOrEqual(location.Pack(20)); + sel = FdbKeySelector.FirstGreaterOrEqual(location.Tuples.EncodeKey(20)); Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(maxKey), "fGE(20) should return maxKey"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Pack(19)), "fGE(20)-1 should return 19"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Tuples.EncodeKey(19)), "fGE(20)-1 should return 19"); // > 19 - sel = FdbKeySelector.FirstGreaterThan(location.Pack(19)); + sel = FdbKeySelector.FirstGreaterThan(location.Tuples.EncodeKey(19)); Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(maxKey), "fGT(19) should return maxKey"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Pack(19)), "fGT(19)-1 should return 19"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Tuples.EncodeKey(19)), "fGT(19)-1 should return 19"); } } } @@ -539,7 +539,7 @@ public async Task Test_Get_Multiple_Values() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("Batch"); + var location = db.Partition.By("Batch"); await 
db.ClearRangeAsync(location, this.Cancellation); int[] ids = new int[] { 8, 7, 2, 9, 5, 0, 3, 4, 6, 1 }; @@ -548,21 +548,21 @@ public async Task Test_Get_Multiple_Values() { for (int i = 0; i < ids.Length; i++) { - tr.Set(location.Pack(i), Slice.FromString("#" + i.ToString())); + tr.Set(location.Tuples.EncodeKey(i), Slice.FromString("#" + i.ToString())); } await tr.CommitAsync(); } using (var tr = db.BeginTransaction(this.Cancellation)) { - var keys = ids.Select(id => location.Pack(id)).ToArray(); + var keys = ids.Select(id => location.Tuples.EncodeKey(id)).ToArray(); var results = await tr.GetValuesAsync(keys); Assert.That(results, Is.Not.Null); Assert.That(results.Length, Is.EqualTo(ids.Length)); - Console.WriteLine(String.Join(", ", results)); + Log(String.Join(", ", results)); for (int i = 0; i < ids.Length;i++) { @@ -582,7 +582,7 @@ public async Task Test_Get_Multiple_Keys() using(var db = await OpenTestPartitionAsync()) { - var location = db.Partition("keys"); + var location = db.Partition.By("keys"); await db.ClearRangeAsync(location, this.Cancellation); var minKey = location.Key + FdbKey.MinValue; @@ -598,7 +598,7 @@ public async Task Test_Get_Multiple_Keys() tr.Set(minKey, Slice.FromString("min")); for (int i = 0; i < 20; i++) { - tr.Set(location.Pack(i), Slice.FromString(i.ToString())); + tr.Set(location.Tuples.EncodeKey(i), Slice.FromString(i.ToString())); } tr.Set(maxKey, Slice.FromString("max")); await tr.CommitAsync(); @@ -607,7 +607,7 @@ public async Task Test_Get_Multiple_Keys() using (var tr = db.BeginTransaction(this.Cancellation)) { - var selectors = Enumerable.Range(0, N).Select((i) => FdbKeySelector.FirstGreaterOrEqual(location.Pack(i))).ToArray(); + var selectors = Enumerable.Range(0, N).Select((i) => FdbKeySelector.FirstGreaterOrEqual(location.Tuples.EncodeKey(i))).ToArray(); // GetKeysAsync([]) var results = await tr.GetKeysAsync(selectors); @@ -615,7 +615,7 @@ public async Task Test_Get_Multiple_Keys() Assert.That(results.Length, 
Is.EqualTo(20)); for (int i = 0; i < N; i++) { - Assert.That(results[i], Is.EqualTo(location.Pack(i))); + Assert.That(results[i], Is.EqualTo(location.Tuples.EncodeKey(i))); } // GetKeysAsync(cast to enumerable) @@ -678,7 +678,7 @@ public async Task Test_Can_Perform_Atomic_Operations() Slice key; - key = location.Pack("add"); + key = location.Tuples.EncodeKey("add"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Add, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Add, 1); await PerformAtomicOperationAndCheck(db, key, 1, FdbMutationType.Add, 0); @@ -686,21 +686,21 @@ public async Task Test_Can_Perform_Atomic_Operations() await PerformAtomicOperationAndCheck(db, key, -1, FdbMutationType.Add, 1); await PerformAtomicOperationAndCheck(db, key, 123456789, FdbMutationType.Add, 987654321); - key = location.Pack("and"); + key = location.Tuples.EncodeKey("and"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitAnd, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitAnd, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, -1, FdbMutationType.BitAnd, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, 0x00FF00FF, FdbMutationType.BitAnd, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, 0x0F0F0F0F, FdbMutationType.BitAnd, 0x018055AA); - key = location.Pack("or"); + key = location.Tuples.EncodeKey("or"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitOr, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitOr, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, -1, FdbMutationType.BitOr, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, 0x00FF00FF, FdbMutationType.BitOr, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, 0x0F0F0F0F, FdbMutationType.BitOr, 0x018055AA); - key = location.Pack("xor"); + key = location.Tuples.EncodeKey("xor"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitXor, 0); 
await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitXor, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, -1, FdbMutationType.BitXor, 0x018055AA); @@ -710,7 +710,7 @@ public async Task Test_Can_Perform_Atomic_Operations() // calling with an invalid mutation type should fail using (var tr = db.BeginTransaction(this.Cancellation)) { - key = location.Pack("invalid"); + key = location.Tuples.EncodeKey("invalid"); Assert.That(() => tr.Atomic(key, Slice.FromFixed32(42), (FdbMutationType)42), Throws.InstanceOf().With.Property("Code").EqualTo(FdbError.InvalidMutationType)); } } @@ -722,15 +722,15 @@ public async Task Test_Can_Snapshot_Read() using(var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.By("test"); await db.ClearRangeAsync(location, this.Cancellation); // write a bunch of keys await db.WriteAsync((tr) => { - tr.Set(location.Pack("hello"), Slice.FromString("World!")); - tr.Set(location.Pack("foo"), Slice.FromString("bar")); + tr.Set(location.Tuples.EncodeKey("hello"), Slice.FromString("World!")); + tr.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("bar")); }, this.Cancellation); // read them using snapshot @@ -738,10 +738,10 @@ await db.WriteAsync((tr) => { Slice bytes; - bytes = await tr.Snapshot.GetAsync(location.Pack("hello")); + bytes = await tr.Snapshot.GetAsync(location.Tuples.EncodeKey("hello")); Assert.That(bytes.ToUnicode(), Is.EqualTo("World!")); - bytes = await tr.Snapshot.GetAsync(location.Pack("foo")); + bytes = await tr.Snapshot.GetAsync(location.Tuples.EncodeKey("foo")); Assert.That(bytes.ToUnicode(), Is.EqualTo("bar")); } @@ -762,7 +762,7 @@ public async Task Test_CommittedVersion_On_ReadOnly_Transactions() long ver = tr.GetCommittedVersion(); Assert.That(ver, Is.EqualTo(-1), "Initial committed version"); - var _ = await tr.GetAsync(db.Pack("foo")); + var _ = await tr.GetAsync(db.Tuples.EncodeKey("foo")); // until the transction commits, the committed 
version will stay -1 ver = tr.GetCommittedVersion(); @@ -793,7 +793,7 @@ public async Task Test_CommittedVersion_On_Write_Transactions() long ver = tr.GetCommittedVersion(); Assert.That(ver, Is.EqualTo(-1), "Initial committed version"); - tr.Set(db.Pack("foo"), Slice.FromString("bar")); + tr.Set(db.Tuples.EncodeKey("foo"), Slice.FromString("bar")); // until the transction commits, the committed version should still be -1 ver = tr.GetCommittedVersion(); @@ -821,10 +821,10 @@ public async Task Test_CommittedVersion_After_Reset() // take the read version (to compare with the committed version below) long rv1 = await tr.GetReadVersionAsync(); // do something and commit - tr.Set(db.Pack("foo"), Slice.FromString("bar")); + tr.Set(db.Tuples.EncodeKey("foo"), Slice.FromString("bar")); await tr.CommitAsync(); long cv1 = tr.GetCommittedVersion(); - Console.WriteLine("COMMIT: " + rv1 + " / " + cv1); + Log("COMMIT: {0} / {1}", rv1, cv1); Assert.That(cv1, Is.GreaterThanOrEqualTo(rv1), "Committed version of write transaction should be >= the read version"); // reset the transaction @@ -832,15 +832,15 @@ public async Task Test_CommittedVersion_After_Reset() long rv2 = await tr.GetReadVersionAsync(); long cv2 = tr.GetCommittedVersion(); - Console.WriteLine("RESET: " + rv2 + " / " + cv2); + Log("RESET: {0} / {1}", rv2, cv2); //Note: the current fdb_c client does not revert the commited version to -1 ... ? 
//Assert.That(cv2, Is.EqualTo(-1), "Committed version should go back to -1 after reset"); // read-only + commit - await tr.GetAsync(db.Pack("foo")); + await tr.GetAsync(db.Tuples.EncodeKey("foo")); await tr.CommitAsync(); cv2 = tr.GetCommittedVersion(); - Console.WriteLine("COMMIT2: " + rv2 + " / " + cv2); + Log("COMMIT2: {0} / {1}", rv2, cv2); Assert.That(cv2, Is.EqualTo(-1), "Committed version of read-only transaction should be -1 even the transaction was previously used to write something"); } @@ -854,24 +854,24 @@ public async Task Test_Regular_Read_With_Concurrent_Change_Should_Conflict() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.By("test"); await db.ClearRangeAsync(location, this.Cancellation); await db.WriteAsync((tr) => { - tr.Set(location.Pack("foo"), Slice.FromString("foo")); + tr.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("foo")); }, this.Cancellation); using (var trA = db.BeginTransaction(this.Cancellation)) using (var trB = db.BeginTransaction(this.Cancellation)) { // regular read - var foo = await trA.GetAsync(location.Pack("foo")); - trA.Set(location.Pack("foo"), Slice.FromString("bar")); + var foo = await trA.GetAsync(location.Tuples.EncodeKey("foo")); + trA.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("bar")); // this will conflict with our read - trB.Set(location.Pack("foo"), Slice.FromString("bar")); + trB.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("bar")); await trB.CommitAsync(); // should fail with a "not_comitted" error @@ -893,23 +893,23 @@ public async Task Test_Snapshot_Read_With_Concurrent_Change_Should_Not_Conflict( using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.By("test"); await db.ClearRangeAsync(location, this.Cancellation); await db.WriteAsync((tr) => { - tr.Set(location.Pack("foo"), Slice.FromString("foo")); + tr.Set(location.Tuples.EncodeKey("foo"), 
Slice.FromString("foo")); }, this.Cancellation); using (var trA = db.BeginTransaction(this.Cancellation)) using (var trB = db.BeginTransaction(this.Cancellation)) { // reading with snapshot mode should not conflict - var foo = await trA.Snapshot.GetAsync(location.Pack("foo")); - trA.Set(location.Pack("foo"), Slice.FromString("bar")); + var foo = await trA.Snapshot.GetAsync(location.Tuples.EncodeKey("foo")); + trA.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("bar")); // this would normally conflicts with the previous read if it wasn't a snapshot read - trB.Set(location.Pack("foo"), Slice.FromString("bar")); + trB.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("bar")); await trB.CommitAsync(); // should succeed @@ -925,12 +925,12 @@ public async Task Test_GetRange_With_Concurrent_Change_Should_Conflict() using(var db = await OpenTestPartitionAsync()) { - var loc = db.Partition("test"); + var loc = db.Partition.By("test"); await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); }, this.Cancellation); // we will read the first key from [0, 100), expected 50 @@ -941,19 +941,19 @@ await db.WriteAsync((tr) => { // [0, 100) limit 1 => 50 var kvp = await tr1 - .GetRange(loc.Pack("foo"), loc.Pack("foo", 100)) + .GetRange(loc.Tuples.EncodeKey("foo"), loc.Tuples.EncodeKey("foo", 100)) .FirstOrDefaultAsync(); - Assert.That(kvp.Key, Is.EqualTo(loc.Pack("foo", 50))); + Assert.That(kvp.Key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 50))); // 42 < 50 > conflict !!! 
using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Pack("foo", 42), Slice.FromAscii("forty-two")); + tr2.Set(loc.Tuples.EncodeKey("foo", 42), Slice.FromAscii("forty-two")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); await TestHelpers.AssertThrowsFdbErrorAsync(() => tr1.CommitAsync(), FdbError.NotCommitted, "The Set(42) in TR2 should have conflicted with the GetRange(0, 100) in TR1"); } @@ -965,26 +965,26 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // [0, 100) limit 1 => 50 var kvp = await tr1 - .GetRange(loc.Pack("foo"), loc.Pack("foo", 100)) + .GetRange(loc.Tuples.EncodeKey("foo"), loc.Tuples.EncodeKey("foo", 100)) .FirstOrDefaultAsync(); - Assert.That(kvp.Key, Is.EqualTo(loc.Pack("foo", 50))); + Assert.That(kvp.Key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 50))); // 77 > 50 => no conflict using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Pack("foo", 77), Slice.FromAscii("docm")); + tr2.Set(loc.Tuples.EncodeKey("foo", 77), Slice.FromAscii("docm")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); // should not conflict! 
await tr1.CommitAsync(); @@ -999,12 +999,12 @@ public async Task Test_GetKey_With_Concurrent_Change_Should_Conflict() using (var db = await OpenTestPartitionAsync()) { - var loc = db.Partition("test"); + var loc = db.Partition.By("test"); await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); }, this.Cancellation); // we will ask for the first key from >= 0, expecting 50, but if another transaction inserts something BEFORE 50, our key selector would have returned a different result, causing a conflict @@ -1012,18 +1012,18 @@ await db.WriteAsync((tr) => using (var tr1 = db.BeginTransaction(this.Cancellation)) { // fGE{0} => 50 - var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Pack("foo", 0))); - Assert.That(key, Is.EqualTo(loc.Pack("foo", 50))); + var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Tuples.EncodeKey("foo", 0))); + Assert.That(key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 50))); // 42 < 50 => conflict !!! 
using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Pack("foo", 42), Slice.FromAscii("forty-two")); + tr2.Set(loc.Tuples.EncodeKey("foo", 42), Slice.FromAscii("forty-two")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); await TestHelpers.AssertThrowsFdbErrorAsync(() => tr1.CommitAsync(), FdbError.NotCommitted, "The Set(42) in TR2 should have conflicted with the GetKey(fGE{0}) in TR1"); } @@ -1033,24 +1033,24 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // fGE{0} => 50 - var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Pack("foo", 0))); - Assert.That(key, Is.EqualTo(loc.Pack("foo", 50))); + var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Tuples.EncodeKey("foo", 0))); + Assert.That(key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 50))); // 77 > 50 => no conflict using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Pack("foo", 77), Slice.FromAscii("docm")); + tr2.Set(loc.Tuples.EncodeKey("foo", 77), Slice.FromAscii("docm")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); // should not conflict! 
await tr1.CommitAsync(); @@ -1061,25 +1061,25 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); - tr.Set(loc.Pack("foo", 100), Slice.FromAscii("one hundred")); + tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Tuples.EncodeKey("foo", 100), Slice.FromAscii("one hundred")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // fGE{50} + 1 => 100 - var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Pack("foo", 50)) + 1); - Assert.That(key, Is.EqualTo(loc.Pack("foo", 100))); + var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Tuples.EncodeKey("foo", 50)) + 1); + Assert.That(key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 100))); // 77 between 50 and 100 => conflict !!! using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Pack("foo", 77), Slice.FromAscii("docm")); + tr2.Set(loc.Tuples.EncodeKey("foo", 77), Slice.FromAscii("docm")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); // should conflict! 
await TestHelpers.AssertThrowsFdbErrorAsync(() => tr1.CommitAsync(), FdbError.NotCommitted, "The Set(77) in TR2 should have conflicted with the GetKey(fGE{50} + 1) in TR1"); @@ -1090,25 +1090,25 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); - tr.Set(loc.Pack("foo", 100), Slice.FromAscii("one hundred")); + tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Tuples.EncodeKey("foo", 100), Slice.FromAscii("one hundred")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // fGT{50} => 100 - var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterThan(loc.Pack("foo", 50))); - Assert.That(key, Is.EqualTo(loc.Pack("foo", 100))); + var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterThan(loc.Tuples.EncodeKey("foo", 50))); + Assert.That(key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 100))); // another transaction changes the VALUE of 50 and 100 (but does not change the fact that they exist nor add keys in between) using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Pack("foo", 100), Slice.FromAscii("cent")); + tr2.Set(loc.Tuples.EncodeKey("foo", 100), Slice.FromAscii("cent")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); // this causes a conflict in the current version of FDB await TestHelpers.AssertThrowsFdbErrorAsync(() => tr1.CommitAsync(), FdbError.NotCommitted, "The Set(100) in TR2 should have conflicted with the GetKey(fGT{50}) in TR1"); @@ -1119,25 +1119,25 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Pack("foo", 50), Slice.FromAscii("fifty")); - tr.Set(loc.Pack("foo", 100), Slice.FromAscii("one hundred")); + tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Tuples.EncodeKey("foo", 100), 
Slice.FromAscii("one hundred")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // lLT{100} => 50 - var key = await tr1.GetKeyAsync(FdbKeySelector.LastLessThan(loc.Pack("foo", 100))); - Assert.That(key, Is.EqualTo(loc.Pack("foo", 50))); + var key = await tr1.GetKeyAsync(FdbKeySelector.LastLessThan(loc.Tuples.EncodeKey("foo", 100))); + Assert.That(key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 50))); // another transaction changes the VALUE of 50 and 100 (but does not change the fact that they exist nor add keys in between) using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Clear(loc.Pack("foo", 100)); + tr2.Clear(loc.Tuples.EncodeKey("foo", 100)); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Pack("bar"), Slice.Empty); + tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); // this causes a conflict in the current version of FDB await tr1.CommitAsync(); @@ -1162,8 +1162,8 @@ public async Task Test_Read_Isolation() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); - var key = location.Pack("A"); + var location = db.Partition.By("test"); + var key = location.Tuples.EncodeKey("A"); await db.ClearRangeAsync(location, this.Cancellation); @@ -1228,13 +1228,13 @@ public async Task Test_Read_Isolation_From_Writes() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.By("test"); await db.ClearRangeAsync(location, this.Cancellation); - var a = location.Pack("A"); - var b = location.Pack("B"); - var c = location.Pack("C"); - var d = location.Pack("D"); + var a = location.Tuples.EncodeKey("A"); + var b = location.Tuples.EncodeKey("B"); + var c = location.Tuples.EncodeKey("C"); + var d = location.Tuples.EncodeKey("D"); // Reads (before and after): // - A and B will use regular reads @@ -1289,11 +1289,11 @@ public async Task Test_ReadYourWritesDisable_Isolation() { using (var db = await 
OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.By("test"); await db.ClearRangeAsync(location, this.Cancellation); - var a = location.Pack("A"); - var b = location.Partition("B"); + var a = location.Tuples.EncodeKey("A"); + var b = location.Partition.By("B"); #region Default behaviour... @@ -1302,8 +1302,8 @@ public async Task Test_ReadYourWritesDisable_Isolation() await db.WriteAsync((tr) => { tr.Set(a, Slice.FromString("a")); - tr.Set(b.Pack(10), Slice.FromString("PRINT \"HELLO\"")); - tr.Set(b.Pack(20), Slice.FromString("GOTO 10")); + tr.Set(b.Tuples.EncodeKey(10), Slice.FromString("PRINT \"HELLO\"")); + tr.Set(b.Tuples.EncodeKey(20), Slice.FromString("GOTO 10")); }, this.Cancellation); using(var tr = db.BeginTransaction(this.Cancellation)) @@ -1314,7 +1314,7 @@ await db.WriteAsync((tr) => Assert.That(res, Is.EqualTo(new [] { "PRINT \"HELLO\"", "GOTO 10" })); tr.Set(a, Slice.FromString("aa")); - tr.Set(b.Pack(15), Slice.FromString("PRINT \"WORLD\"")); + tr.Set(b.Tuples.EncodeKey(15), Slice.FromString("PRINT \"WORLD\"")); data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("aa"), "The transaction own writes should be visible by default"); @@ -1340,7 +1340,7 @@ await db.WriteAsync((tr) => Assert.That(res, Is.EqualTo(new[] { "PRINT \"HELLO\"", "GOTO 10" })); tr.Set(a, Slice.FromString("aa")); - tr.Set(b.Pack(15), Slice.FromString("PRINT \"WORLD\"")); + tr.Set(b.Tuples.EncodeKey(15), Slice.FromString("PRINT \"WORLD\"")); data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("a"), "The transaction own writes should not be seen with ReadYourWritesDisable option enabled"); @@ -1364,14 +1364,14 @@ public async Task Test_Can_Set_Read_Version() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test"); + var location = db.Partition.By("test"); long commitedVersion; // create first version using (var tr1 = db.BeginTransaction(this.Cancellation)) { - 
tr1.Set(location.Pack("concurrent"), Slice.FromByte(1)); + tr1.Set(location.Tuples.EncodeKey("concurrent"), Slice.FromByte(1)); await tr1.CommitAsync(); // get this version @@ -1381,7 +1381,7 @@ public async Task Test_Can_Set_Read_Version() // mutate in another transaction using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(location.Pack("concurrent"), Slice.FromByte(2)); + tr2.Set(location.Tuples.EncodeKey("concurrent"), Slice.FromByte(2)); await tr2.CommitAsync(); } @@ -1393,7 +1393,7 @@ public async Task Test_Can_Set_Read_Version() long ver = await tr3.GetReadVersionAsync(); Assert.That(ver, Is.EqualTo(commitedVersion), "GetReadVersion should return the same value as SetReadVersion!"); - var bytes = await tr3.GetAsync(location.Pack("concurrent")); + var bytes = await tr3.GetAsync(location.Tuples.EncodeKey("concurrent")); Assert.That(bytes.GetBytes(), Is.EqualTo(new byte[] { 1 }), "Should have seen the first version!"); } @@ -1506,7 +1506,7 @@ public async Task Test_Transaction_RetryLoop_Respects_DefaultRetryLimit_Value() var t = db.ReadAsync((tr) => { ++counter; - Console.WriteLine("Called " + counter + " time(s)"); + Log("Called {0} time(s)", counter); if (counter > 4) { go.Cancel(); @@ -1571,12 +1571,12 @@ public async Task Test_Can_Add_Read_Conflict_Range() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("conflict"); + var location = db.Partition.By("conflict"); await db.ClearRangeAsync(location, this.Cancellation); - var key1 = location.Pack(1); - var key2 = location.Pack(2); + var key1 = location.Tuples.EncodeKey(1); + var key2 = location.Tuples.EncodeKey(2); using (var tr1 = db.BeginTransaction(this.Cancellation)) { @@ -1610,13 +1610,13 @@ public async Task Test_Can_Add_Write_Conflict_Range() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("conflict"); + var location = db.Partition.By("conflict"); await db.ClearRangeAsync(location, this.Cancellation); - var keyConflict = 
location.Pack(0); - var key1 = location.Pack(1); - var key2 = location.Pack(2); + var keyConflict = location.Tuples.EncodeKey(0); + var key1 = location.Tuples.EncodeKey(1); + var key2 = location.Tuples.EncodeKey(2); using (var tr1 = db.BeginTransaction(this.Cancellation)) { @@ -1652,12 +1652,12 @@ public async Task Test_Can_Setup_And_Cancel_Watches() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("test", "bigbrother"); + var location = db.Partition.By("test", "bigbrother"); await db.ClearRangeAsync(location, this.Cancellation); - var key1 = location.Pack("watched"); - var key2 = location.Pack("witness"); + var key1 = location.Tuples.EncodeKey("watched"); + var key2 = location.Tuples.EncodeKey("witness"); await db.WriteAsync((tr) => { @@ -1711,12 +1711,12 @@ public async Task Test_Can_Get_Addresses_For_Key() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition("location_api"); + var location = db.Partition.By("location_api"); await db.ClearRangeAsync(location, this.Cancellation); - var key1 = location.Pack(1); - var key404 = location.Pack(404); + var key1 = location.Tuples.EncodeKey(1); + var key404 = location.Tuples.EncodeKey(404); await db.WriteAsync((tr) => { @@ -1769,12 +1769,12 @@ public async Task Test_Can_Get_Boundary_Keys() using (var db = await Fdb.OpenAsync(TestHelpers.TestClusterFile, TestHelpers.TestDbName, this.Cancellation)) { //var cf = await db.GetCoordinatorsAsync(); - //Console.WriteLine("Connected to " + cf.ToString()); + //Log("Connected to {0}", cf.ToString()); using(var tr = db.BeginReadOnlyTransaction(this.Cancellation).WithAccessToSystemKeys()) { // dump nodes - Console.WriteLine("Server List:"); + Log("Server List:"); var servers = await tr.GetRange(Fdb.System.ServerList, Fdb.System.ServerList + Fdb.System.MaxValue) .Select(kvp => new KeyValuePair(kvp.Key.Substring(Fdb.System.ServerList.Count), kvp.Value)) .ToListAsync(); @@ -1787,18 +1787,18 @@ public async Task 
Test_Can_Get_Boundary_Keys() // the datacenter id seems to be at offset 40 var dataCenterId = key.Value.Substring(40, 16).ToHexaString(); - Console.WriteLine("- " + key.Key.ToHexaString() + ": (" + key.Value.Count + ") " + key.Value.ToAsciiOrHexaString()); - Console.WriteLine(" > node = " + nodeId); - Console.WriteLine(" > machine = " + machineId); - Console.WriteLine(" > datacenter = " + dataCenterId); + Log("- {0} : ({1}) {2}", key.Key.ToHexaString(), key.Value.Count, key.Value.ToAsciiOrHexaString()); + Log(" > node = {0}", nodeId); + Log(" > machine = {0}", machineId); + Log(" > datacenter = {0}", dataCenterId); } - Console.WriteLine(); + Log(); // dump keyServers var shards = await tr.GetRange(Fdb.System.KeyServers, Fdb.System.KeyServers + Fdb.System.MaxValue) .Select(kvp => new KeyValuePair(kvp.Key.Substring(Fdb.System.KeyServers.Count), kvp.Value)) .ToListAsync(); - Console.WriteLine("Key Servers: " + shards.Count + " shards"); + Log("Key Servers: {0} shard(s)", shards.Count); HashSet distinctNodes = new HashSet(StringComparer.Ordinal); int replicationFactor = 0; @@ -1826,14 +1826,14 @@ public async Task Test_Can_Get_Boundary_Keys() //Console.WriteLine("- " + key.Value.Substring(0, 12).ToAsciiOrHexaString() + " : " + String.Join(", ", ids) + " = " + key.Key); } - Console.WriteLine(); - Console.WriteLine("Distinct nodes: " + distinctNodes.Count); + Log(); + Log("Distinct nodes: {0}", distinctNodes.Count); foreach(var machine in distinctNodes) { - Console.WriteLine("- " + machine); + Log("- " + machine); } - Console.WriteLine(); - Console.WriteLine("Cluster topology: " + distinctNodes.Count + " processes with " + (replicationFactor == 1 ? "single" : replicationFactor == 2 ? "double" : replicationFactor == 3 ? "triple" : replicationFactor.ToString()) + " replication"); + Log(); + Log("Cluster topology: {0} process(es) with {1} replication", distinctNodes.Count, replicationFactor == 1 ? "single" : replicationFactor == 2 ? "double" : replicationFactor == 3 ? 
"triple" : replicationFactor.ToString()); } } } @@ -1849,10 +1849,10 @@ public async Task Test_Simple_Read_Transaction() { await tr.GetReadVersionAsync(); - var a = location.Concat(Slice.FromString("A")); - var b = location.Concat(Slice.FromString("B")); - var c = location.Concat(Slice.FromString("C")); - var z = location.Concat(Slice.FromString("Z")); + var a = location.ConcatKey(Slice.FromString("A")); + var b = location.ConcatKey(Slice.FromString("B")); + var c = location.ConcatKey(Slice.FromString("C")); + var z = location.ConcatKey(Slice.FromString("Z")); //await tr.GetAsync(location.Concat(Slice.FromString("KEY"))); diff --git a/FoundationDB.Tests/TransactionalFacts.cs b/FoundationDB.Tests/TransactionalFacts.cs index 4349145b3..cc0e8f75c 100644 --- a/FoundationDB.Tests/TransactionalFacts.cs +++ b/FoundationDB.Tests/TransactionalFacts.cs @@ -51,7 +51,7 @@ public async Task Test_ReadAsync_Should_Normally_Execute_Only_Once() using(var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(location.Pack("Hello"), Slice.FromString(secret)); + tr.Set(location.Tuples.EncodeKey("Hello"), Slice.FromString(secret)); await tr.CommitAsync(); } @@ -64,7 +64,7 @@ public async Task Test_ReadAsync_Should_Normally_Execute_Only_Once() Assert.That(tr.Context.Database, Is.SameAs(db)); Assert.That(tr.Context.Shared, Is.True); - return tr.GetAsync(location.Pack("Hello")); + return tr.GetAsync(location.Tuples.EncodeKey("Hello")); }, this.Cancellation); Assert.That(called, Is.EqualTo(1)); // note: if this assert fails, first ensure that you did not get a transient error while running this test! 
@@ -161,7 +161,7 @@ public async Task Test_Transactionals_Retries_Do_Not_Leak_When_Reading_Too_Much( var sw = Stopwatch.StartNew(); Console.WriteLine("Inserting test data (this may take a few minutes)..."); var rnd = new Random(); - await Fdb.Bulk.WriteAsync(db, Enumerable.Range(0, 100 * 1000).Select(i => new KeyValuePair(location.Pack(i), Slice.Random(rnd, 4096))), this.Cancellation); + await Fdb.Bulk.WriteAsync(db, Enumerable.Range(0, 100 * 1000).Select(i => new KeyValuePair(location.Tuples.EncodeKey(i), Slice.Random(rnd, 4096))), this.Cancellation); sw.Stop(); Console.WriteLine("> done in " + sw.Elapsed); @@ -246,7 +246,7 @@ public async Task Test_Transactionals_ReadOnly_Should_Deny_Write_Attempts() Assume.That(hijack, Is.Not.Null, "This test requires the transaction to implement IFdbTransaction !"); // this call should fail ! - hijack.Set(location.Pack("Hello"), Slice.FromString("Hijacked")); + hijack.Set(location.Tuples.EncodeKey("Hello"), Slice.FromString("Hijacked")); Assert.Fail("Calling Set() on a read-only transaction should fail"); return Task.FromResult(123); @@ -271,7 +271,7 @@ await db.WriteAsync((tr) => { for (int i = 0; i < 10; i++) { - tr.Set(location.Pack(i), Slice.FromInt32(i)); + tr.Set(location.Tuples.EncodeKey(i), Slice.FromInt32(i)); } }, this.Cancellation); @@ -285,25 +285,25 @@ await db.WriteAsync((tr) => // read 0..2 for (int i = 0; i < 3; i++) { - values[i] = (await tr.GetAsync(location.Pack(i))).ToInt32(); + values[i] = (await tr.GetAsync(location.Tuples.EncodeKey(i))).ToInt32(); } // another transaction commits a change to 3 before we read it - await db.WriteAsync((tr2) => tr2.Set(location.Pack(3), Slice.FromInt32(42)), this.Cancellation); + await db.WriteAsync((tr2) => tr2.Set(location.Tuples.EncodeKey(3), Slice.FromInt32(42)), this.Cancellation); // read 3 to 7 for (int i = 3; i < 7; i++) { - values[i] = (await tr.GetAsync(location.Pack(i))).ToInt32(); + values[i] = (await tr.GetAsync(location.Tuples.EncodeKey(i))).ToInt32(); } // 
another transaction commits a change to 6 after it has been read - await db.WriteAsync((tr2) => tr2.Set(location.Pack(6), Slice.FromInt32(66)), this.Cancellation); + await db.WriteAsync((tr2) => tr2.Set(location.Tuples.EncodeKey(6), Slice.FromInt32(66)), this.Cancellation); // read 7 to 9 for (int i = 7; i < 10; i++) { - values[i] = (await tr.GetAsync(location.Pack(i))).ToInt32(); + values[i] = (await tr.GetAsync(location.Tuples.EncodeKey(i))).ToInt32(); } return values; diff --git a/README.md b/README.md index b778a4c66..01601f865 100644 --- a/README.md +++ b/README.md @@ -24,22 +24,44 @@ using (var db = await Fdb.OpenAsync()) { // we will use a "Test" directory to isolate our test data var location = await db.Directory.CreateOrOpenAsync("Test", token); + // this location will remember the allocated prefix, and + // automatically add it as a prefix to all our keys // we need a transaction to be able to make changes to the db // note: production code should use "db.WriteAsync(..., token)" instead using (var trans = db.BeginTransaction(token)) { - // ("Test", "Hello", ) = "World" - trans.Set(location.Pack("Hello"), Slice.FromString("World")); - - // ("Test", "Count", ) = 42 - trans.Set(location.Pack("Count"), Slice.FromInt32(42)); + // For our convenience, we will use the Tuple Encoding format for our keys, + // which is accessible via the "location.Tuples" helper. We could have used + // any other encoding for the keys. Tuples are simple to use and have some + // intereseting ordering properties that make it easy to work with. + // => All our keys will be encoded as the packed tuple ({Test}, "foo"), + // making them very nice and compact. We could also use integers or GUIDs + // for the keys themselves. 
+ + // Set "Hello" key to "World" + trans.Set( + location.Tuples.EncodeKey("Hello"), + Slice.FromString("World") // UTF-8 encoded string + ); + + // Set "Count" key to 42 + trans.Set( + location.Tuples.EncodeKey("Count"), + Slice.FromInt32(42) // 1 byte + ); - // Atomically add 123 to ("Test", "Total") - trans.AtomicAdd(location.Pack("Total"), Slice.FromFixed32(123)); - - // Set bits 3, 9 and 30 in the bitmap stored at ("Test", "Bitmap") - trans.AtomicOr(location.Pack("Bitmap"), Slice.FromFixed32((1 << 3) | (1 << 9) | (1 << 30))); + // Atomically add 123 to "Total" + trans.AtomicAdd( + location.Tuples.EncodeKey("Total"), + Slice.FromFixed32(123) // 4 bytes, Little Endian + ); + + // Set bits 3, 9 and 30 in the bit map stored in the key "Bitmap" + trans.AtomicOr( + location.Tuples.EncodeKey("Bitmap"), + Slice.FromFixed32((1 << 3) | (1 << 9) | (1 << 30)) // 4 bytes, Little Endian + ); // commit the changes to the db await trans.CommitAsync(); @@ -52,16 +74,16 @@ using (var db = await Fdb.OpenAsync()) using (var trans = db.BeginReadOnlyTransaction(token)) { // Read ("Test", "Hello", ) as a string - Slice value = await trans.GetAsync(location.Pack("Hello")); + Slice value = await trans.GetAsync(location.Tuples.EncodeKey("Hello")); Console.WriteLine(value.ToUnicode()); // -> World // Read ("Test", "Count", ) as an int - value = await trans.GetAsync(location.Pack("Count")); + value = await trans.GetAsync(location.Tuples.EncodeKey("Count")); Console.WriteLine(value.ToInt32()); // -> 42 // missing keys give a result of Slice.Nil, which is the equivalent // of "key not found". 
- value = await trans.GetAsync(location.Pack("NotFound")); + value = await trans.GetAsync(location.Tuples.EncodeKey("NotFound")); Console.WriteLine(value.HasValue); // -> false Console.WriteLine(value == Slice.Nil); // -> true // note: there is also Slice.Empty that is returned for existing keys @@ -84,9 +106,9 @@ using (var db = await Fdb.OpenAsync()) await db.WriteAsync((trans) => { // add some data to the list with the format: (..., index) = value - trans.Set(list.Pack(0), Slice.FromString("AAA")); - trans.Set(list.Pack(1), Slice.FromString("BBB")); - trans.Set(list.Pack(2), Slice.FromString("CCC")); + trans.Set(list.Tuples.EncodeKey(0), Slice.FromString("AAA")); + trans.Set(list.Tuples.EncodeKey(1), Slice.FromString("BBB")); + trans.Set(list.Tuples.EncodeKey(2), Slice.FromString("CCC")); // The actual keys will be a concatenation of the prefix of 'list', // and a packed tuple containing the index. Since we are using the // Directory Layer, this should still be fairly small (between 4 @@ -132,7 +154,7 @@ using (var db = await Fdb.OpenAsync()) .Select((kvp) => new KeyValuePair( // unpack the tuple and returns the last item as an int - list.UnpackLast(kvp.Key), + list.Tuples.DecodeLast(kvp.Key), // convert the value into an unicode string kvp.Value.ToUnicode() )) From c466dfce65d9ee9a9475320fc929133ce334199b Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Fri, 14 Nov 2014 18:02:21 +0100 Subject: [PATCH 02/63] Added a few [DebuggerStepThrough] attributes to help going through test code --- .../Subspaces/FdbSubspaceTuples.cs | 32 +++++++++++++++++-- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs index f5439ade4..dcc08fb15 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs @@ -34,6 +34,7 @@ namespace FoundationDB.Client using System.Linq; using System.Collections.Generic; 
using FoundationDB.Client.Utils; + using System.Diagnostics; /// Provides of methods to encode and decodes keys using the Tuple Encoding format @@ -53,24 +54,34 @@ public FdbSubspaceTuples(IFdbSubspace subspace) public IFdbSubspace Subspace { + [DebuggerStepThrough] [NotNull] //note: except for corner cases like default(FdbTupleSubspace) or unallocated value get { return m_subspace; } } + /// Return a key that is composed of the subspace prefix, and the packed representation of a tuple. + /// Tuple to pack (can be null or empty) + /// Key which starts with the subspace prefix, followed by the packed representation of . This key can be parsed back to an equivalent tuple by calling . + /// If is null or empty, then the prefix of the subspace is returned. public Slice this[[NotNull] IFdbTuple tuple] { + [DebuggerStepThrough] get { return Pack(tuple); } } public Slice this[[NotNull] ITupleFormattable item] { + [DebuggerStepThrough] get { return Pack(item); } } + #region Pack: Tuple => Slice + /// Return a key that is composed of the subspace prefix, and the packed representation of a tuple. /// Tuple to pack (can be null or empty) /// Key which starts with the subspace prefix, followed by the packed representation of . This key can be parsed back to an equivalent tuple by calling . /// If is null or empty, then the prefix of the subspace is returned. 
+ [DebuggerStepThrough] public Slice Pack([NotNull] IFdbTuple tuple) { if (tuple == null) throw new ArgumentNullException("tuple"); @@ -81,6 +92,7 @@ public Slice Pack([NotNull] IFdbTuple tuple) /// Sequence of N-tuples to pack /// Array containing the buffer segment of each packed tuple /// tuple.Pack(new [] { "abc", [ ("Foo", 1), ("Foo", 2) ] }) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] + [DebuggerStepThrough] [NotNull] public Slice[] Pack([NotNull] IEnumerable tuples) { @@ -93,6 +105,7 @@ public Slice[] Pack([NotNull] IEnumerable tuples) /// Sequence of N-tuples to pack /// Array containing the buffer segment of each packed tuple /// BatchPack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] + [DebuggerStepThrough] [NotNull] public Slice[] Pack([NotNull] params IFdbTuple[] tuples) { @@ -103,6 +116,7 @@ public Slice[] Pack([NotNull] params IFdbTuple[] tuples) /// Tuple to pack (can be null or empty) /// Key which starts with the subspace prefix, followed by the packed representation of . This key can be parsed back to an equivalent tuple by calling . /// If is null or empty, then the prefix of the subspace is returned. 
+ [DebuggerStepThrough] public Slice Pack([NotNull] ITupleFormattable item) { if (item == null) throw new ArgumentNullException("item"); @@ -113,6 +127,7 @@ public Slice Pack([NotNull] ITupleFormattable item) /// Sequence of N-tuples to pack /// Array containing the buffer segment of each packed tuple /// Pack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] + [DebuggerStepThrough] [NotNull] public Slice[] Pack([NotNull] IEnumerable items) { @@ -125,12 +140,17 @@ public Slice[] Pack([NotNull] IEnumerable items) /// Sequence of N-tuples to pack /// Array containing the buffer segment of each packed tuple /// Pack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] + [DebuggerStepThrough] [NotNull] public Slice[] Pack([NotNull] params ITupleFormattable[] items) { return Pack((IEnumerable)items); } + #endregion + + #region Unpack: Slice => Tuple + /// Unpack a key into a tuple, with the subspace prefix removed /// Packed version of a key that should fit inside this subspace. /// Unpacked tuple that is relative to the current subspace, or null if the key is equal to Slice.Nil @@ -175,6 +195,10 @@ public IFdbTuple[] Unpack([NotNull] params Slice[] keys) return Unpack((IEnumerable)keys); } + #endregion + + #region ToRange: Tuple => Range + public FdbKeyRange ToRange([NotNull] IFdbTuple tuple) { if (tuple == null) throw new ArgumentNullException("tuple"); @@ -187,7 +211,9 @@ public FdbKeyRange ToRange([NotNull] ITupleFormattable item) return ToRange(item.ToTuple()); } - #region EncodeKey... + #endregion + + #region EncodeKey: (T1, T2, ...) => Slice /// Create a new key by appending a value to the current subspace /// Type of the value @@ -275,7 +301,7 @@ public Slice[] EncodeKeys([NotNull] TElement[] elements, [NotNul #endregion - #region DecodeKey... + #region DecodeKey: Slice => (T1, T2, ...) 
/// Unpack a key into a singleton tuple, and return the single element /// Expected type of the only element @@ -403,7 +429,7 @@ public T[] DecodeKeys([NotNull] Slice[] keys) #endregion - #region Append... + #region Append: Subspace => Tuple /// Return an empty tuple that is attached to this subspace /// Empty tuple that can be extended, and whose packed representation will always be prefixed by the subspace key From c675d54e44cd1175c44cb7923ebafece8e8efbbc Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Fri, 14 Nov 2014 22:44:31 +0100 Subject: [PATCH 03/63] Refactored the Tuple encoders to be able to properly escape embedded NILs inside tuples - Changed encoding of embedded tuples to be <03>...<00>, instead of <03>...<04> - NILs inside embedded tuples are now escaped as <00> instead of only <00> - Ordering should now be: (1, (2)) then (1, (2), 3) then (1, (2, 3)) then (1, (3)) - Introduced the TupleReader and TupleWriter structs, which track the current context of encoding/decoding (currently only the Depth) - Regular top-level tuples are at depth 0, embedded tuples starts at depth 1 or more - ToRange() on partial embedded tuples are still broken regarding ordering! 
--- .../FoundationDB.Client.csproj | 2 + .../Layers/Tuples/FdbJoinedTuple.cs | 6 +- .../Layers/Tuples/FdbLinkedTuple.cs | 6 +- .../Layers/Tuples/FdbListTuple.cs | 6 +- .../Layers/Tuples/FdbMemoizedTuple.cs | 4 +- .../Layers/Tuples/FdbPrefixedTuple.cs | 8 +- .../Layers/Tuples/FdbSlicedTuple.cs | 8 +- FoundationDB.Client/Layers/Tuples/FdbTuple.cs | 123 ++++---- .../Layers/Tuples/FdbTupleCodec`1.cs | 15 +- .../Layers/Tuples/FdbTupleExtensions.cs | 16 +- .../Layers/Tuples/FdbTuplePacker.cs | 6 +- .../Layers/Tuples/FdbTuplePackers.cs | 145 ++++----- .../Layers/Tuples/FdbTupleParser.cs | 292 ++++++++++-------- .../Layers/Tuples/FdbTupleTypes.cs | 2 - .../Layers/Tuples/FdbTuple`1.cs | 3 +- .../Layers/Tuples/FdbTuple`2.cs | 3 +- .../Layers/Tuples/FdbTuple`3.cs | 3 +- .../Layers/Tuples/FdbTuple`4.cs | 3 +- .../Layers/Tuples/IFdbTuple.cs | 2 +- .../Layers/Tuples/TupleReader.cs | 54 ++++ .../Layers/Tuples/TupleWriter.cs | 55 ++++ FoundationDB.Client/Utils/SliceReader.cs | 8 + FoundationDB.Tests/Layers/TupleFacts.cs | 120 ++++--- 23 files changed, 533 insertions(+), 357 deletions(-) create mode 100644 FoundationDB.Client/Layers/Tuples/TupleReader.cs create mode 100644 FoundationDB.Client/Layers/Tuples/TupleWriter.cs diff --git a/FoundationDB.Client/FoundationDB.Client.csproj b/FoundationDB.Client/FoundationDB.Client.csproj index 4bf9383e7..288366797 100644 --- a/FoundationDB.Client/FoundationDB.Client.csproj +++ b/FoundationDB.Client/FoundationDB.Client.csproj @@ -73,6 +73,8 @@ + + diff --git a/FoundationDB.Client/Layers/Tuples/FdbJoinedTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbJoinedTuple.cs index 1a8320db3..121e48251 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbJoinedTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbJoinedTuple.cs @@ -66,7 +66,7 @@ public FdbJoinedTuple(IFdbTuple head, IFdbTuple tail) m_count = m_split + tail.Count; } - public void PackTo(ref SliceWriter writer) + public void PackTo(ref TupleWriter writer) { this.Head.PackTo(ref writer); 
this.Tail.PackTo(ref writer); @@ -74,9 +74,9 @@ public void PackTo(ref SliceWriter writer) public Slice ToSlice() { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); PackTo(ref writer); - return writer.ToSlice(); + return writer.Output.ToSlice(); } Slice IFdbKey.ToFoundationDbKey() diff --git a/FoundationDB.Client/Layers/Tuples/FdbLinkedTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbLinkedTuple.cs index 095c6a8c1..93dee281a 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbLinkedTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbLinkedTuple.cs @@ -66,7 +66,7 @@ internal FdbLinkedTuple(IFdbTuple head, T tail) } /// Pack this tuple into a buffer - public void PackTo(ref SliceWriter writer) + public void PackTo(ref TupleWriter writer) { this.Head.PackTo(ref writer); FdbTuplePacker.SerializeTo(ref writer, this.Tail); @@ -75,9 +75,9 @@ public void PackTo(ref SliceWriter writer) /// Pack this tuple into a slice public Slice ToSlice() { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); PackTo(ref writer); - return writer.ToSlice(); + return writer.Output.ToSlice(); } Slice IFdbKey.ToFoundationDbKey() diff --git a/FoundationDB.Client/Layers/Tuples/FdbListTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbListTuple.cs index 2dd621785..a175a41c5 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbListTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbListTuple.cs @@ -227,7 +227,7 @@ private static IEnumerator Enumerate(object[] items, int offset, int cou } } - public void PackTo(ref SliceWriter writer) + public void PackTo(ref TupleWriter writer) { for (int i = 0; i < m_count; i++) { @@ -237,9 +237,9 @@ public void PackTo(ref SliceWriter writer) public Slice ToSlice() { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); PackTo(ref writer); - return writer.ToSlice(); + return writer.Output.ToSlice(); } Slice IFdbKey.ToFoundationDbKey() diff --git a/FoundationDB.Client/Layers/Tuples/FdbMemoizedTuple.cs 
b/FoundationDB.Client/Layers/Tuples/FdbMemoizedTuple.cs index e165da44a..36cc9a691 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbMemoizedTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbMemoizedTuple.cs @@ -75,11 +75,11 @@ public object this[int index] get { return FdbTuple.Splice(this, fromIncluded, toExcluded); } } - public void PackTo(ref SliceWriter writer) + public void PackTo(ref TupleWriter writer) { if (m_packed.IsPresent) { - writer.WriteBytes(m_packed); + writer.Output.WriteBytes(m_packed); } } diff --git a/FoundationDB.Client/Layers/Tuples/FdbPrefixedTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbPrefixedTuple.cs index ccfae0396..40b531510 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbPrefixedTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbPrefixedTuple.cs @@ -61,17 +61,17 @@ public Slice Prefix get { return m_prefix; } } - public void PackTo(ref SliceWriter writer) + public void PackTo(ref TupleWriter writer) { - writer.WriteBytes(m_prefix); + writer.Output.WriteBytes(m_prefix); m_items.PackTo(ref writer); } public Slice ToSlice() { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); PackTo(ref writer); - return writer.ToSlice(); + return writer.Output.ToSlice(); } Slice IFdbKey.ToFoundationDbKey() diff --git a/FoundationDB.Client/Layers/Tuples/FdbSlicedTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbSlicedTuple.cs index 84cb8b656..afa396b52 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbSlicedTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbSlicedTuple.cs @@ -61,12 +61,12 @@ public FdbSlicedTuple(Slice[] slices, int offset, int count) m_count = count; } - public void PackTo(ref SliceWriter writer) + public void PackTo(ref TupleWriter writer) { var slices = m_slices; for (int n = m_count, p = m_offset; n > 0; n--) { - writer.WriteBytes(slices[p++]); + writer.Output.WriteBytes(slices[p++]); } } @@ -74,9 +74,9 @@ public Slice ToSlice() { // merge all the slices making up this segment //TODO: should we get the sum of 
all slices to pre-allocated the buffer ? - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); PackTo(ref writer); - return writer.ToSlice(); + return writer.Output.ToSlice(); } Slice IFdbKey.ToFoundationDbKey() diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs index 598ce0b10..96c08e6c4 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs @@ -35,6 +35,7 @@ namespace FoundationDB.Layers.Tuples using System; using System.Collections; using System.Collections.Generic; + using System.Diagnostics; using System.Globalization; using System.Linq; using System.Text; @@ -83,7 +84,7 @@ public IFdbTuple Concat(IFdbTuple tuple) return tuple; } - public void PackTo(ref SliceWriter writer) + public void PackTo(ref TupleWriter writer) { //NO-OP } @@ -157,24 +158,28 @@ public static IFdbTuple CreateBoxed(object item) } /// Create a new 1-tuple, holding only one item + [DebuggerStepThrough] public static FdbTuple Create(T1 item1) { return new FdbTuple(item1); } /// Create a new 2-tuple, holding two items + [DebuggerStepThrough] public static FdbTuple Create(T1 item1, T2 item2) { return new FdbTuple(item1, item2); } /// Create a new 3-tuple, holding three items + [DebuggerStepThrough] public static FdbTuple Create(T1 item1, T2 item2, T3 item3) { return new FdbTuple(item1, item2, item3); } /// Create a new 4-tuple, holding four items + [DebuggerStepThrough] public static FdbTuple Create(T1 item1, T2 item2, T3 item3, T4 item4) { return new FdbTuple(item1, item2, item3, item4); @@ -325,9 +330,15 @@ public static IFdbTuple Concat([NotNull] IFdbTuple head, [NotNull] IFdbTuple tai #region Packing... 
+ //BUGBUG: there is a potential type resolution problem between FdbTuple.Pack(IFdbTuple) and FdbTuple.Pack(T) + // ex: FdbTuple.Pack(FdbTuple.Create(1, 2)) will call FdbTuplePack(T) with T == typeof(FdbTuple) instead of expected FdbTuple.Pack(IFdbTuple) + // problem is that Pack() will treat this as the embedded tuple ((1, 2),) instead of expected (1, 2) + + //TODO: We *MUST* rename the various Pack() to something else, to solve this issue! + /// Pack a tuple into a slice /// Tuple that must be serialized into a binary slice - public static Slice Pack(IFdbTuple tuple) + public static Slice Pack([NotNull] IFdbTuple tuple) { //note: this is redundant with tuple.ToSlice() // => maybe we should remove this method? @@ -340,47 +351,47 @@ public static Slice Pack(IFdbTuple tuple) /// This is the non-generic equivalent of FdbTuple.Pack<object>() public static Slice PackBoxed(object item) { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); FdbTuplePackers.SerializeObjectTo(ref writer, item); - return writer.ToSlice(); + return writer.Output.ToSlice(); } /// Pack a 1-tuple directly into a slice public static Slice Pack(T1 item1) { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); FdbTuplePacker.SerializeTo(ref writer, item1); - return writer.ToSlice(); + return writer.Output.ToSlice(); } /// Pack a 2-tuple directly into a slice public static Slice Pack(T1 item1, T2 item2) { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); FdbTuplePacker.SerializeTo(ref writer, item1); FdbTuplePacker.SerializeTo(ref writer, item2); - return writer.ToSlice(); + return writer.Output.ToSlice(); } /// Pack a 3-tuple directly into a slice public static Slice Pack(T1 item1, T2 item2, T3 item3) { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); FdbTuplePacker.SerializeTo(ref writer, item1); FdbTuplePacker.SerializeTo(ref writer, item2); FdbTuplePacker.SerializeTo(ref writer, item3); - return writer.ToSlice(); + return 
writer.Output.ToSlice(); } /// Pack a 4-tuple directly into a slice public static Slice Pack(T1 item1, T2 item2, T3 item3, T4 item4) { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); FdbTuplePacker.SerializeTo(ref writer, item1); FdbTuplePacker.SerializeTo(ref writer, item2); FdbTuplePacker.SerializeTo(ref writer, item3); FdbTuplePacker.SerializeTo(ref writer, item4); - return writer.ToSlice(); + return writer.Output.ToSlice(); } /// Pack a N-tuple directory into a slice @@ -389,12 +400,12 @@ public static Slice Pack([NotNull] params object[] items) if (items == null) throw new ArgumentNullException("items"); if (items.Length == 0) return Slice.Empty; - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); foreach(var item in items) { FdbTuplePackers.SerializeObjectTo(ref writer, item); } - return writer.ToSlice(); + return writer.Output.ToSlice(); } /// Merge a sequence of keys with a same prefix, all sharing the same buffer @@ -413,19 +424,19 @@ public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] IEnumerable if (array != null) return PackRangeWithPrefix(prefix, array); var next = new List(); - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); var packer = FdbTuplePacker.Encoder; //TODO: use multiple buffers if item count is huge ? foreach (var key in keys) { - if (prefix.IsPresent) writer.WriteBytes(prefix); + if (prefix.IsPresent) writer.Output.WriteBytes(prefix); packer(ref writer, key); - next.Add(writer.Position); + next.Add(writer.Output.Position); } - return FdbKey.SplitIntoSegments(writer.Buffer, 0, next); + return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); } /// Merge an array of keys with a same prefix, all sharing the same buffer @@ -439,7 +450,7 @@ public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] params T[] if (keys == null) throw new ArgumentNullException("keys"); // pre-allocate by guessing that each key will take at least 8 bytes. 
Even if 8 is too small, we should have at most one or two buffer resize - var writer = new SliceWriter(keys.Length * (prefix.Count + 8)); + var writer = new TupleWriter(keys.Length * (prefix.Count + 8)); var next = new List(keys.Length); var packer = FdbTuplePacker.Encoder; @@ -447,12 +458,12 @@ public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] params T[] foreach (var key in keys) { - if (prefix.IsPresent) writer.WriteBytes(prefix); + if (prefix.Count > 0) writer.Output.WriteBytes(prefix); packer(ref writer, key); - next.Add(writer.Position); + next.Add(writer.Output.Position); } - return FdbKey.SplitIntoSegments(writer.Buffer, 0, next); + return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); } /// Merge an array of elements, all sharing the same buffer @@ -479,7 +490,7 @@ public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull if (selector == null) throw new ArgumentNullException("selector"); // pre-allocate by guessing that each key will take at least 8 bytes. 
Even if 8 is too small, we should have at most one or two buffer resize - var writer = new SliceWriter(elements.Length * (prefix.Count + 8)); + var writer = new TupleWriter(elements.Length * (prefix.Count + 8)); var next = new List(elements.Length); var packer = FdbTuplePacker.Encoder; @@ -487,12 +498,12 @@ public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull foreach (var value in elements) { - if (prefix.IsPresent) writer.WriteBytes(prefix); + if (prefix.Count > 0) writer.Output.WriteBytes(prefix); packer(ref writer, selector(value)); - next.Add(writer.Position); + next.Add(writer.Output.Position); } - return FdbKey.SplitIntoSegments(writer.Buffer, 0, next); + return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); } /// Pack a sequence of N-tuples, all sharing the same buffer @@ -520,18 +531,18 @@ public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] IEnumerable(); - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); //TODO: use multiple buffers if item count is huge ? foreach(var tuple in tuples) { - writer.WriteBytes(prefix); + writer.Output.WriteBytes(prefix); tuple.PackTo(ref writer); - next.Add(writer.Position); + next.Add(writer.Output.Position); } - return FdbKey.SplitIntoSegments(writer.Buffer, 0, next); + return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); } /// Pack an array of N-tuples, all sharing the same buffer @@ -555,19 +566,19 @@ public static Slice[] PackRangeWithPrefix(Slice prefix, params IFdbTuple[] tuple if (tuples == null) throw new ArgumentNullException("tuples"); // pre-allocate by supposing that each tuple will take at least 16 bytes - var writer = new SliceWriter(tuples.Length * (16 + prefix.Count)); + var writer = new TupleWriter(tuples.Length * (16 + prefix.Count)); var next = new List(tuples.Length); //TODO: use multiple buffers if item count is huge ? 
foreach (var tuple in tuples) { - writer.WriteBytes(prefix); + writer.Output.WriteBytes(prefix); tuple.PackTo(ref writer); - next.Add(writer.Position); + next.Add(writer.Output.Position); } - return FdbKey.SplitIntoSegments(writer.Buffer, 0, next); + return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); } /// Pack a sequence of keys with a same prefix, all sharing the same buffer @@ -608,7 +619,7 @@ public static IFdbTuple Unpack(Slice packedKey) { if (packedKey.IsNullOrEmpty) return packedKey.HasValue ? FdbTuple.Empty : null; - return FdbTuplePackers.Unpack(packedKey); + return FdbTuplePackers.Unpack(packedKey, false); } /// Unpack a tuple from a serialized key, after removing the prefix @@ -629,7 +640,7 @@ public static IFdbTuple UnpackWithoutPrefix(Slice packedKey, Slice prefix) #endif // unpack the key, minus the prefix - return FdbTuplePackers.Unpack(packedKey.Substring(prefix.Count)); + return FdbTuplePackers.Unpack(packedKey.Substring(prefix.Count), false); } /// Unpack a tuple and only return its first element @@ -737,9 +748,9 @@ public static T UnpackSingleWithoutPrefix(Slice packedKey, Slice prefix) /// Reader positionned at the start of the next item to read /// If decoding succeedsd, receives the decoded value. /// True if the decoded succeeded (and receives the decoded value). False if the tuple has reached the end. 
- public static bool UnpackNext(ref SliceReader input, out T value) + public static bool UnpackNext(ref TupleReader input, out T value) { - if (!input.HasMore) + if (!input.Input.HasMore) { value = default(T); return false; @@ -761,62 +772,62 @@ public static Slice PackWithPrefix(Slice prefix, IFdbTuple tuple) { if (tuple == null || tuple.Count == 0) return prefix; - var writer = SliceWriter.Empty; - writer.WriteBytes(prefix); + var writer = new TupleWriter(); + writer.Output.WriteBytes(prefix); tuple.PackTo(ref writer); - return writer.ToSlice(); + return writer.Output.ToSlice(); } /// Efficiently concatenate a prefix with the packed representation of a 1-tuple /// This is the non-generic equivalent of public static Slice PackBoxedWithPrefix(Slice prefix, object value) { - var writer = SliceWriter.Empty; - writer.WriteBytes(prefix); + var writer = new TupleWriter(); + writer.Output.WriteBytes(prefix); FdbTuplePackers.SerializeObjectTo(ref writer, value); - return writer.ToSlice(); + return writer.Output.ToSlice(); } /// Efficiently concatenate a prefix with the packed representation of a 1-tuple public static Slice PackWithPrefix(Slice prefix, T value) { - var writer = SliceWriter.Empty; - writer.WriteBytes(prefix); + var writer = new TupleWriter(); + writer.Output.WriteBytes(prefix); FdbTuplePacker.Encoder(ref writer, value); - return writer.ToSlice(); + return writer.Output.ToSlice(); } /// Efficiently concatenate a prefix with the packed representation of a 2-tuple public static Slice PackWithPrefix(Slice prefix, T1 value1, T2 value2) { - var writer = SliceWriter.Empty; - writer.WriteBytes(prefix); + var writer = new TupleWriter(); + writer.Output.WriteBytes(prefix); FdbTuplePacker.Encoder(ref writer, value1); FdbTuplePacker.Encoder(ref writer, value2); - return writer.ToSlice(); + return writer.Output.ToSlice(); } /// Efficiently concatenate a prefix with the packed representation of a 3-tuple public static Slice PackWithPrefix(Slice prefix, T1 value1, T2 
value2, T3 value3) { - var writer = SliceWriter.Empty; - writer.WriteBytes(prefix); + var writer = new TupleWriter(); + writer.Output.WriteBytes(prefix); FdbTuplePacker.Encoder(ref writer, value1); FdbTuplePacker.Encoder(ref writer, value2); FdbTuplePacker.Encoder(ref writer, value3); - return writer.ToSlice(); + return writer.Output.ToSlice(); } /// Efficiently concatenate a prefix with the packed representation of a 4-tuple public static Slice PackWithPrefix(Slice prefix, T1 value1, T2 value2, T3 value3, T4 value4) { - var writer = SliceWriter.Empty; - writer.WriteBytes(prefix); + var writer = new TupleWriter(); + writer.Output.WriteBytes(prefix); FdbTuplePacker.Encoder(ref writer, value1); FdbTuplePacker.Encoder(ref writer, value2); FdbTuplePacker.Encoder(ref writer, value3); FdbTuplePacker.Encoder(ref writer, value4); - return writer.ToSlice(); + return writer.Output.ToSlice(); } #endregion diff --git a/FoundationDB.Client/Layers/Tuples/FdbTupleCodec`1.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleCodec`1.cs index 70f5b1e16..024e6f222 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleCodec`1.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleCodec`1.cs @@ -59,7 +59,10 @@ public override Slice EncodeOrdered(T value) public override void EncodeOrderedSelfTerm(ref SliceWriter output, T value) { - FdbTuplePacker.Encoder(ref output, value); + //HACKHACK: we lose the current depth! + var writer = new TupleWriter(output); + FdbTuplePacker.Encoder(ref writer, value); + output = writer.Output; } public override T DecodeOrdered(Slice input) @@ -69,12 +72,12 @@ public override T DecodeOrdered(Slice input) public override T DecodeOrderedSelfTerm(ref SliceReader input) { + //HACKHACK: we lose the current depth! + var reader = new TupleReader(input); T value; - if (!FdbTuple.UnpackNext(ref input, out value)) - { - return m_missingValue; - } - return value; + bool res = FdbTuple.UnpackNext(ref reader, out value); + input = reader.Input; + return res ? 
value : m_missingValue; } public Slice EncodeValue(T value) diff --git a/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs index 52e863892..63d6127fd 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs @@ -150,20 +150,20 @@ public static FdbKeyRange ToRange([NotNull] this IFdbTuple tuple, bool includePr // We want to allocate only one byte[] to store both keys, and map both Slice to each chunk // So we will serialize the tuple two times in the same writer - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); tuple.PackTo(ref writer); - writer.EnsureBytes(writer.Position + 2); - if (!includePrefix) writer.WriteByte(0); - int p0 = writer.Position; + writer.Output.EnsureBytes(writer.Output.Position + 2); + if (!includePrefix) writer.Output.WriteByte(0); + int p0 = writer.Output.Position; tuple.PackTo(ref writer); - writer.WriteByte(0xFF); - int p1 = writer.Position; + writer.Output.WriteByte(0xFF); + int p1 = writer.Output.Position; return new FdbKeyRange( - new Slice(writer.Buffer, 0, p0), - new Slice(writer.Buffer, p0, p1 - p0) + new Slice(writer.Output.Buffer, 0, p0), + new Slice(writer.Output.Buffer, p0, p1 - p0) ); } diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuplePacker.cs b/FoundationDB.Client/Layers/Tuples/FdbTuplePacker.cs index 2e5a56b63..01962aaad 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuplePacker.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuplePacker.cs @@ -48,7 +48,7 @@ public static class FdbTuplePacker #if !NET_4_0 [System.Runtime.CompilerServices.MethodImpl(System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)] #endif - public static void SerializeTo(ref SliceWriter writer, T value) + public static void SerializeTo(ref TupleWriter writer, T value) { Encoder(ref writer, value); } @@ -58,9 +58,9 @@ public static void SerializeTo(ref SliceWriter writer, T value) 
/// Slice that contains the binary representation of public static Slice Serialize(T value) { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); Encoder(ref writer, value); - return writer.ToSlice(); + return writer.Output.ToSlice(); } /// Deserialize a tuple segment into a value of type diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuplePackers.cs b/FoundationDB.Client/Layers/Tuples/FdbTuplePackers.cs index e305cc905..0d14bce1b 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuplePackers.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuplePackers.cs @@ -44,7 +44,7 @@ public static class FdbTuplePackers #region Serializers... - public delegate void Encoder(ref SliceWriter writer, T value); + public delegate void Encoder(ref TupleWriter writer, T value); /// Returns a lambda that will be able to serialize values of type /// Type of values to serialize @@ -68,7 +68,7 @@ private static Delegate GetSerializerFor(Type type) return new Encoder(FdbTuplePackers.SerializeObjectTo); } - var typeArgs = new[] { typeof(SliceWriter).MakeByRefType(), type }; + var typeArgs = new[] { typeof(TupleWriter).MakeByRefType(), type }; var method = typeof(FdbTuplePackers).GetMethod("SerializeTo", BindingFlags.Static | BindingFlags.Public, null, typeArgs, null); if (method != null) { // we have a direct serializer @@ -124,7 +124,7 @@ private static Delegate GetSerializerFor(Type type) /// Target buffer /// Nullable value to serialize /// Uses the underlying type's serializer if the value is not null - public static void SerializeNullableTo(ref SliceWriter writer, T? value) + public static void SerializeNullableTo(ref TupleWriter writer, T? value) where T : struct { if (value == null) @@ -137,7 +137,7 @@ public static void SerializeNullableTo(ref SliceWriter writer, T? 
value) /// Target buffer /// Untyped value whose type will be inspected at runtime /// May throw at runtime if the type is not supported - public static void SerializeObjectTo(ref SliceWriter writer, object value) + public static void SerializeObjectTo(ref TupleWriter writer, object value) { var type = value != null ? value.GetType() : null; switch (Type.GetTypeCode(type)) @@ -294,7 +294,7 @@ public static void SerializeObjectTo(ref SliceWriter writer, object value) } /// Writes a slice as a byte[] array - public static void SerializeTo(ref SliceWriter writer, Slice value) + public static void SerializeTo(ref TupleWriter writer, Slice value) { if (value.IsNull) { @@ -311,142 +311,111 @@ public static void SerializeTo(ref SliceWriter writer, Slice value) } /// Writes a byte[] array - public static void SerializeTo(ref SliceWriter writer, byte[] value) + public static void SerializeTo(ref TupleWriter writer, byte[] value) { FdbTupleParser.WriteBytes(ref writer, value); } /// Writes an array segment as a byte[] array - public static void SerializeTo(ref SliceWriter writer, ArraySegment value) + public static void SerializeTo(ref TupleWriter writer, ArraySegment value) { SerializeTo(ref writer, Slice.Create(value)); } /// Writes a char as Unicode string - public static void SerializeTo(ref SliceWriter writer, char value) + public static void SerializeTo(ref TupleWriter writer, char value) { FdbTupleParser.WriteChar(ref writer, value); } /// Writes a boolean as an integer /// Uses 0 for false, and -1 for true - public static void SerializeTo(ref SliceWriter writer, bool value) + public static void SerializeTo(ref TupleWriter writer, bool value) { - // To be compatible with other bindings, we will encode False as the number 0, and True as the number 1 - - if (value) - { // true => 15 01 - writer.WriteByte2(FdbTupleTypes.IntPos1, 1); - } - else - { // false => 14 - writer.WriteByte(FdbTupleTypes.IntZero); - } + FdbTupleParser.WriteBool(ref writer, value); } /// Writes a 
boolean as an integer or null - public static void SerializeTo(ref SliceWriter writer, bool? value) + public static void SerializeTo(ref TupleWriter writer, bool? value) { if (value == null) { // null => 00 FdbTupleParser.WriteNil(ref writer); } - else if (value.Value) - { // true => 15 01 - writer.WriteByte2(FdbTupleTypes.IntPos1, 1); - } else - { // false => 14 - writer.WriteByte(FdbTupleTypes.IntZero); + { + FdbTupleParser.WriteBool(ref writer, value.Value); } } /// Writes a signed byte as an integer - public static void SerializeTo(ref SliceWriter writer, sbyte value) + public static void SerializeTo(ref TupleWriter writer, sbyte value) { FdbTupleParser.WriteInt32(ref writer, value); } /// Writes an unsigned byte as an integer - public static void SerializeTo(ref SliceWriter writer, byte value) + public static void SerializeTo(ref TupleWriter writer, byte value) { - if (value == 0) - { // 0 - writer.WriteByte(FdbTupleTypes.IntZero); - } - else - { // 1..255 - writer.WriteByte2(FdbTupleTypes.IntPos1, value); - } + FdbTupleParser.WriteByte(ref writer, value); } /// Writes a signed word as an integer - public static void SerializeTo(ref SliceWriter writer, short value) + public static void SerializeTo(ref TupleWriter writer, short value) { FdbTupleParser.WriteInt32(ref writer, value); } /// Writes an unsigned word as an integer - public static void SerializeTo(ref SliceWriter writer, ushort value) + public static void SerializeTo(ref TupleWriter writer, ushort value) { FdbTupleParser.WriteUInt32(ref writer, value); } /// Writes a signed int as an integer - public static void SerializeTo(ref SliceWriter writer, int value) + public static void SerializeTo(ref TupleWriter writer, int value) { FdbTupleParser.WriteInt32(ref writer, value); } /// Writes an unsigned int as an integer - public static void SerializeTo(ref SliceWriter writer, uint value) + public static void SerializeTo(ref TupleWriter writer, uint value) { FdbTupleParser.WriteUInt32(ref writer, value); } 
/// Writes a signed long as an integer - public static void SerializeTo(ref SliceWriter writer, long value) + public static void SerializeTo(ref TupleWriter writer, long value) { FdbTupleParser.WriteInt64(ref writer, value); } /// Writes an unsigned long as an integer - public static void SerializeTo(ref SliceWriter writer, ulong value) + public static void SerializeTo(ref TupleWriter writer, ulong value) { FdbTupleParser.WriteUInt64(ref writer, value); } /// Writes a 32-bit IEEE floating point number - public static void SerializeTo(ref SliceWriter writer, float value) + public static void SerializeTo(ref TupleWriter writer, float value) { FdbTupleParser.WriteSingle(ref writer, value); } /// Writes a 64-bit IEEE floating point number - public static void SerializeTo(ref SliceWriter writer, double value) + public static void SerializeTo(ref TupleWriter writer, double value) { FdbTupleParser.WriteDouble(ref writer, value); } /// Writes a string as an Unicode string - public static void SerializeTo(ref SliceWriter writer, string value) + public static void SerializeTo(ref TupleWriter writer, string value) { - if (value == null) - { // <00> - writer.WriteByte(FdbTupleTypes.Nil); - } - else if (value.Length == 0) - { // <02><00> - writer.WriteByte2(FdbTupleTypes.Utf8, 0x00); - } - else - { // <02>...utf8...<00> - FdbTupleParser.WriteString(ref writer, value); - } + FdbTupleParser.WriteString(ref writer, value); } /// Writes a DateTime converted to the number of days since the Unix Epoch and stored as a 64-bit decimal - public static void SerializeTo(ref SliceWriter writer, DateTime value) + public static void SerializeTo(ref TupleWriter writer, DateTime value) { // The problem of serializing DateTime: TimeZone? Precision? 
// - Since we are going to lose the TimeZone infos anyway, we can just store everything in UTC and let the caller deal with it @@ -466,7 +435,7 @@ public static void SerializeTo(ref SliceWriter writer, DateTime value) } /// Writes a TimeSpan converted to to a number seconds encoded as a 64-bit decimal - public static void SerializeTo(ref SliceWriter writer, TimeSpan value) + public static void SerializeTo(ref TupleWriter writer, TimeSpan value) { // We have the same precision problem with storing DateTimes: // - Storing the number of ticks keeps the exact value, but is Windows-centric @@ -480,7 +449,7 @@ public static void SerializeTo(ref SliceWriter writer, TimeSpan value) } /// Writes a Guid as a 128-bit UUID - public static void SerializeTo(ref SliceWriter writer, Guid value) + public static void SerializeTo(ref TupleWriter writer, Guid value) { //REVIEW: should we consider serializing Guid.Empty as <14> (integer 0) ? or maybe <01><00> (empty bytestring) ? // => could spare ~16 bytes per key in indexes on GUID properties that are frequently missing or empty (== default(Guid)) @@ -488,61 +457,65 @@ public static void SerializeTo(ref SliceWriter writer, Guid value) } /// Writes a Uuid as a 128-bit UUID - public static void SerializeTo(ref SliceWriter writer, Uuid128 value) + public static void SerializeTo(ref TupleWriter writer, Uuid128 value) { FdbTupleParser.WriteUuid128(ref writer, value); } /// Writes a Uuid as a 64-bit UUID - public static void SerializeTo(ref SliceWriter writer, Uuid64 value) + public static void SerializeTo(ref TupleWriter writer, Uuid64 value) { FdbTupleParser.WriteUuid64(ref writer, value); } /// Writes an IPaddress as a 32-bit (IPv4) or 128-bit (IPv6) byte array - public static void SerializeTo(ref SliceWriter writer, System.Net.IPAddress value) + public static void SerializeTo(ref TupleWriter writer, System.Net.IPAddress value) { FdbTupleParser.WriteBytes(ref writer, value != null ? 
value.GetAddressBytes() : null); } - public static void SerializeTo(ref SliceWriter writer, FdbTupleAlias value) + public static void SerializeTo(ref TupleWriter writer, FdbTupleAlias value) { Contract.Requires(Enum.IsDefined(typeof(FdbTupleAlias), value)); - writer.WriteByte((byte)value); + writer.Output.WriteByte((byte)value); } - public static void SerializeTupleTo(ref SliceWriter writer, TTuple tuple) + public static void SerializeTupleTo(ref TupleWriter writer, TTuple tuple) where TTuple : IFdbTuple { Contract.Requires(tuple != null); - writer.WriteByte(FdbTupleTypes.TupleStart); + writer.Depth++; + writer.Output.WriteByte(FdbTupleTypes.TupleStart); tuple.PackTo(ref writer); - writer.WriteByte(FdbTupleTypes.TupleEnd); + writer.Output.WriteByte(0x00); + writer.Depth--; } - public static void SerializeFormattableTo(ref SliceWriter writer, ITupleFormattable formattable) + public static void SerializeFormattableTo(ref TupleWriter writer, ITupleFormattable formattable) { if (formattable == null) { - writer.WriteByte(FdbTupleTypes.Nil); + FdbTupleParser.WriteNil(ref writer); return; } var tuple = formattable.ToTuple(); if (tuple == null) throw new InvalidOperationException(String.Format("Custom formatter {0}.ToTuple() cannot return null", formattable.GetType().Name)); - writer.WriteByte(FdbTupleTypes.TupleStart); + writer.Depth++; + writer.Output.WriteByte(FdbTupleTypes.TupleStart); tuple.PackTo(ref writer); - writer.WriteByte(FdbTupleTypes.TupleEnd); + writer.Output.WriteByte(0x00); + writer.Depth--; } - public static void SerializeFdbKeyTo(ref SliceWriter writer, IFdbKey key) + public static void SerializeFdbKeyTo(ref TupleWriter writer, IFdbKey key) { Contract.Requires(key != null); var slice = key.ToFoundationDbKey(); - writer.WriteBytes(slice); + FdbTupleParser.WriteBytes(ref writer, slice); } #endregion @@ -680,6 +653,11 @@ public static object DeserializeBoxed(Slice slice) public static T DeserializeFormattable(Slice slice) where T : ITupleFormattable, new() 
{ + if (FdbTuplePackers.IsNilSegment(slice)) + { + return default(T); + } + var tuple = FdbTupleParser.ParseTuple(slice); var value = new T(); value.FromTuple(tuple); @@ -1214,16 +1192,17 @@ public static FdbTupleAlias DeserializeAlias(Slice slice) /// Slice that contains the packed representation of a tuple with zero or more elements /// Decoded tuple [NotNull] - internal static FdbSlicedTuple Unpack(Slice buffer) + internal static FdbSlicedTuple Unpack(Slice buffer, bool embedded) { - var slicer = new SliceReader(buffer); + var reader = new TupleReader(buffer); + if (embedded) reader.Depth = 1; // most tuples will probably fit within (prefix, sub-prefix, id, key) so pre-allocating with 4 should be ok... var items = new Slice[4]; Slice item; int p = 0; - while ((item = FdbTupleParser.ParseNext(ref slicer)).HasValue) + while ((item = FdbTupleParser.ParseNext(ref reader)).HasValue) { if (p >= items.Length) { @@ -1233,7 +1212,7 @@ internal static FdbSlicedTuple Unpack(Slice buffer) items[p++] = item; } - if (slicer.HasMore) throw new FormatException("Parsing of tuple failed failed before reaching the end of the key"); + if (reader.Input.HasMore) throw new FormatException("Parsing of tuple failed failed before reaching the end of the key"); return new FdbSlicedTuple(p == 0 ? 
Slice.EmptySliceArray : items, 0, p); } @@ -1242,10 +1221,10 @@ internal static FdbSlicedTuple Unpack(Slice buffer) /// Decoded slice of the single element in the singleton tuple public static Slice UnpackSingle(Slice buffer) { - var slicer = new SliceReader(buffer); + var slicer = new TupleReader(buffer); var current = FdbTupleParser.ParseNext(ref slicer); - if (slicer.HasMore) throw new FormatException("Parsing of singleton tuple failed before reaching the end of the key"); + if (slicer.Input.HasMore) throw new FormatException("Parsing of singleton tuple failed before reaching the end of the key"); return current; } @@ -1255,7 +1234,7 @@ public static Slice UnpackSingle(Slice buffer) /// Raw slice corresponding to the first element of the tuple public static Slice UnpackFirst(Slice buffer) { - var slicer = new SliceReader(buffer); + var slicer = new TupleReader(buffer); return FdbTupleParser.ParseNext(ref slicer); } @@ -1265,7 +1244,7 @@ public static Slice UnpackFirst(Slice buffer) /// Raw slice corresponding to the last element of the tuple public static Slice UnpackLast(Slice buffer) { - var slicer = new SliceReader(buffer); + var slicer = new TupleReader(buffer); Slice item = Slice.Nil; @@ -1275,7 +1254,7 @@ public static Slice UnpackLast(Slice buffer) item = current; } - if (slicer.HasMore) throw new FormatException("Parsing of tuple failed failed before reaching the end of the key"); + if (slicer.Input.HasMore) throw new FormatException("Parsing of tuple failed failed before reaching the end of the key"); return item; } diff --git a/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs index fa427b8a6..d2eeb3012 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs @@ -36,52 +36,71 @@ namespace FoundationDB.Layers.Tuples /// Helper class that contains low-level encoders for the tuple binary format public static class FdbTupleParser { - 
#region Serialization... /// Writes a null value at the end, and advance the cursor - public static void WriteNil(ref SliceWriter writer) + public static void WriteNil(ref TupleWriter writer) + { + if (writer.Depth == 0) + { // at the top level, NILs are escaped as <00> + writer.Output.WriteByte(FdbTupleTypes.Nil); + } + else + { // inside a tuple, NILs are escaped as <00> + writer.Output.WriteByte2(FdbTupleTypes.Nil, 0xFF); + } + } + + public static void WriteBool(ref TupleWriter writer, bool value) { - writer.WriteByte(FdbTupleTypes.Nil); + // To be compatible with other bindings, we will encode False as the number 0, and True as the number 1 + if (value) + { // true => 15 01 + writer.Output.WriteByte2(FdbTupleTypes.IntPos1, 1); + } + else + { // false => 14 + writer.Output.WriteByte(FdbTupleTypes.IntZero); + } } /// Writes an UInt8 at the end, and advance the cursor /// Target buffer /// Unsigned BYTE, 32 bits - public static void WriteInt8(ref SliceWriter writer, byte value) + public static void WriteByte(ref TupleWriter writer, byte value) { if (value == 0) { // zero - writer.WriteByte(FdbTupleTypes.IntZero); + writer.Output.WriteByte(FdbTupleTypes.IntZero); } else { // 1..255: frequent for array index - writer.WriteByte2(FdbTupleTypes.IntPos1, value); + writer.Output.WriteByte2(FdbTupleTypes.IntPos1, value); } } /// Writes an Int32 at the end, and advance the cursor /// Target buffer /// Signed DWORD, 32 bits, High Endian - public static void WriteInt32(ref SliceWriter writer, int value) + public static void WriteInt32(ref TupleWriter writer, int value) { if (value <= 255) { if (value == 0) { // zero - writer.WriteByte(FdbTupleTypes.IntZero); + writer.Output.WriteByte(FdbTupleTypes.IntZero); return; } if (value > 0) { // 1..255: frequent for array index - writer.WriteByte2(FdbTupleTypes.IntPos1, (byte)value); + writer.Output.WriteByte2(FdbTupleTypes.IntPos1, (byte)value); return; } if (value > -256) { // -255..-1 - writer.WriteByte2(FdbTupleTypes.IntNeg1, 
(byte)(255 + value)); + writer.Output.WriteByte2(FdbTupleTypes.IntNeg1, (byte)(255 + value)); return; } } @@ -92,25 +111,25 @@ public static void WriteInt32(ref SliceWriter writer, int value) /// Writes an Int64 at the end, and advance the cursor /// Target buffer /// Signed QWORD, 64 bits, High Endian - public static void WriteInt64(ref SliceWriter writer, long value) + public static void WriteInt64(ref TupleWriter writer, long value) { if (value <= 255) { if (value == 0) { // zero - writer.WriteByte(FdbTupleTypes.IntZero); + writer.Output.WriteByte(FdbTupleTypes.IntZero); return; } if (value > 0) { // 1..255: frequent for array index - writer.WriteByte2(FdbTupleTypes.IntPos1, (byte)value); + writer.Output.WriteByte2(FdbTupleTypes.IntPos1, (byte)value); return; } if (value > -256) { // -255..-1 - writer.WriteByte2(FdbTupleTypes.IntNeg1, (byte)(255 + value)); + writer.Output.WriteByte2(FdbTupleTypes.IntNeg1, (byte)(255 + value)); return; } } @@ -118,17 +137,17 @@ public static void WriteInt64(ref SliceWriter writer, long value) WriteInt64Slow(ref writer, value); } - private static void WriteInt64Slow(ref SliceWriter writer, long value) + private static void WriteInt64Slow(ref TupleWriter writer, long value) { // we are only called for values <= -256 or >= 256 // determine the number of bytes needed to encode the absolute value int bytes = NumberOfBytes(value); - writer.EnsureBytes(bytes + 1); + writer.Output.EnsureBytes(bytes + 1); - var buffer = writer.Buffer; - int p = writer.Position; + var buffer = writer.Output.Buffer; + int p = writer.Output.Position; ulong v; if (value > 0) @@ -159,23 +178,23 @@ private static void WriteInt64Slow(ref SliceWriter writer, long value) // last buffer[p++] = (byte)v; } - writer.Position = p; + writer.Output.Position = p; } /// Writes an UInt32 at the end, and advance the cursor /// Target buffer /// Signed DWORD, 32 bits, High Endian - public static void WriteUInt32(ref SliceWriter writer, uint value) + public static void 
WriteUInt32(ref TupleWriter writer, uint value) { if (value <= 255) { if (value == 0) { // 0 - writer.WriteByte(FdbTupleTypes.IntZero); + writer.Output.WriteByte(FdbTupleTypes.IntZero); } else { // 1..255 - writer.WriteByte2(FdbTupleTypes.IntPos1, (byte)value); + writer.Output.WriteByte2(FdbTupleTypes.IntPos1, (byte)value); } } else @@ -187,17 +206,17 @@ public static void WriteUInt32(ref SliceWriter writer, uint value) /// Writes an UInt64 at the end, and advance the cursor /// Target buffer /// Signed QWORD, 64 bits, High Endian - public static void WriteUInt64(ref SliceWriter writer, ulong value) + public static void WriteUInt64(ref TupleWriter writer, ulong value) { if (value <= 255) { if (value == 0) { // 0 - writer.WriteByte(FdbTupleTypes.IntZero); + writer.Output.WriteByte(FdbTupleTypes.IntZero); } else { // 1..255 - writer.WriteByte2(FdbTupleTypes.IntPos1, (byte)value); + writer.Output.WriteByte2(FdbTupleTypes.IntPos1, (byte)value); } } else @@ -206,17 +225,17 @@ public static void WriteUInt64(ref SliceWriter writer, ulong value) } } - private static void WriteUInt64Slow(ref SliceWriter writer, ulong value) + private static void WriteUInt64Slow(ref TupleWriter writer, ulong value) { // We are only called for values >= 256 // determine the number of bytes needed to encode the value int bytes = NumberOfBytes(value); - writer.EnsureBytes(bytes + 1); + writer.Output.EnsureBytes(bytes + 1); - var buffer = writer.Buffer; - int p = writer.Position; + var buffer = writer.Output.Buffer; + int p = writer.Output.Position; // simple case (ulong can only be positive) buffer[p++] = (byte)(FdbTupleTypes.IntBase + bytes); @@ -236,13 +255,13 @@ private static void WriteUInt64Slow(ref SliceWriter writer, ulong value) buffer[p++] = (byte)value; } - writer.Position = p; + writer.Output.Position = p; } /// Writes an Single at the end, and advance the cursor /// Target buffer /// IEEE Floating point, 32 bits, High Endian - public static void WriteSingle(ref SliceWriter writer, 
float value) + public static void WriteSingle(ref TupleWriter writer, float value) { // The double is converted to its Big-Endian IEEE binary representation // - If the sign bit is set, flip all the bits @@ -264,21 +283,21 @@ public static void WriteSingle(ref SliceWriter writer, float value) { // postive bits |= 0x80000000U; } - writer.EnsureBytes(5); - var buffer = writer.Buffer; - int p = writer.Position; + writer.Output.EnsureBytes(5); + var buffer = writer.Output.Buffer; + int p = writer.Output.Position; buffer[p + 0] = FdbTupleTypes.Single; buffer[p + 1] = (byte)(bits >> 24); buffer[p + 2] = (byte)(bits >> 16); buffer[p + 3] = (byte)(bits >> 8); buffer[p + 4] = (byte)(bits); - writer.Position = p + 5; + writer.Output.Position = p + 5; } /// Writes an Double at the end, and advance the cursor /// Target buffer /// IEEE Floating point, 64 bits, High Endian - public static void WriteDouble(ref SliceWriter writer, double value) + public static void WriteDouble(ref TupleWriter writer, double value) { // The double is converted to its Big-Endian IEEE binary representation // - If the sign bit is set, flip all the bits @@ -300,9 +319,9 @@ public static void WriteDouble(ref SliceWriter writer, double value) { // postive bits |= 0x8000000000000000UL; } - writer.EnsureBytes(9); - var buffer = writer.Buffer; - int p = writer.Position; + writer.Output.EnsureBytes(9); + var buffer = writer.Output.Buffer; + int p = writer.Output.Position; buffer[p] = FdbTupleTypes.Double; buffer[p + 1] = (byte)(bits >> 56); buffer[p + 2] = (byte)(bits >> 48); @@ -312,15 +331,15 @@ public static void WriteDouble(ref SliceWriter writer, double value) buffer[p + 6] = (byte)(bits >> 16); buffer[p + 7] = (byte)(bits >> 8); buffer[p + 8] = (byte)(bits); - writer.Position = p + 9; + writer.Output.Position = p + 9; } /// Writes a binary string - public static void WriteBytes(ref SliceWriter writer, byte[] value) + public static void WriteBytes(ref TupleWriter writer, byte[] value) { if (value == 
null) { - writer.WriteByte(FdbTupleTypes.Nil); + WriteNil(ref writer); } else { @@ -329,15 +348,15 @@ public static void WriteBytes(ref SliceWriter writer, byte[] value) } /// Writes a string encoded in UTF-8 - public static unsafe void WriteString(ref SliceWriter writer, string value) + public static unsafe void WriteString(ref TupleWriter writer, string value) { if (value == null) { // "00" - writer.WriteByte(FdbTupleTypes.Nil); + WriteNil(ref writer); } else if (value.Length == 0) { // "02 00" - writer.WriteByte2(FdbTupleTypes.Utf8, 0x00); + writer.Output.WriteByte2(FdbTupleTypes.Utf8, 0x00); } else { @@ -352,7 +371,7 @@ public static unsafe void WriteString(ref SliceWriter writer, string value) } /// Writes a char array encoded in UTF-8 - internal static unsafe void WriteChars(ref SliceWriter writer, char[] value, int offset, int count) + internal static unsafe void WriteChars(ref TupleWriter writer, char[] value, int offset, int count) { Contract.Requires(offset >= 0 && count >= 0); @@ -360,46 +379,48 @@ internal static unsafe void WriteChars(ref SliceWriter writer, char[] value, int { if (value == null) { // "00" - writer.WriteByte(FdbTupleTypes.Nil); + WriteNil(ref writer); } else { // "02 00" - writer.WriteByte2(FdbTupleTypes.Utf8, 0x00); + writer.Output.WriteByte2(FdbTupleTypes.Utf8, 0x00); } } else { fixed (char* chars = value) { - if (TryWriteUnescapedUtf8String(ref writer, chars + offset, count)) return; + if (!TryWriteUnescapedUtf8String(ref writer, chars + offset, count)) + { // the string contains \0 chars, we need to do it the hard way + WriteNulEscapedBytes(ref writer, FdbTupleTypes.Utf8, Encoding.UTF8.GetBytes(value, 0, count)); + } } - // the string contains \0 chars, we need to do it the hard way - WriteNulEscapedBytes(ref writer, FdbTupleTypes.Utf8, Encoding.UTF8.GetBytes(value, 0, count)); } } - private static unsafe void WriteUnescapedAsciiChars(ref SliceWriter writer, char* chars, int count) + private static unsafe void 
WriteUnescapedAsciiChars(ref TupleWriter writer, char* chars, int count) { Contract.Requires(chars != null && count >= 0); // copy and convert an ASCII string directly into the destination buffer - writer.EnsureBytes(2 + count); - int pos = writer.Position; + writer.Output.EnsureBytes(2 + count); + int pos = writer.Output.Position; char* end = chars + count; - fixed (byte* buffer = writer.Buffer) + fixed (byte* buffer = writer.Output.Buffer) { buffer[pos++] = FdbTupleTypes.Utf8; + //OPTIMIZE: copy 2 or 4 chars at once, unroll loop? while(chars < end) { buffer[pos++] = (byte)(*chars++); } buffer[pos] = 0x00; - writer.Position = pos + 1; + writer.Output.Position = pos + 1; } } - private static unsafe bool TryWriteUnescapedUtf8String(ref SliceWriter writer, char* chars, int count) + private static unsafe bool TryWriteUnescapedUtf8String(ref TupleWriter writer, char* chars, int count) { Contract.Requires(chars != null && count >= 0); @@ -456,8 +477,8 @@ private static unsafe bool TryWriteUnescapedUtf8String(ref SliceWriter writer, c // We can not really predict the final size of the encoded string, but: // * Western languages have a few chars that usually need 2 bytes. If we pre-allocate 50% more bytes, it should fit most of the time, without too much waste // * Eastern langauges will have all chars encoded to 3 bytes. If we also pre-allocated 50% more, we should only need one resize of the buffer (150% x 2 = 300%), which is acceptable - writer.EnsureBytes(checked(2 + count + (count >> 1))); // preallocate 150% of the string + 2 bytes - writer.UnsafeWriteByte(FdbTupleTypes.Utf8); + writer.Output.EnsureBytes(checked(2 + count + (count >> 1))); // preallocate 150% of the string + 2 bytes + writer.Output.UnsafeWriteByte(FdbTupleTypes.Utf8); var encoder = Encoding.UTF8.GetEncoder(); // note: encoder.Convert() tries to fill up the buffer as much as possible with complete chars, and will set 'done' to true when all chars have been converted. 
@@ -466,7 +487,7 @@ private static unsafe bool TryWriteUnescapedUtf8String(ref SliceWriter writer, c encoder.Convert(ptr, remaining, buf, bufLen, true, out charsUsed, out bytesUsed, out done); if (bytesUsed > 0) { - writer.WriteBytes(buf, bytesUsed); + writer.Output.WriteBytes(buf, bytesUsed); } remaining -= charsUsed; ptr += charsUsed; @@ -475,7 +496,7 @@ private static unsafe bool TryWriteUnescapedUtf8String(ref SliceWriter writer, c Contract.Assert(remaining == 0 && ptr == end); // close the string - writer.WriteByte(0x00); + writer.Output.WriteByte(0x00); #endregion @@ -483,47 +504,47 @@ private static unsafe bool TryWriteUnescapedUtf8String(ref SliceWriter writer, c } /// Writes a char encoded in UTF-8 - public static void WriteChar(ref SliceWriter writer, char value) + public static void WriteChar(ref TupleWriter writer, char value) { if (value == 0) { // NUL => "00 0F" // note: \0 is the only unicode character that will produce a zero byte when converted in UTF-8 - writer.WriteByte4(FdbTupleTypes.Utf8, 0x00, 0xFF, 0x00); + writer.Output.WriteByte4(FdbTupleTypes.Utf8, 0x00, 0xFF, 0x00); } else if (value < 0x80) { // 0x00..0x7F => 0xxxxxxx - writer.WriteByte3(FdbTupleTypes.Utf8, (byte)value, 0x00); + writer.Output.WriteByte3(FdbTupleTypes.Utf8, (byte)value, 0x00); } else if (value < 0x800) { // 0x80..0x7FF => 110xxxxx 10xxxxxx => two bytes - writer.WriteByte4(FdbTupleTypes.Utf8, (byte)(0xC0 | (value >> 6)), (byte)(0x80 | (value & 0x3F)), 0x00); + writer.Output.WriteByte4(FdbTupleTypes.Utf8, (byte)(0xC0 | (value >> 6)), (byte)(0x80 | (value & 0x3F)), 0x00); } else { // 0x800..0xFFFF => 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx // note: System.Char is 16 bits, and thus cannot represent UNICODE chars above 0xFFFF. // => This means that a System.Char will never take more than 3 bytes in UTF-8 ! 
var tmp = Encoding.UTF8.GetBytes(new string(value, 1)); - writer.EnsureBytes(tmp.Length + 2); - writer.UnsafeWriteByte(FdbTupleTypes.Utf8); - writer.UnsafeWriteBytes(tmp, 0, tmp.Length); - writer.UnsafeWriteByte(0x00); + writer.Output.EnsureBytes(tmp.Length + 2); + writer.Output.UnsafeWriteByte(FdbTupleTypes.Utf8); + writer.Output.UnsafeWriteBytes(tmp, 0, tmp.Length); + writer.Output.UnsafeWriteByte(0x00); } } /// Writes a binary string - public static void WriteBytes(ref SliceWriter writer, byte[] value, int offset, int count) + public static void WriteBytes(ref TupleWriter writer, byte[] value, int offset, int count) { WriteNulEscapedBytes(ref writer, FdbTupleTypes.Bytes, value, offset, count); } /// Writes a binary string - public static void WriteBytes(ref SliceWriter writer, ArraySegment value) + public static void WriteBytes(ref TupleWriter writer, ArraySegment value) { WriteNulEscapedBytes(ref writer, FdbTupleTypes.Bytes, value.Array, value.Offset, value.Count); } /// Writes a buffer with all instances of 0 escaped as '00 FF' - internal static void WriteNulEscapedBytes(ref SliceWriter writer, byte type, byte[] value, int offset, int count) + internal static void WriteNulEscapedBytes(ref TupleWriter writer, byte type, byte[] value, int offset, int count) { int n = count; @@ -534,9 +555,9 @@ internal static void WriteNulEscapedBytes(ref SliceWriter writer, byte type, byt if (value[i] == 0) ++n; } - writer.EnsureBytes(n + 2); - var buffer = writer.Buffer; - int p = writer.Position; + writer.Output.EnsureBytes(n + 2); + var buffer = writer.Output.Buffer; + int p = writer.Output.Position; buffer[p++] = type; if (n > 0) { @@ -555,12 +576,12 @@ internal static void WriteNulEscapedBytes(ref SliceWriter writer, byte type, byt } } } - buffer[p] = FdbTupleTypes.Nil; - writer.Position = p + 1; + buffer[p] = 0x00; + writer.Output.Position = p + 1; } /// Writes a buffer with all instances of 0 escaped as '00 FF' - private static void WriteNulEscapedBytes(ref SliceWriter 
writer, byte type, byte[] value) + private static void WriteNulEscapedBytes(ref TupleWriter writer, byte type, byte[] value) { int n = value.Length; // we need to know if there are any NUL chars (\0) that need escaping... @@ -570,9 +591,9 @@ private static void WriteNulEscapedBytes(ref SliceWriter writer, byte type, byte if (b == 0) ++n; } - writer.EnsureBytes(n + 2); - var buffer = writer.Buffer; - int p = writer.Position; + writer.Output.EnsureBytes(n + 2); + var buffer = writer.Output.Buffer; + int p = writer.Output.Position; buffer[p++] = type; if (n > 0) { @@ -590,48 +611,48 @@ private static void WriteNulEscapedBytes(ref SliceWriter writer, byte type, byte } } } - buffer[p++] = FdbTupleTypes.Nil; - writer.Position = p; + buffer[p++] = 0x00; + writer.Output.Position = p; } /// Writes a RFC 4122 encoded 16-byte Microsoft GUID - public static void WriteGuid(ref SliceWriter writer, Guid value) + public static void WriteGuid(ref TupleWriter writer, Guid value) { - writer.EnsureBytes(17); - writer.UnsafeWriteByte(FdbTupleTypes.Uuid128); + writer.Output.EnsureBytes(17); + writer.Output.UnsafeWriteByte(FdbTupleTypes.Uuid128); unsafe { // UUIDs are stored using the RFC 4122 standard, so we need to swap some parts of the System.Guid byte* ptr = stackalloc byte[16]; Uuid128.Write(value, ptr); - writer.UnsafeWriteBytes(ptr, 16); + writer.Output.UnsafeWriteBytes(ptr, 16); } } /// Writes a RFC 4122 encoded 128-bit UUID - public static void WriteUuid128(ref SliceWriter writer, Uuid128 value) + public static void WriteUuid128(ref TupleWriter writer, Uuid128 value) { - writer.EnsureBytes(17); - writer.UnsafeWriteByte(FdbTupleTypes.Uuid128); + writer.Output.EnsureBytes(17); + writer.Output.UnsafeWriteByte(FdbTupleTypes.Uuid128); unsafe { byte* ptr = stackalloc byte[16]; value.WriteTo(ptr); - writer.UnsafeWriteBytes(ptr, 16); + writer.Output.UnsafeWriteBytes(ptr, 16); } } /// Writes a 64-bit UUID - public static void WriteUuid64(ref SliceWriter writer, Uuid64 value) + public 
static void WriteUuid64(ref TupleWriter writer, Uuid64 value) { - writer.EnsureBytes(9); - writer.UnsafeWriteByte(FdbTupleTypes.Uuid64); + writer.Output.EnsureBytes(9); + writer.Output.UnsafeWriteByte(FdbTupleTypes.Uuid64); unsafe { byte* ptr = stackalloc byte[8]; value.WriteTo(ptr); - writer.UnsafeWriteBytes(ptr, 8); + writer.Output.UnsafeWriteBytes(ptr, 8); } } @@ -750,7 +771,7 @@ internal static IFdbTuple ParseTuple(Slice slice) Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.TupleStart); if (slice.Count <= 2) return FdbTuple.Empty; - return FdbTuple.Unpack(slice.Substring(1, slice.Count - 2)); + return FdbTuplePackers.Unpack(slice.Substring(1, slice.Count - 2), true); } internal static float ParseSingle(Slice slice) @@ -856,9 +877,9 @@ internal static Uuid64 ParseUuid64(Slice slice) /// Decode the next token from a packed tuple /// Parser from wich to read the next token /// Token decoded, or Slice.Nil if there was no more data in the buffer - public static Slice ParseNext(ref SliceReader reader) + public static Slice ParseNext(ref TupleReader reader) { - int type = reader.PeekByte(); + int type = reader.Input.PeekByte(); switch (type) { case -1: @@ -867,19 +888,35 @@ public static Slice ParseNext(ref SliceReader reader) } case FdbTupleTypes.Nil: - { // <00> => null - reader.Skip(1); - return Slice.Empty; + { // <00> / <00> => null + if (reader.Depth > 0) + { // must be <00> inside an embedded tuple + if (reader.Input.PeekByteAt(1) == 0xFF) + { // this is a Nil entry + reader.Input.Skip(2); + return Slice.Empty; + } + else + { // this is the end of the embedded tuple + reader.Input.Skip(1); + return Slice.Nil; + } + } + else + { // can be <00> outside an embedded tuple + reader.Input.Skip(1); + return Slice.Empty; + } } case FdbTupleTypes.Bytes: { // <01>(bytes)<00> - return reader.ReadByteString(); + return reader.Input.ReadByteString(); } case FdbTupleTypes.Utf8: { // <02>(utf8 bytes)<00> - return reader.ReadByteString(); + return 
reader.Input.ReadByteString(); } case FdbTupleTypes.TupleStart: @@ -889,39 +926,30 @@ public static Slice ParseNext(ref SliceReader reader) // This means that we may need to scan multiple times the bytes, which may not be efficient if there are multiple embedded tuples inside each other return ReadEmbeddedTupleBytes(ref reader); } - - case FdbTupleTypes.TupleEnd: - { // End Of Tuple - // this should not happen in regular parsing (the <04> is eaten by ReadEmbeddedTupleBytes()) - // but it could happen if someone is parsing a stream of tokens, and want to stops there. - reader.Skip(1); - return Slice.Nil; - } - case FdbTupleTypes.Single: { // <20>(4 bytes) - return reader.ReadBytes(5); + return reader.Input.ReadBytes(5); } case FdbTupleTypes.Double: { // <21>(8 bytes) - return reader.ReadBytes(9); + return reader.Input.ReadBytes(9); } case FdbTupleTypes.Uuid128: { // <30>(16 bytes) - return reader.ReadBytes(17); + return reader.Input.ReadBytes(17); } case FdbTupleTypes.Uuid64: { // <31>(8 bytes) - return reader.ReadBytes(9); + return reader.Input.ReadBytes(9); } case FdbTupleTypes.AliasDirectory: case FdbTupleTypes.AliasSystem: { // or - return reader.ReadBytes(1); + return reader.Input.ReadBytes(1); } } @@ -930,30 +958,38 @@ public static Slice ParseNext(ref SliceReader reader) int bytes = type - FdbTupleTypes.IntZero; if (bytes < 0) bytes = -bytes; - return reader.ReadBytes(1 + bytes); + return reader.Input.ReadBytes(1 + bytes); } - throw new FormatException(String.Format("Invalid tuple type byte {0} at index {1}/{2}", type, reader.Position, reader.Buffer.Count)); + throw new FormatException(String.Format("Invalid tuple type byte {0} at index {1}/{2}", type, reader.Input.Position, reader.Input.Buffer.Count)); } /// Read an embedded tuple, without parsing it - internal static Slice ReadEmbeddedTupleBytes(ref SliceReader reader) + internal static Slice ReadEmbeddedTupleBytes(ref TupleReader reader) { - // The current embedded tuple starts here, and stops on a <04>, 
but itself can contain more embedded tuples, and could have data with a <04> in them (like integer 4, a byte array with 0x04 inside it, ...) + // The current embedded tuple starts here, and stops on a <00>, but itself can contain more embedded tuples, and could have a <00> bytes as part of regular items (like bytes, strings, that end with <00> or could contain a <00> ...) // This means that we have to parse the tuple recursively, discard the tokens, and note where the cursor ended. The parsing of the tuple itself will be processed later. - int start = reader.Position; - reader.Skip(1); + ++reader.Depth; + int start = reader.Input.Position; + reader.Input.Skip(1); - while(reader.PeekByte() != FdbTupleTypes.TupleEnd) + while(reader.Input.HasMore) { var token = ParseNext(ref reader); - if (token.IsNullOrEmpty) throw new FormatException(String.Format("Truncated embedded tuple started at index {0}/{1}", start, reader.Buffer.Count)); + // the token will be Nil for either the end of the stream, or the end of the tuple + // => since we already tested Input.HasMore, we know we are in the later case + if (token.IsNull) + { + --reader.Depth; + //note: ParseNext() has already eaten the <00> + int end = reader.Input.Position; + return reader.Input.Buffer.Substring(start, end - start); + } + // else: ignore this token, it will be processed later if the tuple is unpacked and accessed } - reader.Skip(1); - int end = reader.Position; - return reader.Buffer.Substring(start, end - start); + throw new FormatException(String.Format("Truncated embedded tuple started at index {0}/{1}", start, reader.Input.Buffer.Count)); } #endregion diff --git a/FoundationDB.Client/Layers/Tuples/FdbTupleTypes.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleTypes.cs index 948945000..d37163b23 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleTypes.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleTypes.cs @@ -46,8 +46,6 @@ internal static class FdbTupleTypes /// Nested tuple [DRAFT] internal const 
byte TupleStart = 3; - /// End of a nested tuple [DRAFT] - internal const byte TupleEnd = 4; internal const byte IntNeg8 = 12; internal const byte IntNeg7 = 13; diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`1.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`1.cs index e86582cd0..54c7c46d9 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`1.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`1.cs @@ -49,6 +49,7 @@ public struct FdbTuple : IFdbTuple /// First and only item in the tuple public readonly T1 Item1; + [DebuggerStepThrough] public FdbTuple(T1 item1) { this.Item1 = item1; @@ -80,7 +81,7 @@ public R Get(int index) return FdbConverters.Convert(this.Item1); } - public void PackTo(ref SliceWriter writer) + public void PackTo(ref TupleWriter writer) { FdbTuplePacker.Encoder(ref writer, this.Item1); } diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`2.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`2.cs index 75a711ea4..fd4399562 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`2.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`2.cs @@ -52,6 +52,7 @@ public struct FdbTuple : IFdbTuple /// Seconde element of the pair public readonly T2 Item2; + [DebuggerStepThrough] public FdbTuple(T1 item1, T2 item2) { this.Item1 = item1; @@ -92,7 +93,7 @@ public R Get(int index) } } - public void PackTo(ref SliceWriter writer) + public void PackTo(ref TupleWriter writer) { FdbTuplePacker.Encoder(ref writer, this.Item1); FdbTuplePacker.Encoder(ref writer, this.Item2); diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`3.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`3.cs index 135f8b849..b702c8fac 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`3.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`3.cs @@ -56,6 +56,7 @@ public struct FdbTuple : IFdbTuple /// Third and last elemnt of the triplet public readonly T3 Item3; + [DebuggerStepThrough] public FdbTuple(T1 item1, T2 item2, T3 item3) { this.Item1 = item1; @@ -99,7 +100,7 @@ 
public R Get(int index) } } - public void PackTo(ref SliceWriter writer) + public void PackTo(ref TupleWriter writer) { FdbTuplePacker.Encoder(ref writer, this.Item1); FdbTuplePacker.Encoder(ref writer, this.Item2); diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`4.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`4.cs index 589027346..1268b4deb 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`4.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`4.cs @@ -60,6 +60,7 @@ public struct FdbTuple : IFdbTuple public readonly T4 Item4; /// Create a tuple containing for items + [DebuggerStepThrough] public FdbTuple(T1 item1, T2 item2, T3 item3, T4 item4) { this.Item1 = item1; @@ -108,7 +109,7 @@ public R Get(int index) } } - public void PackTo(ref SliceWriter writer) + public void PackTo(ref TupleWriter writer) { FdbTuplePacker.Encoder(ref writer, this.Item1); FdbTuplePacker.Encoder(ref writer, this.Item2); diff --git a/FoundationDB.Client/Layers/Tuples/IFdbTuple.cs b/FoundationDB.Client/Layers/Tuples/IFdbTuple.cs index 3de337c81..7f954f7ab 100644 --- a/FoundationDB.Client/Layers/Tuples/IFdbTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/IFdbTuple.cs @@ -117,7 +117,7 @@ public interface IFdbTuple : IEnumerable, IEquatable, IReadOn /// Appends the packed bytes of this instance to the end of a buffer /// Buffer that will received the packed bytes of this instance - void PackTo(ref SliceWriter writer); + void PackTo(ref TupleWriter writer); /// Pack this instance into a Slice /// diff --git a/FoundationDB.Client/Layers/Tuples/TupleReader.cs b/FoundationDB.Client/Layers/Tuples/TupleReader.cs new file mode 100644 index 000000000..14e1b9895 --- /dev/null +++ b/FoundationDB.Client/Layers/Tuples/TupleReader.cs @@ -0,0 +1,54 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +namespace FoundationDB.Layers.Tuples +{ + using FoundationDB.Client; + using System; + using System.Diagnostics; + + [DebuggerDisplay("{Input.Position}/{Input.Buffer.Count} @ {Depth}")] + public struct TupleReader + { + public SliceReader Input; + public int Depth; + + public TupleReader(Slice buffer) + { + this.Input = new SliceReader(buffer); + this.Depth = 0; + } + + public TupleReader(SliceReader input) + { + this.Input = input; + this.Depth = 0; + } + } + +} diff --git a/FoundationDB.Client/Layers/Tuples/TupleWriter.cs b/FoundationDB.Client/Layers/Tuples/TupleWriter.cs new file mode 100644 index 000000000..9d33d227a --- /dev/null +++ b/FoundationDB.Client/Layers/Tuples/TupleWriter.cs @@ -0,0 +1,55 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +namespace FoundationDB.Layers.Tuples +{ + using FoundationDB.Client; + using System; + using System.Diagnostics; + + [DebuggerDisplay("{Output.Position}/{Output.Buffer.Length} @ {Depth}")] + public struct TupleWriter + { + public SliceWriter Output; + public int Depth; + + public TupleWriter(SliceWriter buffer) + { + this.Output = buffer; + this.Depth = 0; + } + + public TupleWriter(int capacity) + { + this.Output = new SliceWriter(capacity); + this.Depth = 0; + } + + } + +} diff --git a/FoundationDB.Client/Utils/SliceReader.cs b/FoundationDB.Client/Utils/SliceReader.cs index 11b382d02..bb92ade0f 100644 --- a/FoundationDB.Client/Utils/SliceReader.cs +++ b/FoundationDB.Client/Utils/SliceReader.cs @@ -94,6 +94,14 @@ public int PeekByte() return p < m_buffer.Count ? m_buffer[p] : -1; } + /// Return the value of the byte at a specified offset from the current position, or -1 if this is after the end, or before the start + [Pure] + public int PeekByteAt(int offset) + { + int p = this.Position + offset; + return p < m_buffer.Count && p >= 0 ? 
m_buffer[p] : -1; + } + /// Skip the next bytes of the buffer public void Skip(int count) { diff --git a/FoundationDB.Tests/Layers/TupleFacts.cs b/FoundationDB.Tests/Layers/TupleFacts.cs index d78e00b29..49a132768 100644 --- a/FoundationDB.Tests/Layers/TupleFacts.cs +++ b/FoundationDB.Tests/Layers/TupleFacts.cs @@ -1319,47 +1319,67 @@ public void Test_FdbTuple_Deserialize_Alias() [Test] public void Test_FdbTuple_Serialize_Embedded_Tuples() { - Slice key; - IFdbTuple t; + Action verify = (t, expected) => + { + var key = t.ToSlice(); + Assert.That(key.ToHexaString(' '), Is.EqualTo(expected)); + Assert.That(FdbTuple.Pack(t), Is.EqualTo(key)); + var t2 = FdbTuple.Unpack(key); + Assert.That(t2, Is.Not.Null); + Assert.That(t2.Count, Is.EqualTo(t.Count), "{0}", t2); + Assert.That(t2, Is.EqualTo(t)); + }; // Index composite key IFdbTuple value = FdbTuple.Create(2014, 11, 6); // Indexing a date value (Y, M, D) string docId = "Doc123"; // key would be "(..., value, id)" - // Create(...).ToSlice() - key = FdbTuple.Create(42, value, docId).ToSlice(); - Assert.That(key.ToHexaString(' '), Is.EqualTo("15 2A 03 16 07 DE 15 0B 15 06 04 02 44 6F 63 31 32 33 00")); - t = FdbTuple.Unpack(key); - Console.WriteLine(t); - - key = FdbTuple.Create(new object[] { 42, value, docId }).ToSlice(); - Assert.That(key.ToHexaString(' '), Is.EqualTo("15 2A 03 16 07 DE 15 0B 15 06 04 02 44 6F 63 31 32 33 00")); - t = FdbTuple.Unpack(key); - Console.WriteLine(t); - - // subspace.Append(value).Append(id).ToSlice() - key = FdbTuple.Create(42).Append(value).Append(docId).ToSlice(); - Assert.That(key.ToHexaString(' '), Is.EqualTo("15 2A 03 16 07 DE 15 0B 15 06 04 02 44 6F 63 31 32 33 00")); - t = FdbTuple.Unpack(key); - Console.WriteLine(t); - - // subspace.Append(value, id).ToSlice() - key = FdbTuple.Create(42).Append(value, docId).ToSlice(); - Assert.That(key.ToHexaString(' '), Is.EqualTo("15 2A 03 16 07 DE 15 0B 15 06 04 02 44 6F 63 31 32 33 00")); - t = FdbTuple.Unpack(key); - Console.WriteLine(t); - 
- // FdbTuple.Pack(..., value, id) - key = FdbTuple.Pack(42, value, docId); - Assert.That(key.ToHexaString(' '), Is.EqualTo("15 2A 03 16 07 DE 15 0B 15 06 04 02 44 6F 63 31 32 33 00")); - t = FdbTuple.Unpack(key); - Console.WriteLine(t); - - t = FdbTuple.Create(1, FdbTuple.Create(2, 3), FdbTuple.Create(FdbTuple.Create(4, 5, 6)), 7); - Console.WriteLine(t); - key = t.ToSlice(); - Console.WriteLine(key); + verify( + FdbTuple.Create(42, value, docId), + "15 2A 03 16 07 DE 15 0B 15 06 00 02 44 6F 63 31 32 33 00" + ); + verify( + FdbTuple.Create(new object[] { 42, value, docId }), + "15 2A 03 16 07 DE 15 0B 15 06 00 02 44 6F 63 31 32 33 00" + ); + verify( + FdbTuple.Create(42).Append(value).Append(docId), + "15 2A 03 16 07 DE 15 0B 15 06 00 02 44 6F 63 31 32 33 00" + ); + verify( + FdbTuple.Create(42).Append(value, docId), + "15 2A 03 16 07 DE 15 0B 15 06 00 02 44 6F 63 31 32 33 00" + ); + + // multiple depth + verify( + FdbTuple.Create(1, FdbTuple.Create(2, 3), FdbTuple.Create(FdbTuple.Create(4, 5, 6)), 7), + "15 01 03 15 02 15 03 00 03 03 15 04 15 05 15 06 00 00 15 07" + ); + + // corner cases + verify( + FdbTuple.Create(FdbTuple.Empty), + "03 00" // empty tumple should have header and footer + ); + verify( + FdbTuple.Create(FdbTuple.Empty, default(string)), + "03 00 00" // outer null should not be escaped + ); + verify( + FdbTuple.Create(FdbTuple.Create(default(string)), default(string)), + "03 00 FF 00 00" // inner null should be escaped, but not outer + ); + verify( + FdbTuple.Create(FdbTuple.Create(0x100, 0x10000, 0x1000000)), + "03 16 01 00 17 01 00 00 18 01 00 00 00 00" + ); + verify( + FdbTuple.Create(default(string), FdbTuple.Empty, default(string), FdbTuple.Create(default(string)), default(string)), + "00 03 00 00 03 00 FF 00 00" + ); + } [Test] @@ -1650,10 +1670,10 @@ public void Test_FdbTuple_Serialize_ITupleFormattable() Slice packed; packed = FdbTuplePacker.Serialize(new Thing { Foo = 123, Bar = "hello" }); - Assert.That(packed.ToString(), 
Is.EqualTo("<03><15>{<02>hello<00><04>")); + Assert.That(packed.ToString(), Is.EqualTo("<03><15>{<02>hello<00><00>")); packed = FdbTuplePacker.Serialize(new Thing()); - Assert.That(packed.ToString(), Is.EqualTo("<03><14><00><04>")); + Assert.That(packed.ToString(), Is.EqualTo("<03><14><00><00>")); packed = FdbTuplePacker.Serialize(default(Thing)); Assert.That(packed.ToString(), Is.EqualTo("<00>")); @@ -1666,18 +1686,21 @@ public void Test_FdbTuple_Deserialize_ITupleFormattable() Slice slice; Thing thing; - slice = Slice.Unescape("<03><16><01><02>world<00><04>"); + slice = Slice.Unescape("<03><16><01><02>world<00><00>"); thing = FdbTuplePackers.DeserializeFormattable(slice); Assert.That(thing, Is.Not.Null); Assert.That(thing.Foo, Is.EqualTo(456)); Assert.That(thing.Bar, Is.EqualTo("world")); - slice = Slice.Unescape("<03><14><00><04>"); + slice = Slice.Unescape("<03><14><00><00>"); thing = FdbTuplePackers.DeserializeFormattable(slice); Assert.That(thing, Is.Not.Null); Assert.That(thing.Foo, Is.EqualTo(0)); Assert.That(thing.Bar, Is.EqualTo(null)); + slice = Slice.Unescape("<00>"); + thing = FdbTuplePackers.DeserializeFormattable(slice); + Assert.That(thing, Is.Null); } [Test] @@ -1806,10 +1829,13 @@ private static string Clean(string value) private static void PerformWriterTest(FdbTuplePackers.Encoder action, T value, string expectedResult, string message = null) { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); action(ref writer, value); - Assert.That(writer.ToSlice().ToHexaString(' '), Is.EqualTo(expectedResult), message != null ? "Value {0} ({1}) was not properly packed: {2}" : "Value {0} ({1}) was not properly packed", value == null ? "" : value is string ? Clean(value as string) : value.ToString(), (value == null ? "null" : value.GetType().Name), message); + Assert.That( + writer.Output.ToSlice().ToHexaString(' '), + Is.EqualTo(expectedResult), + message != null ? 
"Value {0} ({1}) was not properly packed: {2}" : "Value {0} ({1}) was not properly packed", value == null ? "" : value is string ? Clean(value as string) : value.ToString(), (value == null ? "null" : value.GetType().Name), message); } [Test] @@ -1856,9 +1882,9 @@ public void Test_FdbTupleParser_WriteInt64_Respects_Ordering() Action test = (x) => { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); FdbTupleParser.WriteInt64(ref writer, x); - var res = new KeyValuePair(x, writer.ToSlice()); + var res = new KeyValuePair(x, writer.Output.ToSlice()); list.Add(res); Console.WriteLine("{0,20} : {0:x16} {1}", res.Key, res.Value.ToString()); }; @@ -1946,9 +1972,9 @@ public void Test_FdbTupleParser_WriteUInt64_Respects_Ordering() Action test = (x) => { - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); FdbTupleParser.WriteUInt64(ref writer, x); - var res = new KeyValuePair(x, writer.ToSlice()); + var res = new KeyValuePair(x, writer.Output.ToSlice()); list.Add(res); #if DEBUG Console.WriteLine("{0,20} : {0:x16} {1}", res.Key, res.Value.ToString()); @@ -2097,10 +2123,10 @@ public void Test_FdbTupleParser_WriteChar() for (int i = 1; i <= 65535; i++) { char c = (char)i; - var writer = SliceWriter.Empty; + var writer = new TupleWriter(); FdbTupleParser.WriteChar(ref writer, c); string s = new string(c, 1); - Assert.That(writer.ToSlice().ToString(), Is.EqualTo("<02>" + Slice.Create(Encoding.UTF8.GetBytes(s)).ToString() + "<00>"), "{0} '{1}'", i, c); + Assert.That(writer.Output.ToSlice().ToString(), Is.EqualTo("<02>" + Slice.Create(Encoding.UTF8.GetBytes(s)).ToString() + "<00>"), "{0} '{1}'", i, c); } } From 82000af1a87cd3562c35eba4d52b42dd3bbfe5a3 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Sun, 16 Nov 2014 18:09:47 +0100 Subject: [PATCH 04/63] Organized projects using Solution Folders --- FoundationDB.Client.sln | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/FoundationDB.Client.sln 
b/FoundationDB.Client.sln index cdb6ac1e6..1e56fe6a5 100644 --- a/FoundationDB.Client.sln +++ b/FoundationDB.Client.sln @@ -42,6 +42,16 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution README.md = README.md EndProjectSection EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Core", "Core", "{A00227EE-033B-40BB-A159-3CBC99AE54F8}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Tools", "Tools", "{1F010B54-2028-4B4D-A6FF-9892A00E83FC}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Experimental", "Experimental", "{19AF13DE-0DC9-4E4A-BBB0-0EF9386F9DC8}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Tests", "Tests", "{C443AC73-535E-492E-BDE3-FBF01446842C}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Samples", "Samples", "{B40329A5-32B5-4056-8C8F-424E845D2688}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -92,4 +102,16 @@ Global GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection + GlobalSection(NestedProjects) = preSolution + {773166B7-DE74-4FCC-845C-84080CC89533} = {A00227EE-033B-40BB-A159-3CBC99AE54F8} + {CC36F01C-64E4-4B2E-8376-19D72EDE7D47} = {C443AC73-535E-492E-BDE3-FBF01446842C} + {7C888F78-99B1-4CA1-8973-282EC5CCCACD} = {C443AC73-535E-492E-BDE3-FBF01446842C} + {7C7717D6-A1E7-4541-AF8B-1AC762B5ED0F} = {A00227EE-033B-40BB-A159-3CBC99AE54F8} + {FAF14E3F-6662-4084-8B92-E6697F6B9D5A} = {A00227EE-033B-40BB-A159-3CBC99AE54F8} + {D2EE84C9-0554-40D2-B61F-8FCB4B67CEAA} = {B40329A5-32B5-4056-8C8F-424E845D2688} + {E631BCD4-386C-4EB1-AD4D-CABCE77BB4C8} = {19AF13DE-0DC9-4E4A-BBB0-0EF9386F9DC8} + {CC98DB39-31A1-4642-B4FC-9CB0AB26BF2E} = {19AF13DE-0DC9-4E4A-BBB0-0EF9386F9DC8} + {AF76A8D4-E682-4E72-B656-BE3D935712DB} = {C443AC73-535E-492E-BDE3-FBF01446842C} + {60C39E7E-E6CD-404B-8F9B-9BABF302AABC} = {1F010B54-2028-4B4D-A6FF-9892A00E83FC} + 
EndGlobalSection EndGlobal From 31ff2924fbe26f1a15352c1432e45246c18f3501 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Sun, 16 Nov 2014 19:35:46 +0100 Subject: [PATCH 05/63] Continue Tuple API refactoring - Renamed methods on staticclass FdbTuple to be in synch with the new naming conventions - FdbTuple.EncodeKey() *could* be used to encoded values, but the name suggest that it tries to keep ordering properties that may be overkill for values - Simplified names with replacing "Range" by either the plural (EncodeKeys) or nothing if there are no overload resolution ambiguity possible --- .../Encoders/KeyValueEncoders.cs | 20 +- FoundationDB.Client/Layers/Tuples/FdbTuple.cs | 350 ++++++++------ .../Layers/Tuples/FdbTupleCodec`1.cs | 6 +- .../Layers/Tuples/FdbTuple`1.cs | 2 +- .../Layers/Tuples/FdbTuple`2.cs | 2 +- .../Layers/Tuples/FdbTuple`3.cs | 2 +- .../Layers/Tuples/FdbTuple`4.cs | 2 +- .../Subspaces/FdbSubspaceTuples.cs | 120 +++-- .../Documents/FdbHashSetCollection.cs | 9 +- .../Benchmarks/BenchRunner.cs | 2 +- .../MessageQueue/MessageQueueRunner.cs | 2 +- .../Tutorials/ClassScheduling.cs | 4 +- .../Collections/ColaOrderedSetFacts.cs | 8 +- FoundationDB.Tests.Sandbox/Program.cs | 4 +- FoundationDB.Tests/DatabaseFacts.cs | 10 +- FoundationDB.Tests/Encoders/EncoderFacts.cs | 10 +- FoundationDB.Tests/Encoders/TypeCodecFacts.cs | 26 +- FoundationDB.Tests/FdbTest.cs | 25 +- FoundationDB.Tests/KeyFacts.cs | 28 +- FoundationDB.Tests/Layers/DirectoryFacts.cs | 6 +- FoundationDB.Tests/Layers/MapFacts.cs | 2 +- FoundationDB.Tests/Layers/RankedSetFacts.cs | 2 +- FoundationDB.Tests/Layers/TupleFacts.cs | 435 ++++++++---------- FoundationDB.Tests/RangeQueryFacts.cs | 10 +- 24 files changed, 584 insertions(+), 503 deletions(-) diff --git a/FoundationDB.Client/Encoders/KeyValueEncoders.cs b/FoundationDB.Client/Encoders/KeyValueEncoders.cs index b6abe6766..a4e7f9afc 100644 --- a/FoundationDB.Client/Encoders/KeyValueEncoders.cs +++ 
b/FoundationDB.Client/Encoders/KeyValueEncoders.cs @@ -625,24 +625,24 @@ private TupleKeyEncoder() { } public Slice EncodeKey(T key) { - return FdbTuple.Pack(key); + return FdbTuple.EncodeKey(key); } public T DecodeKey(Slice encoded) { if (encoded.IsNullOrEmpty) return default(T); //BUGBUG - return FdbTuple.UnpackSingle(encoded); + return FdbTuple.DecodeKey(encoded); } public Slice EncodeValue(T key) { - return FdbTuple.Pack(key); + return FdbTuple.EncodeKey(key); } public T DecodeValue(Slice encoded) { if (encoded.IsNullOrEmpty) return default(T); //BUGBUG - return FdbTuple.UnpackSingle(encoded); + return FdbTuple.DecodeKey(encoded); } } @@ -659,7 +659,7 @@ public override Slice EncodeComposite(FdbTuple key, int items) switch (items) { case 2: return key.ToSlice(); - case 1: return FdbTuple.Pack(key.Item1); + case 1: return FdbTuple.EncodeKey(key.Item1); default: throw new ArgumentOutOfRangeException("items", items, "Item count must be either 1 or 2"); } } @@ -691,8 +691,8 @@ public override Slice EncodeComposite(FdbTuple key, int items) switch (items) { case 3: return key.ToSlice(); - case 2: return FdbTuple.Pack(key.Item1, key.Item2); - case 1: return FdbTuple.Pack(key.Item1); + case 2: return FdbTuple.EncodeKey(key.Item1, key.Item2); + case 1: return FdbTuple.EncodeKey(key.Item1); default: throw new ArgumentOutOfRangeException("items", items, "Item count must be between 1 and 3"); } } @@ -725,9 +725,9 @@ public override Slice EncodeComposite(FdbTuple key, int items) switch (items) { case 4: return key.ToSlice(); - case 3: return FdbTuple.Pack(key.Item1, key.Item2, key.Item3); - case 2: return FdbTuple.Pack(key.Item1, key.Item2); - case 1: return FdbTuple.Pack(key.Item1); + case 3: return FdbTuple.EncodeKey(key.Item1, key.Item2, key.Item3); + case 2: return FdbTuple.EncodeKey(key.Item1, key.Item2); + case 1: return FdbTuple.EncodeKey(key.Item1); default: throw new ArgumentOutOfRangeException("items", items, "Item count must be between 1 and 4"); } } diff --git 
a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs index 96c08e6c4..2ea33bbc9 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs @@ -206,6 +206,9 @@ public static IFdbTuple Create(params object[] items) [NotNull] public static IFdbTuple Wrap(object[] items) { + //REVIEW: this is identital to Create(params object[]) !! + //TODO: remove this? + if (items == null) throw new ArgumentNullException("items"); if (items.Length == 0) return FdbTuple.Empty; @@ -330,11 +333,7 @@ public static IFdbTuple Concat([NotNull] IFdbTuple head, [NotNull] IFdbTuple tai #region Packing... - //BUGBUG: there is a potential type resolution problem between FdbTuple.Pack(IFdbTuple) and FdbTuple.Pack(T) - // ex: FdbTuple.Pack(FdbTuple.Create(1, 2)) will call FdbTuplePack(T) with T == typeof(FdbTuple) instead of expected FdbTuple.Pack(IFdbTuple) - // problem is that Pack() will treat this as the embedded tuple ((1, 2),) instead of expected (1, 2) - - //TODO: We *MUST* rename the various Pack() to something else, to solve this issue! 
+ // Without prefix /// Pack a tuple into a slice /// Tuple that must be serialized into a binary slice @@ -347,17 +346,164 @@ public static Slice Pack([NotNull] IFdbTuple tuple) return tuple.ToSlice(); } - /// Pack a 1-tuple directly into a slice - /// This is the non-generic equivalent of FdbTuple.Pack<object>() - public static Slice PackBoxed(object item) + /// Pack an array of N-tuples, all sharing the same buffer + /// Sequence of N-tuples to pack + /// Array containing the buffer segment of each packed tuple + /// BatchPack([ ("Foo", 1), ("Foo", 2) ]) => [ "\x02Foo\x00\x15\x01", "\x02Foo\x00\x15\x02" ] + [NotNull] + public static Slice[] Pack([NotNull] params IFdbTuple[] tuples) { + return Pack(Slice.Nil, tuples); + } + + /// Pack a sequence of N-tuples, all sharing the same buffer + /// Sequence of N-tuples to pack + /// Array containing the buffer segment of each packed tuple + /// BatchPack([ ("Foo", 1), ("Foo", 2) ]) => [ "\x02Foo\x00\x15\x01", "\x02Foo\x00\x15\x02" ] + [NotNull] + public static Slice[] Pack([NotNull] IEnumerable tuples) + { + return Pack(Slice.Nil, tuples); + } + + // With prefix + + /// Efficiently concatenate a prefix with the packed representation of a tuple + public static Slice Pack(Slice prefix, [CanBeNull] IFdbTuple tuple) + { + if (tuple == null || tuple.Count == 0) return prefix; + var writer = new TupleWriter(); - FdbTuplePackers.SerializeObjectTo(ref writer, item); + writer.Output.WriteBytes(prefix); + tuple.PackTo(ref writer); return writer.Output.ToSlice(); } + /// Pack an array of N-tuples, all sharing the same buffer + /// Commong prefix added to all the tuples + /// Sequence of N-tuples to pack + /// Array containing the buffer segment of each packed tuple + /// BatchPack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] + [NotNull] + public static Slice[] Pack(Slice prefix, params IFdbTuple[] tuples) + { + if (tuples == null) throw new ArgumentNullException("tuples"); + + // 
pre-allocate by supposing that each tuple will take at least 16 bytes + var writer = new TupleWriter(tuples.Length * (16 + prefix.Count)); + var next = new List(tuples.Length); + + //TODO: use multiple buffers if item count is huge ? + + foreach (var tuple in tuples) + { + writer.Output.WriteBytes(prefix); + tuple.PackTo(ref writer); + next.Add(writer.Output.Position); + } + + return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); + } + + /// Pack a sequence of N-tuples, all sharing the same buffer + /// Commong prefix added to all the tuples + /// Sequence of N-tuples to pack + /// Array containing the buffer segment of each packed tuple + /// BatchPack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] + [NotNull] + public static Slice[] Pack(Slice prefix, [NotNull] IEnumerable tuples) + { + if (tuples == null) throw new ArgumentNullException("tuples"); + + // use optimized version for arrays + var array = tuples as IFdbTuple[]; + if (array != null) return Pack(prefix, array); + + var next = new List(); + var writer = new TupleWriter(); + + //TODO: use multiple buffers if item count is huge ? + + foreach (var tuple in tuples) + { + writer.Output.WriteBytes(prefix); + tuple.PackTo(ref writer); + next.Add(writer.Output.Position); + } + + return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); + } + + [NotNull] + public static Slice[] Pack(Slice prefix, [NotNull] TElement[] elements, Func transform) + { + if (elements == null) throw new ArgumentNullException("elements"); + if (transform == null) throw new ArgumentNullException("convert"); + + var next = new List(elements.Length); + var writer = new TupleWriter(); + + //TODO: use multiple buffers if item count is huge ? 
+ + foreach (var element in elements) + { + var tuple = transform(element); + if (tuple == null) + { + next.Add(writer.Output.Position); + } + else + { + writer.Output.WriteBytes(prefix); + tuple.PackTo(ref writer); + next.Add(writer.Output.Position); + } + } + + return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); + } + + [NotNull] + public static Slice[] Pack(Slice prefix, [NotNull] IEnumerable elements, Func transform) + { + if (elements == null) throw new ArgumentNullException("elements"); + if (transform == null) throw new ArgumentNullException("convert"); + + // use optimized version for arrays + var array = elements as TElement[]; + if (array != null) return Pack(prefix, array, transform); + + var next = new List(); + var writer = new TupleWriter(); + + //TODO: use multiple buffers if item count is huge ? + + foreach (var element in elements) + { + var tuple = transform(element); + if (tuple == null) + { + next.Add(writer.Output.Position); + } + else + { + writer.Output.WriteBytes(prefix); + tuple.PackTo(ref writer); + next.Add(writer.Output.Position); + } + } + + return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); + } + + #endregion + + #region Encode + + //REVIEW: EncodeKey/EncodeKeys? Encode/EncodeRange? EncodeValues? EncodeItems? 
+ /// Pack a 1-tuple directly into a slice - public static Slice Pack(T1 item1) + public static Slice EncodeKey(T1 item1) { var writer = new TupleWriter(); FdbTuplePacker.SerializeTo(ref writer, item1); @@ -365,7 +511,7 @@ public static Slice Pack(T1 item1) } /// Pack a 2-tuple directly into a slice - public static Slice Pack(T1 item1, T2 item2) + public static Slice EncodeKey(T1 item1, T2 item2) { var writer = new TupleWriter(); FdbTuplePacker.SerializeTo(ref writer, item1); @@ -374,7 +520,7 @@ public static Slice Pack(T1 item1, T2 item2) } /// Pack a 3-tuple directly into a slice - public static Slice Pack(T1 item1, T2 item2, T3 item3) + public static Slice EncodeKey(T1 item1, T2 item2, T3 item3) { var writer = new TupleWriter(); FdbTuplePacker.SerializeTo(ref writer, item1); @@ -384,7 +530,7 @@ public static Slice Pack(T1 item1, T2 item2, T3 item3) } /// Pack a 4-tuple directly into a slice - public static Slice Pack(T1 item1, T2 item2, T3 item3, T4 item4) + public static Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4) { var writer = new TupleWriter(); FdbTuplePacker.SerializeTo(ref writer, item1); @@ -395,7 +541,7 @@ public static Slice Pack(T1 item1, T2 item2, T3 item3, T4 item4) } /// Pack a N-tuple directory into a slice - public static Slice Pack([NotNull] params object[] items) + public static Slice EncodeKey([NotNull] params object[] items) { if (items == null) throw new ArgumentNullException("items"); if (items.Length == 0) return Slice.Empty; @@ -408,20 +554,26 @@ public static Slice Pack([NotNull] params object[] items) return writer.Output.ToSlice(); } + [NotNull] + public static Slice[] EncodeKeys([NotNull] IEnumerable keys) + { + return EncodePrefixedKeys(Slice.Nil, keys); + } + /// Merge a sequence of keys with a same prefix, all sharing the same buffer /// Type of the keys /// Prefix shared by all keys /// Sequence of keys to pack /// Array of slices (for all keys) that share the same underlying buffer [NotNull] - public static Slice[] 
PackRangeWithPrefix(Slice prefix, [NotNull] IEnumerable keys) + public static Slice[] EncodePrefixedKeys(Slice prefix, [NotNull] IEnumerable keys) { if (prefix == null) throw new ArgumentNullException("prefix"); if (keys == null) throw new ArgumentNullException("keys"); // use optimized version for arrays var array = keys as T[]; - if (array != null) return PackRangeWithPrefix(prefix, array); + if (array != null) return EncodePrefixedKeys(prefix, array); var next = new List(); var writer = new TupleWriter(); @@ -439,13 +591,19 @@ public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] IEnumerable return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); } + [NotNull] + public static Slice[] EncodeKeys([NotNull] params T[] keys) + { + return EncodePrefixedKeys(Slice.Nil, keys); + } + /// Merge an array of keys with a same prefix, all sharing the same buffer /// Type of the keys /// Prefix shared by all keys /// Sequence of keys to pack /// Array of slices (for all keys) that share the same underlying buffer [NotNull] - public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] params T[] keys) + public static Slice[] EncodePrefixedKeys(Slice prefix, [NotNull] params T[] keys) { if (keys == null) throw new ArgumentNullException("keys"); @@ -472,9 +630,10 @@ public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] params T[] /// Sequence of elements to pack /// Lambda that extract the key from each element /// Array of slices (for all keys) that share the same underlying buffer - public static Slice[] PackRange([NotNull] TElement[] elements, [NotNull] Func selector) + [NotNull] + public static Slice[] EncodeKeys([NotNull] TElement[] elements, [NotNull] Func selector) { - return PackRangeWithPrefix(Slice.Empty, elements, selector); + return EncodePrefixedKeys(Slice.Empty, elements, selector); } /// Merge an array of elements with a same prefix, all sharing the same buffer @@ -484,7 +643,8 @@ public static Slice[] PackRange([NotNull] 
TElement[] elements, [ /// Sequence of elements to pack /// Lambda that extract the key from each element /// Array of slices (for all keys) that share the same underlying buffer - public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] TElement[] elements, [NotNull] Func selector) + [NotNull] + public static Slice[] EncodePrefixedKeys(Slice prefix, [NotNull] TElement[] elements, [NotNull] Func selector) { if (elements == null) throw new ArgumentNullException("elements"); if (selector == null) throw new ArgumentNullException("selector"); @@ -506,92 +666,17 @@ public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); } - /// Pack a sequence of N-tuples, all sharing the same buffer - /// Sequence of N-tuples to pack - /// Array containing the buffer segment of each packed tuple - /// BatchPack([ ("Foo", 1), ("Foo", 2) ]) => [ "\x02Foo\x00\x15\x01", "\x02Foo\x00\x15\x02" ] - [NotNull] - public static Slice[] PackRange([NotNull] IEnumerable tuples) - { - return PackRangeWithPrefix(Slice.Nil, tuples); - } - - /// Pack a sequence of N-tuples, all sharing the same buffer - /// Commong prefix added to all the tuples - /// Sequence of N-tuples to pack - /// Array containing the buffer segment of each packed tuple - /// BatchPack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] - [NotNull] - public static Slice[] PackRangeWithPrefix(Slice prefix, [NotNull] IEnumerable tuples) - { - if (tuples == null) throw new ArgumentNullException("tuples"); - - // use optimized version for arrays - var array = tuples as IFdbTuple[]; - if (array != null) return PackRangeWithPrefix(prefix, array); - - var next = new List(); - var writer = new TupleWriter(); - - //TODO: use multiple buffers if item count is huge ? 
- - foreach(var tuple in tuples) - { - writer.Output.WriteBytes(prefix); - tuple.PackTo(ref writer); - next.Add(writer.Output.Position); - } - - return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); - } - - /// Pack an array of N-tuples, all sharing the same buffer - /// Sequence of N-tuples to pack - /// Array containing the buffer segment of each packed tuple - /// BatchPack([ ("Foo", 1), ("Foo", 2) ]) => [ "\x02Foo\x00\x15\x01", "\x02Foo\x00\x15\x02" ] - [NotNull] - public static Slice[] PackRange([NotNull] IFdbTuple[] tuples) - { - return PackRangeWithPrefix(Slice.Nil, tuples); - } - - /// Pack an array of N-tuples, all sharing the same buffer - /// Commong prefix added to all the tuples - /// Sequence of N-tuples to pack - /// Array containing the buffer segment of each packed tuple - /// BatchPack("abc", [ ("Foo", 1), ("Foo", 2) ]) => [ "abc\x02Foo\x00\x15\x01", "abc\x02Foo\x00\x15\x02" ] - [NotNull] - public static Slice[] PackRangeWithPrefix(Slice prefix, params IFdbTuple[] tuples) - { - if (tuples == null) throw new ArgumentNullException("tuples"); - - // pre-allocate by supposing that each tuple will take at least 16 bytes - var writer = new TupleWriter(tuples.Length * (16 + prefix.Count)); - var next = new List(tuples.Length); - - //TODO: use multiple buffers if item count is huge ? 
- - foreach (var tuple in tuples) - { - writer.Output.WriteBytes(prefix); - tuple.PackTo(ref writer); - next.Add(writer.Output.Position); - } - - return FdbKey.SplitIntoSegments(writer.Output.Buffer, 0, next); - } - /// Pack a sequence of keys with a same prefix, all sharing the same buffer /// Type of the keys /// Prefix shared by all keys /// Sequence of keys to pack /// Array of slices (for all keys) that share the same underlying buffer [NotNull] - public static Slice[] PackRangeWithPrefix([NotNull] IFdbTuple prefix, [NotNull] IEnumerable keys) + public static Slice[] EncodePrefixedKeys([NotNull] IFdbTuple prefix, [NotNull] IEnumerable keys) { if (prefix == null) throw new ArgumentNullException("prefix"); - return PackRangeWithPrefix(prefix.ToSlice(), keys); + return EncodePrefixedKeys(prefix.ToSlice(), keys); } /// Pack a sequence of keys with a same prefix, all sharing the same buffer @@ -600,11 +685,11 @@ public static Slice[] PackRangeWithPrefix([NotNull] IFdbTuple prefix, [NotNul /// Sequence of keys to pack /// Array of slices (for all keys) that share the same underlying buffer [NotNull] - public static Slice[] PackRangeWithPrefix([NotNull] IFdbTuple prefix, [NotNull] params T[] keys) + public static Slice[] EncodePrefixedKeys([NotNull] IFdbTuple prefix, [NotNull] params T[] keys) { if (prefix == null) throw new ArgumentNullException("prefix"); - return PackRangeWithPrefix(prefix.ToSlice(), keys); + return EncodePrefixedKeys(prefix.ToSlice(), keys); } #endregion @@ -622,14 +707,14 @@ public static IFdbTuple Unpack(Slice packedKey) return FdbTuplePackers.Unpack(packedKey, false); } - /// Unpack a tuple from a serialized key, after removing the prefix + /// Unpack a tuple from a serialized key, after removing a required prefix /// Packed key /// Expected prefix of the key (that is not part of the tuple) /// Unpacked tuple (minus the prefix) or an exception if the key is outside the prefix /// If prefix is null /// If the unpacked key is outside the 
specified prefix [NotNull] - public static IFdbTuple UnpackWithoutPrefix(Slice packedKey, Slice prefix) + public static IFdbTuple Unpack(Slice packedKey, Slice prefix) { // ensure that the key starts with the prefix if (!packedKey.StartsWith(prefix)) @@ -647,7 +732,7 @@ public static IFdbTuple UnpackWithoutPrefix(Slice packedKey, Slice prefix) /// Type of the first value in the decoded tuple /// Slice that should be entirely parsable as a tuple /// Decoded value of the first item in the tuple - public static T UnpackFirst(Slice packedKey) + public static T DecodeFirst(Slice packedKey) { if (packedKey.IsNullOrEmpty) throw new InvalidOperationException("Cannot unpack the first element of an empty tuple"); @@ -662,7 +747,7 @@ public static T UnpackFirst(Slice packedKey) /// Slice composed of followed by a packed tuple /// Expected prefix of the key (that is not part of the tuple) /// Decoded value of the first item in the tuple - public static T UnpackFirstWithoutPrefix(Slice packedKey, Slice prefix) + public static T DecodePrefixedFirst(Slice packedKey, Slice prefix) { // ensure that the key starts with the prefix if (!packedKey.StartsWith(prefix)) @@ -676,14 +761,14 @@ public static T UnpackFirstWithoutPrefix(Slice packedKey, Slice prefix) } // unpack the key, minus the prefix - return UnpackFirst(packedKey.Substring(prefix.Count)); + return DecodeFirst(packedKey.Substring(prefix.Count)); } /// Unpack a tuple and only return its last element /// Type of the last value in the decoded tuple /// Slice that should be entirely parsable as a tuple /// Decoded value of the last item in the tuple - public static T UnpackLast(Slice packedKey) + public static T DecodeLast(Slice packedKey) { if (packedKey.IsNullOrEmpty) throw new InvalidOperationException("Cannot unpack the last element of an empty tuple"); @@ -698,7 +783,7 @@ public static T UnpackLast(Slice packedKey) /// Slice composed of followed by a packed tuple /// Expected prefix of the key (that is not part of the 
tuple) /// Decoded value of the last item in the tuple - public static T UnpackLastWithoutPrefix(Slice packedKey, Slice prefix) + public static T DecodePrefixedLast(Slice packedKey, Slice prefix) { // ensure that the key starts with the prefix if (!packedKey.StartsWith(prefix)) @@ -712,14 +797,14 @@ public static T UnpackLastWithoutPrefix(Slice packedKey, Slice prefix) } // unpack the key, minus the prefix - return UnpackLast(packedKey.Substring(prefix.Count)); + return DecodeLast(packedKey.Substring(prefix.Count)); } /// Unpack the value of a singletion tuple /// Type of the single value in the decoded tuple /// Slice that should contain the packed representation of a tuple with a single element /// Decoded value of the only item in the tuple. Throws an exception if the tuple is empty of has more than one element. - public static T UnpackSingle(Slice packedKey) + public static T DecodeKey(Slice packedKey) { if (packedKey.IsNullOrEmpty) throw new InvalidOperationException("Cannot unpack a single value out of an empty tuple"); @@ -734,13 +819,13 @@ public static T UnpackSingle(Slice packedKey) /// Slice composed of followed by a packed singleton tuple /// Expected prefix of the key (that is not part of the tuple) /// Decoded value of the only item in the tuple. Throws an exception if the tuple is empty of has more than one element. 
- public static T UnpackSingleWithoutPrefix(Slice packedKey, Slice prefix) + public static T DecodePrefixedKey(Slice packedKey, Slice prefix) { // ensure that the key starts with the prefix if (!packedKey.StartsWith(prefix)) throw new ArgumentOutOfRangeException("packedKey", "The specifed packed tuple does not start with the expected prefix"); // unpack the key, minus the prefix - return UnpackSingle(packedKey.Substring(prefix.Count)); + return DecodeKey(packedKey.Substring(prefix.Count)); } /// Unpack the next item in the tuple, and advance the cursor @@ -748,7 +833,7 @@ public static T UnpackSingleWithoutPrefix(Slice packedKey, Slice prefix) /// Reader positionned at the start of the next item to read /// If decoding succeedsd, receives the decoded value. /// True if the decoded succeeded (and receives the decoded value). False if the tuple has reached the end. - public static bool UnpackNext(ref TupleReader input, out T value) + public static bool DecodeNext(ref TupleReader input, out T value) { if (!input.Input.HasMore) { @@ -767,29 +852,8 @@ public static bool UnpackNext(ref TupleReader input, out T value) //note: they are equivalent to the Pack<...>() methods, they only take a binary prefix - /// Efficiently concatenate a prefix with the packed representation of a tuple - public static Slice PackWithPrefix(Slice prefix, IFdbTuple tuple) - { - if (tuple == null || tuple.Count == 0) return prefix; - - var writer = new TupleWriter(); - writer.Output.WriteBytes(prefix); - tuple.PackTo(ref writer); - return writer.Output.ToSlice(); - } - - /// Efficiently concatenate a prefix with the packed representation of a 1-tuple - /// This is the non-generic equivalent of - public static Slice PackBoxedWithPrefix(Slice prefix, object value) - { - var writer = new TupleWriter(); - writer.Output.WriteBytes(prefix); - FdbTuplePackers.SerializeObjectTo(ref writer, value); - return writer.Output.ToSlice(); - } - /// Efficiently concatenate a prefix with the packed representation 
of a 1-tuple - public static Slice PackWithPrefix(Slice prefix, T value) + public static Slice EncodePrefixedKey(Slice prefix, T value) { var writer = new TupleWriter(); writer.Output.WriteBytes(prefix); @@ -798,7 +862,7 @@ public static Slice PackWithPrefix(Slice prefix, T value) } /// Efficiently concatenate a prefix with the packed representation of a 2-tuple - public static Slice PackWithPrefix(Slice prefix, T1 value1, T2 value2) + public static Slice EncodePrefixedKey(Slice prefix, T1 value1, T2 value2) { var writer = new TupleWriter(); writer.Output.WriteBytes(prefix); @@ -808,7 +872,7 @@ public static Slice PackWithPrefix(Slice prefix, T1 value1, T2 value2) } /// Efficiently concatenate a prefix with the packed representation of a 3-tuple - public static Slice PackWithPrefix(Slice prefix, T1 value1, T2 value2, T3 value3) + public static Slice EncodePrefixedKey(Slice prefix, T1 value1, T2 value2, T3 value3) { var writer = new TupleWriter(); writer.Output.WriteBytes(prefix); @@ -819,7 +883,7 @@ public static Slice PackWithPrefix(Slice prefix, T1 value1, T2 value } /// Efficiently concatenate a prefix with the packed representation of a 4-tuple - public static Slice PackWithPrefix(Slice prefix, T1 value1, T2 value2, T3 value3, T4 value4) + public static Slice EncodePrefixedKey(Slice prefix, T1 value1, T2 value2, T3 value3, T4 value4) { var writer = new TupleWriter(); writer.Output.WriteBytes(prefix); @@ -830,6 +894,20 @@ public static Slice PackWithPrefix(Slice prefix, T1 value1, T2 v return writer.Output.ToSlice(); } + /// Efficiently concatenate a prefix with the packed representation of a 4-tuple + public static Slice EncodePrefixedKey(Slice prefix, [NotNull] params object[] values) + { + if (values == null) throw new ArgumentNullException("values"); + + var writer = new TupleWriter(); + writer.Output.WriteBytes(prefix); + foreach(var value in values) + { + FdbTuplePackers.SerializeObjectTo(ref writer, value); + } + return writer.Output.ToSlice(); + } + 
#endregion #region Internal Helpers... diff --git a/FoundationDB.Client/Layers/Tuples/FdbTupleCodec`1.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleCodec`1.cs index 024e6f222..f9bf577b1 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleCodec`1.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleCodec`1.cs @@ -54,7 +54,7 @@ public FdbTupleCodec(T missingValue) public override Slice EncodeOrdered(T value) { - return FdbTuple.Pack(value); + return FdbTuple.EncodeKey(value); } public override void EncodeOrderedSelfTerm(ref SliceWriter output, T value) @@ -67,7 +67,7 @@ public override void EncodeOrderedSelfTerm(ref SliceWriter output, T value) public override T DecodeOrdered(Slice input) { - return FdbTuple.UnpackSingle(input); + return FdbTuple.DecodeKey(input); } public override T DecodeOrderedSelfTerm(ref SliceReader input) @@ -75,7 +75,7 @@ public override T DecodeOrderedSelfTerm(ref SliceReader input) //HACKHACK: we lose the current depth! var reader = new TupleReader(input); T value; - bool res = FdbTuple.UnpackNext(ref reader, out value); + bool res = FdbTuple.DecodeNext(ref reader, out value); input = reader.Input; return res ? 
value : m_missingValue; } diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`1.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`1.cs index 54c7c46d9..c4663aa28 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`1.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`1.cs @@ -128,7 +128,7 @@ System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator() public Slice ToSlice() { - return FdbTuple.Pack(this.Item1); + return FdbTuple.EncodeKey(this.Item1); } Slice IFdbKey.ToFoundationDbKey() diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`2.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`2.cs index fd4399562..2de75ecec 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`2.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`2.cs @@ -146,7 +146,7 @@ System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator() public Slice ToSlice() { - return FdbTuple.Pack(this.Item1, this.Item2); + return FdbTuple.EncodeKey(this.Item1, this.Item2); } Slice IFdbKey.ToFoundationDbKey() diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`3.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`3.cs index b702c8fac..e6c6c9689 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`3.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`3.cs @@ -166,7 +166,7 @@ System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator() public Slice ToSlice() { - return FdbTuple.Pack(this.Item1, this.Item2, this.Item3); + return FdbTuple.EncodeKey(this.Item1, this.Item2, this.Item3); } Slice IFdbKey.ToFoundationDbKey() diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`4.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`4.cs index 1268b4deb..84a87536d 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`4.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`4.cs @@ -167,7 +167,7 @@ System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator() public Slice ToSlice() { - return FdbTuple.Pack(this.Item1, this.Item2, this.Item3, 
this.Item4); + return FdbTuple.EncodeKey(this.Item1, this.Item2, this.Item3, this.Item4); } Slice IFdbKey.ToFoundationDbKey() diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs index dcc08fb15..8dee4aa99 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs @@ -85,7 +85,7 @@ public Slice this[[NotNull] ITupleFormattable item] public Slice Pack([NotNull] IFdbTuple tuple) { if (tuple == null) throw new ArgumentNullException("tuple"); - return FdbTuple.PackWithPrefix(m_subspace.Key, tuple); + return FdbTuple.Pack(m_subspace.Key, tuple); } /// Pack a sequence of tuples, all sharing the same buffer @@ -98,7 +98,7 @@ public Slice[] Pack([NotNull] IEnumerable tuples) { if (tuples == null) throw new ArgumentNullException("tuples"); - return FdbTuple.PackRangeWithPrefix(m_subspace.Key, tuples); + return FdbTuple.Pack(m_subspace.Key, tuples); } /// Pack a sequence of tuples, all sharing the same buffer @@ -133,7 +133,7 @@ public Slice[] Pack([NotNull] IEnumerable items) { if (items == null) throw new ArgumentNullException("items"); - return FdbTuple.PackRangeWithPrefix(m_subspace.Key, items.Select((item) => item.ToTuple())); + return FdbTuple.Pack(m_subspace.Key, items, (item) => item.ToTuple()); } /// Pack a sequence of keys, all sharing the same buffer @@ -215,56 +215,72 @@ public FdbKeyRange ToRange([NotNull] ITupleFormattable item) #region EncodeKey: (T1, T2, ...) 
=> Slice - /// Create a new key by appending a value to the current subspace - /// Type of the value - /// Value that will be appended at the end of the key - /// Key the correspond to the concatenation of the current subspace's prefix and + /// Create a new key by adding a single item to the current subspace + /// Type of the item + /// Item that will be appended at the end of the key + /// Key that is equivalent to adding the packed singleton to the subspace's prefix /// tuple.Pack(x) is equivalent to tuple.Append(x).ToSlice() - public Slice EncodeKey(T key) + /// The key produced can be decoded back into the original value by calling , or a tuple by calling + public Slice EncodeKey(T item) { - return FdbTuple.PackWithPrefix(m_subspace.Key, key); + return FdbTuple.EncodePrefixedKey(m_subspace.Key, item); } - /// Create a new key by appending two values to the current subspace - /// Type of the next to last value - /// Type of the last value - /// Value that will be in the next to last position - /// Value that will be in the last position - /// Key the correspond to the concatenation of the current subspace's prefix, and - /// (...,).Pack(x, y) is equivalent to (...,).Append(x).Append(y).ToSlice() - public Slice EncodeKey(T1 key1, T2 key2) + /// Create a new key by adding two items to the current subspace + /// Type of the first item + /// Type of the second item + /// Item that will be in the next to last position + /// Item that will be in the last position + /// Key that is equivalent to adding the packed pair (, ) to the subspace's prefix + /// {subspace}.EncodeKey(x, y) is much faster way to do {subspace}.Key + FdbTuple.Create(x, y).ToSlice() + /// The key produced can be decoded back into a pair by calling either or + public Slice EncodeKey(T1 item1, T2 item2) { - return FdbTuple.PackWithPrefix(m_subspace.Key, key1, key2); + return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2); } - /// Create a new key by appending three values to the current 
subspace - /// Type of the first value - /// Type of the second value - /// Type of the thrid value - /// Value that will be appended first - /// Value that will be appended second - /// Value that will be appended third - /// Key the correspond to the concatenation of the current subspace's prefix, , and - /// tuple.Pack(x, y, z) is equivalent to tuple.Append(x).Append(y).Append(z).ToSlice() - public Slice EncodeKey(T1 key1, T2 key2, T3 key3) + /// Create a new key by adding three items to the current subspace + /// Type of the first item + /// Type of the second item + /// Type of the third item + /// Item that will be appended first + /// Item that will be appended second + /// Item that will be appended third + /// Key that is equivalent to adding the packed triplet (, , ) to the subspace's prefix + /// {subspace}.EncodeKey(x, y, z) is much faster way to do {subspace}.Key + FdbTuple.Create(x, y, z).ToSlice() + /// The key produced can be decoded back into a triplet by calling either or + public Slice EncodeKey(T1 item1, T2 item2, T3 item3) { - return FdbTuple.PackWithPrefix(m_subspace.Key, key1, key2, key3); + return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3); } - /// Create a new key by appending three values to the current subspace - /// Type of the first value - /// Type of the second value - /// Type of the third value - /// Type of the fourth value - /// Value that will be appended first - /// Value that will be appended second - /// Value that will be appended third - /// Value that will be appended fourth - /// Key the correspond to the concatenation of the current subspace's prefix, , , and - /// tuple.Pack(w, x, y, z) is equivalent to tuple.Append(w).Append(x).Append(y).Append(z).ToSlice() - public Slice EncodeKey(T1 key1, T2 key2, T3 key3, T4 key4) + /// Create a new key by adding three items to the current subspace + /// Type of the first item + /// Type of the second item + /// Type of the third item + /// Type of the fourth 
item + /// Item that will be appended first + /// Item that will be appended second + /// Item that will be appended third + /// Item that will be appended fourth + /// Key that is equivalent to adding the packed tuple quad (, , , ) to the subspace's prefix + /// {subspace}.EncodeKey(w, x, y, z) is a much faster way to do {subspace}.Key + FdbTuple.Create(w, x, y, z).ToSlice() + /// The key produced can be decoded back into a quad by calling either or + public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4) { - return FdbTuple.PackWithPrefix(m_subspace.Key, key1, key2, key3, key4); + return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3, item4); + } + + /// Create a new key by adding multiple items to the current subspace + /// Array of items to add + /// Key that is equivalent to adding the packed tuple created from to the subspace's prefix + /// {subspace}.EncodeKey(object[]) is a much faster way to do {subspace}.Key + FdbTuple.Create(object[]).ToSlice() + /// The key produced can be decoded back into a tuple by calling . + public Slice EncodeKey(params object[] items) + { + //note: this is bad practice, because it encourages people to pass in object[] arrays, + // but there is no point in going all the way to 10 or more items. 
+ return FdbTuple.EncodePrefixedKey(m_subspace.Key, items); } /// Merge a sequence of keys with the subspace's prefix, all sharing the same buffer @@ -274,7 +290,7 @@ public Slice EncodeKey(T1 key1, T2 key2, T3 key3, T4 key4) [NotNull] public Slice[] EncodeKeys([NotNull] IEnumerable keys) { - return FdbTuple.PackRangeWithPrefix(m_subspace.Key, keys); + return FdbTuple.EncodePrefixedKeys(m_subspace.Key, keys); } /// Merge a sequence of keys with the subspace's prefix, all sharing the same buffer @@ -284,7 +300,7 @@ public Slice[] EncodeKeys([NotNull] IEnumerable keys) [NotNull] public Slice[] EncodeKeys([NotNull] T[] keys) { - return FdbTuple.PackRangeWithPrefix(m_subspace.Key, keys); + return FdbTuple.EncodePrefixedKeys(m_subspace.Key, keys); } /// Merge a sequence of elements with the subspace's prefix, all sharing the same buffer @@ -296,7 +312,7 @@ public Slice[] EncodeKeys([NotNull] T[] keys) [NotNull] public Slice[] EncodeKeys([NotNull] TElement[] elements, [NotNull] Func selector) { - return FdbTuple.PackRangeWithPrefix(m_subspace.Key, elements, selector); + return FdbTuple.EncodePrefixedKeys(m_subspace.Key, elements, selector); } #endregion @@ -310,7 +326,7 @@ public Slice[] EncodeKeys([NotNull] TElement[] elements, [NotNul /// new Subspace([FE]).UnpackSingle&lt;int&gt;([FE 02 'H' 'e' 'l' 'l' 'o' 00]) => (string) "Hello" public T DecodeKey(Slice key) { - return FdbTuple.UnpackSingle(m_subspace.ExtractKey(key, boundCheck: true)); + return FdbTuple.DecodeKey(m_subspace.ExtractKey(key, boundCheck: true)); } @@ -353,6 +369,8 @@ public FdbTuple DecodeKey(Slice key) ); } + //note: there is no DecodeKey(slice) => object[] because this would encourage the bad practice of dealing with tuples as object[] arrays !
+ /// Unpack a key into a tuple, and return only the first element /// Expected type of the last element /// Packed version of a key that should fit inside this subspace @@ -360,7 +378,7 @@ public FdbTuple DecodeKey(Slice key) /// new Subspace([FE]).UnpackLast<int>([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => (string) "Hello" public T DecodeFirst(Slice key) { - return FdbTuple.UnpackFirst(m_subspace.ExtractKey(key, boundCheck: true)); + return FdbTuple.DecodeFirst(m_subspace.ExtractKey(key, boundCheck: true)); } /// Unpack a key into a tuple, and return only the last element @@ -370,7 +388,7 @@ public T DecodeFirst(Slice key) /// new Subspace([FE]).UnpackLast<int>([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => (int) 1 public T DecodeLast(Slice key) { - return FdbTuple.UnpackLast(m_subspace.ExtractKey(key, boundCheck: true)); + return FdbTuple.DecodeLast(m_subspace.ExtractKey(key, boundCheck: true)); } /// Unpack an array of key into tuples, and return an array with only the first elements of each tuple @@ -386,7 +404,7 @@ public T[] DecodeKeysFirst([NotNull] Slice[] keys) for (int i = 0; i < keys.Length; i++) { //REVIEW: what should we do if we encounter Slice.Nil keys ?? - values[i] = FdbTuple.UnpackFirst(m_subspace.ExtractKey(keys[i], boundCheck: true)); + values[i] = FdbTuple.DecodeFirst(m_subspace.ExtractKey(keys[i], boundCheck: true)); } return values; } @@ -404,7 +422,7 @@ public T[] DecodeKeysLast([NotNull] Slice[] keys) for (int i = 0; i < keys.Length; i++) { //REVIEW: what should we do if we encounter Slice.Nil keys ?? - values[i] = FdbTuple.UnpackLast(m_subspace.ExtractKey(keys[i], boundCheck: true)); + values[i] = FdbTuple.DecodeLast(m_subspace.ExtractKey(keys[i], boundCheck: true)); } return values; } @@ -422,7 +440,7 @@ public T[] DecodeKeys([NotNull] Slice[] keys) for (int i = 0; i < keys.Length; i++) { //REVIEW: what should we do if we encounter Slice.Nil keys ?? 
- values[i] = FdbTuple.UnpackSingle(m_subspace.ExtractKey(keys[i], boundCheck: true)); + values[i] = FdbTuple.DecodeKey(m_subspace.ExtractKey(keys[i], boundCheck: true)); } return values; } diff --git a/FoundationDB.Layers.Experimental/Documents/FdbHashSetCollection.cs b/FoundationDB.Layers.Experimental/Documents/FdbHashSetCollection.cs index c7b94ec49..7d8d27057 100644 --- a/FoundationDB.Layers.Experimental/Documents/FdbHashSetCollection.cs +++ b/FoundationDB.Layers.Experimental/Documents/FdbHashSetCollection.cs @@ -32,6 +32,7 @@ namespace FoundationDB.Layers.Blobs using FoundationDB.Client.Utils; using FoundationDB.Layers.Tuples; using FoundationDB.Linq; + using JetBrains.Annotations; using System; using System.Collections.Generic; using System.Globalization; @@ -87,7 +88,7 @@ protected virtual string ParseFieldKey(IFdbTuple key) /// Unique identifier of the hashset /// Name of the field to read /// Value of the corresponding field, or Slice.Nil if it the hashset does not exist, or doesn't have a field with this name - public Task GetValueAsync(IFdbReadOnlyTransaction trans, IFdbTuple id, string field) + public Task GetValueAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IFdbTuple id, string field) { if (trans == null) throw new ArgumentNullException("trans"); if (id == null) throw new ArgumentNullException("id"); @@ -100,7 +101,7 @@ public Task GetValueAsync(IFdbReadOnlyTransaction trans, IFdbTuple id, st /// Transaction that will be used for this request /// Unique identifier of the hashset /// Dictionary containing, for all fields, their associated values - public async Task> GetAsync(IFdbReadOnlyTransaction trans, IFdbTuple id) + public async Task> GetAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IFdbTuple id) { if (trans == null) throw new ArgumentNullException("trans"); if (id == null) throw new ArgumentNullException("id"); @@ -125,13 +126,13 @@ await trans /// Unique identifier of the hashset /// List of the fields to read /// 
Dictionary containing the values of the selected fields, or Slice.Empty if that particular field does not exist. - public async Task> GetAsync(IFdbReadOnlyTransaction trans, IFdbTuple id, string[] fields) + public async Task> GetAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IFdbTuple id, [NotNull] params string[] fields) { if (trans == null) throw new ArgumentNullException("trans"); if (id == null) throw new ArgumentNullException("id"); if (fields == null) throw new ArgumentNullException("fields"); - var keys = FdbTuple.PackRangeWithPrefix(GetKey(id), fields); + var keys = FdbTuple.EncodePrefixedKeys(GetKey(id), fields); var values = await trans.GetValuesAsync(keys).ConfigureAwait(false); Contract.Assert(values != null && values.Length == fields.Length); diff --git a/FoundationDB.Samples/Benchmarks/BenchRunner.cs b/FoundationDB.Samples/Benchmarks/BenchRunner.cs index fd5546c08..fafd119d6 100644 --- a/FoundationDB.Samples/Benchmarks/BenchRunner.cs +++ b/FoundationDB.Samples/Benchmarks/BenchRunner.cs @@ -117,7 +117,7 @@ public async Task Run(IFdbDatabase db, TextWriter log, CancellationToken ct) } else { - var foos = FdbTuple.PackRangeWithPrefix(foo, Enumerable.Range(1, this.Value).ToArray()); + var foos = FdbTuple.EncodePrefixedKeys(foo, Enumerable.Range(1, this.Value).ToArray()); await db.ReadAsync(tr => tr.GetValuesAsync(foos), ct); } break; diff --git a/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs b/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs index 7526457fc..cd70c86f3 100644 --- a/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs +++ b/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs @@ -85,7 +85,7 @@ public async Task RunProducer(IFdbDatabase db, CancellationToken ct) while (!ct.IsCancellationRequested) { int k = cnt++; - Slice taskId = FdbTuple.Pack(this.Id.GetHashCode(), k); + Slice taskId = FdbTuple.EncodeKey(this.Id.GetHashCode(), k); var ts = Stopwatch.GetTimestamp(); string msg = "Message #" + k + " from 
producer " + this.Id + " (" + DateTime.UtcNow.ToString("O") + ")"; diff --git a/FoundationDB.Samples/Tutorials/ClassScheduling.cs b/FoundationDB.Samples/Tutorials/ClassScheduling.cs index 0b08a7987..d7f8948f1 100644 --- a/FoundationDB.Samples/Tutorials/ClassScheduling.cs +++ b/FoundationDB.Samples/Tutorials/ClassScheduling.cs @@ -48,7 +48,7 @@ protected Slice AttendsKey(string s, string c) protected FdbKeyRange AttendsKeys(string s) { - return this.Subspace.ToRange(FdbTuple.Pack("attends", s)); + return this.Subspace.ToRange(FdbTuple.EncodeKey("attends", s)); } /// @@ -78,7 +78,7 @@ await db.WriteAsync((tr) => /// public Task> AvailableClasses(IFdbReadOnlyTransaction tr) { - return tr.GetRange(this.Subspace.ToRange(FdbTuple.Pack("class"))) + return tr.GetRange(this.Subspace.ToRange(FdbTuple.EncodeKey("class"))) .Where(kvp => { int _; return Int32.TryParse(kvp.Value.ToAscii(), out _); }) // (step 3) .Select(kvp => this.Subspace.Tuples.DecodeKey(kvp.Key)) .ToListAsync(); diff --git a/FoundationDB.Storage.Memory.Test/Collections/ColaOrderedSetFacts.cs b/FoundationDB.Storage.Memory.Test/Collections/ColaOrderedSetFacts.cs index ab4ca1cc6..dd9208a97 100644 --- a/FoundationDB.Storage.Memory.Test/Collections/ColaOrderedSetFacts.cs +++ b/FoundationDB.Storage.Memory.Test/Collections/ColaOrderedSetFacts.cs @@ -180,7 +180,7 @@ public void Test_Check_Costs() Console.WriteLine("Inserting (" + N + " items)"); for (int i = 0; i < N; i++) { - cola.Add(FdbTuple.Pack(i << 1)); + cola.Add(FdbTuple.EncodeKey(i << 1)); } Console.WriteLine("> " + cmp.Count + " cmps (" + ((double)cmp.Count / N) + " / insert)"); @@ -191,7 +191,7 @@ public void Test_Check_Costs() int n = 0; for (int i = 0; i < (N << 1); i++) { - if (cola.Contains(FdbTuple.Pack(i))) ++n; + if (cola.Contains(FdbTuple.EncodeKey(i))) ++n; } Assert.That(n, Is.EqualTo(N)); Console.WriteLine("> " + cmp.Count + " cmps (" + ((double)cmp.Count / (N << 1)) + " / lookup)"); @@ -203,7 +203,7 @@ public void Test_Check_Costs() 
Console.WriteLine("Tail scan (" + tail + " lookups)"); for (int i = 0; i < tail; i++) { - if (cola.Contains(FdbTuple.Pack(offset + i))) ++n; + if (cola.Contains(FdbTuple.EncodeKey(offset + i))) ++n; } Console.WriteLine("> " + cmp.Count + " cmps (" + ((double)cmp.Count / tail) + " / lookup)"); @@ -212,7 +212,7 @@ public void Test_Check_Costs() int p = 0; foreach(var x in cola) { - Assert.That(FdbTuple.UnpackSingle(x), Is.EqualTo(p << 1)); + Assert.That(FdbTuple.DecodeKey(x), Is.EqualTo(p << 1)); ++p; } Assert.That(p, Is.EqualTo(N)); diff --git a/FoundationDB.Tests.Sandbox/Program.cs b/FoundationDB.Tests.Sandbox/Program.cs index 35c0da673..a46147e0c 100644 --- a/FoundationDB.Tests.Sandbox/Program.cs +++ b/FoundationDB.Tests.Sandbox/Program.cs @@ -266,8 +266,8 @@ private static async Task HelloWorld(CancellationToken ct) // Writes some data in to the database using (var tr = db.BeginTransaction(ct)) { - tr.Set(FdbTuple.Pack("Test", 123), Slice.FromString("Hello World!")); - tr.Set(FdbTuple.Pack("Test", 456), Slice.FromInt64(DateTime.UtcNow.Ticks)); + tr.Set(FdbTuple.EncodeKey("Test", 123), Slice.FromString("Hello World!")); + tr.Set(FdbTuple.EncodeKey("Test", 456), Slice.FromInt64(DateTime.UtcNow.Ticks)); } } diff --git a/FoundationDB.Tests/DatabaseFacts.cs b/FoundationDB.Tests/DatabaseFacts.cs index e62eec84c..9427762dc 100644 --- a/FoundationDB.Tests/DatabaseFacts.cs +++ b/FoundationDB.Tests/DatabaseFacts.cs @@ -257,10 +257,10 @@ public async Task Test_Can_Open_Database_With_Non_Empty_GlobalSpace() var subspace = db.Partition.By("hello"); Assert.That(subspace.Key.ToString(), Is.EqualTo("<02>test<00><02>hello<00>")); - // keys inside the global space are invlaid - Assert.That(db.IsKeyValid(FdbTuple.Pack("test", 123)), Is.True); + // keys inside the global space are valid + Assert.That(db.IsKeyValid(FdbTuple.EncodeKey("test", 123)), Is.True); - // keys outside the global space are invlaid + // keys outside the global space are invalid 
Assert.That(db.IsKeyValid(Slice.Create(new byte[] { 42 })), Is.False); } @@ -274,10 +274,10 @@ public async Task Test_Can_Open_Database_With_Non_Empty_GlobalSpace() var subspace = db.Partition.By("hello"); Assert.That(subspace.Key.ToString(), Is.EqualTo("*<00>Z<02>hello<00>")); - // keys inside the global space are invlaid + // keys inside the global space are valid Assert.That(db.IsKeyValid(Slice.Unescape("*<00>Z123")), Is.True); - // keys outside the global space are invlaid + // keys outside the global space are invalid Assert.That(db.IsKeyValid(Slice.Create(new byte[] { 123 })), Is.False); Assert.That(db.IsKeyValid(Slice.Unescape("*")), Is.False); diff --git a/FoundationDB.Tests/Encoders/EncoderFacts.cs b/FoundationDB.Tests/Encoders/EncoderFacts.cs index d2bf8040f..66cd7a218 100644 --- a/FoundationDB.Tests/Encoders/EncoderFacts.cs +++ b/FoundationDB.Tests/Encoders/EncoderFacts.cs @@ -115,7 +115,7 @@ public void Test_Tuple_Composite_Encoder() // note: EncodeKey(...) is just a shortcurt for packing all items in a tuple, and EncodeComposite(..., count = 3) var data = encoder.EncodeKey(x, y, z); - Assert.That(data, Is.EqualTo(FdbTuple.Pack(x, y, z))); + Assert.That(data, Is.EqualTo(FdbTuple.EncodeKey(x, y, z))); var items = encoder.DecodeKey(data); Assert.That(items.Item1, Is.EqualTo(x)); @@ -125,15 +125,15 @@ public void Test_Tuple_Composite_Encoder() // partial key encoding data = encoder.EncodeComposite(items, 2); - Assert.That(data, Is.EqualTo(FdbTuple.Pack(x, y))); - items = encoder.DecodeComposite(FdbTuple.Pack(x, y), 2); + Assert.That(data, Is.EqualTo(FdbTuple.EncodeKey(x, y))); + items = encoder.DecodeComposite(FdbTuple.EncodeKey(x, y), 2); Assert.That(items.Item1, Is.EqualTo(x)); Assert.That(items.Item2, Is.EqualTo(y)); Assert.That(items.Item3, Is.EqualTo(default(Guid))); data = encoder.EncodeComposite(items, 1); - Assert.That(data, Is.EqualTo(FdbTuple.Pack(x))); - items = encoder.DecodeComposite(FdbTuple.Pack(x), 1); + Assert.That(data, 
Is.EqualTo(FdbTuple.EncodeKey(x))); + items = encoder.DecodeComposite(FdbTuple.EncodeKey(x), 1); Assert.That(items.Item1, Is.EqualTo(x)); Assert.That(items.Item2, Is.EqualTo(default(long))); Assert.That(items.Item3, Is.EqualTo(default(Guid))); diff --git a/FoundationDB.Tests/Encoders/TypeCodecFacts.cs b/FoundationDB.Tests/Encoders/TypeCodecFacts.cs index 1ea484619..77f53c905 100644 --- a/FoundationDB.Tests/Encoders/TypeCodecFacts.cs +++ b/FoundationDB.Tests/Encoders/TypeCodecFacts.cs @@ -48,13 +48,13 @@ public void Test_Simple_Integer_Codec() var codec = FdbTupleCodec.Default; Assert.That(codec, Is.Not.Null); - Assert.That(codec.EncodeOrdered(0), Is.EqualTo(FdbTuple.Pack(0))); - Assert.That(codec.EncodeOrdered(123), Is.EqualTo(FdbTuple.Pack(123))); - Assert.That(codec.EncodeOrdered(123456), Is.EqualTo(FdbTuple.Pack(123456))); + Assert.That(codec.EncodeOrdered(0), Is.EqualTo(FdbTuple.EncodeKey(0))); + Assert.That(codec.EncodeOrdered(123), Is.EqualTo(FdbTuple.EncodeKey(123))); + Assert.That(codec.EncodeOrdered(123456), Is.EqualTo(FdbTuple.EncodeKey(123456))); - Assert.That(codec.DecodeOrdered(FdbTuple.Pack(0)), Is.EqualTo(0)); - Assert.That(codec.DecodeOrdered(FdbTuple.Pack(123)), Is.EqualTo(123)); - Assert.That(codec.DecodeOrdered(FdbTuple.Pack(123456)), Is.EqualTo(123456)); + Assert.That(codec.DecodeOrdered(FdbTuple.EncodeKey(0)), Is.EqualTo(0)); + Assert.That(codec.DecodeOrdered(FdbTuple.EncodeKey(123)), Is.EqualTo(123)); + Assert.That(codec.DecodeOrdered(FdbTuple.EncodeKey(123456)), Is.EqualTo(123456)); } [Test] @@ -63,13 +63,13 @@ public void Test_Simple_String_Codec() var codec = FdbTupleCodec.Default; Assert.That(codec, Is.Not.Null); - Assert.That(codec.EncodeOrdered("héllø Wörld"), Is.EqualTo(FdbTuple.Pack("héllø Wörld"))); - Assert.That(codec.EncodeOrdered(String.Empty), Is.EqualTo(FdbTuple.Pack(""))); - Assert.That(codec.EncodeOrdered(null), Is.EqualTo(FdbTuple.Pack(default(string)))); + Assert.That(codec.EncodeOrdered("héllø Wörld"), 
Is.EqualTo(FdbTuple.EncodeKey("héllø Wörld"))); + Assert.That(codec.EncodeOrdered(String.Empty), Is.EqualTo(FdbTuple.EncodeKey(""))); + Assert.That(codec.EncodeOrdered(null), Is.EqualTo(FdbTuple.EncodeKey(default(string)))); - Assert.That(codec.DecodeOrdered(FdbTuple.Pack("héllø Wörld")), Is.EqualTo("héllø Wörld")); - Assert.That(codec.DecodeOrdered(FdbTuple.Pack(String.Empty)), Is.EqualTo("")); - Assert.That(codec.DecodeOrdered(FdbTuple.Pack(default(string))), Is.Null); + Assert.That(codec.DecodeOrdered(FdbTuple.EncodeKey("héllø Wörld")), Is.EqualTo("héllø Wörld")); + Assert.That(codec.DecodeOrdered(FdbTuple.EncodeKey(String.Empty)), Is.EqualTo("")); + Assert.That(codec.DecodeOrdered(FdbTuple.EncodeKey(default(string))), Is.Null); } [Test] @@ -90,7 +90,7 @@ public void Test_Simple_SelfTerms_Codecs() second.EncodeOrderedSelfTerm(ref writer, y); third.EncodeOrderedSelfTerm(ref writer, z); var data = writer.ToSlice(); - Assert.That(data, Is.EqualTo(FdbTuple.Pack(x, y, z))); + Assert.That(data, Is.EqualTo(FdbTuple.EncodeKey(x, y, z))); var reader = new SliceReader(data); Assert.That(first.DecodeOrderedSelfTerm(ref reader), Is.EqualTo(x)); diff --git a/FoundationDB.Tests/FdbTest.cs b/FoundationDB.Tests/FdbTest.cs index d2963e0a9..c8aca0b78 100644 --- a/FoundationDB.Tests/FdbTest.cs +++ b/FoundationDB.Tests/FdbTest.cs @@ -149,34 +149,47 @@ protected async Task DeleteSubspace(IFdbDatabase db, IFdbSubspace subspace) // These methods are just there to help with the problem of culture-aware string formatting + [DebuggerStepThrough] + protected static void Log(string text) + { + Console.WriteLine(text); + } + [DebuggerStepThrough] protected static void Log() { - Console.WriteLine(); + Log(String.Empty); } [DebuggerStepThrough] - protected static void Log(string text) + protected static void Log(object item) { - Console.WriteLine(text); + if (item == null) + { + Log("null"); + } + else + { + Log(String.Format(CultureInfo.InvariantCulture, "[{0}] {1}", item.GetType().Name, 
item)); + } } [DebuggerStepThrough] protected static void Log(string format, object arg0) { - Console.WriteLine(String.Format(CultureInfo.InvariantCulture, format, arg0)); + Log(String.Format(CultureInfo.InvariantCulture, format, arg0)); } [DebuggerStepThrough] protected static void Log(string format, object arg0, object arg1) { - Console.WriteLine(String.Format(CultureInfo.InvariantCulture, format, arg0, arg1)); + Log(String.Format(CultureInfo.InvariantCulture, format, arg0, arg1)); } [DebuggerStepThrough] protected static void Log(string format, params object[] args) { - Console.WriteLine(String.Format(CultureInfo.InvariantCulture, format, args)); + Log(String.Format(CultureInfo.InvariantCulture, format, args)); } #endregion diff --git a/FoundationDB.Tests/KeyFacts.cs b/FoundationDB.Tests/KeyFacts.cs index ec496d8ad..58b0c3029 100644 --- a/FoundationDB.Tests/KeyFacts.cs +++ b/FoundationDB.Tests/KeyFacts.cs @@ -255,7 +255,7 @@ public void Test_FdbKeyRange_Test() Assert.That(range.Test(Slice.FromAscii("Z\x00"), endIncluded: true), Is.EqualTo(AFTER)); Assert.That(range.Test(Slice.FromAscii("\xFF"), endIncluded: true), Is.EqualTo(AFTER)); - range = FdbKeyRange.Create(FdbTuple.Pack("A"), FdbTuple.Pack("Z")); + range = FdbKeyRange.Create(FdbTuple.EncodeKey("A"), FdbTuple.EncodeKey("Z")); Assert.That(range.Test(FdbTuple.Create("@")), Is.EqualTo((BEFORE))); Assert.That(range.Test(FdbTuple.Create("A")), Is.EqualTo((INSIDE))); Assert.That(range.Test(FdbTuple.Create("Z")), Is.EqualTo((AFTER))); @@ -344,20 +344,20 @@ public void Test_FdbKey_PrettyPrint() // tuples should be decoded properly - Assert.That(FdbKey.Dump(FdbTuple.Pack(123)), Is.EqualTo("(123,)"), "Singleton tuples should end with a ','"); - Assert.That(FdbKey.Dump(FdbTuple.Pack(Slice.FromAscii("hello"))), Is.EqualTo("('hello',)"), "ASCII strings should use single quotes"); - Assert.That(FdbKey.Dump(FdbTuple.Pack("héllø")), Is.EqualTo("(\"héllø\",)"), "Unicode strings should use double quotes"); - 
Assert.That(FdbKey.Dump(FdbTuple.Pack(Slice.Create(new byte[] { 1, 2, 3 }))), Is.EqualTo("(<01 02 03>,)")); - Assert.That(FdbKey.Dump(FdbTuple.Pack(123, 456)), Is.EqualTo("(123, 456)"), "Elements should be separated with a space, and not end up with ','"); - Assert.That(FdbKey.Dump(FdbTuple.Pack(true, false, default(object))), Is.EqualTo("(1, 0, null)"), "Booleans should be displayed as numbers, and null should be in lowercase"); //note: even though it's tempting to using Python's "Nil", it's not very ".NETty" - Assert.That(FdbKey.Dump(FdbTuple.Pack(1.0d, Math.PI, Math.E)), Is.EqualTo("(1, 3.1415926535897931, 2.7182818284590451)"), "Doubles should used dot and have full precision (17 digits)"); - Assert.That(FdbKey.Dump(FdbTuple.Pack(1.0f, (float)Math.PI, (float)Math.E)), Is.EqualTo("(1, 3.14159274, 2.71828175)"), "Singles should used dot and have full precision (10 digits)"); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(123)), Is.EqualTo("(123,)"), "Singleton tuples should end with a ','"); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(Slice.FromAscii("hello"))), Is.EqualTo("('hello',)"), "ASCII strings should use single quotes"); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey("héllø")), Is.EqualTo("(\"héllø\",)"), "Unicode strings should use double quotes"); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(Slice.Create(new byte[] { 1, 2, 3 }))), Is.EqualTo("(<01 02 03>,)")); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(123, 456)), Is.EqualTo("(123, 456)"), "Elements should be separated with a space, and not end up with ','"); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(true, false, default(object))), Is.EqualTo("(1, 0, null)"), "Booleans should be displayed as numbers, and null should be in lowercase"); //note: even though it's tempting to using Python's "Nil", it's not very ".NETty" + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(1.0d, Math.PI, Math.E)), Is.EqualTo("(1, 3.1415926535897931, 2.7182818284590451)"), "Doubles should used dot and have full precision (17 
digits)"); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(1.0f, (float)Math.PI, (float)Math.E)), Is.EqualTo("(1, 3.14159274, 2.71828175)"), "Singles should used dot and have full precision (10 digits)"); var guid = Guid.NewGuid(); - Assert.That(FdbKey.Dump(FdbTuple.Pack(guid)), Is.EqualTo(String.Format("({0},)", guid.ToString("D"))), "GUIDs should be displayed as a string literal, without quotes"); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(guid)), Is.EqualTo(String.Format("({0},)", guid.ToString("D"))), "GUIDs should be displayed as a string literal, without quotes"); var uuid128 = Uuid128.NewUuid(); - Assert.That(FdbKey.Dump(FdbTuple.Pack(uuid128)), Is.EqualTo(String.Format("({0},)", uuid128.ToString("D"))), "Uuid128s should be displayed as a string literal, without quotes"); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(uuid128)), Is.EqualTo(String.Format("({0},)", uuid128.ToString("D"))), "Uuid128s should be displayed as a string literal, without quotes"); var uuid64 = Uuid64.NewUuid(); - Assert.That(FdbKey.Dump(FdbTuple.Pack(uuid64)), Is.EqualTo(String.Format("({0},)", uuid64.ToString("D"))), "Uuid64s should be displayed as a string literal, without quotes"); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(uuid64)), Is.EqualTo(String.Format("({0},)", uuid64.ToString("D"))), "Uuid64s should be displayed as a string literal, without quotes"); // ranges should be decoded when possible var key = FdbTuple.ToRange(FdbTuple.Create("hello")); @@ -365,12 +365,12 @@ public void Test_FdbKey_PrettyPrint() Assert.That(FdbKey.PrettyPrint(key.Begin, FdbKey.PrettyPrintMode.Begin), Is.EqualTo("(\"hello\",).<00>")); Assert.That(FdbKey.PrettyPrint(key.End, FdbKey.PrettyPrintMode.End), Is.EqualTo("(\"hello\",).")); - key = FdbKeyRange.StartsWith(FdbTuple.Pack("hello")); + key = FdbKeyRange.StartsWith(FdbTuple.EncodeKey("hello")); // "<02>hello<00>" .. 
"<02>hello<01>" Assert.That(FdbKey.PrettyPrint(key.Begin, FdbKey.PrettyPrintMode.Begin), Is.EqualTo("(\"hello\",)")); Assert.That(FdbKey.PrettyPrint(key.End, FdbKey.PrettyPrintMode.End), Is.EqualTo("(\"hello\",) + 1")); - var t = FdbTuple.Pack(123); + var t = FdbTuple.EncodeKey(123); Assert.That(FdbKey.PrettyPrint(t, FdbKey.PrettyPrintMode.Single), Is.EqualTo("(123,)")); Assert.That(FdbKey.PrettyPrint(FdbTuple.ToRange(t).Begin, FdbKey.PrettyPrintMode.Begin), Is.EqualTo("(123,).<00>")); Assert.That(FdbKey.PrettyPrint(FdbTuple.ToRange(t).End, FdbKey.PrettyPrintMode.End), Is.EqualTo("(123,).")); diff --git a/FoundationDB.Tests/Layers/DirectoryFacts.cs b/FoundationDB.Tests/Layers/DirectoryFacts.cs index cfbb66778..1a0090c8e 100644 --- a/FoundationDB.Tests/Layers/DirectoryFacts.cs +++ b/FoundationDB.Tests/Layers/DirectoryFacts.cs @@ -903,13 +903,13 @@ public async Task Test_Directory_Partitions_Should_Disallow_Creation_Of_Direct_K shouldFail(() => partition.Tuples.EncodeKeys((IEnumerable)new object[] { 123, "hello", true })); shouldFail(() => partition.Tuples.Unpack(barKey)); - shouldFail(() => partition.Tuples.Unpack(new[] { barKey, barKey + FdbTuple.Pack(123) })); + shouldFail(() => partition.Tuples.Unpack(new[] { barKey, barKey + FdbTuple.EncodeKey(123) })); shouldFail(() => partition.Tuples.DecodeKey(barKey)); shouldFail(() => partition.Tuples.DecodeKeys(new[] { barKey, barKey })); shouldFail(() => partition.Tuples.DecodeLast(barKey)); - shouldFail(() => partition.Tuples.DecodeKeysLast(new[] { barKey, barKey + FdbTuple.Pack(123) })); + shouldFail(() => partition.Tuples.DecodeKeysLast(new[] { barKey, barKey + FdbTuple.EncodeKey(123) })); shouldFail(() => partition.Tuples.DecodeFirst(barKey)); - shouldFail(() => partition.Tuples.DecodeKeysFirst(new[] { barKey, barKey + FdbTuple.Pack(123) })); + shouldFail(() => partition.Tuples.DecodeKeysFirst(new[] { barKey, barKey + FdbTuple.EncodeKey(123) })); shouldFail(() => partition.Tuples.ToTuple()); diff --git 
a/FoundationDB.Tests/Layers/MapFacts.cs b/FoundationDB.Tests/Layers/MapFacts.cs index 517b157c3..5e7210dd9 100644 --- a/FoundationDB.Tests/Layers/MapFacts.cs +++ b/FoundationDB.Tests/Layers/MapFacts.cs @@ -168,7 +168,7 @@ public async Task Test_FdbMap_With_Custom_Key_Encoder() // Encode IPEndPoint as the (IP, Port,) encoded with the Tuple codec // note: there is a much simpler way or creating composite keys, this is just a quick and dirty test! var keyEncoder = KeyValueEncoders.Bind( - (ipe) => ipe == null ? Slice.Empty : FdbTuple.Pack(ipe.Address, ipe.Port), + (ipe) => ipe == null ? Slice.Empty : FdbTuple.EncodeKey(ipe.Address, ipe.Port), (packed) => { if (packed.IsNullOrEmpty) return default(IPEndPoint); diff --git a/FoundationDB.Tests/Layers/RankedSetFacts.cs b/FoundationDB.Tests/Layers/RankedSetFacts.cs index fc98e667f..40e134791 100644 --- a/FoundationDB.Tests/Layers/RankedSetFacts.cs +++ b/FoundationDB.Tests/Layers/RankedSetFacts.cs @@ -62,7 +62,7 @@ await db.ReadWriteAsync(async (tr) => var rnd = new Random(); for (int i = 0; i < 1000; i++) { - await db.ReadWriteAsync((tr) => vector.InsertAsync(tr, FdbTuple.Pack(rnd.Next())), this.Cancellation); + await db.ReadWriteAsync((tr) => vector.InsertAsync(tr, FdbTuple.EncodeKey(rnd.Next())), this.Cancellation); } await db.ReadAsync((tr) => PrintRankedSet(vector, tr), this.Cancellation); diff --git a/FoundationDB.Tests/Layers/TupleFacts.cs b/FoundationDB.Tests/Layers/TupleFacts.cs index 49a132768..414d1c7a4 100644 --- a/FoundationDB.Tests/Layers/TupleFacts.cs +++ b/FoundationDB.Tests/Layers/TupleFacts.cs @@ -30,6 +30,7 @@ namespace FoundationDB.Layers.Tuples.Tests { using FoundationDB.Client; using FoundationDB.Client.Converters; + using FoundationDB.Client.Tests; using FoundationDB.Client.Utils; using NUnit.Framework; using System; @@ -40,7 +41,7 @@ namespace FoundationDB.Layers.Tuples.Tests using System.Text; [TestFixture] - public class TupleFacts + public class TupleFacts : FdbTest { #region General Use... 
@@ -193,40 +194,40 @@ public void Test_FdbTuple_Unpack_First_And_Last() Slice packed; - packed = FdbTuple.Pack(1); - Assert.That(FdbTuple.UnpackFirst(packed), Is.EqualTo(1)); - Assert.That(FdbTuple.UnpackFirst(packed), Is.EqualTo("1")); - Assert.That(FdbTuple.UnpackLast(packed), Is.EqualTo(1)); - Assert.That(FdbTuple.UnpackLast(packed), Is.EqualTo("1")); + packed = FdbTuple.EncodeKey(1); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo(1)); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo("1")); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo(1)); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo("1")); - packed = FdbTuple.Pack(1, 2); - Assert.That(FdbTuple.UnpackFirst(packed), Is.EqualTo(1)); - Assert.That(FdbTuple.UnpackFirst(packed), Is.EqualTo("1")); - Assert.That(FdbTuple.UnpackLast(packed), Is.EqualTo(2)); - Assert.That(FdbTuple.UnpackLast(packed), Is.EqualTo("2")); + packed = FdbTuple.EncodeKey(1, 2); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo(1)); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo("1")); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo(2)); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo("2")); - packed = FdbTuple.Pack(1, 2, 3); - Assert.That(FdbTuple.UnpackFirst(packed), Is.EqualTo(1)); - Assert.That(FdbTuple.UnpackFirst(packed), Is.EqualTo("1")); - Assert.That(FdbTuple.UnpackLast(packed), Is.EqualTo(3)); - Assert.That(FdbTuple.UnpackLast(packed), Is.EqualTo("3")); + packed = FdbTuple.EncodeKey(1, 2, 3); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo(1)); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo("1")); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo(3)); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo("3")); - packed = FdbTuple.Pack(1, 2, 3, 4); - Assert.That(FdbTuple.UnpackFirst(packed), Is.EqualTo(1)); - Assert.That(FdbTuple.UnpackFirst(packed), Is.EqualTo("1")); - Assert.That(FdbTuple.UnpackLast(packed), Is.EqualTo(4)); - 
Assert.That(FdbTuple.UnpackLast(packed), Is.EqualTo("4")); + packed = FdbTuple.EncodeKey(1, 2, 3, 4); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo(1)); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo("1")); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo(4)); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo("4")); - packed = FdbTuple.Pack(1, 2, 3, 4, 5); - Assert.That(FdbTuple.UnpackFirst(packed), Is.EqualTo(1)); - Assert.That(FdbTuple.UnpackFirst(packed), Is.EqualTo("1")); - Assert.That(FdbTuple.UnpackLast(packed), Is.EqualTo(5)); - Assert.That(FdbTuple.UnpackLast(packed), Is.EqualTo("5")); + packed = FdbTuple.EncodeKey(1, 2, 3, 4, 5); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo(1)); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo("1")); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo(5)); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo("5")); - Assert.That(() => FdbTuple.UnpackFirst(Slice.Nil), Throws.InstanceOf()); - Assert.That(() => FdbTuple.UnpackFirst(Slice.Empty), Throws.InstanceOf()); - Assert.That(() => FdbTuple.UnpackLast(Slice.Nil), Throws.InstanceOf()); - Assert.That(() => FdbTuple.UnpackLast(Slice.Empty), Throws.InstanceOf()); + Assert.That(() => FdbTuple.DecodeFirst(Slice.Nil), Throws.InstanceOf()); + Assert.That(() => FdbTuple.DecodeFirst(Slice.Empty), Throws.InstanceOf()); + Assert.That(() => FdbTuple.DecodeLast(Slice.Nil), Throws.InstanceOf()); + Assert.That(() => FdbTuple.DecodeLast(Slice.Empty), Throws.InstanceOf()); } @@ -237,18 +238,18 @@ public void Test_FdbTuple_UnpackSingle() Slice packed; - packed = FdbTuple.Pack(1); - Assert.That(FdbTuple.UnpackSingle(packed), Is.EqualTo(1)); - Assert.That(FdbTuple.UnpackSingle(packed), Is.EqualTo("1")); + packed = FdbTuple.EncodeKey(1); + Assert.That(FdbTuple.DecodeKey(packed), Is.EqualTo(1)); + Assert.That(FdbTuple.DecodeKey(packed), Is.EqualTo("1")); - packed = FdbTuple.Pack("Hello\0World"); - Assert.That(FdbTuple.UnpackSingle(packed), 
Is.EqualTo("Hello\0World")); + packed = FdbTuple.EncodeKey("Hello\0World"); + Assert.That(FdbTuple.DecodeKey(packed), Is.EqualTo("Hello\0World")); - Assert.That(() => FdbTuple.UnpackSingle(Slice.Nil), Throws.InstanceOf()); - Assert.That(() => FdbTuple.UnpackSingle(Slice.Empty), Throws.InstanceOf()); - Assert.That(() => FdbTuple.UnpackSingle(FdbTuple.Pack(1, 2)), Throws.InstanceOf()); - Assert.That(() => FdbTuple.UnpackSingle(FdbTuple.Pack(1, 2, 3)), Throws.InstanceOf()); - Assert.That(() => FdbTuple.UnpackSingle(FdbTuple.Pack(1, 2, 3, 4)), Throws.InstanceOf()); + Assert.That(() => FdbTuple.DecodeKey(Slice.Nil), Throws.InstanceOf()); + Assert.That(() => FdbTuple.DecodeKey(Slice.Empty), Throws.InstanceOf()); + Assert.That(() => FdbTuple.DecodeKey(FdbTuple.EncodeKey(1, 2)), Throws.InstanceOf()); + Assert.That(() => FdbTuple.DecodeKey(FdbTuple.EncodeKey(1, 2, 3)), Throws.InstanceOf()); + Assert.That(() => FdbTuple.DecodeKey(FdbTuple.EncodeKey(1, 2, 3, 4)), Throws.InstanceOf()); } @@ -257,12 +258,12 @@ public void Test_FdbTuple_Embedded_Tuples() { // (A,B).Append((C,D)) should return (A,B,(C,D)) (length 3) and not (A,B,C,D) (length 4) - var x = FdbTuple.Create("A", "B"); - var y = FdbTuple.Create("C", "D"); + FdbTuple x = FdbTuple.Create("A", "B"); + FdbTuple y = FdbTuple.Create("C", "D"); // using the instance method that returns a FdbTuple IFdbTuple z = x.Append(y); - Console.WriteLine(z); + Log(z); Assert.That(z, Is.Not.Null); Assert.That(z.Count, Is.EqualTo(3)); Assert.That(z[0], Is.EqualTo("A")); @@ -274,9 +275,9 @@ public void Test_FdbTuple_Embedded_Tuples() Assert.That(t[0], Is.EqualTo("C")); Assert.That(t[1], Is.EqualTo("D")); - // using the IFdbTuple extension method + // casted down to the interface IFdbTuple z = ((IFdbTuple)x).Append((IFdbTuple)y); - Console.WriteLine(z); + Log(z); Assert.That(z, Is.Not.Null); Assert.That(z.Count, Is.EqualTo(3)); Assert.That(z[0], Is.EqualTo("A")); @@ -293,7 +294,7 @@ public void Test_FdbTuple_Embedded_Tuples() IFdbTuple 
value = FdbTuple.Create(2014, 11, 6); // Indexing a date value (Y, M, D) string id = "Doc123"; z = subspace.Append(value, id); - Console.WriteLine(z); + Log(z); Assert.That(z.Count, Is.EqualTo(4)); } @@ -552,15 +553,15 @@ public void Test_FdbTuple_Serialize_Bytes() Slice packed; - packed = FdbTuple.Pack(new byte[] { 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0 }); + packed = FdbTuple.EncodeKey(new byte[] { 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0 }); Assert.That(packed.ToString(), Is.EqualTo("<01><12>4Vx<9A><00>")); - packed = FdbTuple.Pack(new byte[] { 0x00, 0x42 }); + packed = FdbTuple.EncodeKey(new byte[] { 0x00, 0x42 }); Assert.That(packed.ToString(), Is.EqualTo("<01><00>B<00>")); - packed = FdbTuple.Pack(new byte[] { 0x42, 0x00 }); + packed = FdbTuple.EncodeKey(new byte[] { 0x42, 0x00 }); Assert.That(packed.ToString(), Is.EqualTo("<01>B<00><00>")); - packed = FdbTuple.Pack(new byte[] { 0x42, 0x00, 0x42 }); + packed = FdbTuple.EncodeKey(new byte[] { 0x42, 0x00, 0x42 }); Assert.That(packed.ToString(), Is.EqualTo("<01>B<00>B<00>")); - packed = FdbTuple.Pack(new byte[] { 0x42, 0x00, 0x00, 0x42 }); + packed = FdbTuple.EncodeKey(new byte[] { 0x42, 0x00, 0x00, 0x42 }); Assert.That(packed.ToString(), Is.EqualTo("<01>B<00><00>B<00>")); } @@ -1053,25 +1054,25 @@ public void Test_FdbTuple_Serialize_Singles() [Test] public void Test_FdbTuple_Deserialize_Singles() { - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 80 00 00 00")), Is.EqualTo(0f), "0f"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 C2 28 00 00")), Is.EqualTo(42f), "42f"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 3D D7 FF FF")), Is.EqualTo(-42f), "-42f"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 80 00 00 00")), Is.EqualTo(0f), "0f"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 C2 28 00 00")), Is.EqualTo(42f), "42f"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 3D D7 FF FF")), Is.EqualTo(-42f), "-42f"); - 
Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 BF B5 04 F3")), Is.EqualTo((float)Math.Sqrt(2)), "Sqrt(2)"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 BF B5 04 F3")), Is.EqualTo((float)Math.Sqrt(2)), "Sqrt(2)"); // well known values - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 00 80 00 00")), Is.EqualTo(float.MinValue), "float.MinValue"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 FF 7F FF FF")), Is.EqualTo(float.MaxValue), "float.MaxValue"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 7F FF FF FF")), Is.EqualTo(-0f), "-0f"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 00 7F FF FF")), Is.EqualTo(float.NegativeInfinity), "float.NegativeInfinity"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 FF 80 00 00")), Is.EqualTo(float.PositiveInfinity), "float.PositiveInfinity"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 00 80 00 00")), Is.EqualTo(float.MinValue), "float.Epsilon"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 80 00 00 01")), Is.EqualTo(float.Epsilon), "+float.Epsilon"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 7F FF FF FE")), Is.EqualTo(-float.Epsilon), "-float.Epsilon"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 00 80 00 00")), Is.EqualTo(float.MinValue), "float.MinValue"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 FF 7F FF FF")), Is.EqualTo(float.MaxValue), "float.MaxValue"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 7F FF FF FF")), Is.EqualTo(-0f), "-0f"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 00 7F FF FF")), Is.EqualTo(float.NegativeInfinity), "float.NegativeInfinity"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 FF 80 00 00")), Is.EqualTo(float.PositiveInfinity), "float.PositiveInfinity"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 00 80 00 00")), Is.EqualTo(float.MinValue), "float.Epsilon"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 80 00 00 01")), Is.EqualTo(float.Epsilon), 
"+float.Epsilon"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 7F FF FF FE")), Is.EqualTo(-float.Epsilon), "-float.Epsilon"); // all possible variants of NaN should end up equal and normalized to float.NaN - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 00 3F FF FF")), Is.EqualTo(float.NaN), "float.NaN"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("20 00 3F FF FF")), Is.EqualTo(float.NaN), "float.NaN"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 00 3F FF FF")), Is.EqualTo(float.NaN), "float.NaN"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("20 00 3F FF FF")), Is.EqualTo(float.NaN), "float.NaN"); } [Test] @@ -1113,7 +1114,7 @@ public void Test_FdbTuple_Serialize_Doubles() // roundtripping vectors of doubles var tuple = FdbTuple.Create(Math.PI, Math.E, Math.Log(1), Math.Log(2)); - Assert.That(FdbTuple.Unpack(FdbTuple.Pack(Math.PI, Math.E, Math.Log(1), Math.Log(2))), Is.EqualTo(tuple)); + Assert.That(FdbTuple.Unpack(FdbTuple.EncodeKey(Math.PI, Math.E, Math.Log(1), Math.Log(2))), Is.EqualTo(tuple)); Assert.That(FdbTuple.Unpack(FdbTuple.Create(Math.PI, Math.E, Math.Log(1), Math.Log(2)).ToSlice()), Is.EqualTo(tuple)); Assert.That(FdbTuple.Unpack(FdbTuple.Empty.Append(Math.PI).Append(Math.E).Append(Math.Log(1)).Append(Math.Log(2)).ToSlice()), Is.EqualTo(tuple)); } @@ -1121,24 +1122,24 @@ public void Test_FdbTuple_Serialize_Doubles() [Test] public void Test_FdbTuple_Deserialize_Doubles() { - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 80 00 00 00 00 00 00 00")), Is.EqualTo(0d), "0d"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 C0 45 00 00 00 00 00 00")), Is.EqualTo(42d), "42d"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 3F BA FF FF FF FF FF FF")), Is.EqualTo(-42d), "-42d"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 80 00 00 00 00 00 00 00")), Is.EqualTo(0d), "0d"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 C0 45 00 00 00 00 00 00")), Is.EqualTo(42d), "42d"); + 
Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 3F BA FF FF FF FF FF FF")), Is.EqualTo(-42d), "-42d"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 C0 09 21 FB 54 44 2D 18")), Is.EqualTo(Math.PI), "Math.PI"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 C0 05 BF 0A 8B 14 57 69")), Is.EqualTo(Math.E), "Math.E"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 C0 09 21 FB 54 44 2D 18")), Is.EqualTo(Math.PI), "Math.PI"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 C0 05 BF 0A 8B 14 57 69")), Is.EqualTo(Math.E), "Math.E"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 00 10 00 00 00 00 00 00")), Is.EqualTo(double.MinValue), "double.MinValue"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 FF EF FF FF FF FF FF FF")), Is.EqualTo(double.MaxValue), "double.MaxValue"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 7F FF FF FF FF FF FF FF")), Is.EqualTo(-0d), "-0d"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 00 0F FF FF FF FF FF FF")), Is.EqualTo(double.NegativeInfinity), "double.NegativeInfinity"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 FF F0 00 00 00 00 00 00")), Is.EqualTo(double.PositiveInfinity), "double.PositiveInfinity"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 80 00 00 00 00 00 00 01")), Is.EqualTo(double.Epsilon), "+double.Epsilon"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 7F FF FF FF FF FF FF FE")), Is.EqualTo(-double.Epsilon), "-double.Epsilon"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 00 10 00 00 00 00 00 00")), Is.EqualTo(double.MinValue), "double.MinValue"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 FF EF FF FF FF FF FF FF")), Is.EqualTo(double.MaxValue), "double.MaxValue"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 7F FF FF FF FF FF FF FF")), Is.EqualTo(-0d), "-0d"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 00 0F FF FF FF FF FF FF")), Is.EqualTo(double.NegativeInfinity), "double.NegativeInfinity"); + 
Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 FF F0 00 00 00 00 00 00")), Is.EqualTo(double.PositiveInfinity), "double.PositiveInfinity"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 80 00 00 00 00 00 00 01")), Is.EqualTo(double.Epsilon), "+double.Epsilon"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 7F FF FF FF FF FF FF FE")), Is.EqualTo(-double.Epsilon), "-double.Epsilon"); // all possible variants of NaN should end up equal and normalized to double.NaN - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 00 07 FF FF FF FF FF FF")), Is.EqualTo(double.NaN), "double.NaN"); - Assert.That(FdbTuple.UnpackSingle(Slice.FromHexa("21 00 07 FF FF FF FF FF 84")), Is.EqualTo(double.NaN), "double.NaN"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 00 07 FF FF FF FF FF FF")), Is.EqualTo(double.NaN), "double.NaN"); + Assert.That(FdbTuple.DecodeKey(Slice.FromHexa("21 00 07 FF FF FF FF FF 84")), Is.EqualTo(double.NaN), "double.NaN"); } [Test] @@ -1149,17 +1150,17 @@ public void Test_FdbTuple_Serialize_Booleans() Slice packed; // bool - packed = FdbTuple.Pack(false); + packed = FdbTuple.EncodeKey(false); Assert.That(packed.ToString(), Is.EqualTo("<14>")); - packed = FdbTuple.Pack(true); + packed = FdbTuple.EncodeKey(true); Assert.That(packed.ToString(), Is.EqualTo("<15><01>")); // bool? - packed = FdbTuple.Pack(default(bool?)); + packed = FdbTuple.EncodeKey(default(bool?)); Assert.That(packed.ToString(), Is.EqualTo("<00>")); - packed = FdbTuple.Pack((bool?)false); + packed = FdbTuple.EncodeKey((bool?)false); Assert.That(packed.ToString(), Is.EqualTo("<14>")); - packed = FdbTuple.Pack((bool?)true); + packed = FdbTuple.EncodeKey((bool?)true); Assert.That(packed.ToString(), Is.EqualTo("<15><01>")); // tuple containing bools @@ -1175,25 +1176,25 @@ public void Test_FdbTuple_Deserialize_Booleans() // Null, 0, and empty byte[]/strings are equivalent to False. All others are equivalent to True // Falsy... 
- Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<00>")), Is.EqualTo(false), "Null => False"); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<14>")), Is.EqualTo(false), "0 => False"); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<01><00>")), Is.EqualTo(false), "byte[0] => False"); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<02><00>")), Is.EqualTo(false), "String.Empty => False"); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<00>")), Is.EqualTo(false), "Null => False"); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<14>")), Is.EqualTo(false), "0 => False"); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<01><00>")), Is.EqualTo(false), "byte[0] => False"); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<02><00>")), Is.EqualTo(false), "String.Empty => False"); // Truthy - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<15><01>")), Is.EqualTo(true), "1 => True"); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<13>")), Is.EqualTo(true), "-1 => True"); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<01>Hello<00>")), Is.EqualTo(true), "'Hello' => True"); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<02>Hello<00>")), Is.EqualTo(true), "\"Hello\" => True"); - Assert.That(FdbTuple.UnpackSingle(FdbTuple.Pack(123456789)), Is.EqualTo(true), "random int => True"); - - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<02>True<00>")), Is.EqualTo(true), "\"True\" => True"); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<02>False<00>")), Is.EqualTo(true), "\"False\" => True ***"); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<15><01>")), Is.EqualTo(true), "1 => True"); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<13>")), Is.EqualTo(true), "-1 => True"); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<01>Hello<00>")), Is.EqualTo(true), "'Hello' => True"); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<02>Hello<00>")), Is.EqualTo(true), "\"Hello\" => True"); + 
Assert.That(FdbTuple.DecodeKey(FdbTuple.EncodeKey(123456789)), Is.EqualTo(true), "random int => True"); + + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<02>True<00>")), Is.EqualTo(true), "\"True\" => True"); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<02>False<00>")), Is.EqualTo(true), "\"False\" => True ***"); // note: even though it would be tempting to convert the string "false" to False, it is not a standard behavior accross all bindings // When decoded to object, though, they should return 0 and 1 - Assert.That(FdbTuplePackers.DeserializeBoxed(FdbTuple.Pack(false)), Is.EqualTo(0)); - Assert.That(FdbTuplePackers.DeserializeBoxed(FdbTuple.Pack(true)), Is.EqualTo(1)); + Assert.That(FdbTuplePackers.DeserializeBoxed(FdbTuple.EncodeKey(false)), Is.EqualTo(0)); + Assert.That(FdbTuplePackers.DeserializeBoxed(FdbTuple.EncodeKey(true)), Is.EqualTo(1)); } [Test] @@ -1223,16 +1224,16 @@ public void Test_FdbTuple_Serialize_IPAddress() [Test] public void Test_FdbTuple_Deserialize_IPAddress() { - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<01><7F><00><00><01><00>")), Is.EqualTo(IPAddress.Parse("127.0.0.1"))); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<01><00><00><00><00><00>")), Is.EqualTo(IPAddress.Parse("0.0.0.0"))); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<01><01><02><03><04><00>")), Is.EqualTo(IPAddress.Parse("1.2.3.4"))); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<01><7F><00><00><01><00>")), Is.EqualTo(IPAddress.Parse("127.0.0.1"))); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<01><00><00><00><00><00>")), Is.EqualTo(IPAddress.Parse("0.0.0.0"))); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<01><01><02><03><04><00>")), Is.EqualTo(IPAddress.Parse("1.2.3.4"))); - Assert.That(FdbTuple.UnpackSingle(FdbTuple.Pack("127.0.0.1")), Is.EqualTo(IPAddress.Loopback)); + Assert.That(FdbTuple.DecodeKey(FdbTuple.EncodeKey("127.0.0.1")), Is.EqualTo(IPAddress.Loopback)); var ip = IPAddress.Parse("192.168.0.1"); - 
Assert.That(FdbTuple.UnpackSingle(FdbTuple.Pack(ip.ToString())), Is.EqualTo(ip)); - Assert.That(FdbTuple.UnpackSingle(FdbTuple.Pack(ip.GetAddressBytes())), Is.EqualTo(ip)); - Assert.That(FdbTuple.UnpackSingle(FdbTuple.Pack(ip.Address)), Is.EqualTo(ip)); + Assert.That(FdbTuple.DecodeKey(FdbTuple.EncodeKey(ip.ToString())), Is.EqualTo(ip)); + Assert.That(FdbTuple.DecodeKey(FdbTuple.EncodeKey(ip.GetAddressBytes())), Is.EqualTo(ip)); + Assert.That(FdbTuple.DecodeKey(FdbTuple.EncodeKey(ip.Address)), Is.EqualTo(ip)); } [Test] @@ -1242,43 +1243,43 @@ public void Test_FdbTuple_NullableTypes() // serialize - Assert.That(FdbTuple.Pack(0), Is.EqualTo(Slice.Unescape("<14>"))); - Assert.That(FdbTuple.Pack(123), Is.EqualTo(Slice.Unescape("<15>{"))); - Assert.That(FdbTuple.Pack(null), Is.EqualTo(Slice.Unescape("<00>"))); + Assert.That(FdbTuple.EncodeKey(0), Is.EqualTo(Slice.Unescape("<14>"))); + Assert.That(FdbTuple.EncodeKey(123), Is.EqualTo(Slice.Unescape("<15>{"))); + Assert.That(FdbTuple.EncodeKey(null), Is.EqualTo(Slice.Unescape("<00>"))); - Assert.That(FdbTuple.Pack(0L), Is.EqualTo(Slice.Unescape("<14>"))); - Assert.That(FdbTuple.Pack(123L), Is.EqualTo(Slice.Unescape("<15>{"))); - Assert.That(FdbTuple.Pack(null), Is.EqualTo(Slice.Unescape("<00>"))); + Assert.That(FdbTuple.EncodeKey(0L), Is.EqualTo(Slice.Unescape("<14>"))); + Assert.That(FdbTuple.EncodeKey(123L), Is.EqualTo(Slice.Unescape("<15>{"))); + Assert.That(FdbTuple.EncodeKey(null), Is.EqualTo(Slice.Unescape("<00>"))); - Assert.That(FdbTuple.Pack(true), Is.EqualTo(Slice.Unescape("<15><01>"))); - Assert.That(FdbTuple.Pack(false), Is.EqualTo(Slice.Unescape("<14>"))); - Assert.That(FdbTuple.Pack(null), Is.EqualTo(Slice.Unescape("<00>")), "Maybe it was File Not Found?"); + Assert.That(FdbTuple.EncodeKey(true), Is.EqualTo(Slice.Unescape("<15><01>"))); + Assert.That(FdbTuple.EncodeKey(false), Is.EqualTo(Slice.Unescape("<14>"))); + Assert.That(FdbTuple.EncodeKey(null), Is.EqualTo(Slice.Unescape("<00>")), "Maybe it was File 
Not Found?"); - Assert.That(FdbTuple.Pack(Guid.Empty), Is.EqualTo(Slice.Unescape("0<00><00><00><00><00><00><00><00><00><00><00><00><00><00><00><00>"))); - Assert.That(FdbTuple.Pack(null), Is.EqualTo(Slice.Unescape("<00>"))); + Assert.That(FdbTuple.EncodeKey(Guid.Empty), Is.EqualTo(Slice.Unescape("0<00><00><00><00><00><00><00><00><00><00><00><00><00><00><00><00>"))); + Assert.That(FdbTuple.EncodeKey(null), Is.EqualTo(Slice.Unescape("<00>"))); - Assert.That(FdbTuple.Pack(TimeSpan.Zero), Is.EqualTo(Slice.Unescape("!<80><00><00><00><00><00><00><00>"))); - Assert.That(FdbTuple.Pack(null), Is.EqualTo(Slice.Unescape("<00>"))); + Assert.That(FdbTuple.EncodeKey(TimeSpan.Zero), Is.EqualTo(Slice.Unescape("!<80><00><00><00><00><00><00><00>"))); + Assert.That(FdbTuple.EncodeKey(null), Is.EqualTo(Slice.Unescape("<00>"))); // deserialize - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<14>")), Is.EqualTo(0)); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<15>{")), Is.EqualTo(123)); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<00>")), Is.Null); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<14>")), Is.EqualTo(0)); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<15>{")), Is.EqualTo(123)); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<00>")), Is.Null); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<14>")), Is.EqualTo(0L)); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<15>{")), Is.EqualTo(123L)); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<00>")), Is.Null); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<14>")), Is.EqualTo(0L)); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<15>{")), Is.EqualTo(123L)); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<00>")), Is.Null); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<15><01>")), Is.True); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<14>")), Is.False); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<00>")), Is.Null); + 
Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<15><01>")), Is.True); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<14>")), Is.False); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<00>")), Is.Null); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("0<00><00><00><00><00><00><00><00><00><00><00><00><00><00><00><00>")), Is.EqualTo(Guid.Empty)); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<00>")), Is.Null); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("0<00><00><00><00><00><00><00><00><00><00><00><00><00><00><00><00>")), Is.EqualTo(Guid.Empty)); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<00>")), Is.Null); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<14>")), Is.EqualTo(TimeSpan.Zero)); - Assert.That(FdbTuple.UnpackSingle(Slice.Unescape("<00>")), Is.Null); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<14>")), Is.EqualTo(TimeSpan.Zero)); + Assert.That(FdbTuple.DecodeKey(Slice.Unescape("<00>")), Is.Null); } @@ -1286,17 +1287,17 @@ public void Test_FdbTuple_NullableTypes() public void Test_FdbTuple_Serialize_Alias() { Assert.That( - FdbTuple.Pack(FdbTupleAlias.System).ToString(), + FdbTuple.EncodeKey(FdbTupleAlias.System).ToString(), Is.EqualTo("") ); Assert.That( - FdbTuple.Pack(FdbTupleAlias.Directory).ToString(), + FdbTuple.EncodeKey(FdbTupleAlias.Directory).ToString(), Is.EqualTo("") ); Assert.That( - FdbTuple.Pack(FdbTupleAlias.Zero).ToString(), + FdbTuple.EncodeKey(FdbTupleAlias.Zero).ToString(), Is.EqualTo("<00>") ); @@ -1448,35 +1449,35 @@ public void Test_FdbTuple_Create_ToSlice() } [Test] - public void Test_FdbTuple_Pack() + public void Test_FdbTuple_EncodeKey() { Assert.That( - FdbTuple.Pack("hello world").ToString(), + FdbTuple.EncodeKey("hello world").ToString(), Is.EqualTo("<02>hello world<00>") ); Assert.That( - FdbTuple.Pack("hello", "world").ToString(), + FdbTuple.EncodeKey("hello", "world").ToString(), Is.EqualTo("<02>hello<00><02>world<00>") ); Assert.That( - FdbTuple.Pack("hello world", 123).ToString(), + 
FdbTuple.EncodeKey("hello world", 123).ToString(), Is.EqualTo("<02>hello world<00><15>{") ); Assert.That( - FdbTuple.Pack("hello world", 1234, -1234).ToString(), + FdbTuple.EncodeKey("hello world", 1234, -1234).ToString(), Is.EqualTo("<02>hello world<00><16><04><12>-") ); Assert.That( - FdbTuple.Pack("hello world", 123, false).ToString(), + FdbTuple.EncodeKey("hello world", 123, false).ToString(), Is.EqualTo("<02>hello world<00><15>{<14>") ); Assert.That( - FdbTuple.Pack("hello world", 123, false, new byte[] { 123, 1, 66, 0, 42 }).ToString(), + FdbTuple.EncodeKey("hello world", 123, false, new byte[] { 123, 1, 66, 0, 42 }).ToString(), Is.EqualTo("<02>hello world<00><15>{<14><01>{<01>B<00>*<00>") ); } @@ -1486,26 +1487,26 @@ public void Test_FdbTuple_Unpack() { var packed = FdbTuple.Create("hello world").ToSlice(); - Console.WriteLine(packed); + Log(packed); var tuple = FdbTuple.Unpack(packed); Assert.That(tuple, Is.Not.Null); - Console.WriteLine(tuple); + Log(tuple); Assert.That(tuple.Count, Is.EqualTo(1)); Assert.That(tuple.Get(0), Is.EqualTo("hello world")); packed = FdbTuple.Create("hello world", 123).ToSlice(); - Console.WriteLine(packed); + Log(packed); tuple = FdbTuple.Unpack(packed); Assert.That(tuple, Is.Not.Null); - Console.WriteLine(tuple); + Log(tuple); Assert.That(tuple.Count, Is.EqualTo(2)); Assert.That(tuple.Get(0), Is.EqualTo("hello world")); Assert.That(tuple.Get(1), Is.EqualTo(123)); packed = FdbTuple.Create(1, 256, 257, 65536, int.MaxValue, long.MaxValue).ToSlice(); - Console.WriteLine(packed); + Log(packed); tuple = FdbTuple.Unpack(packed); Assert.That(tuple, Is.Not.Null); @@ -1518,12 +1519,12 @@ public void Test_FdbTuple_Unpack() Assert.That(tuple.Get(5), Is.EqualTo(long.MaxValue)); packed = FdbTuple.Create(-1, -256, -257, -65536, int.MinValue, long.MinValue).ToSlice(); - Console.WriteLine(packed); + Log(packed); tuple = FdbTuple.Unpack(packed); Assert.That(tuple, Is.Not.Null); Assert.That(tuple, Is.InstanceOf()); - Console.WriteLine(tuple); + 
Log(tuple); Assert.That(tuple.Count, Is.EqualTo(6)); Assert.That(tuple.Get(0), Is.EqualTo(-1)); Assert.That(tuple.Get(1), Is.EqualTo(-256)); @@ -1564,62 +1565,32 @@ public void Test_FdbTuple_CreateBoxed() } [Test] - public void Test_FdbTuple_PackBoxed() + public void Test_FdbTuple_EncodeKey_Boxed() { Slice slice; - slice = FdbTuple.PackBoxed(default(object)); + slice = FdbTuple.EncodeKey(default(object)); Assert.That(slice.ToString(), Is.EqualTo("<00>")); - slice = FdbTuple.PackBoxed((object)1); + slice = FdbTuple.EncodeKey(1); Assert.That(slice.ToString(), Is.EqualTo("<15><01>")); - slice = FdbTuple.PackBoxed((object)1L); + slice = FdbTuple.EncodeKey(1L); Assert.That(slice.ToString(), Is.EqualTo("<15><01>")); - slice = FdbTuple.PackBoxed((object)1U); + slice = FdbTuple.EncodeKey(1U); Assert.That(slice.ToString(), Is.EqualTo("<15><01>")); - slice = FdbTuple.PackBoxed((object)1UL); + slice = FdbTuple.EncodeKey(1UL); Assert.That(slice.ToString(), Is.EqualTo("<15><01>")); - slice = FdbTuple.PackBoxed((object)false); + slice = FdbTuple.EncodeKey(false); Assert.That(slice.ToString(), Is.EqualTo("<14>")); - slice = FdbTuple.PackBoxed((object)new byte[] { 4, 5, 6 }); + slice = FdbTuple.EncodeKey(new byte[] { 4, 5, 6 }); Assert.That(slice.ToString(), Is.EqualTo("<01><04><05><06><00>")); - slice = FdbTuple.PackBoxed((object)"hello"); - Assert.That(slice.ToString(), Is.EqualTo("<02>hello<00>")); - } - - [Test] - public void Test_FdbTuple_Pack_Boxed_Values() - { - Slice slice; - - slice = FdbTuple.Pack(default(object)); - Assert.That(slice.ToString(), Is.EqualTo("<00>")); - - slice = FdbTuple.Pack(1); - Assert.That(slice.ToString(), Is.EqualTo("<15><01>")); - - slice = FdbTuple.Pack(1L); - Assert.That(slice.ToString(), Is.EqualTo("<15><01>")); - - slice = FdbTuple.Pack(1U); - Assert.That(slice.ToString(), Is.EqualTo("<15><01>")); - - slice = FdbTuple.Pack(1UL); - Assert.That(slice.ToString(), Is.EqualTo("<15><01>")); - - slice = FdbTuple.Pack(false); - 
Assert.That(slice.ToString(), Is.EqualTo("<14>")); - - slice = FdbTuple.Pack(new byte[] { 4, 5, 6 }); - Assert.That(slice.ToString(), Is.EqualTo("<01><04><05><06><00>")); - - slice = FdbTuple.Pack("hello"); + slice = FdbTuple.EncodeKey("hello"); Assert.That(slice.ToString(), Is.EqualTo("<02>hello<00>")); } @@ -1658,7 +1629,7 @@ public void Test_FdbTuple_Numbers_Are_Sorted_Lexicographically() } } sw.Stop(); - Console.WriteLine("Checked " + N.ToString("N0") + " tuples in " + sw.ElapsedMilliseconds + " ms"); + Log("Checked {0:N0} tuples in {1:N1} ms", N, sw.ElapsedMilliseconds); } @@ -1715,26 +1686,26 @@ public void Test_FdbTuple_BatchPack_Of_Tuples() }; // array version - slices = FdbTuple.PackRange(tuples); + slices = FdbTuple.Pack(tuples); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(tuples.Length)); Assert.That(slices, Is.EqualTo(tuples.Select(t => t.ToSlice()).ToArray())); // IEnumerable version that is passed an array - slices = FdbTuple.PackRange((IEnumerable)tuples); + slices = FdbTuple.Pack((IEnumerable)tuples); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(tuples.Length)); Assert.That(slices, Is.EqualTo(tuples.Select(t => t.ToSlice()).ToArray())); // IEnumerable version but with a "real" enumerable - slices = FdbTuple.PackRange(tuples.Select(t => t)); + slices = FdbTuple.Pack(tuples.Select(t => t)); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(tuples.Length)); Assert.That(slices, Is.EqualTo(tuples.Select(t => t.ToSlice()).ToArray())); } [Test] - public void Test_FdbTuple_PackRange_Of_T() + public void Test_FdbTuple_EncodeKeys_Of_T() { Slice[] slices; @@ -1744,19 +1715,19 @@ public void Test_FdbTuple_PackRange_Of_T() int[] items = new int[] { 1, 2, 3, 123, -1, int.MaxValue }; // array version - slices = FdbTuple.PackRangeWithPrefix(tuple, items); + slices = FdbTuple.EncodePrefixedKeys(tuple, items); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, 
Is.EqualTo(items.Length)); Assert.That(slices, Is.EqualTo(items.Select(x => tuple.Append(x).ToSlice()).ToArray())); // IEnumerable version that is passed an array - slices = FdbTuple.PackRangeWithPrefix(tuple, (IEnumerable)items); + slices = FdbTuple.EncodePrefixedKeys(tuple, (IEnumerable)items); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(items.Length)); Assert.That(slices, Is.EqualTo(items.Select(x => tuple.Append(x).ToSlice()).ToArray())); // IEnumerable version but with a "real" enumerable - slices = FdbTuple.PackRangeWithPrefix(tuple, items.Select(t => t)); + slices = FdbTuple.EncodePrefixedKeys(tuple, items.Select(t => t)); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(items.Length)); Assert.That(slices, Is.EqualTo(items.Select(x => tuple.Append(x).ToSlice()).ToArray())); @@ -1767,13 +1738,13 @@ public void Test_FdbTuple_PackRange_Of_T() string[] words = new string[] { "hello", "world", "très bien", "断トツ", "abc\0def", null, String.Empty }; - var merged = FdbTuple.PackRangeWithPrefix(Slice.FromByte(42), words); + var merged = FdbTuple.EncodePrefixedKeys(Slice.FromByte(42), words); Assert.That(merged, Is.Not.Null); Assert.That(merged.Length, Is.EqualTo(words.Length)); for (int i = 0; i < words.Length; i++) { - var expected = Slice.FromByte(42) + FdbTuple.Pack(words[i]); + var expected = Slice.FromByte(42) + FdbTuple.EncodeKey(words[i]); Assert.That(merged[i], Is.EqualTo(expected)); Assert.That(merged[i].Array, Is.SameAs(merged[0].Array), "All slices should be stored in the same buffer"); @@ -1781,33 +1752,33 @@ public void Test_FdbTuple_PackRange_Of_T() } // corner cases - Assert.That(() => FdbTuple.PackRangeWithPrefix(Slice.Empty, default(int[])), Throws.InstanceOf().With.Property("ParamName").EqualTo("keys")); - Assert.That(() => FdbTuple.PackRangeWithPrefix(Slice.Empty, default(IEnumerable)), Throws.InstanceOf().With.Property("ParamName").EqualTo("keys")); + Assert.That(() => 
FdbTuple.EncodePrefixedKeys(Slice.Empty, default(int[])), Throws.InstanceOf().With.Property("ParamName").EqualTo("keys")); + Assert.That(() => FdbTuple.EncodePrefixedKeys(Slice.Empty, default(IEnumerable)), Throws.InstanceOf().With.Property("ParamName").EqualTo("keys")); #endregion } [Test] - public void Test_FdbTuple_PackRange_Boxed() + public void Test_FdbTuple_EncodeKeys_Boxed() { Slice[] slices; var tuple = FdbTuple.Create("hello"); object[] items = new object[] { "world", 123, false, Guid.NewGuid(), long.MinValue }; // array version - slices = FdbTuple.PackRangeWithPrefix(tuple, items); + slices = FdbTuple.EncodePrefixedKeys(tuple, items); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(items.Length)); Assert.That(slices, Is.EqualTo(items.Select(x => tuple.Append(x).ToSlice()).ToArray())); // IEnumerable version that is passed an array - slices = FdbTuple.PackRangeWithPrefix(tuple, (IEnumerable)items); + slices = FdbTuple.EncodePrefixedKeys(tuple, (IEnumerable)items); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(items.Length)); Assert.That(slices, Is.EqualTo(items.Select(x => tuple.Append(x).ToSlice()).ToArray())); // IEnumerable version but with a "real" enumerable - slices = FdbTuple.PackRangeWithPrefix(tuple, items.Select(t => t)); + slices = FdbTuple.EncodePrefixedKeys(tuple, items.Select(t => t)); Assert.That(slices, Is.Not.Null); Assert.That(slices.Length, Is.EqualTo(items.Length)); Assert.That(slices, Is.EqualTo(items.Select(x => tuple.Append(x).ToSlice()).ToArray())); @@ -1886,7 +1857,7 @@ public void Test_FdbTupleParser_WriteInt64_Respects_Ordering() FdbTupleParser.WriteInt64(ref writer, x); var res = new KeyValuePair(x, writer.Output.ToSlice()); list.Add(res); - Console.WriteLine("{0,20} : {0:x16} {1}", res.Key, res.Value.ToString()); + Log("{0,20} : {0:x16} {1}", res.Key, res.Value.ToString()); }; // We can't test 2^64 values, be we are interested at what happens around powers of two (were size can 
change) @@ -1977,7 +1948,7 @@ public void Test_FdbTupleParser_WriteUInt64_Respects_Ordering() var res = new KeyValuePair(x, writer.Output.ToSlice()); list.Add(res); #if DEBUG - Console.WriteLine("{0,20} : {0:x16} {1}", res.Key, res.Value.ToString()); + Log("{0,20} : {0:x16} {1}", res.Key, res.Value); #endif }; @@ -2348,7 +2319,7 @@ public void Bench_FdbTuple_Unpack_Random() string FUNKY_STRING = "hello\x00world"; string UNICODE_STRING = "héllø 世界"; - Console.Write("Creating " + N.ToString("N0") + " random tuples"); + Console.Write("Creating {0:N0} random tuples", N); var tuples = new List(N); var rnd = new Random(777); var guids = Enumerable.Range(0, 10).Select(_ => Guid.NewGuid()).ToArray(); @@ -2387,60 +2358,60 @@ public void Bench_FdbTuple_Unpack_Random() tuples.Add(tuple); } sw.Stop(); - Console.WriteLine(" done in " + sw.Elapsed.TotalSeconds + " sec"); - Console.WriteLine(" > " + tuples.Sum(x => x.Count).ToString("N0") + " items"); - Console.WriteLine(" > " + tuples[42]); - Console.WriteLine(); + Log(" done in {0:N3} sec", sw.Elapsed.TotalSeconds); + Log(" > {0:N0} items", tuples.Sum(x => x.Count)); + Log(" > {0}", tuples[42]); + Log(); Console.Write("Packing tuples..."); sw.Restart(); - var slices = FdbTuple.PackRange(tuples); + var slices = FdbTuple.Pack(tuples); sw.Stop(); - Console.WriteLine(" done in " + sw.Elapsed.TotalSeconds + " sec"); - Console.WriteLine(" > " + (N / sw.Elapsed.TotalSeconds).ToString("N0") + " tps"); - Console.WriteLine(" > " + slices.Sum(x => x.Count).ToString("N0") + " bytes"); - Console.WriteLine(" > " + slices[42]); - Console.WriteLine(); + Log(" done in {0:N3} sec", sw.Elapsed.TotalSeconds); + Log(" > {0:N0} tps", N / sw.Elapsed.TotalSeconds); + Log(" > {0:N0} bytes", slices.Sum(x => x.Count)); + Log(" > {0}", slices[42]); + Log(); Console.Write("Unpacking tuples..."); sw.Restart(); var unpacked = slices.Select(slice => FdbTuple.Unpack(slice)).ToList(); sw.Stop(); - Console.WriteLine(" done in " + sw.Elapsed.TotalSeconds + " 
sec"); - Console.WriteLine(" > " + (N / sw.Elapsed.TotalSeconds).ToString("N0") + " tps"); - Console.WriteLine(" > " + unpacked[42]); - Console.WriteLine(); + Log(" done in {0:N3} sec", sw.Elapsed.TotalSeconds); + Log(" > {0:N0} tps", N / sw.Elapsed.TotalSeconds); + Log(" > {0}", unpacked[42]); + Log(); Console.Write("Comparing ..."); sw.Restart(); tuples.Zip(unpacked, (x, y) => x.Equals(y)).All(b => b); sw.Stop(); - Console.WriteLine(" done in " + sw.Elapsed.TotalSeconds + " sec"); - Console.WriteLine(); + Log(" done in {0:N3} sec", sw.Elapsed.TotalSeconds); + Log(); Console.Write("Tuples.ToString ..."); sw.Restart(); var strings = tuples.Select(x => x.ToString()).ToList(); sw.Stop(); - Console.WriteLine(" done in " + sw.Elapsed.TotalSeconds + " sec"); - Console.WriteLine(" > " + strings.Sum(x => x.Length).ToString("N0") + " chars"); - Console.WriteLine(" > " + strings[42]); - Console.WriteLine(); + Log(" done in {0:N3} sec", sw.Elapsed.TotalSeconds); + Log(" > {0:N0} chars", strings.Sum(x => x.Length)); + Log(" > {0}", strings[42]); + Log(); Console.Write("Unpacked.ToString ..."); sw.Restart(); strings = unpacked.Select(x => x.ToString()).ToList(); sw.Stop(); - Console.WriteLine(" done in " + sw.Elapsed.TotalSeconds + " sec"); - Console.WriteLine(" > " + strings.Sum(x => x.Length).ToString("N0") + " chars"); - Console.WriteLine(" > " + strings[42]); - Console.WriteLine(); + Log(" done in {0:N3} sec", sw.Elapsed.TotalSeconds); + Log(" > {0:N0} chars", strings.Sum(x => x.Length)); + Log(" > {0}", strings[42]); + Log(); Console.Write("Memoizing ..."); sw.Restart(); var memoized = tuples.Select(x => x.Memoize()).ToList(); sw.Stop(); - Console.WriteLine(" done in " + sw.Elapsed.TotalSeconds + " sec"); + Log(" done in {0:N3} sec", sw.Elapsed.TotalSeconds); } #endregion diff --git a/FoundationDB.Tests/RangeQueryFacts.cs b/FoundationDB.Tests/RangeQueryFacts.cs index cc3d262b2..c6f036040 100644 --- a/FoundationDB.Tests/RangeQueryFacts.cs +++ 
b/FoundationDB.Tests/RangeQueryFacts.cs @@ -502,7 +502,7 @@ public async Task Test_Can_MergeSort() { for (int i = 0; i < N; i++) { - tr.Set(lists[k].Tuples.EncodeKey((i * K) + k), FdbTuple.Pack(k, i)); + tr.Set(lists[k].Tuples.EncodeKey((i * K) + k), FdbTuple.EncodeKey(k, i)); } await tr.CommitAsync(); } @@ -527,8 +527,8 @@ public async Task Test_Can_MergeSort() for(int i=0;i " + key + " = " + value); tr.Set(key, value); } @@ -642,7 +642,7 @@ public async Task Test_Range_Except() for (int i = 0; i < N; i++) { var key = lists[k].Tuples.EncodeKey(series[k][i]); - var value = FdbTuple.Pack(k, i); + var value = FdbTuple.EncodeKey(k, i); //Console.WriteLine("> " + key + " = " + value); tr.Set(key, value); } From 5c9c980a5d283888cc1615ee6246eb8b04221235 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Mon, 17 Nov 2014 12:21:58 +0100 Subject: [PATCH 06/63] Renamed location.Partition.By(...) to location.Partition.ByKey(...) - Responding to feedback on the new names - ByKey(...) better matches the conventions of location.Tuples.EncodeKey(...) 
--- .../Layers/Directories/FdbDirectoryLayer.cs | 8 ++-- .../Directories/FdbHighContentionAllocator.cs | 4 +- .../Subspaces/FdbSubspacePartition.cs | 8 ++-- .../Collections/FdbQueue`1.cs | 6 +-- .../Collections/FdbRankedSet.cs | 10 ++--- .../Messaging/FdbWorkerPool.cs | 14 +++--- FoundationDB.Samples/Benchmarks/LeakTest.cs | 2 +- .../MessageQueue/MessageQueueRunner.cs | 10 ++--- .../Transactions/MemoryTransactionFacts.cs | 4 +- FoundationDB.Tests.Sandbox/Program.cs | 20 ++++----- FoundationDB.Tests/DatabaseFacts.cs | 4 +- FoundationDB.Tests/Layers/BlobFacts.cs | 6 +-- FoundationDB.Tests/Layers/DirectoryFacts.cs | 40 ++++++++--------- FoundationDB.Tests/Layers/IndexingFacts.cs | 10 ++--- FoundationDB.Tests/Layers/MapFacts.cs | 6 +-- FoundationDB.Tests/Layers/MultiMapFacts.cs | 2 +- FoundationDB.Tests/Layers/RankedSetFacts.cs | 2 +- .../Layers/StringInternFacts.cs | 4 +- .../Linq/FdbAsyncQueryableFacts.cs | 10 ++--- FoundationDB.Tests/RangeQueryFacts.cs | 14 +++--- FoundationDB.Tests/TransactionFacts.cs | 44 +++++++++---------- 21 files changed, 114 insertions(+), 114 deletions(-) diff --git a/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs b/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs index e2ad404e8..628a89f8c 100644 --- a/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs +++ b/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs @@ -123,8 +123,8 @@ internal FdbDirectoryLayer(IFdbSubspace nodeSubspace, IFdbSubspace contentSubspa this.NodeSubspace = nodeSubspace; // The root node is the one whose contents are the node subspace - this.RootNode = nodeSubspace.Partition.By(nodeSubspace.Key); - this.Allocator = new FdbHighContentionAllocator(this.RootNode.Partition.By(HcaKey)); + this.RootNode = nodeSubspace.Partition.ByKey(nodeSubspace.Key); + this.Allocator = new FdbHighContentionAllocator(this.RootNode.Partition.ByKey(HcaKey)); if (location == null || location.Count == 0) { this.Location = FdbTuple.Empty; @@ -876,7 +876,7 
@@ private async Task NodeContainingKey(IFdbReadOnlyTransaction tr, S private IFdbSubspace NodeWithPrefix(Slice prefix) { if (prefix.IsNullOrEmpty) return null; - return this.NodeSubspace.Partition.By(prefix); + return this.NodeSubspace.Partition.ByKey(prefix); } /// Returns a new Directory Subspace given its node subspace, path and layer id @@ -937,7 +937,7 @@ private IFdbAsyncEnumerable> SubdirNamesAndNo { Contract.Requires(tr != null && node != null); - var sd = node.Partition.By(SUBDIRS); + var sd = node.Partition.ByKey(SUBDIRS); return tr .GetRange(sd.ToRange()) .Select(kvp => new KeyValuePair( diff --git a/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs b/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs index 7813a38aa..79351fade 100644 --- a/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs +++ b/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs @@ -46,8 +46,8 @@ public FdbHighContentionAllocator(IFdbSubspace subspace) if (subspace == null) throw new ArgumentException("subspace"); this.Subspace = subspace; - this.Counters = subspace.Partition.By(COUNTERS); - this.Recent = subspace.Partition.By(RECENT); + this.Counters = subspace.Partition.ByKey(COUNTERS); + this.Recent = subspace.Partition.ByKey(RECENT); } /// Location of the allocator diff --git a/FoundationDB.Client/Subspaces/FdbSubspacePartition.cs b/FoundationDB.Client/Subspaces/FdbSubspacePartition.cs index 458f72c16..16cc324e1 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspacePartition.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspacePartition.cs @@ -109,7 +109,7 @@ public IFdbSubspace this[ITupleFormattable item] /// new FdbSubspace(["Users", ]).Partition("Contacts") == new FdbSubspace(["Users", "Contacts", ]) /// [NotNull] - public IFdbSubspace By(T value) + public IFdbSubspace ByKey(T value) { return this[FdbTuple.Create(value)]; } @@ -125,7 +125,7 @@ public IFdbSubspace By(T value) /// new FdbSubspace(["Users", 
]).Partition("Contacts", "Friends") == new FdbSubspace(["Users", "Contacts", "Friends", ]) /// [NotNull] - public IFdbSubspace By(T1 value1, T2 value2) + public IFdbSubspace ByKey(T1 value1, T2 value2) { return this[FdbTuple.Create(value1, value2)]; } @@ -142,7 +142,7 @@ public IFdbSubspace By(T1 value1, T2 value2) /// new FdbSubspace(["Users", ]).Partition("John Smith", "Contacts", "Friends") == new FdbSubspace(["Users", "John Smith", "Contacts", "Friends", ]) /// [NotNull] - public IFdbSubspace By(T1 value1, T2 value2, T3 value3) + public IFdbSubspace ByKey(T1 value1, T2 value2, T3 value3) { return this[FdbTuple.Create(value1, value2, value3)]; } @@ -161,7 +161,7 @@ public IFdbSubspace By(T1 value1, T2 value2, T3 value3) /// new FdbSubspace(["Users", ]).Partition("John Smith", "Contacts", "Friends", "Messages") == new FdbSubspace(["Users", "John Smith", "Contacts", "Friends", "Messages", ]) /// [NotNull] - public IFdbSubspace By(T1 value1, T2 value2, T3 value3, T4 value4) + public IFdbSubspace ByKey(T1 value1, T2 value2, T3 value3, T4 value4) { return this[FdbTuple.Create(value1, value2, value3, value4)]; } diff --git a/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs b/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs index 713e3cf67..46de00f4c 100644 --- a/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs +++ b/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs @@ -77,9 +77,9 @@ public FdbQueue([NotNull] IFdbSubspace subspace, bool highContention, [NotNull] this.Encoder = encoder; //TODO: rewrite this, using FdbEncoderSubpsace<..> ! 
- this.ConflictedPop = subspace.Partition.By(Slice.FromAscii("pop")); - this.ConflictedItem = subspace.Partition.By(Slice.FromAscii("conflict")); - this.QueueItem = subspace.Partition.By(Slice.FromAscii("item")); + this.ConflictedPop = subspace.Partition.ByKey(Slice.FromAscii("pop")); + this.ConflictedItem = subspace.Partition.ByKey(Slice.FromAscii("conflict")); + this.QueueItem = subspace.Partition.ByKey(Slice.FromAscii("item")); } /// Subspace used as a prefix for all items in this table diff --git a/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs b/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs index 4352dce1f..43999d267 100644 --- a/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs +++ b/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs @@ -78,7 +78,7 @@ public Task SizeAsync([NotNull] IFdbReadOnlyTransaction trans) if (trans == null) throw new ArgumentNullException("trans"); return trans - .GetRange(this.Subspace.Partition.By(MAX_LEVELS - 1).ToRange()) + .GetRange(this.Subspace.Partition.ByKey(MAX_LEVELS - 1).ToRange()) .Select(kv => DecodeCount(kv.Value)) .SumAsync(); } @@ -101,7 +101,7 @@ public async Task InsertAsync([NotNull] IFdbTransaction trans, Slice key) if ((keyHash & ((1 << (level * LEVEL_FAN_POW)) - 1)) != 0) { //Console.WriteLine("> [" + level + "] Incrementing previous key: " + FdbKey.Dump(prevKey)); - trans.AtomicAdd(this.Subspace.Partition.By(level, prevKey), EncodeCount(1)); + trans.AtomicAdd(this.Subspace.Partition.ByKey(level, prevKey), EncodeCount(1)); } else { @@ -141,7 +141,7 @@ public async Task EraseAsync([NotNull] IFdbTransaction trans, Slice key) for (int level = 0; level < MAX_LEVELS; level++) { // This could be optimized with hash - var k = this.Subspace.Partition.By(level, key); + var k = this.Subspace.Partition.ByKey(level, key); var c = await trans.GetAsync(k).ConfigureAwait(false); if (c.HasValue) trans.Clear(k); if (level == 0) continue; @@ -169,7 +169,7 @@ public async Task EraseAsync([NotNull] 
IFdbTransaction trans, Slice key) var rankKey = Slice.Empty; for(int level = MAX_LEVELS - 1; level >= 0; level--) { - var lss = this.Subspace.Partition.By(level); + var lss = this.Subspace.Partition.ByKey(level); long lastCount = 0; var kcs = await trans.GetRange( FdbKeySelector.FirstGreaterOrEqual(lss.Tuples.EncodeKey(rankKey)), @@ -198,7 +198,7 @@ public async Task GetNthAsync([NotNull] IFdbReadOnlyTransaction trans, lo var key = Slice.Empty; for (int level = MAX_LEVELS - 1; level >= 0; level--) { - var lss = this.Subspace.Partition.By(level); + var lss = this.Subspace.Partition.ByKey(level); var kcs = await trans.GetRange(lss.Tuples.EncodeKey(key), lss.ToRange().End).ToListAsync().ConfigureAwait(false); if (kcs.Count == 0) break; diff --git a/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs b/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs index 87155d526..d614d29c6 100644 --- a/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs +++ b/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs @@ -114,12 +114,12 @@ public FdbWorkerPool(IFdbSubspace subspace) this.Subspace = subspace; - this.TaskStore = subspace.Partition.By(Slice.FromChar('T')); - this.IdleRing = subspace.Partition.By(Slice.FromChar('I')); - this.BusyRing = subspace.Partition.By(Slice.FromChar('B')); - this.UnassignedTaskRing = subspace.Partition.By(Slice.FromChar('U')); + this.TaskStore = subspace.Partition.ByKey(Slice.FromChar('T')); + this.IdleRing = subspace.Partition.ByKey(Slice.FromChar('I')); + this.BusyRing = subspace.Partition.ByKey(Slice.FromChar('B')); + this.UnassignedTaskRing = subspace.Partition.ByKey(Slice.FromChar('U')); - this.Counters = new FdbCounterMap(subspace.Partition.By(Slice.FromChar('C'))); + this.Counters = new FdbCounterMap(subspace.Partition.ByKey(Slice.FromChar('C'))); } private async Task> FindRandomItem(IFdbTransaction tr, IFdbSubspace ring) @@ -173,7 +173,7 @@ private void StoreTask(IFdbTransaction tr, Slice taskId, DateTime 
scheduledUtc, { tr.Annotate("Writing task {0}", taskId.ToAsciiOrHexaString()); - var prefix = this.TaskStore.Partition.By(taskId); + var prefix = this.TaskStore.Partition.ByKey(taskId); // store task body and timestamp tr.Set(prefix.Key, taskBody); @@ -319,7 +319,7 @@ await db.ReadWriteAsync( { // get the task body tr.Annotate("Fetching body for task {0}", msg.Id.ToAsciiOrHexaString()); - var prefix = this.TaskStore.Partition.By(msg.Id); + var prefix = this.TaskStore.Partition.ByKey(msg.Id); //TODO: replace this with a get_range ? var data = await tr.GetValuesAsync(new [] { prefix.ToFoundationDbKey(), diff --git a/FoundationDB.Samples/Benchmarks/LeakTest.cs b/FoundationDB.Samples/Benchmarks/LeakTest.cs index 1fdf25caf..0f5f99439 100644 --- a/FoundationDB.Samples/Benchmarks/LeakTest.cs +++ b/FoundationDB.Samples/Benchmarks/LeakTest.cs @@ -62,7 +62,7 @@ public async Task RunWorker(IFdbDatabase db, int id, CancellationToken ct) values[i] = "initial_value_" + rnd.Next(); } - var location = this.Subspace.Partition.By(student); + var location = this.Subspace.Partition.ByKey(student); for (int i = 0; i < 1/*this.N*/ && !ct.IsCancellationRequested; i++) { diff --git a/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs b/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs index cd70c86f3..81c9426ac 100644 --- a/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs +++ b/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs @@ -141,11 +141,11 @@ public async Task RunClear(IFdbDatabase db, CancellationToken ct) public async Task RunStatus(IFdbDatabase db, CancellationToken ct) { - var countersLocation = this.WorkerPool.Subspace.Partition.By(Slice.FromChar('C')); - var idleLocation = this.WorkerPool.Subspace.Partition.By(Slice.FromChar('I')); - var busyLocation = this.WorkerPool.Subspace.Partition.By(Slice.FromChar('B')); - var tasksLocation = this.WorkerPool.Subspace.Partition.By(Slice.FromChar('T')); - var unassignedLocation = 
this.WorkerPool.Subspace.Partition.By(Slice.FromChar('U')); + var countersLocation = this.WorkerPool.Subspace.Partition.ByKey(Slice.FromChar('C')); + var idleLocation = this.WorkerPool.Subspace.Partition.ByKey(Slice.FromChar('I')); + var busyLocation = this.WorkerPool.Subspace.Partition.ByKey(Slice.FromChar('B')); + var tasksLocation = this.WorkerPool.Subspace.Partition.ByKey(Slice.FromChar('T')); + var unassignedLocation = this.WorkerPool.Subspace.Partition.ByKey(Slice.FromChar('U')); using(var tr = db.BeginTransaction(ct)) { diff --git a/FoundationDB.Storage.Memory.Test/Transactions/MemoryTransactionFacts.cs b/FoundationDB.Storage.Memory.Test/Transactions/MemoryTransactionFacts.cs index c15b8d8e6..bf505a989 100644 --- a/FoundationDB.Storage.Memory.Test/Transactions/MemoryTransactionFacts.cs +++ b/FoundationDB.Storage.Memory.Test/Transactions/MemoryTransactionFacts.cs @@ -699,8 +699,8 @@ public async Task Test_Use_Simple_Layer() { var location = db.GlobalSpace; - var map = new FdbMap("Foos", db.GlobalSpace.Partition.By("Foos"), KeyValueEncoders.Values.StringEncoder); - var index = new FdbIndex("Foos.ByColor", db.GlobalSpace.Partition.By("Foos", "Color")); + var map = new FdbMap("Foos", db.GlobalSpace.Partition.ByKey("Foos"), KeyValueEncoders.Values.StringEncoder); + var index = new FdbIndex("Foos.ByColor", db.GlobalSpace.Partition.ByKey("Foos", "Color")); using (var tr = db.BeginTransaction(this.Cancellation)) { diff --git a/FoundationDB.Tests.Sandbox/Program.cs b/FoundationDB.Tests.Sandbox/Program.cs index a46147e0c..7492abfe0 100644 --- a/FoundationDB.Tests.Sandbox/Program.cs +++ b/FoundationDB.Tests.Sandbox/Program.cs @@ -320,7 +320,7 @@ private static async Task BenchInsertSmallKeysAsync(IFdbDatabase db, int N, int var rnd = new Random(); var tmp = new byte[size]; - var subspace = db.Partition.By("Batch"); + var subspace = db.Partition.ByKey("Batch"); var times = new List(); for (int k = 0; k <= 4; k++) @@ -361,7 +361,7 @@ private static async Task 
BenchConcurrentInsert(IFdbDatabase db, int k, int N, i Console.WriteLine("Inserting " + N + " keys in " + k + " batches of " + n + " with " + size + "-bytes values..."); // store every key under ("Batch", i) - var subspace = db.Partition.By("Batch"); + var subspace = db.Partition.ByKey("Batch"); // total estimated size of all transactions long totalPayloadSize = 0; @@ -429,7 +429,7 @@ private static async Task BenchSerialWriteAsync(IFdbDatabase db, int N, Cancella { // read a lot of small keys, one by one - var location = db.Partition.By("hello"); + var location = db.Partition.ByKey("hello"); var sw = Stopwatch.StartNew(); IFdbTransaction trans = null; @@ -464,7 +464,7 @@ private static async Task BenchSerialReadAsync(IFdbDatabase db, int N, Cancellat // read a lot of small keys, one by one - var location = db.Partition.By("hello"); + var location = db.Partition.ByKey("hello"); var sw = Stopwatch.StartNew(); for (int k = 0; k < N; k += 1000) @@ -489,7 +489,7 @@ private static async Task BenchConcurrentReadAsync(IFdbDatabase db, int N, Cance Console.WriteLine("Reading " + N + " keys (concurrent)"); - var location = db.Partition.By("hello"); + var location = db.Partition.ByKey("hello"); var keys = Enumerable.Range(0, N).Select(i => location.Tuples.EncodeKey(i)).ToArray(); @@ -517,7 +517,7 @@ private static async Task BenchClearAsync(IFdbDatabase db, int N, CancellationTo { // clear a lot of small keys, in a single transaction - var location = db.Partition.By(Slice.FromAscii("hello")); + var location = db.Partition.ByKey(Slice.FromAscii("hello")); var sw = Stopwatch.StartNew(); using (var trans = db.BeginTransaction(ct)) @@ -561,7 +561,7 @@ private static async Task BenchUpdateLotsOfKeysAsync(IFdbDatabase db, int N, Can { // change one byte in a large number of keys - var location = db.Partition.By("lists"); + var location = db.Partition.ByKey("lists"); var rnd = new Random(); var keys = Enumerable.Range(0, N).Select(x => location.Tuples.EncodeKey(x)).ToArray(); @@ 
-616,7 +616,7 @@ private static async Task BenchBulkInsertThenBulkReadAsync(IFdbDatabase db, int var timings = instrumented ? new List>() : null; // put test values inside a namespace - var subspace = db.Partition.By("BulkInsert"); + var subspace = db.Partition.ByKey("BulkInsert"); // cleanup everything using (var tr = db.BeginTransaction(ct)) @@ -702,7 +702,7 @@ private static async Task BenchBulkInsertThenBulkReadAsync(IFdbDatabase db, int private static async Task BenchMergeSortAsync(IFdbDatabase db, int N, int K, int B, CancellationToken ct) { // create multiple lists - var location = db.Partition.By("MergeSort"); + var location = db.Partition.ByKey("MergeSort"); await db.ClearRangeAsync(location, ct); var sources = Enumerable.Range(0, K).Select(i => 'A' + i).ToArray(); @@ -714,7 +714,7 @@ private static async Task BenchMergeSortAsync(IFdbDatabase db, int N, int K, int { using (var tr = db.BeginTransaction(ct)) { - var list = location.Partition.By(source); + var list = location.Partition.ByKey(source); for (int i = 0; i < N; i++) { tr.Set(list.Tuples.EncodeKey(rnd.Next()), Slice.FromInt32(i)); diff --git a/FoundationDB.Tests/DatabaseFacts.cs b/FoundationDB.Tests/DatabaseFacts.cs index 9427762dc..63f05ba8d 100644 --- a/FoundationDB.Tests/DatabaseFacts.cs +++ b/FoundationDB.Tests/DatabaseFacts.cs @@ -254,7 +254,7 @@ public async Task Test_Can_Open_Database_With_Non_Empty_GlobalSpace() Assert.That(db.GlobalSpace, Is.Not.Null); Assert.That(db.GlobalSpace.Key.ToString(), Is.EqualTo("<02>test<00>")); - var subspace = db.Partition.By("hello"); + var subspace = db.Partition.ByKey("hello"); Assert.That(subspace.Key.ToString(), Is.EqualTo("<02>test<00><02>hello<00>")); // keys inside the global space are valid @@ -271,7 +271,7 @@ public async Task Test_Can_Open_Database_With_Non_Empty_GlobalSpace() Assert.That(db.GlobalSpace, Is.Not.Null); Assert.That(db.GlobalSpace.Key.ToString(), Is.EqualTo("*<00>Z")); - var subspace = db.Partition.By("hello"); + var subspace = 
db.Partition.ByKey("hello"); Assert.That(subspace.Key.ToString(), Is.EqualTo("*<00>Z<02>hello<00>")); // keys inside the global space are valid diff --git a/FoundationDB.Tests/Layers/BlobFacts.cs b/FoundationDB.Tests/Layers/BlobFacts.cs index efa64781a..cec7fcba9 100644 --- a/FoundationDB.Tests/Layers/BlobFacts.cs +++ b/FoundationDB.Tests/Layers/BlobFacts.cs @@ -49,7 +49,7 @@ public async Task Test_FdbBlob_NotFound_Blob_Is_Empty() // clear previous values await DeleteSubspace(db, location); - var blob = new FdbBlob(location.Partition.By("Empty")); + var blob = new FdbBlob(location.Partition.ByKey("Empty")); long? size; @@ -75,7 +75,7 @@ public async Task Test_FdbBlob_Can_AppendToBlob() // clear previous values await DeleteSubspace(db, location); - var blob = new FdbBlob(location.Partition.By("BobTheBlob")); + var blob = new FdbBlob(location.Partition.ByKey("BobTheBlob")); using (var tr = db.BeginTransaction(this.Cancellation)) { @@ -112,7 +112,7 @@ public async Task Test_FdbBlob_CanAppendLargeChunks() // clear previous values await DeleteSubspace(db, location); - var blob = new FdbBlob(location.Partition.By("BigBlob")); + var blob = new FdbBlob(location.Partition.ByKey("BigBlob")); var data = new byte[100 * 1000]; for (int i = 0; i < data.Length; i++) data[i] = (byte)i; diff --git a/FoundationDB.Tests/Layers/DirectoryFacts.cs b/FoundationDB.Tests/Layers/DirectoryFacts.cs index 1a0090c8e..7f5ef5f8f 100644 --- a/FoundationDB.Tests/Layers/DirectoryFacts.cs +++ b/FoundationDB.Tests/Layers/DirectoryFacts.cs @@ -51,7 +51,7 @@ public async Task Test_Allocator() using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition.By(Slice.FromString("hca")); + var location = db.Partition.ByKey(Slice.FromString("hca")); await db.ClearRangeAsync(location, this.Cancellation); #if ENABLE_LOGGING @@ -110,7 +110,7 @@ public async Task Test_CreateOrOpen_Simple() using (var db = await OpenTestDatabaseAsync()) { // we will put everything under a custom namespace - var 
location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); #if ENABLE_LOGGING @@ -175,7 +175,7 @@ public async Task Test_CreateOrOpen_With_Layer() using (var db = await OpenTestDatabaseAsync()) { // we will put everything under a custom namespace - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); #if ENABLE_LOGGING @@ -248,7 +248,7 @@ public async Task Test_CreateOrOpen_SubFolder() { // we will put everything under a custom namespace - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); #if ENABLE_LOGGING @@ -296,7 +296,7 @@ public async Task Test_List_SubFolders() using (var db = await OpenTestDatabaseAsync()) { // we will put everything under a custom namespace - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -349,7 +349,7 @@ public async Task Test_List_Folders_Should_Be_Sorted_By_Name() using (var db = await OpenTestDatabaseAsync()) { // we will put everything under a custom namespace - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -388,7 +388,7 @@ public async Task Test_Move_Folder() using (var db = await OpenTestDatabaseAsync()) { // we will put everything under a custom namespace - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); #if ENABLE_LOGGING @@ -443,7 +443,7 @@ public async Task Test_Remove_Folder() { using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await 
db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -503,7 +503,7 @@ public async Task Test_Can_Change_Layer_Of_Existing_Directory() { using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -552,7 +552,7 @@ public async Task Test_Directory_Partitions() { using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -599,7 +599,7 @@ public async Task Test_Directory_Cannot_Move_To_Another_Partition() { using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -627,7 +627,7 @@ public async Task Test_Directory_Cannot_Move_To_A_Sub_Partition() { using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -683,7 +683,7 @@ public async Task Test_Renaming_Partition_Uses_Parent_DirectoryLayer() using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -731,7 +731,7 @@ public async Task Test_Removing_Partition_Uses_Parent_DirectoryLayer() using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); 
var directory = FdbDirectoryLayer.Create(location); @@ -772,7 +772,7 @@ public async Task Test_Directory_Methods_Should_Fail_With_Empty_Paths() { using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -819,7 +819,7 @@ public async Task Test_Directory_Partitions_Should_Disallow_Creation_Of_Direct_K using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition.By("DL"); + var location = db.Partition.ByKey("DL"); await db.ClearRangeAsync(location, this.Cancellation); var directory = FdbDirectoryLayer.Create(location); @@ -868,10 +868,10 @@ public async Task Test_Directory_Partitions_Should_Disallow_Creation_Of_Direct_K shouldFail(() => partition.Keys.Extract(barKey, barKey + FdbKey.MinValue)); // Partition - shouldFail(() => partition.Partition.By(123)); - shouldFail(() => partition.Partition.By(123, "hello")); - shouldFail(() => partition.Partition.By(123, "hello", false)); - shouldFail(() => partition.Partition.By(123, "hello", false, "world")); + shouldFail(() => partition.Partition.ByKey(123)); + shouldFail(() => partition.Partition.ByKey(123, "hello")); + shouldFail(() => partition.Partition.ByKey(123, "hello", false)); + shouldFail(() => partition.Partition.ByKey(123, "hello", false, "world")); // Keys diff --git a/FoundationDB.Tests/Layers/IndexingFacts.cs b/FoundationDB.Tests/Layers/IndexingFacts.cs index 6f879276e..9522cecf7 100644 --- a/FoundationDB.Tests/Layers/IndexingFacts.cs +++ b/FoundationDB.Tests/Layers/IndexingFacts.cs @@ -49,13 +49,13 @@ public async Task Task_Can_Add_Update_Remove_From_Index() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("Indexing"); + var location = db.Partition.ByKey("Indexing"); // clear previous values await DeleteSubspace(db, location); - var subspace = 
location.Partition.By("FoosByColor"); + var subspace = location.Partition.ByKey("FoosByColor"); var index = new FdbIndex("Foos.ByColor", subspace); // add items to the index @@ -148,9 +148,9 @@ public async Task Test_Can_Combine_Indexes() new Character { Id = 6, Name = "Catwoman", Brand="DC", IsVilain = default(bool?) }, }; - var indexBrand = new FdbIndex("Heroes.ByBrand", location.Partition.By("CharactersByBrand")); - var indexSuperHero = new FdbIndex("Heroes.BySuper", location.Partition.By("SuperHeros")); - var indexAlignment = new FdbIndex("Heros.ByAlignment", location.Partition.By("FriendsOrFoe")); + var indexBrand = new FdbIndex("Heroes.ByBrand", location.Partition.ByKey("CharactersByBrand")); + var indexSuperHero = new FdbIndex("Heroes.BySuper", location.Partition.ByKey("SuperHeros")); + var indexAlignment = new FdbIndex("Heros.ByAlignment", location.Partition.ByKey("FriendsOrFoe")); // index everything await db.WriteAsync((tr) => diff --git a/FoundationDB.Tests/Layers/MapFacts.cs b/FoundationDB.Tests/Layers/MapFacts.cs index 5e7210dd9..bb32e35c6 100644 --- a/FoundationDB.Tests/Layers/MapFacts.cs +++ b/FoundationDB.Tests/Layers/MapFacts.cs @@ -49,7 +49,7 @@ public async Task Test_FdbMap_Read_Write_Delete() { var location = await GetCleanDirectory(db, "Collections", "Maps"); - var map = new FdbMap("Foos", location.Partition.By("Foos"), KeyValueEncoders.Values.StringEncoder); + var map = new FdbMap("Foos", location.Partition.ByKey("Foos"), KeyValueEncoders.Values.StringEncoder); string secret = "world:" + Guid.NewGuid().ToString(); @@ -128,7 +128,7 @@ public async Task Test_FdbMap_List() { var location = await GetCleanDirectory(db, "Collections", "Maps"); - var map = new FdbMap("Foos", location.Partition.By("Foos"), KeyValueEncoders.Values.StringEncoder); + var map = new FdbMap("Foos", location.Partition.ByKey("Foos"), KeyValueEncoders.Values.StringEncoder); // write a bunch of keys await db.WriteAsync((tr) => @@ -188,7 +188,7 @@ public async Task 
Test_FdbMap_With_Custom_Key_Encoder() { var location = await GetCleanDirectory(db, "Collections", "Maps"); - var map = new FdbMap("Firewall", location.Partition.By("Hosts"), keyEncoder, KeyValueEncoders.Values.StringEncoder); + var map = new FdbMap("Firewall", location.Partition.ByKey("Hosts"), keyEncoder, KeyValueEncoders.Values.StringEncoder); // import all the rules await db.WriteAsync((tr) => diff --git a/FoundationDB.Tests/Layers/MultiMapFacts.cs b/FoundationDB.Tests/Layers/MultiMapFacts.cs index be3019db0..0f9bbf33c 100644 --- a/FoundationDB.Tests/Layers/MultiMapFacts.cs +++ b/FoundationDB.Tests/Layers/MultiMapFacts.cs @@ -50,7 +50,7 @@ public async Task Test_FdbMultiMap_Read_Write_Delete() var location = await GetCleanDirectory(db, "Collections", "MultiMaps"); - var map = new FdbMultiMap(location.Partition.By("Foos"), allowNegativeValues: false); + var map = new FdbMultiMap(location.Partition.ByKey("Foos"), allowNegativeValues: false); // read non existing value using (var tr = db.BeginTransaction(this.Cancellation)) diff --git a/FoundationDB.Tests/Layers/RankedSetFacts.cs b/FoundationDB.Tests/Layers/RankedSetFacts.cs index 40e134791..45283d93e 100644 --- a/FoundationDB.Tests/Layers/RankedSetFacts.cs +++ b/FoundationDB.Tests/Layers/RankedSetFacts.cs @@ -75,7 +75,7 @@ private static async Task PrintRankedSet(FdbRankedSet rs, IFdbReadOnlyTransactio for (int l = 0; l < 6; l++) { sb.AppendFormat("Level {0}:\r\n", l); - await tr.GetRange(rs.Subspace.Partition.By(l).ToRange()).ForEachAsync((kvp) => + await tr.GetRange(rs.Subspace.Partition.ByKey(l).ToRange()).ForEachAsync((kvp) => { sb.AppendFormat("\t{0} = {1}\r\n", rs.Subspace.Tuples.Unpack(kvp.Key), kvp.Value.ToInt64()); }); diff --git a/FoundationDB.Tests/Layers/StringInternFacts.cs b/FoundationDB.Tests/Layers/StringInternFacts.cs index 1c3ca0023..c58307647 100644 --- a/FoundationDB.Tests/Layers/StringInternFacts.cs +++ b/FoundationDB.Tests/Layers/StringInternFacts.cs @@ -43,8 +43,8 @@ public async Task 
Test_StringIntern_Example() { using (var db = await OpenTestPartitionAsync()) { - var stringSpace = db.Partition.By("Strings"); - var dataSpace = db.Partition.By("Data"); + var stringSpace = db.Partition.ByKey("Strings"); + var dataSpace = db.Partition.ByKey("Data"); // clear all previous data await DeleteSubspace(db, stringSpace); diff --git a/FoundationDB.Tests/Linq/FdbAsyncQueryableFacts.cs b/FoundationDB.Tests/Linq/FdbAsyncQueryableFacts.cs index 0ec5d96a3..5058530fc 100644 --- a/FoundationDB.Tests/Linq/FdbAsyncQueryableFacts.cs +++ b/FoundationDB.Tests/Linq/FdbAsyncQueryableFacts.cs @@ -52,7 +52,7 @@ public async Task Test_AsyncQueryable_Basics() using(var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("Linq"); + var location = db.Partition.ByKey("Linq"); await db.ClearRangeAsync(location, this.Cancellation); @@ -91,11 +91,11 @@ public async Task Test_Query_Index_Single() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("Linq"); + var location = db.Partition.ByKey("Linq"); await db.ClearRangeAsync(location, this.Cancellation); - var index = new FdbIndex("Foos.ByColor", location.Partition.By("Foos", "ByColor")); + var index = new FdbIndex("Foos.ByColor", location.Partition.ByKey("Foos", "ByColor")); await db.WriteAsync((tr) => { @@ -125,11 +125,11 @@ public async Task Test_Query_Index_Range() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("Linq"); + var location = db.Partition.ByKey("Linq"); await db.ClearRangeAsync(location, this.Cancellation); - var index = new FdbIndex("Bars.ByScore", location.Partition.By("Foos", "ByScore")); + var index = new FdbIndex("Bars.ByScore", location.Partition.ByKey("Foos", "ByScore")); await db.WriteAsync((tr) => { diff --git a/FoundationDB.Tests/RangeQueryFacts.cs b/FoundationDB.Tests/RangeQueryFacts.cs index c6f036040..fcce3d1b0 100644 --- a/FoundationDB.Tests/RangeQueryFacts.cs +++ b/FoundationDB.Tests/RangeQueryFacts.cs @@ -128,9 
+128,9 @@ public async Task Test_Can_Get_Range_First_Single_And_Last() // put test values in a namespace var location = await GetCleanDirectory(db, "Queries", "Range"); - var a = location.Partition.By("a"); - var b = location.Partition.By("b"); - var c = location.Partition.By("c"); + var a = location.Partition.ByKey("a"); + var b = location.Partition.ByKey("b"); + var c = location.Partition.ByKey("c"); // insert a bunch of keys under 'a', only one under 'b', and nothing under 'c' await db.WriteAsync((tr) => @@ -286,7 +286,7 @@ public async Task Test_Can_Get_Range_With_Limit() // put test values in a namespace var location = await GetCleanDirectory(db, "Queries", "Range"); - var a = location.Partition.By("a"); + var a = location.Partition.ByKey("a"); // insert a bunch of keys under 'a' await db.WriteAsync((tr) => @@ -488,7 +488,7 @@ public async Task Test_Can_MergeSort() await db.ClearRangeAsync(location, this.Cancellation); // create K lists - var lists = Enumerable.Range(0, K).Select(i => location.Partition.By(i)).ToArray(); + var lists = Enumerable.Range(0, K).Select(i => location.Partition.ByKey(i)).ToArray(); // lists[0] contains all multiples of K ([0, 0], [K, 1], [2K, 2], ...) // lists[1] contains all multiples of K, offset by 1 ([1, 0], [K+1, 1], [2K+1, 2], ...) 
@@ -548,7 +548,7 @@ public async Task Test_Range_Intersect() var location = await GetCleanDirectory(db, "Queries", "Intersect"); // create K lists - var lists = Enumerable.Range(0, K).Select(i => location.Partition.By(i)).ToArray(); + var lists = Enumerable.Range(0, K).Select(i => location.Partition.ByKey(i)).ToArray(); // lists[0] contains all multiples of 1 // lists[1] contains all multiples of 2 @@ -620,7 +620,7 @@ public async Task Test_Range_Except() var location = await GetCleanDirectory(db, "Queries", "Except"); // create K lists - var lists = Enumerable.Range(0, K).Select(i => location.Partition.By(i)).ToArray(); + var lists = Enumerable.Range(0, K).Select(i => location.Partition.ByKey(i)).ToArray(); // lists[0] contains all multiples of 1 // lists[1] contains all multiples of 2 diff --git a/FoundationDB.Tests/TransactionFacts.cs b/FoundationDB.Tests/TransactionFacts.cs index c367d9c19..5f9fb199f 100644 --- a/FoundationDB.Tests/TransactionFacts.cs +++ b/FoundationDB.Tests/TransactionFacts.cs @@ -109,7 +109,7 @@ public async Task Test_Creating_A_ReadOnly_Transaction_Throws_When_Writing() // any attempt to recast into a writeable transaction should fail! 
var tr2 = (IFdbTransaction)tr; Assert.That(tr2.IsReadOnly, Is.True, "Transaction should be marked as readonly"); - var location = db.Partition.By("ReadOnly"); + var location = db.Partition.ByKey("ReadOnly"); Assert.That(() => tr2.Set(location.Tuples.EncodeKey("Hello"), Slice.Empty), Throws.InvalidOperationException); Assert.That(() => tr2.Clear(location.Tuples.EncodeKey("Hello")), Throws.InvalidOperationException); Assert.That(() => tr2.ClearRange(location.Tuples.EncodeKey("ABC"), location.Tuples.EncodeKey("DEF")), Throws.InvalidOperationException); @@ -223,7 +223,7 @@ public async Task Test_Cancelling_Transaction_Before_Commit_Should_Throw_Immedia using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("test"); + var location = db.Partition.ByKey("test"); using (var tr = db.BeginTransaction(this.Cancellation)) { @@ -248,7 +248,7 @@ public async Task Test_Cancelling_Transaction_During_Commit_Should_Abort_Task() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); @@ -288,7 +288,7 @@ public async Task Test_Cancelling_Token_During_Commit_Should_Abort_Task() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); @@ -345,7 +345,7 @@ public async Task Test_Write_And_Read_Simple_Keys() long writeVersion; long readVersion; - var location = db.Partition.By("test"); + var location = db.Partition.ByKey("test"); // write a bunch of keys using (var tr = db.BeginTransaction(this.Cancellation)) @@ -390,7 +390,7 @@ public async Task Test_Can_Resolve_Key_Selector() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("keys"); + var location = db.Partition.ByKey("keys"); await db.ClearRangeAsync(location, this.Cancellation); var minKey = location.Key + 
FdbKey.MinValue; @@ -539,7 +539,7 @@ public async Task Test_Get_Multiple_Values() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("Batch"); + var location = db.Partition.ByKey("Batch"); await db.ClearRangeAsync(location, this.Cancellation); int[] ids = new int[] { 8, 7, 2, 9, 5, 0, 3, 4, 6, 1 }; @@ -582,7 +582,7 @@ public async Task Test_Get_Multiple_Keys() using(var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("keys"); + var location = db.Partition.ByKey("keys"); await db.ClearRangeAsync(location, this.Cancellation); var minKey = location.Key + FdbKey.MinValue; @@ -722,7 +722,7 @@ public async Task Test_Can_Snapshot_Read() using(var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); @@ -854,7 +854,7 @@ public async Task Test_Regular_Read_With_Concurrent_Change_Should_Conflict() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); @@ -893,7 +893,7 @@ public async Task Test_Snapshot_Read_With_Concurrent_Change_Should_Not_Conflict( using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); await db.WriteAsync((tr) => @@ -925,7 +925,7 @@ public async Task Test_GetRange_With_Concurrent_Change_Should_Conflict() using(var db = await OpenTestPartitionAsync()) { - var loc = db.Partition.By("test"); + var loc = db.Partition.ByKey("test"); await db.WriteAsync((tr) => { @@ -999,7 +999,7 @@ public async Task Test_GetKey_With_Concurrent_Change_Should_Conflict() using (var db = await OpenTestPartitionAsync()) { - var loc = db.Partition.By("test"); + var loc = db.Partition.ByKey("test"); await db.WriteAsync((tr) => { @@ 
-1162,7 +1162,7 @@ public async Task Test_Read_Isolation() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("test"); + var location = db.Partition.ByKey("test"); var key = location.Tuples.EncodeKey("A"); await db.ClearRangeAsync(location, this.Cancellation); @@ -1228,7 +1228,7 @@ public async Task Test_Read_Isolation_From_Writes() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); var a = location.Tuples.EncodeKey("A"); @@ -1289,11 +1289,11 @@ public async Task Test_ReadYourWritesDisable_Isolation() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("test"); + var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); var a = location.Tuples.EncodeKey("A"); - var b = location.Partition.By("B"); + var b = location.Partition.ByKey("B"); #region Default behaviour... 
@@ -1364,7 +1364,7 @@ public async Task Test_Can_Set_Read_Version() using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("test"); + var location = db.Partition.ByKey("test"); long commitedVersion; @@ -1571,7 +1571,7 @@ public async Task Test_Can_Add_Read_Conflict_Range() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("conflict"); + var location = db.Partition.ByKey("conflict"); await db.ClearRangeAsync(location, this.Cancellation); @@ -1610,7 +1610,7 @@ public async Task Test_Can_Add_Write_Conflict_Range() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("conflict"); + var location = db.Partition.ByKey("conflict"); await db.ClearRangeAsync(location, this.Cancellation); @@ -1652,7 +1652,7 @@ public async Task Test_Can_Setup_And_Cancel_Watches() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("test", "bigbrother"); + var location = db.Partition.ByKey("test", "bigbrother"); await db.ClearRangeAsync(location, this.Cancellation); @@ -1711,7 +1711,7 @@ public async Task Test_Can_Get_Addresses_For_Key() { using (var db = await OpenTestPartitionAsync()) { - var location = db.Partition.By("location_api"); + var location = db.Partition.ByKey("location_api"); await db.ClearRangeAsync(location, this.Cancellation); From b6d35931a467e5c1d3b2a3a831beb67586311ca3 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Mon, 17 Nov 2014 22:04:10 +0100 Subject: [PATCH 07/63] Tuple refactoring: object[] considered harmful - Removed subspace.Tuples.EncodeKey(object[]) because it encourages bad practice - To make up for it, added overloads of EncodeKey, Append that take up to 8 generic types - Also added a struct tuple with 5 items, because composite keys with 5 items are not infrequent. 
- There is no DecodeKey with 6 or more items because there is no generic typed tuple with that many items --- .../FoundationDB.Client.csproj | 1 + .../Layers/Tuples/FdbListTuple.cs | 3 + FoundationDB.Client/Layers/Tuples/FdbTuple.cs | 126 ++++++++- .../Layers/Tuples/FdbTuple`5.cs | 261 ++++++++++++++++++ .../Subspaces/FdbSubspaceTuples.cs | 161 ++++++++--- FoundationDB.Tests/Layers/TupleFacts.cs | 58 ++++ 6 files changed, 564 insertions(+), 46 deletions(-) create mode 100644 FoundationDB.Client/Layers/Tuples/FdbTuple`5.cs diff --git a/FoundationDB.Client/FoundationDB.Client.csproj b/FoundationDB.Client/FoundationDB.Client.csproj index 288366797..52445db96 100644 --- a/FoundationDB.Client/FoundationDB.Client.csproj +++ b/FoundationDB.Client/FoundationDB.Client.csproj @@ -73,6 +73,7 @@ + diff --git a/FoundationDB.Client/Layers/Tuples/FdbListTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbListTuple.cs index a175a41c5..c649d54a3 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbListTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbListTuple.cs @@ -39,6 +39,9 @@ namespace FoundationDB.Layers.Tuples /// Tuple that can hold any number of untyped items public sealed class FdbListTuple : IFdbTuple { + // We could use a FdbListTuple for tuples where all items are of type T, and FdbListTuple could derive from FdbListTuple. + // => this could speed up a bit the use case of FdbTuple.FromArray or FdbTuple.FromSequence + /// List of the items in the tuple. /// It is supposed to be immutable! 
private readonly object[] m_items; diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs index 2ea33bbc9..972fbeed1 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs @@ -185,6 +185,13 @@ public static FdbTuple Create(T1 item1, T2 item2 return new FdbTuple(item1, item2, item3, item4); } + /// Create a new 5-tuple, holding five items + [DebuggerStepThrough] + public static FdbTuple Create(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5) + { + return new FdbTuple(item1, item2, item3, item4, item5); + } + /// Create a new N-tuple, from N items /// Items to wrap in a tuple /// If you already have an array of items, you should call instead. Mutating the array, would also mutate the tuple! @@ -540,17 +547,57 @@ public static Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 i return writer.Output.ToSlice(); } - /// Pack a N-tuple directory into a slice - public static Slice EncodeKey([NotNull] params object[] items) + /// Pack a 5-tuple directly into a slice + public static Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5) { - if (items == null) throw new ArgumentNullException("items"); - if (items.Length == 0) return Slice.Empty; + var writer = new TupleWriter(); + FdbTuplePacker.SerializeTo(ref writer, item1); + FdbTuplePacker.SerializeTo(ref writer, item2); + FdbTuplePacker.SerializeTo(ref writer, item3); + FdbTuplePacker.SerializeTo(ref writer, item4); + FdbTuplePacker.SerializeTo(ref writer, item5); + return writer.Output.ToSlice(); + } + /// Pack a 6-tuple directly into a slice + public static Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6) + { var writer = new TupleWriter(); - foreach(var item in items) - { - FdbTuplePackers.SerializeObjectTo(ref writer, item); - } + FdbTuplePacker.SerializeTo(ref writer, item1); + FdbTuplePacker.SerializeTo(ref writer, item2); + FdbTuplePacker.SerializeTo(ref writer, item3); 
+ FdbTuplePacker.SerializeTo(ref writer, item4); + FdbTuplePacker.SerializeTo(ref writer, item5); + FdbTuplePacker.SerializeTo(ref writer, item6); + return writer.Output.ToSlice(); + } + + /// Pack a 6-tuple directly into a slice + public static Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7) + { + var writer = new TupleWriter(); + FdbTuplePacker.SerializeTo(ref writer, item1); + FdbTuplePacker.SerializeTo(ref writer, item2); + FdbTuplePacker.SerializeTo(ref writer, item3); + FdbTuplePacker.SerializeTo(ref writer, item4); + FdbTuplePacker.SerializeTo(ref writer, item5); + FdbTuplePacker.SerializeTo(ref writer, item6); + FdbTuplePacker.SerializeTo(ref writer, item7); + return writer.Output.ToSlice(); + } + + /// Pack a 6-tuple directly into a slice + public static Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7, T8 item8) + { + var writer = new TupleWriter(); + FdbTuplePacker.SerializeTo(ref writer, item1); + FdbTuplePacker.SerializeTo(ref writer, item2); + FdbTuplePacker.SerializeTo(ref writer, item3); + FdbTuplePacker.SerializeTo(ref writer, item4); + FdbTuplePacker.SerializeTo(ref writer, item5); + FdbTuplePacker.SerializeTo(ref writer, item6); + FdbTuplePacker.SerializeTo(ref writer, item7); + FdbTuplePacker.SerializeTo(ref writer, item8); return writer.Output.ToSlice(); } @@ -894,17 +941,61 @@ public static Slice EncodePrefixedKey(Slice prefix, T1 value1, T return writer.Output.ToSlice(); } - /// Efficiently concatenate a prefix with the packed representation of a 4-tuple - public static Slice EncodePrefixedKey(Slice prefix, [NotNull] params object[] values) + /// Efficiently concatenate a prefix with the packed representation of a 5-tuple + public static Slice EncodePrefixedKey(Slice prefix, T1 value1, T2 value2, T3 value3, T4 value4, T5 value5) { - if (values == null) throw new ArgumentNullException("values"); + var writer = new TupleWriter(); + writer.Output.WriteBytes(prefix); + 
FdbTuplePacker.Encoder(ref writer, value1); + FdbTuplePacker.Encoder(ref writer, value2); + FdbTuplePacker.Encoder(ref writer, value3); + FdbTuplePacker.Encoder(ref writer, value4); + FdbTuplePacker.Encoder(ref writer, value5); + return writer.Output.ToSlice(); + } + /// Efficiently concatenate a prefix with the packed representation of a 6-tuple + public static Slice EncodePrefixedKey(Slice prefix, T1 value1, T2 value2, T3 value3, T4 value4, T5 value5, T6 value6) + { var writer = new TupleWriter(); writer.Output.WriteBytes(prefix); - foreach(var value in values) - { - FdbTuplePackers.SerializeObjectTo(ref writer, value); - } + FdbTuplePacker.Encoder(ref writer, value1); + FdbTuplePacker.Encoder(ref writer, value2); + FdbTuplePacker.Encoder(ref writer, value3); + FdbTuplePacker.Encoder(ref writer, value4); + FdbTuplePacker.Encoder(ref writer, value5); + FdbTuplePacker.Encoder(ref writer, value6); + return writer.Output.ToSlice(); + } + + /// Efficiently concatenate a prefix with the packed representation of a 7-tuple + public static Slice EncodePrefixedKey(Slice prefix, T1 value1, T2 value2, T3 value3, T4 value4, T5 value5, T6 value6, T7 value7) + { + var writer = new TupleWriter(); + writer.Output.WriteBytes(prefix); + FdbTuplePacker.Encoder(ref writer, value1); + FdbTuplePacker.Encoder(ref writer, value2); + FdbTuplePacker.Encoder(ref writer, value3); + FdbTuplePacker.Encoder(ref writer, value4); + FdbTuplePacker.Encoder(ref writer, value5); + FdbTuplePacker.Encoder(ref writer, value6); + FdbTuplePacker.Encoder(ref writer, value7); + return writer.Output.ToSlice(); + } + + /// Efficiently concatenate a prefix with the packed representation of a 8-tuple + public static Slice EncodePrefixedKey(Slice prefix, T1 value1, T2 value2, T3 value3, T4 value4, T5 value5, T6 value6, T7 value7, T8 value8) + { + var writer = new TupleWriter(); + writer.Output.WriteBytes(prefix); + FdbTuplePacker.Encoder(ref writer, value1); + FdbTuplePacker.Encoder(ref writer, value2); + 
FdbTuplePacker.Encoder(ref writer, value3); + FdbTuplePacker.Encoder(ref writer, value4); + FdbTuplePacker.Encoder(ref writer, value5); + FdbTuplePacker.Encoder(ref writer, value6); + FdbTuplePacker.Encoder(ref writer, value7); + FdbTuplePacker.Encoder(ref writer, value8); return writer.Output.ToSlice(); } @@ -1216,6 +1307,11 @@ internal static int CombineHashCodes(int h1, int h2, int h3, int h4) return CombineHashCodes(CombineHashCodes(h1, h2), CombineHashCodes(h3, h4)); } + internal static int CombineHashCodes(int h1, int h2, int h3, int h4, int h5) + { + return CombineHashCodes(CombineHashCodes(h1, h2, h3), CombineHashCodes(h4, h5)); + } + internal static bool Equals(IFdbTuple left, object other, [NotNull] IEqualityComparer comparer) { return object.ReferenceEquals(left, null) ? other == null : FdbTuple.Equals(left, other as IFdbTuple, comparer); diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`5.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`5.cs new file mode 100644 index 000000000..f8d72e235 --- /dev/null +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`5.cs @@ -0,0 +1,261 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +namespace FoundationDB.Layers.Tuples +{ + using FoundationDB.Client; + using FoundationDB.Client.Converters; + using FoundationDB.Client.Utils; + using JetBrains.Annotations; + using System; + using System.Collections; + using System.Collections.Generic; + using System.ComponentModel; + using System.Diagnostics; + using System.Text; + + /// Tuple that can hold four items + /// Type of the 1st item + /// Type of the 2nd item + /// Type of the 3rd item + /// Type of the 4th item + /// Type of the 5th item + [ImmutableObject(true), DebuggerDisplay("{ToString()}")] + public struct FdbTuple : IFdbTuple + { + // This is mostly used by code that create a lot of temporary quartets, to reduce the pressure on the Garbage Collector by allocating them on the stack. 
+ // Please note that if you return an FdbTuple as an IFdbTuple, it will be boxed by the CLR and all memory gains will be lost + + /// First element of the tuple + public readonly T1 Item1; + /// Second element of the tuple + public readonly T2 Item2; + /// Third element of the tuple + public readonly T3 Item3; + /// Fourth element of the tuple + public readonly T4 Item4; + /// Fifth and last element of the tuple + public readonly T5 Item5; + + /// Create a tuple containing for items + [DebuggerStepThrough] + public FdbTuple(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5) + { + this.Item1 = item1; + this.Item2 = item2; + this.Item3 = item3; + this.Item4 = item4; + this.Item5 = item5; + } + + /// Number of items in this tuple + public int Count { get { return 5; } } + + /// Return the Nth item in this tuple + public object this[int index] + { + get + { + switch (index) + { + case 0: case -5: return this.Item1; + case 1: case -4: return this.Item2; + case 2: case -3: return this.Item3; + case 3: case -2: return this.Item4; + case 4: case -1: return this.Item5; + default: FdbTuple.FailIndexOutOfRange(index, 5); return null; + } + } + } + + public IFdbTuple this[int? fromIncluded, int? toExcluded] + { + get { return FdbTuple.Splice(this, fromIncluded, toExcluded); } + } + + /// Return the typed value of an item of the tuple, given its position + /// Expected type of the item + /// Position of the item (if negative, means relative from the end) + /// Value of the item at position , adapted into type . 
+ public R Get(int index) + { + switch(index) + { + case 0: case -5: return FdbConverters.Convert(this.Item1); + case 1: case -4: return FdbConverters.Convert(this.Item2); + case 2: case -3: return FdbConverters.Convert(this.Item3); + case 3: case -2: return FdbConverters.Convert(this.Item4); + case 4: case -1: return FdbConverters.Convert(this.Item5); + default: FdbTuple.FailIndexOutOfRange(index, 5); return default(R); + } + } + + public void PackTo(ref TupleWriter writer) + { + FdbTuplePacker.Encoder(ref writer, this.Item1); + FdbTuplePacker.Encoder(ref writer, this.Item2); + FdbTuplePacker.Encoder(ref writer, this.Item3); + FdbTuplePacker.Encoder(ref writer, this.Item4); + FdbTuplePacker.Encoder(ref writer, this.Item5); + } + + IFdbTuple IFdbTuple.Append(T6 value) + { + // the caller doesn't care about the return type, so just box everything into a list tuple + return new FdbListTuple(new object[6] { this.Item1, this.Item2, this.Item3, this.Item4, this.Item5, value }, 0, 6); + } + + /// Appends a single new item at the end of the current tuple. + /// Value that will be added as an embedded item + /// New tuple with one extra item + /// If is a tuple, and you want to append the *items* of this tuple, and not the tuple itself, please call ! + [NotNull] + public FdbLinkedTuple Append(T6 value) + { + // the caller probably cares about the return type, since it is using a struct, but whatever tuple type we use will end up boxing this tuple on the heap, and we will loose type information. + // but, by returning a FdbLinkedTuple, the tuple will still remember the exact type, and efficiently serializer/convert the values (without having to guess the type) + return new FdbLinkedTuple(this, value); + } + + /// Appends the items of a tuple at the end of the current tuple. 
+ /// Tuple whose items are to be appended at the end + /// New tuple composed of the current tuple's items, followed by 's items + public IFdbTuple Concat(IFdbTuple tuple) + { + return FdbTuple.Concat(this, tuple); + } + + /// Copy all the items of this tuple into an array at the specified offset + public void CopyTo(object[] array, int offset) + { + array[offset] = this.Item1; + array[offset + 1] = this.Item2; + array[offset + 2] = this.Item3; + array[offset + 3] = this.Item4; + array[offset + 4] = this.Item5; + } + + public IEnumerator GetEnumerator() + { + yield return this.Item1; + yield return this.Item2; + yield return this.Item3; + yield return this.Item4; + yield return this.Item5; + } + + System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator() + { + return this.GetEnumerator(); + } + + public Slice ToSlice() + { + return FdbTuple.EncodeKey(this.Item1, this.Item2, this.Item3, this.Item4, this.Item5); + } + + Slice IFdbKey.ToFoundationDbKey() + { + return this.ToSlice(); + } + + public override string ToString() + { + return new StringBuilder(48).Append('(') + .Append(FdbTuple.Stringify(this.Item1)).Append(", ") + .Append(FdbTuple.Stringify(this.Item2)).Append(", ") + .Append(FdbTuple.Stringify(this.Item3)).Append(", ") + .Append(FdbTuple.Stringify(this.Item4)).Append(", ") + .Append(FdbTuple.Stringify(this.Item5)).Append(')') + .ToString(); + } + + public override bool Equals(object obj) + { + return obj != null && ((IStructuralEquatable)this).Equals(obj, SimilarValueComparer.Default); + } + + public bool Equals(IFdbTuple other) + { + return other != null && ((IStructuralEquatable)this).Equals(other, SimilarValueComparer.Default); + } + + public override int GetHashCode() + { + return ((IStructuralEquatable)this).GetHashCode(SimilarValueComparer.Default); + } + + public static bool operator ==(FdbTuple left, FdbTuple right) + { + var comparer = SimilarValueComparer.Default; + return comparer.Equals(left.Item1, right.Item1) + && 
comparer.Equals(left.Item2, right.Item2) + && comparer.Equals(left.Item3, right.Item3) + && comparer.Equals(left.Item4, right.Item4) + && comparer.Equals(left.Item5, right.Item5); + } + + public static bool operator !=(FdbTuple left, FdbTuple right) + { + var comparer = SimilarValueComparer.Default; + return !comparer.Equals(left.Item1, right.Item1) + || !comparer.Equals(left.Item2, right.Item2) + || !comparer.Equals(left.Item3, right.Item3) + || !comparer.Equals(left.Item4, right.Item4) + || !comparer.Equals(left.Item5, right.Item5); + } + + bool IStructuralEquatable.Equals(object other, IEqualityComparer comparer) + { + if (other == null) return false; + if (other is FdbTuple) + { + var tuple = (FdbTuple)other; + return comparer.Equals(this.Item1, tuple.Item1) + && comparer.Equals(this.Item2, tuple.Item2) + && comparer.Equals(this.Item3, tuple.Item3) + && comparer.Equals(this.Item4, tuple.Item4) + && comparer.Equals(this.Item5, tuple.Item5); + } + return FdbTuple.Equals(this, other, comparer); + } + + int IStructuralEquatable.GetHashCode(IEqualityComparer comparer) + { + return FdbTuple.CombineHashCodes( + comparer.GetHashCode(this.Item1), + comparer.GetHashCode(this.Item2), + comparer.GetHashCode(this.Item3), + comparer.GetHashCode(this.Item4), + comparer.GetHashCode(this.Item5) + ); + } + + } + +} diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs index 8dee4aa99..f6ce01d73 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs @@ -254,7 +254,7 @@ public Slice EncodeKey(T1 item1, T2 item2, T3 item3) return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3); } - /// Create a new key by adding three items to the current subspace + /// Create a new key by adding four items to the current subspace /// Type of the first item /// Type of the second item /// Type of the third item @@ -271,16 +271,81 @@ public Slice 
EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4) return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3, item4); } - /// Create a new key by adding multiple items to the current subspace - /// Array of items to add - /// Key that is equivalent to adding the packed tuple created from to the subspace's prefix - /// {subspace}.EncodeKey(object[]) is much faster way to do {subspace}.Key + FdbTuple.Create(object[]).ToSlice() - /// The key produced can be decoded back into a tuple by calling . - public Slice EncodeKey(params object[] items) + /// Create a new key by adding five items to the current subspace + /// Type of the first item + /// Type of the second item + /// Type of the third item + /// Type of the fourth item + /// Type of the fifth item + /// Item that will be appended first + /// Item that will be appended second + /// Item that will be appended third + /// Item that will be appended fourth + /// Item that will be appended fifth + /// Key that is equivalent to adding the packed tuple quad (, , , ) to the subspace's prefix + /// {subspace}.EncodeKey(w, x, y, z) is much faster way to do {subspace}.Key + FdbTuple.Create(w, x, y, z).ToSlice() + /// The key produced can be decoded back into a tuple by calling either or + public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5) { - //note: this is bad practice, because it encourage people passing in object[] arrays, - // but there is not point in going all the way to 10 or more items. 
- return FdbTuple.EncodePrefixedKey(m_subspace.Key, items); + return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3, item4, item5); + } + + /// Create a new key by adding six items to the current subspace + /// Type of the first item + /// Type of the second item + /// Type of the third item + /// Type of the fourth item + /// Type of the fifth item + /// Item that will be appended first + /// Item that will be appended second + /// Item that will be appended third + /// Item that will be appended fourth + /// Item that will be appended fifth + /// Key that is equivalent to adding the packed tuple quad (, , , ) to the subspace's prefix + /// {subspace}.EncodeKey(w, x, y, z) is much faster way to do {subspace}.Key + FdbTuple.Create(w, x, y, z).ToSlice() + /// The key produced can be decoded back into a tuple by calling + public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6) + { + return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3, item4, item5, item6); + } + + /// Create a new key by adding seven items to the current subspace + /// Type of the first item + /// Type of the second item + /// Type of the third item + /// Type of the fourth item + /// Type of the fifth item + /// Item that will be appended first + /// Item that will be appended second + /// Item that will be appended third + /// Item that will be appended fourth + /// Item that will be appended fifth + /// Key that is equivalent to adding the packed tuple quad (, , , ) to the subspace's prefix + /// {subspace}.EncodeKey(w, x, y, z) is much faster way to do {subspace}.Key + FdbTuple.Create(w, x, y, z).ToSlice() + /// The key produced can be decoded back into a tuple by calling + public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7) + { + return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3, item4, item5, item6, item7); + } + + + /// Create a new key by adding eight items to the current 
subspace + /// Type of the first item + /// Type of the second item + /// Type of the third item + /// Type of the fourth item + /// Type of the fifth item + /// Item that will be appended first + /// Item that will be appended second + /// Item that will be appended third + /// Item that will be appended fourth + /// Item that will be appended fifth + /// Key that is equivalent to adding the packed tuple quad (, , , ) to the subspace's prefix + /// {subspace}.EncodeKey(w, x, y, z) is much faster way to do {subspace}.Key + FdbTuple.Create(w, x, y, z).ToSlice() + /// The key produced can be decoded back into a tuple by calling + public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7, T8 item8) + { + return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3, item4, item5, item6, item7, item8); } /// Merge a sequence of keys with the subspace's prefix, all sharing the same buffer @@ -369,6 +434,21 @@ public FdbTuple DecodeKey(Slice key) ); } + public FdbTuple DecodeKey(Slice key) + { + var tuple = Unpack(key); + if (tuple == null) throw new FormatException("The specified key does not contain any items"); + if (tuple.Count != 5) throw new FormatException("The specified key is not a tuple with 5 items"); + + return FdbTuple.Create( + tuple.Get(0), + tuple.Get(1), + tuple.Get(2), + tuple.Get(3), + tuple.Get(4) + ); + } + //note: there is no DecodeKey(slice) => object[] because this would encourage the bad practive of dealing with tuples as object[] arrays ! /// Unpack a key into a tuple, and return only the first element @@ -493,29 +573,29 @@ public IFdbTuple Append(T value) /// Create a new 2-tuple that is attached to this subspace /// Type of the first value to append /// Type of the second value to append - /// First value that will be appended - /// Second value that will be appended - /// Tuple of size 2 that contains and , and whose packed representation will always be prefixed by the subspace key. 
- /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2>(value1, value2))' + /// First value that will be appended + /// Second value that will be appended + /// Tuple of size 2 that contains and , and whose packed representation will always be prefixed by the subspace key. + /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2>(item1, item2))' [NotNull] - public IFdbTuple Append(T1 value1, T2 value2) + public IFdbTuple Append(T1 item1, T2 item2) { - return new FdbPrefixedTuple(m_subspace.Key, FdbTuple.Create(value1, value2)); + return new FdbPrefixedTuple(m_subspace.Key, FdbTuple.Create(item1, item2)); } /// Create a new 3-tuple that is attached to this subspace /// Type of the first value to append /// Type of the second value to append /// Type of the third value to append - /// First value that will be appended - /// Second value that will be appended - /// Third value that will be appended - /// Tuple of size 3 that contains , and , and whose packed representation will always be prefixed by the subspace key. - /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2, T3>(value1, value2, value3))' + /// First value that will be appended + /// Second value that will be appended + /// Third value that will be appended + /// Tuple of size 3 that contains , and , and whose packed representation will always be prefixed by the subspace key. 
+ /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2, T3>(item1, item2, item3))' [NotNull] - public IFdbTuple Append(T1 value1, T2 value2, T3 value3) + public IFdbTuple Append(T1 item1, T2 item2, T3 item3) { - return new FdbPrefixedTuple(m_subspace.Key, FdbTuple.Create(value1, value2, value3)); + return new FdbPrefixedTuple(m_subspace.Key, FdbTuple.Create(item1, item2, item3)); } /// Create a new 4-tuple that is attached to this subspace @@ -523,16 +603,35 @@ public IFdbTuple Append(T1 value1, T2 value2, T3 value3) /// Type of the second value to append /// Type of the third value to append /// Type of the fourth value to append - /// First value that will be appended - /// Second value that will be appended - /// Third value that will be appended - /// Fourth value that will be appended - /// Tuple of size 4 that contains , , and , and whose packed representation will always be prefixed by the subspace key. - /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2, T3, T4>(value1, value2, value3, value4))' + /// First value that will be appended + /// Second value that will be appended + /// Third value that will be appended + /// Fourth value that will be appended + /// Tuple of size 4 that contains , , and , and whose packed representation will always be prefixed by the subspace key. 
+ /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2, T3, T4>(item1, item2, item3, item4))' + [NotNull] + public IFdbTuple Append(T1 item1, T2 item2, T3 item3, T4 item4) + { + return new FdbPrefixedTuple(m_subspace.Key, FdbTuple.Create(item1, item2, item3, item4)); + } + + /// Create a new 5-tuple that is attached to this subspace + /// Type of the first value to append + /// Type of the second value to append + /// Type of the third value to append + /// Type of the fourth value to append + /// Type of the fifth value to append + /// First value that will be appended + /// Second value that will be appended + /// Third value that will be appended + /// Fourth value that will be appended + /// Fifth value that will be appended + /// Tuple of size 5 that contains , , , and , and whose packed representation will always be prefixed by the subspace key. + /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2, T3, T4, T5>(item1, item2, item3, item4, item5))' [NotNull] - public IFdbTuple Append(T1 value1, T2 value2, T3 value3, T4 value4) + public IFdbTuple Append(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5) { - return new FdbPrefixedTuple(m_subspace.Key, FdbTuple.Create(value1, value2, value3, value4)); + return new FdbPrefixedTuple(m_subspace.Key, FdbTuple.Create(item1, item2, item3, item4, item5)); } #endregion diff --git a/FoundationDB.Tests/Layers/TupleFacts.cs b/FoundationDB.Tests/Layers/TupleFacts.cs index 414d1c7a4..2030e08fd 100644 --- a/FoundationDB.Tests/Layers/TupleFacts.cs +++ b/FoundationDB.Tests/Layers/TupleFacts.cs @@ -91,6 +91,24 @@ public void Test_FdbTuple_Create() Assert.That(t4[2], Is.EqualTo(false)); Assert.That(t4[3], Is.EqualTo(1234L)); + var t5 = FdbTuple.Create("hello world", 123, false, 1234L, -1234); + Assert.That(t5.Count, Is.EqualTo(5)); + Assert.That(t5.Item1, Is.EqualTo("hello world")); + Assert.That(t5.Item2, Is.EqualTo(123)); + Assert.That(t5.Item3, Is.EqualTo(false)); + 
Assert.That(t5.Item4, Is.EqualTo(1234L)); + Assert.That(t5.Item5, Is.EqualTo(-1234)); + Assert.That(t5.Get(0), Is.EqualTo("hello world")); + Assert.That(t5.Get(1), Is.EqualTo(123)); + Assert.That(t5.Get(2), Is.EqualTo(false)); + Assert.That(t5.Get(3), Is.EqualTo(1234L)); + Assert.That(t5.Get(4), Is.EqualTo(-1234)); + Assert.That(t5[0], Is.EqualTo("hello world")); + Assert.That(t5[1], Is.EqualTo(123)); + Assert.That(t5[2], Is.EqualTo(false)); + Assert.That(t5[3], Is.EqualTo(1234L)); + Assert.That(t5[4], Is.EqualTo(-1234)); + var tn = FdbTuple.Create(new object[] { "hello world", 123, false, 1234L, -1234, "six" }); Assert.That(tn.Count, Is.EqualTo(6)); Assert.That(tn.Get(0), Is.EqualTo("hello world")); @@ -132,6 +150,18 @@ public void Test_FdbTuple_Negative_Indexing() Assert.That(t4[-3], Is.EqualTo(123)); Assert.That(t4[-4], Is.EqualTo("hello world")); + var t5 = FdbTuple.Create("hello world", 123, false, 1234L, -1234); + Assert.That(t5.Get(-1), Is.EqualTo(-1234)); + Assert.That(t5.Get(-2), Is.EqualTo(1234L)); + Assert.That(t5.Get(-3), Is.EqualTo(false)); + Assert.That(t5.Get(-4), Is.EqualTo(123)); + Assert.That(t5.Get(-5), Is.EqualTo("hello world")); + Assert.That(t5[-1], Is.EqualTo(-1234)); + Assert.That(t5[-2], Is.EqualTo(1234L)); + Assert.That(t5[-3], Is.EqualTo(false)); + Assert.That(t5[-4], Is.EqualTo(123)); + Assert.That(t5[-5], Is.EqualTo("hello world")); + var tn = FdbTuple.Create(new object[] { "hello world", 123, false, 1234, -1234, "six" }); Assert.That(tn.Get(-1), Is.EqualTo("six")); Assert.That(tn.Get(-2), Is.EqualTo(-1234)); @@ -177,6 +207,12 @@ public void Test_FdbTuple_First_And_Last() Assert.That(t4.Last(), Is.EqualTo(4)); Assert.That(t4.Last(), Is.EqualTo("4")); + var t5 = FdbTuple.Create(1, 2, 3, 4, 5); + Assert.That(t5.First(), Is.EqualTo(1)); + Assert.That(t5.First(), Is.EqualTo("1")); + Assert.That(t5.Last(), Is.EqualTo(5)); + Assert.That(t5.Last(), Is.EqualTo("5")); + var tn = FdbTuple.Create(1, 2, 3, 4, 5, 6); Assert.That(tn.First(), 
Is.EqualTo(1)); Assert.That(tn.First(), Is.EqualTo("1")); @@ -224,6 +260,24 @@ public void Test_FdbTuple_Unpack_First_And_Last() Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo(5)); Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo("5")); + packed = FdbTuple.EncodeKey(1, 2, 3, 4, 5, 6); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo(1)); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo("1")); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo(6)); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo("6")); + + packed = FdbTuple.EncodeKey(1, 2, 3, 4, 5, 6, 7); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo(1)); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo("1")); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo(7)); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo("7")); + + packed = FdbTuple.EncodeKey(1, 2, 3, 4, 5, 6, 7, 8); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo(1)); + Assert.That(FdbTuple.DecodeFirst(packed), Is.EqualTo("1")); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo(8)); + Assert.That(FdbTuple.DecodeLast(packed), Is.EqualTo("8")); + Assert.That(() => FdbTuple.DecodeFirst(Slice.Nil), Throws.InstanceOf()); Assert.That(() => FdbTuple.DecodeFirst(Slice.Empty), Throws.InstanceOf()); Assert.That(() => FdbTuple.DecodeLast(Slice.Nil), Throws.InstanceOf()); @@ -250,6 +304,10 @@ public void Test_FdbTuple_UnpackSingle() Assert.That(() => FdbTuple.DecodeKey(FdbTuple.EncodeKey(1, 2)), Throws.InstanceOf()); Assert.That(() => FdbTuple.DecodeKey(FdbTuple.EncodeKey(1, 2, 3)), Throws.InstanceOf()); Assert.That(() => FdbTuple.DecodeKey(FdbTuple.EncodeKey(1, 2, 3, 4)), Throws.InstanceOf()); + Assert.That(() => FdbTuple.DecodeKey(FdbTuple.EncodeKey(1, 2, 3, 4, 5)), Throws.InstanceOf()); + Assert.That(() => FdbTuple.DecodeKey(FdbTuple.EncodeKey(1, 2, 3, 4, 5, 6)), Throws.InstanceOf()); + Assert.That(() => FdbTuple.DecodeKey(FdbTuple.EncodeKey(1, 2, 3, 4, 5, 6, 7)), Throws.InstanceOf()); + Assert.That(() 
=> FdbTuple.DecodeKey(FdbTuple.EncodeKey(1, 2, 3, 4, 5, 6, 7, 8)), Throws.InstanceOf()); } From f8082d1c963085f9de12009f84726716814bfe70 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Mon, 17 Nov 2014 22:06:18 +0100 Subject: [PATCH 08/63] Tuple refactoring: trying to reduce the mess with CreateRange, Create, and Wrap - There are too many variants to create a tuple from an array, so we need to simplifiy - Testing things with a FdbTuple.FromArray and FromSequence, which should be explicit enough - Still need to decide what to do between Create and Wrap. If they both stay, one need to imply copying the array, the other simply wrapping the array? --- FdbShell/Program.cs | 2 +- .../Layers/Directories/FdbDirectoryLayer.cs | 10 +- .../Directories/FdbDirectorySubspace.cs | 2 +- FoundationDB.Client/Layers/Tuples/FdbTuple.cs | 109 ++++++++++-------- FoundationDB.Tests/Layers/TupleFacts.cs | 8 +- 5 files changed, 71 insertions(+), 60 deletions(-) diff --git a/FdbShell/Program.cs b/FdbShell/Program.cs index 0f6c111dc..422fdf200 100644 --- a/FdbShell/Program.cs +++ b/FdbShell/Program.cs @@ -302,7 +302,7 @@ private static async Task MainAsync(string[] args, CancellationToken cancel) var tokens = s.Trim().Split(new [] { ' ' }, StringSplitOptions.RemoveEmptyEntries); string cmd = tokens.Length > 0 ? tokens[0] : String.Empty; string prm = tokens.Length > 1 ? tokens[1] : String.Empty; - var extras = tokens.Length > 2 ? FdbTuple.CreateRange(tokens.Skip(2)) : FdbTuple.Empty; + var extras = tokens.Length > 2 ? 
FdbTuple.FromEnumerable(tokens.Skip(2)) : FdbTuple.Empty; var trimmedCommand = cmd.Trim().ToLowerInvariant(); switch (trimmedCommand) diff --git a/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs b/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs index 628a89f8c..5e7888f3c 100644 --- a/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs +++ b/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs @@ -289,9 +289,9 @@ public Task MoveAsync(IFdbTransaction trans, IEnumerable TryMoveAsync(IFdbTransaction trans, IEnumerabl if (oldPath == null) throw new ArgumentNullException("oldPath"); if (newPath == null) throw new ArgumentNullException("newPath"); - var oldLocation = FdbTuple.CreateRange(oldPath); + var oldLocation = FdbTuple.FromEnumerable(oldPath); VerifyPath(oldLocation, "oldPath"); - var newLocation = FdbTuple.CreateRange(newPath); + var newLocation = FdbTuple.FromEnumerable(newPath); VerifyPath(newLocation, "newPath"); return MoveInternalAsync(trans, oldLocation, newLocation, throwOnError: false); @@ -496,7 +496,7 @@ internal static IFdbTuple ParsePath(IEnumerable path, string argName = n throw new ArgumentException("The path of a directory cannot contain null elements", argName ?? "path"); } } - return FdbTuple.CreateRange(pathCopy); + return FdbTuple.FromArray(pathCopy); } internal static IFdbTuple ParsePath(string name, string argName = null) diff --git a/FoundationDB.Client/Layers/Directories/FdbDirectorySubspace.cs b/FoundationDB.Client/Layers/Directories/FdbDirectorySubspace.cs index 9d568119a..0018c4607 100644 --- a/FoundationDB.Client/Layers/Directories/FdbDirectorySubspace.cs +++ b/FoundationDB.Client/Layers/Directories/FdbDirectorySubspace.cs @@ -106,7 +106,7 @@ protected virtual IFdbTuple ToRelativePath(IFdbTuple location) /// Path relative to the path of the current partition protected IFdbTuple ToRelativePath(IEnumerable path) { - return ToRelativePath(path == null ? 
null : FdbTuple.CreateRange(path)); + return ToRelativePath(path == null ? null : FdbTuple.FromEnumerable(path)); } /// Ensure that this directory was registered with the correct layer id diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs index 972fbeed1..6cb58cc01 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs @@ -213,15 +213,9 @@ public static IFdbTuple Create(params object[] items) [NotNull] public static IFdbTuple Wrap(object[] items) { - //REVIEW: this is identital to Create(params object[]) !! + //REVIEW: this is similar to Create(params object[]) and CreateRange(object[]) !! //TODO: remove this? - - if (items == null) throw new ArgumentNullException("items"); - - if (items.Length == 0) return FdbTuple.Empty; - - // review: should be create a copy ? - return new FdbListTuple(items, 0, items.Length); + return Create(items); } /// Create a new N-tuple that wraps a section of an array of untyped items @@ -241,55 +235,61 @@ public static IFdbTuple Wrap(object[] items, int offset, int count) return new FdbListTuple(items, offset, count); } - /// Create a new N-tuple, from an array of untyped items - [NotNull] - public static IFdbTuple CreateRange(object[] items) - { - if (items == null) throw new ArgumentNullException("items"); + ///// Create a new N-tuple, from an array of untyped items + //[NotNull] + //public static IFdbTuple CreateRange(object[] items) + //{ + // //REVIEW: this is idential to Create(object[]) and Wrap(object[]) !! 
- return CreateRange(items, 0, items.Length); - } + // if (items == null) throw new ArgumentNullException("items"); - /// Create a new N-tuple, from a section of an array of untyped items - [NotNull] - public static IFdbTuple CreateRange(object[] items, int offset, int count) - { - if (items == null) throw new ArgumentNullException("items"); - if (offset < 0) throw new ArgumentOutOfRangeException("offset", "Offset cannot be less than zero"); - if (count < 0) throw new ArgumentOutOfRangeException("count", "Count cannot be less than zero"); - if (offset + count > items.Length) throw new ArgumentOutOfRangeException("count", "Source array is too small"); + // return CreateRange(items, 0, items.Length); + //} - if (count == 0) return FdbTuple.Empty; + ///// Create a new N-tuple, from a section of an array of untyped items + //[NotNull] + //public static IFdbTuple CreateRange(object[] items, int offset, int count) + //{ + // //REVIEW: this is idential to Wrap(object[]) !! - // copy the items - var tmp = new object[count]; - Array.Copy(items, offset, tmp, 0, count); - return new FdbListTuple(tmp, 0, count); - } + // if (items == null) throw new ArgumentNullException("items"); + // if (offset < 0) throw new ArgumentOutOfRangeException("offset", "Offset cannot be less than zero"); + // if (count < 0) throw new ArgumentOutOfRangeException("count", "Count cannot be less than zero"); + // if (offset + count > items.Length) throw new ArgumentOutOfRangeException("count", "Source array is too small"); - /// Create a new N-tuple from a sequence of items - [NotNull] - public static IFdbTuple CreateRange(IEnumerable items) - { - if (items == null) throw new ArgumentNullException("items"); + // if (count == 0) return FdbTuple.Empty; - // may already be a tuple (because it implements IE) - var tuple = items as IFdbTuple ?? 
new FdbListTuple(items); - return tuple; - } + // // copy the items + // var tmp = new object[count]; + // Array.Copy(items, offset, tmp, 0, count); + // return new FdbListTuple(tmp, 0, count); + //} + + ///// Create a new N-tuple from a sequence of items + //[NotNull] + //public static IFdbTuple CreateRange(IEnumerable items) + //{ + // if (items == null) throw new ArgumentNullException("items"); - /// Create a new N-tuple, from an array of typed items + // // may already be a tuple (because it implements IE) + // var tuple = items as IFdbTuple ?? new FdbListTuple(items); + // return tuple; + //} + + /// Create a new tuple, from an array of typed items + /// Array of items + /// Tuple with the same size as and where all the items are of type [NotNull] - public static IFdbTuple CreateRange(T[] items) + public static IFdbTuple FromArray(T[] items) { if (items == null) throw new ArgumentNullException("items"); - return CreateRange(items, 0, items.Length); + return FromArray(items, 0, items.Length); } - /// Create a new N-tuple, from a section of an array of typed items + /// Create a new tuple, from a section of an array of typed items [NotNull] - public static IFdbTuple CreateRange(T[] items, int offset, int count) + public static IFdbTuple FromArray(T[] items, int offset, int count) { if (items == null) throw new ArgumentNullException("items"); if (offset < 0) throw new ArgumentOutOfRangeException("offset", "Offset cannot be less than zero"); @@ -297,27 +297,38 @@ public static IFdbTuple CreateRange(T[] items, int offset, int count) if (offset + count > items.Length) throw new ArgumentOutOfRangeException("count", "Source array is too small"); if (count == 0) return FdbTuple.Empty; + if (count == 1) return FdbTuple.Create(items[offset]); + if (count == 2) return FdbTuple.Create(items[offset], items[offset + 1]); // copy the items var tmp = new object[count]; Array.Copy(items, offset, tmp, 0, count); + //TODO: we would probably benefit from having an FdbListTuple 
here! return new FdbListTuple(tmp, 0, count); } - /// Create a new N-tuple from a sequence of typed items + /// Create a new tuple from a sequence of typed items [NotNull] - public static IFdbTuple CreateRange(IEnumerable items) + public static IFdbTuple FromEnumerable(IEnumerable items) { if (items == null) throw new ArgumentNullException("items"); + var arr = items as T[]; + if (arr != null) + { + return FromArray(arr, 0, arr.Length); + } + // may already be a tuple (because it implements IE) var tuple = items as IFdbTuple; - if (tuple == null) + if (tuple != null) { - object[] tmp = items.Cast().ToArray(); - tuple = new FdbListTuple(tmp, 0, tmp.Length); + return tuple; } - return tuple; + + object[] tmp = items.Cast().ToArray(); + //TODO: we would probably benefit from having an FdbListTuple here! + return new FdbListTuple(tmp, 0, tmp.Length); } /// Concatenates two tuples together diff --git a/FoundationDB.Tests/Layers/TupleFacts.cs b/FoundationDB.Tests/Layers/TupleFacts.cs index 2030e08fd..20c10cbd2 100644 --- a/FoundationDB.Tests/Layers/TupleFacts.cs +++ b/FoundationDB.Tests/Layers/TupleFacts.cs @@ -1495,12 +1495,12 @@ public void Test_FdbTuple_Create_ToSlice() ); Assert.That( - FdbTuple.CreateRange(new object[] { "hello world", 123, false, new byte[] { 123, 1, 66, 0, 42 } }, 1, 2).ToSlice().ToString(), + FdbTuple.FromArray(new object[] { "hello world", 123, false, new byte[] { 123, 1, 66, 0, 42 } }, 1, 2).ToSlice().ToString(), Is.EqualTo("<15>{<14>") ); Assert.That( - FdbTuple.CreateRange(new List { "hello world", 123, false, new byte[] { 123, 1, 66, 0, 42 } }).ToSlice().ToString(), + FdbTuple.FromEnumerable(new List { "hello world", 123, false, new byte[] { 123, 1, 66, 0, 42 } }).ToSlice().ToString(), Is.EqualTo("<02>hello world<00><15>{<14><01>{<01>B<00>*<00>") ); @@ -2251,8 +2251,8 @@ public void Test_FdbTuple_Not_Equal() [Test] public void Test_FdbTuple_Substring_Equality() { - var x = FdbTuple.CreateRange(new [] { "A", "C" }); - var y = 
FdbTuple.CreateRange(new[] { "A", "B", "C" }); + var x = FdbTuple.FromArray(new [] { "A", "C" }); + var y = FdbTuple.FromArray(new[] { "A", "B", "C" }); Assert.That(x.Substring(0, 1), Is.EqualTo(y.Substring(0, 1))); Assert.That(x.Substring(1, 1), Is.EqualTo(y.Substring(2, 1))); From 08763af58a450a6c24aa1db40b2f334eca0d6a74 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Tue, 18 Nov 2014 18:39:31 +0100 Subject: [PATCH 09/63] Subspace refactoring: moved ToRange() to the properties subspace.Keys and subpsace.Tuples - subspace.Tuples.ToRange() has the same behavior as the old subspace.ToRange() (prefix+\x00 .. prefix+\xFF) - subspace.Keys.ToRange() includes ALL the keys, and is the equivalent of FdbKeyRange.StartsWith(subspace.Prefix) - If you had "subspace.ToRange()" before, you should first change it to "subspace.Tuples.ToRange()" to stay compatible, and then decide if you actually needed "subspace.Keys.ToRange()" or not. --- FdbShell/Commands/BasicCommands.cs | 10 +++--- FoundationDB.Client/FdbDatabase.cs | 7 ---- .../Filters/FdbDatabaseFilter.cs | 5 --- .../Layers/Directories/FdbDirectoryLayer.cs | 10 +++--- .../Directories/FdbHighContentionAllocator.cs | 2 +- FoundationDB.Client/Subspaces/FdbSubspace.cs | 11 ------ .../Subspaces/FdbSubspaceExtensions.cs | 12 ------- .../Subspaces/FdbSubspaceKeys.cs | 13 ++++--- .../Subspaces/FdbSubspaceTuples.cs | 15 +++++++- FoundationDB.Client/Subspaces/IFdbSubspace.cs | 4 --- .../Collections/FdbMap`2.cs | 18 +++++----- .../Collections/FdbQueue`1.cs | 16 ++++----- .../Collections/FdbRankedSet.cs | 6 ++-- .../Collections/FdbVector`1.cs | 10 +++--- .../Counters/FdbHighContentionCounter.cs | 6 ++-- .../Indexes/FdbIndex`2.cs | 4 +-- .../Messaging/FdbWorkerPool.cs | 6 ++-- .../MessageQueue/MessageQueueRunner.cs | 10 +++--- .../Tutorials/ClassScheduling.cs | 4 +-- FoundationDB.Tests/DatabaseBulkFacts.cs | 6 ++-- FoundationDB.Tests/Layers/DirectoryFacts.cs | 17 +++++----- FoundationDB.Tests/Layers/RankedSetFacts.cs | 9 +++-- 
FoundationDB.Tests/Layers/VectorFacts.cs | 2 +- FoundationDB.Tests/RangeQueryFacts.cs | 34 +++++++++---------- FoundationDB.Tests/TransactionFacts.cs | 8 ++--- FoundationDB.Tests/TransactionalFacts.cs | 2 +- README.md | 6 ++-- 27 files changed, 118 insertions(+), 135 deletions(-) diff --git a/FdbShell/Commands/BasicCommands.cs b/FdbShell/Commands/BasicCommands.cs index b114b1a7a..5a4813d6f 100644 --- a/FdbShell/Commands/BasicCommands.cs +++ b/FdbShell/Commands/BasicCommands.cs @@ -68,7 +68,7 @@ public static async Task Dir(string[] path, IFdbTuple extras, DirectoryBrowseOpt { if (!(subfolder is FdbDirectoryPartition)) { - long count = await Fdb.System.EstimateCountAsync(db, subfolder.ToRange(), ct); + long count = await Fdb.System.EstimateCountAsync(db, subfolder.Tuples.ToRange(), ct); log.WriteLine(" {0,-12} {1,-12} {3,9:N0} {2}", FdbKey.Dump(FdbSubspace.Copy(subfolder).Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name, count); } else @@ -115,7 +115,7 @@ public static async Task CreateDirectory(string[] path, IFdbTuple extras, IFdbDa log.WriteLine("- Created under {0} [{1}]", FdbKey.Dump(folder.Key), folder.Key.ToHexaString(' ')); // look if there is already stuff under there - var stuff = await db.ReadAsync((tr) => tr.GetRange(folder.ToRange()).FirstOrDefaultAsync(), cancellationToken: ct); + var stuff = await db.ReadAsync((tr) => tr.GetRange(folder.Tuples.ToRange()).FirstOrDefaultAsync(), cancellationToken: ct); if (stuff.Key.IsPresent) { log.WriteLine("CAUTION: There is already some data under {0} !"); @@ -224,7 +224,7 @@ public static async Task Count(string[] path, IFdbTuple extras, IFdbDatabase db, log.Write("\r# Found {0:N0} keys...", state.Item1); }); - long count = await Fdb.System.EstimateCountAsync(db, copy.ToRange(), progress, ct); + long count = await Fdb.System.EstimateCountAsync(db, copy.Tuples.ToRange(), progress, ct); log.WriteLine("\r# Found {0:N0} keys in {1}", count, String.Join("/", folder.Path)); } @@ 
-245,7 +245,7 @@ public static async Task Show(string[] path, IFdbTuple extras, bool reverse, IFd log.WriteLine("# Content of {0} [{1}]", FdbKey.Dump(folder.Key), folder.Key.ToHexaString(' ')); var keys = await db.QueryAsync((tr) => { - var query = tr.GetRange(folder.ToRange()); + var query = tr.GetRange(folder.Tuples.ToRange()); return reverse ? query.Reverse().Take(count) : query.Take(count + 1); @@ -329,7 +329,7 @@ public static async Task Map(string[] path, IFdbTuple extras, IFdbDatabase db, T return; } - var span = folder.DirectoryLayer.ContentSubspace.ToRange(); + var span = folder.DirectoryLayer.ContentSubspace.Tuples.ToRange(); // note: this may break in future versions of the DL! Maybe we need a custom API to get a flat list of all directories in a DL that span a specific range ? diff --git a/FoundationDB.Client/FdbDatabase.cs b/FoundationDB.Client/FdbDatabase.cs index dde6bcfe2..f269f2d68 100644 --- a/FoundationDB.Client/FdbDatabase.cs +++ b/FoundationDB.Client/FdbDatabase.cs @@ -578,13 +578,6 @@ public FdbSubspaceTuples Tuples get { return m_globalSpace.Tuples; } } - /// Returns a range that contains all the keys that are inside the database global subspace combined with the suffix , but not inside the System subspace. 
- /// If the global space is empty, this will return the range [<00>, <FF>) - public FdbKeyRange ToRange(Slice key) - { - return m_globalSpace.ToRange(key); - } - /// Returns true if the key is inside the system key space (starts with '\xFF') internal static bool IsSystemKey(ref Slice key) { diff --git a/FoundationDB.Client/Filters/FdbDatabaseFilter.cs b/FoundationDB.Client/Filters/FdbDatabaseFilter.cs index 9c2224789..46daecf3b 100644 --- a/FoundationDB.Client/Filters/FdbDatabaseFilter.cs +++ b/FoundationDB.Client/Filters/FdbDatabaseFilter.cs @@ -183,11 +183,6 @@ public virtual Slice[] ExtractKeys(IEnumerable keys, bool boundCheck = fa return m_database.ExtractKeys(keys, boundCheck); } - public virtual FdbKeyRange ToRange(Slice key) - { - return m_database.ToRange(key); - } - #endregion #region Transactionals... diff --git a/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs b/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs index 5e7888f3c..f8c0b2398 100644 --- a/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs +++ b/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs @@ -854,7 +854,7 @@ private async Task NodeContainingKey(IFdbReadOnlyTransaction tr, S var kvp = await tr .GetRange( - this.NodeSubspace.ToRange().Begin, + this.NodeSubspace.Tuples.ToRange().Begin, this.NodeSubspace.Tuples.EncodeKey(key) + FdbKey.MinValue ) .LastOrDefaultAsync() @@ -939,7 +939,7 @@ private IFdbAsyncEnumerable> SubdirNamesAndNo var sd = node.Partition.ByKey(SUBDIRS); return tr - .GetRange(sd.ToRange()) + .GetRange(sd.Tuples.ToRange()) .Select(kvp => new KeyValuePair( sd.Tuples.DecodeKey(kvp.Key), NodeWithPrefix(kvp.Value) @@ -969,8 +969,10 @@ private async Task RemoveRecursive(IFdbTransaction tr, IFdbSubspace node) //note: we could use Task.WhenAll to remove the children, but there is a risk of task explosion if the subtree is very large... 
await SubdirNamesAndNodes(tr, node).ForEachAsync((kvp) => RemoveRecursive(tr, kvp.Value)).ConfigureAwait(false); - tr.ClearRange(FdbKeyRange.StartsWith(ContentsOfNode(node, FdbTuple.Empty, Slice.Empty).Key)); - tr.ClearRange(node.ToRange()); + // remove ALL the contents + tr.ClearRange(ContentsOfNode(node, FdbTuple.Empty, Slice.Empty).Keys.ToRange()); + // and all the metadata for this folder + tr.ClearRange(node.Tuples.ToRange()); } private async Task IsPrefixFree(IFdbReadOnlyTransaction tr, Slice prefix) diff --git a/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs b/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs index 79351fade..cdc025fd0 100644 --- a/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs +++ b/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs @@ -68,7 +68,7 @@ public async Task AllocateAsync(IFdbTransaction trans) long start = 0, count = 0; var kv = await trans .Snapshot - .GetRange(this.Counters.ToRange()) + .GetRange(this.Counters.Tuples.ToRange()) .LastOrDefaultAsync(); if (kv.Key.IsPresent) diff --git a/FoundationDB.Client/Subspaces/FdbSubspace.cs b/FoundationDB.Client/Subspaces/FdbSubspace.cs index 7c477f5b5..2076b90ae 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspace.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspace.cs @@ -277,17 +277,6 @@ public Slice[] ExtractKeys([NotNull] IEnumerable keys, bool boundCheck = } } - /// Gets a key range respresenting all keys strictly within a sub-section of this Subspace. - /// Suffix added to the subspace prefix - /// Key range that, when passed to ClearRange() or GetRange(), would clear or return all the keys contained by this subspace, excluding the subspace prefix itself. 
- public virtual FdbKeyRange ToRange(Slice suffix = default(Slice)) - { - if (suffix.IsPresent) - return FdbTuple.ToRange(GetKeyPrefix().Concat(suffix)); - else - return FdbTuple.ToRange(GetKeyPrefix()); - } - #endregion #region IEquatable / IComparable... diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs b/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs index c6110cf66..ecde905d5 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs @@ -80,17 +80,5 @@ public static bool Contains([NotNull] this IFdbSubspace subspace, [NotNull return subspace.Contains(key.ToFoundationDbKey()); } - public static FdbKeyRange ToRange([NotNull] this IFdbSubspace subspace, [NotNull] TKey key) - where TKey : IFdbKey - { - if (key == null) throw new ArgumentNullException("key"); - return subspace.ToRange(key.ToFoundationDbKey()); - } - - public static FdbKeySelectorPair ToSelectorPair([NotNull] this IFdbSubspace subspace) - { - return FdbKeySelectorPair.Create(subspace.ToRange()); - } - } } diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceKeys.cs b/FoundationDB.Client/Subspaces/FdbSubspaceKeys.cs index 23fe4f542..3ea23d66a 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceKeys.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceKeys.cs @@ -139,19 +139,22 @@ public Slice[] Extract([NotNull] IEnumerable keys) public FdbKeyRange ToRange() { - return m_subspace.ToRange(); + return FdbKeyRange.StartsWith(m_subspace.Key); } - public FdbKeyRange ToRange(Slice key) + /// Gets a key range respresenting all keys strictly within a sub-section of this Subspace. + /// Suffix added to the subspace prefix + /// Key range that, when passed to ClearRange() or GetRange(), would clear or return all the keys contained by this subspace, excluding the subspace prefix itself. 
+ public FdbKeyRange ToRange(Slice suffix) { - return m_subspace.ToRange(key); + return FdbKeyRange.StartsWith(m_subspace.ConcatKey(suffix)); } - public FdbKeyRange ToRange([NotNull] IFdbKey key) + public FdbKeyRange ToRange([NotNull] TKey key) where TKey : IFdbKey { if (key == null) throw new ArgumentNullException("key"); - return m_subspace.ToRange(key.ToFoundationDbKey()); + return FdbKeyRange.StartsWith(m_subspace.ConcatKey(key.ToFoundationDbKey())); } } diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs index f6ce01d73..48d7093af 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs @@ -199,10 +199,23 @@ public IFdbTuple[] Unpack([NotNull] params Slice[] keys) #region ToRange: Tuple => Range + public FdbKeyRange ToRange() + { + return FdbTuple.ToRange(m_subspace.Key); + } + + /// Gets a key range respresenting all keys strictly within a sub-section of this Subspace. + /// Suffix added to the subspace prefix + /// Key range that, when passed to ClearRange() or GetRange(), would clear or return all the keys contained by this subspace, excluding the subspace prefix itself. 
+ public FdbKeyRange ToRange(Slice suffix) + { + return FdbTuple.ToRange(m_subspace.Key.Concat(suffix)); + } + public FdbKeyRange ToRange([NotNull] IFdbTuple tuple) { if (tuple == null) throw new ArgumentNullException("tuple"); - return m_subspace.ToRange(tuple.ToSlice()); + return FdbTuple.ToRange(FdbTuple.Pack(m_subspace.Key, tuple)); } public FdbKeyRange ToRange([NotNull] ITupleFormattable item) diff --git a/FoundationDB.Client/Subspaces/IFdbSubspace.cs b/FoundationDB.Client/Subspaces/IFdbSubspace.cs index 7ce64e8c5..c2c35161d 100644 --- a/FoundationDB.Client/Subspaces/IFdbSubspace.cs +++ b/FoundationDB.Client/Subspaces/IFdbSubspace.cs @@ -80,10 +80,6 @@ public interface IFdbSubspace : IFdbKey [NotNull] Slice[] ExtractKeys([NotNull] IEnumerable keys, bool boundCheck = false); - /// Return a pair of keys that contain all the keys inside this subspace - FdbKeyRange ToRange(Slice suffix = default(Slice)); - //REVIEW: this is not exactly true if ToRange() use the Tuple ToRange() wich adds <00> and to the prefix! - } } diff --git a/FoundationDB.Layers.Common/Collections/FdbMap`2.cs b/FoundationDB.Layers.Common/Collections/FdbMap`2.cs index 8483a6921..b9298c31c 100644 --- a/FoundationDB.Layers.Common/Collections/FdbMap`2.cs +++ b/FoundationDB.Layers.Common/Collections/FdbMap`2.cs @@ -146,7 +146,7 @@ public IFdbAsyncEnumerable> All([NotNull] IFdbReadOnl if (trans == null) throw new ArgumentNullException("trans"); return trans - .GetRange(this.Location.ToRange(), options) + .GetRange(this.Location.Tuples.ToRange(), options) .Select(this.DecodeItem); } @@ -202,7 +202,7 @@ public void Clear([NotNull] IFdbTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); - trans.ClearRange(this.Location.ToRange()); + trans.ClearRange(this.Location.Tuples.ToRange()); } #region Export... 
@@ -219,8 +219,8 @@ public Task ExportAsync([NotNull] IFdbDatabase db, [NotNull] Action { foreach (var item in batch) @@ -246,7 +246,7 @@ public Task ExportAsync([NotNull] IFdbDatabase db, [NotNull] Func { foreach (var item in batch) @@ -271,7 +271,7 @@ public Task ExportAsync([NotNull] IFdbDatabase db, [NotNull] Action { if (batch.Length > 0) @@ -297,7 +297,7 @@ public Task ExportAsync([NotNull] IFdbDatabase db, [NotNull] Func handler(DecodeItems(batch), ct), cancellationToken ); @@ -324,7 +324,7 @@ public async Task AggregateAsync([NotNull] IFdbDatabase db, Fu await Fdb.Bulk.ExportAsync( db, - this.Location.ToRange(), + this.Location.Tuples.ToRange(), (batch, _, ct) => { state = handler(state, DecodeItems(batch)); @@ -357,7 +357,7 @@ public async Task AggregateAsync([NotNull] IFdbDatabas await Fdb.Bulk.ExportAsync( db, - this.Location.ToRange(), + this.Location.Tuples.ToRange(), (batch, _, ct) => { state = handler(state, DecodeItems(batch)); diff --git a/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs b/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs index 46de00f4c..aa403ee33 100644 --- a/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs +++ b/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs @@ -174,7 +174,7 @@ public Task ExportAsync(IFdbDatabase db, Action handler, CancellationTo return Fdb.Bulk.ExportAsync( db, - this.QueueItem.ToRange(), + this.QueueItem.Tuples.ToRange(), (kvs, offset, ct) => { foreach(var kv in kvs) @@ -199,7 +199,7 @@ public Task ExportAsync(IFdbDatabase db, Func handler, Cancellati return Fdb.Bulk.ExportAsync( db, - this.QueueItem.ToRange(), + this.QueueItem.Tuples.ToRange(), async (kvs, offset, ct) => { foreach (var kv in kvs) @@ -223,7 +223,7 @@ public Task ExportAsync(IFdbDatabase db, Action handler, Cancellation return Fdb.Bulk.ExportAsync( db, - this.QueueItem.ToRange(), + this.QueueItem.Tuples.ToRange(), (kvs, offset, ct) => { handler(this.Encoder.DecodeRange(kvs), offset); @@ -242,7 +242,7 @@ public Task 
ExportAsync(IFdbDatabase db, Func handler, Cancella return Fdb.Bulk.ExportAsync( db, - this.QueueItem.ToRange(), + this.QueueItem.Tuples.ToRange(), (kvs, offset, ct) => handler(this.Encoder.DecodeRange(kvs), offset), cancellationToken ); @@ -279,7 +279,7 @@ private async Task PushAtAsync([NotNull] IFdbTransaction tr, T value, long index private async Task GetNextIndexAsync([NotNull] IFdbReadOnlyTransaction tr, IFdbSubspace subspace) { - var range = subspace.ToRange(); + var range = subspace.Tuples.ToRange(); var lastKey = await tr.GetKeyAsync(FdbKeySelector.LastLessThan(range.End)).ConfigureAwait(false); @@ -293,7 +293,7 @@ private async Task GetNextIndexAsync([NotNull] IFdbReadOnlyTransaction tr, private Task> GetFirstItemAsync([NotNull] IFdbReadOnlyTransaction tr) { - var range = this.QueueItem.ToRange(); + var range = this.QueueItem.Tuples.ToRange(); return tr.GetRange(range).FirstOrDefaultAsync(); } @@ -332,13 +332,13 @@ private async Task AddConflictedPopAsync([NotNull] IFdbTransaction tr, bo private Task>> GetWaitingPopsAsync([NotNull] IFdbReadOnlyTransaction tr, int numPops) { - var range = this.ConflictedPop.ToRange(); + var range = this.ConflictedPop.Tuples.ToRange(); return tr.GetRange(range, limit: numPops, reverse: false).ToListAsync(); } private Task>> GetItemsAsync([NotNull] IFdbReadOnlyTransaction tr, int numItems) { - var range = this.QueueItem.ToRange(); + var range = this.QueueItem.Tuples.ToRange(); return tr.GetRange(range, limit: numItems, reverse: false).ToListAsync(); } diff --git a/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs b/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs index 43999d267..29e27a666 100644 --- a/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs +++ b/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs @@ -78,7 +78,7 @@ public Task SizeAsync([NotNull] IFdbReadOnlyTransaction trans) if (trans == null) throw new ArgumentNullException("trans"); return trans - 
.GetRange(this.Subspace.Partition.ByKey(MAX_LEVELS - 1).ToRange()) + .GetRange(this.Subspace.Partition.ByKey(MAX_LEVELS - 1).Tuples.ToRange()) .Select(kv => DecodeCount(kv.Value)) .SumAsync(); } @@ -199,7 +199,7 @@ public async Task GetNthAsync([NotNull] IFdbReadOnlyTransaction trans, lo for (int level = MAX_LEVELS - 1; level >= 0; level--) { var lss = this.Subspace.Partition.ByKey(level); - var kcs = await trans.GetRange(lss.Tuples.EncodeKey(key), lss.ToRange().End).ToListAsync().ConfigureAwait(false); + var kcs = await trans.GetRange(lss.Tuples.EncodeKey(key), lss.Tuples.ToRange().End).ToListAsync().ConfigureAwait(false); if (kcs.Count == 0) break; @@ -226,7 +226,7 @@ public async Task GetNthAsync([NotNull] IFdbReadOnlyTransaction trans, lo /// Clears the entire set. public Task ClearAllAsync([NotNull] IFdbTransaction trans) { - trans.ClearRange(this.Subspace.ToRange()); + trans.ClearRange(this.Subspace.Tuples.ToRange()); return SetupLevelsAsync(trans); } diff --git a/FoundationDB.Layers.Common/Collections/FdbVector`1.cs b/FoundationDB.Layers.Common/Collections/FdbVector`1.cs index 77e6c433b..cf387cde6 100644 --- a/FoundationDB.Layers.Common/Collections/FdbVector`1.cs +++ b/FoundationDB.Layers.Common/Collections/FdbVector`1.cs @@ -112,7 +112,7 @@ public Task BackAsync([NotNull] IFdbReadOnlyTransaction tr) if (tr == null) throw new ArgumentNullException("tr"); return tr - .GetRange(this.Subspace.ToRange()) + .GetRange(this.Subspace.Tuples.ToRange()) .Select((kvp) => this.Encoder.DecodeValue(kvp.Value)) .LastOrDefaultAsync(); } @@ -128,7 +128,7 @@ public async Task> PopAsync([NotNull] IFdbTransaction tr) { if (tr == null) throw new ArgumentNullException("tr"); - var keyRange = this.Subspace.ToRange(); + var keyRange = this.Subspace.Tuples.ToRange(); // Read the last two entries so we can check if the second to last item // is being represented sparsely. 
If so, we will be required to set it @@ -202,7 +202,7 @@ public async Task GetAsync([NotNull] IFdbReadOnlyTransaction tr, long index) if (index < 0) throw new IndexOutOfRangeException(String.Format("Index {0} must be positive", index)); var start = GetKeyAt(index); - var end = this.Subspace.ToRange().End; + var end = this.Subspace.Tuples.ToRange().End; var output = await tr .GetRange(start, end) @@ -259,7 +259,7 @@ public async Task ResizeAsync([NotNull] IFdbTransaction tr, long length) if (length < currentSize) { - tr.ClearRange(GetKeyAt(length), this.Subspace.ToRange().End); + tr.ClearRange(GetKeyAt(length), this.Subspace.Tuples.ToRange().End); // Check if the new end of the vector was being sparsely represented if (await ComputeSizeAsync(tr).ConfigureAwait(false) < length) @@ -287,7 +287,7 @@ private async Task ComputeSizeAsync(IFdbReadOnlyTransaction tr) { Contract.Requires(tr != null); - var keyRange = this.Subspace.ToRange(); + var keyRange = this.Subspace.Tuples.ToRange(); var lastKey = await tr.GetKeyAsync(FdbKeySelector.LastLessOrEqual(keyRange.End)).ConfigureAwait(false); diff --git a/FoundationDB.Layers.Common/Counters/FdbHighContentionCounter.cs b/FoundationDB.Layers.Common/Counters/FdbHighContentionCounter.cs index 61c9e5155..f00a0d0d9 100644 --- a/FoundationDB.Layers.Common/Counters/FdbHighContentionCounter.cs +++ b/FoundationDB.Layers.Common/Counters/FdbHighContentionCounter.cs @@ -105,8 +105,8 @@ private async Task Coalesce(int N, CancellationToken ct) bool right; lock(this.Rng) { right = this.Rng.NextDouble() < 0.5; } var query = right - ? tr.Snapshot.GetRange(loc, this.Subspace.ToRange().End, limit: N, reverse: false) - : tr.Snapshot.GetRange(this.Subspace.ToRange().Begin, loc, limit: N, reverse: true); + ? 
tr.Snapshot.GetRange(loc, this.Subspace.Tuples.ToRange().End, limit: N, reverse: false) + : tr.Snapshot.GetRange(this.Subspace.Tuples.ToRange().Begin, loc, limit: N, reverse: true); var shards = await query.ToListAsync().ConfigureAwait(false); if (shards.Count > 0) @@ -176,7 +176,7 @@ public async Task GetTransactional(IFdbReadOnlyTransaction trans) long total = 0; await trans - .GetRange(this.Subspace.ToRange()) + .GetRange(this.Subspace.Tuples.ToRange()) .ForEachAsync((kvp) => { checked { total += this.Encoder.DecodeValue(kvp.Value); } }) .ConfigureAwait(false); diff --git a/FoundationDB.Layers.Common/Indexes/FdbIndex`2.cs b/FoundationDB.Layers.Common/Indexes/FdbIndex`2.cs index 5d955aa0e..e25619b96 100644 --- a/FoundationDB.Layers.Common/Indexes/FdbIndex`2.cs +++ b/FoundationDB.Layers.Common/Indexes/FdbIndex`2.cs @@ -163,7 +163,7 @@ public FdbRangeQuery LookupGreaterThan([NotNull] IFdbReadOnlyTransaction tr var space = new FdbKeySelectorPair( FdbKeySelector.FirstGreaterThan(prefix), - this.Location.ToSelectorPair().End + FdbKeySelector.FirstGreaterOrEqual(this.Location.Tuples.ToRange().End) ); return trans @@ -178,7 +178,7 @@ public FdbRangeQuery LookupLessThan([NotNull] IFdbReadOnlyTransaction trans if (orEqual) prefix = FdbKey.Increment(prefix); var space = new FdbKeySelectorPair( - this.Location.ToSelectorPair().Begin, + FdbKeySelector.FirstGreaterOrEqual(this.Location.Tuples.ToRange().Begin), FdbKeySelector.FirstGreaterThan(prefix) ); diff --git a/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs b/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs index d614d29c6..a4d36e3f0 100644 --- a/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs +++ b/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs @@ -124,7 +124,7 @@ public FdbWorkerPool(IFdbSubspace subspace) private async Task> FindRandomItem(IFdbTransaction tr, IFdbSubspace ring) { - var range = ring.ToRange(); + var range = ring.Tuples.ToRange(); // start from a random 
position around the ring Slice key = ring.Tuples.EncodeKey(GetRandomId()); @@ -161,7 +161,7 @@ private async Task PushQueueAsync(IFdbTransaction tr, IFdbSubspace queue, Slice // - an empty queue must correspond to an empty subspace // get the current size of the queue - var range = queue.ToRange(); + var range = queue.Tuples.ToRange(); var lastKey = await tr.Snapshot.GetKeyAsync(FdbKeySelector.LastLessThan(range.End)).ConfigureAwait(false); int count = lastKey < range.Begin ? 0 : queue.Tuples.DecodeFirst(lastKey) + 1; @@ -292,7 +292,7 @@ await db.ReadWriteAsync( tr.Annotate("Look for next queued item"); // Find the next task on the queue - var item = await tr.GetRange(this.UnassignedTaskRing.ToRange()).FirstOrDefaultAsync().ConfigureAwait(false); + var item = await tr.GetRange(this.UnassignedTaskRing.Tuples.ToRange()).FirstOrDefaultAsync().ConfigureAwait(false); if (item.Key != null) { // pop the Task from the queue diff --git a/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs b/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs index 81c9426ac..683f0657b 100644 --- a/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs +++ b/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs @@ -149,7 +149,7 @@ public async Task RunStatus(IFdbDatabase db, CancellationToken ct) using(var tr = db.BeginTransaction(ct)) { - var counters = await tr.Snapshot.GetRange(countersLocation.ToRange()).Select(kvp => new KeyValuePair(countersLocation.Tuples.DecodeLast(kvp.Key), kvp.Value.ToInt64())).ToListAsync().ConfigureAwait(false); + var counters = await tr.Snapshot.GetRange(countersLocation.Tuples.ToRange()).Select(kvp => new KeyValuePair(countersLocation.Tuples.DecodeLast(kvp.Key), kvp.Value.ToInt64())).ToListAsync().ConfigureAwait(false); Console.WriteLine("Status at " + DateTimeOffset.Now.ToString("O")); foreach(var counter in counters) @@ -159,22 +159,22 @@ public async Task RunStatus(IFdbDatabase db, CancellationToken ct) Console.WriteLine("Dump:"); 
Console.WriteLine("> Idle"); - await tr.Snapshot.GetRange(idleLocation.ToRange()).ForEachAsync((kvp) => + await tr.Snapshot.GetRange(idleLocation.Tuples.ToRange()).ForEachAsync((kvp) => { Console.WriteLine("- Idle." + idleLocation.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("> Busy"); - await tr.Snapshot.GetRange(busyLocation.ToRange()).ForEachAsync((kvp) => + await tr.Snapshot.GetRange(busyLocation.Tuples.ToRange()).ForEachAsync((kvp) => { Console.WriteLine("- Busy." + busyLocation.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("> Unassigned"); - await tr.Snapshot.GetRange(unassignedLocation.ToRange()).ForEachAsync((kvp) => + await tr.Snapshot.GetRange(unassignedLocation.Tuples.ToRange()).ForEachAsync((kvp) => { Console.WriteLine("- Unassigned." + unassignedLocation.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("> Tasks"); - await tr.Snapshot.GetRange(tasksLocation.ToRange()).ForEachAsync((kvp) => + await tr.Snapshot.GetRange(tasksLocation.Tuples.ToRange()).ForEachAsync((kvp) => { Console.WriteLine("- Tasks." 
+ tasksLocation.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }); diff --git a/FoundationDB.Samples/Tutorials/ClassScheduling.cs b/FoundationDB.Samples/Tutorials/ClassScheduling.cs index d7f8948f1..7bdeb13ae 100644 --- a/FoundationDB.Samples/Tutorials/ClassScheduling.cs +++ b/FoundationDB.Samples/Tutorials/ClassScheduling.cs @@ -48,7 +48,7 @@ protected Slice AttendsKey(string s, string c) protected FdbKeyRange AttendsKeys(string s) { - return this.Subspace.ToRange(FdbTuple.EncodeKey("attends", s)); + return this.Subspace.Tuples.ToRange(FdbTuple.Create("attends", s)); } /// @@ -78,7 +78,7 @@ await db.WriteAsync((tr) => /// public Task> AvailableClasses(IFdbReadOnlyTransaction tr) { - return tr.GetRange(this.Subspace.ToRange(FdbTuple.EncodeKey("class"))) + return tr.GetRange(this.Subspace.Tuples.ToRange(FdbTuple.Create("class"))) .Where(kvp => { int _; return Int32.TryParse(kvp.Value.ToAscii(), out _); }) // (step 3) .Select(kvp => this.Subspace.Tuples.DecodeKey(kvp.Key)) .ToListAsync(); diff --git a/FoundationDB.Tests/DatabaseBulkFacts.cs b/FoundationDB.Tests/DatabaseBulkFacts.cs index b43fc28e6..135966ef3 100644 --- a/FoundationDB.Tests/DatabaseBulkFacts.cs +++ b/FoundationDB.Tests/DatabaseBulkFacts.cs @@ -168,7 +168,7 @@ public async Task Test_Can_Bulk_Insert_Items() var stored = await db.ReadAsync((tr) => { - return tr.GetRange(location.ToRange()).ToArrayAsync(); + return tr.GetRange(location.Tuples.ToRange()).ToArrayAsync(); }, this.Cancellation); Assert.That(stored.Length, Is.EqualTo(N), "DB contains less or more items than expected"); @@ -316,7 +316,7 @@ public async Task Test_Can_Bulk_Batched_Insert_Items() var stored = await db.ReadAsync((tr) => { - return tr.GetRange(location.ToRange()).ToArrayAsync(); + return tr.GetRange(location.Tuples.ToRange()).ToArrayAsync(); }, this.Cancellation); Assert.That(stored.Length, Is.EqualTo(N), "DB contains less or more items than expected"); @@ -613,7 +613,7 @@ await Fdb.Bulk.WriteAsync( { double 
average = await Fdb.Bulk.ExportAsync( db, - location.ToRange(), + location.Tuples.ToRange(), async (xs, pos, ct) => { Assert.That(xs, Is.Not.Null); diff --git a/FoundationDB.Tests/Layers/DirectoryFacts.cs b/FoundationDB.Tests/Layers/DirectoryFacts.cs index 7f5ef5f8f..3e290002d 100644 --- a/FoundationDB.Tests/Layers/DirectoryFacts.cs +++ b/FoundationDB.Tests/Layers/DirectoryFacts.cs @@ -889,6 +889,11 @@ public async Task Test_Directory_Partitions_Should_Disallow_Creation_Of_Direct_K shouldFail(() => { var _ = partition.Keys[location.Key]; }); shouldFail(() => { var _ = partition.Keys[location]; }); + shouldFail(() => partition.Keys.ToRange()); + shouldFail(() => partition.Keys.ToRange(Slice.FromString("hello"))); + shouldFail(() => partition.Keys.ToRange(FdbTuple.EncodeKey("hello"))); + shouldFail(() => partition.Keys.ToRange(location)); + // Tuples shouldFail(() => partition.Tuples.EncodeKey(123)); @@ -920,15 +925,9 @@ public async Task Test_Directory_Partitions_Should_Disallow_Creation_Of_Direct_K shouldFail(() => partition.Tuples.Concat(FdbTuple.Create(123, "hello", false, "world"))); shouldFail(() => partition.Tuples.Append(new object[] { 123, "hello", false, "world" })); - // ToRange - shouldFail(() => partition.ToRange()); - shouldFail(() => partition.ToRange(Slice.FromString("hello"))); - shouldFail(() => partition.ToRange(FdbTuple.Create("hello"))); - shouldFail(() => partition.ToRange(location)); - - // ToSelectorPair - shouldFail(() => partition.ToSelectorPair()); - + shouldFail(() => partition.Tuples.ToRange()); + shouldFail(() => partition.Tuples.ToRange(Slice.FromString("hello"))); + shouldFail(() => partition.Tuples.ToRange(FdbTuple.Create("hello"))); } } diff --git a/FoundationDB.Tests/Layers/RankedSetFacts.cs b/FoundationDB.Tests/Layers/RankedSetFacts.cs index 45283d93e..8ef481895 100644 --- a/FoundationDB.Tests/Layers/RankedSetFacts.cs +++ b/FoundationDB.Tests/Layers/RankedSetFacts.cs @@ -59,11 +59,16 @@ await db.ReadWriteAsync(async (tr) => await 
PrintRankedSet(vector, tr); }, this.Cancellation); + Console.WriteLine(); var rnd = new Random(); - for (int i = 0; i < 1000; i++) + var sw = Stopwatch.StartNew(); + for (int i = 0; i < 100; i++) { + Console.Write("\rInserting " + i); await db.ReadWriteAsync((tr) => vector.InsertAsync(tr, FdbTuple.EncodeKey(rnd.Next())), this.Cancellation); } + sw.Stop(); + Console.WriteLine("\rDone in {0:N3} sec", sw.Elapsed.TotalSeconds); await db.ReadAsync((tr) => PrintRankedSet(vector, tr), this.Cancellation); } @@ -75,7 +80,7 @@ private static async Task PrintRankedSet(FdbRankedSet rs, IFdbReadOnlyTransactio for (int l = 0; l < 6; l++) { sb.AppendFormat("Level {0}:\r\n", l); - await tr.GetRange(rs.Subspace.Partition.ByKey(l).ToRange()).ForEachAsync((kvp) => + await tr.GetRange(rs.Subspace.Partition.ByKey(l).Tuples.ToRange()).ForEachAsync((kvp) => { sb.AppendFormat("\t{0} = {1}\r\n", rs.Subspace.Tuples.Unpack(kvp.Key), kvp.Value.ToInt64()); }); diff --git a/FoundationDB.Tests/Layers/VectorFacts.cs b/FoundationDB.Tests/Layers/VectorFacts.cs index 57f10c910..eb5001275 100644 --- a/FoundationDB.Tests/Layers/VectorFacts.cs +++ b/FoundationDB.Tests/Layers/VectorFacts.cs @@ -172,7 +172,7 @@ private static async Task PrintVector(FdbVector vector, IFdbReadOnlyTransa bool first = true; var sb = new StringBuilder(); - await tr.GetRange(vector.Subspace.ToRange()).ForEachAsync((kvp) => + await tr.GetRange(vector.Subspace.Tuples.ToRange()).ForEachAsync((kvp) => { if (!first) sb.Append(", "); else first = false; sb.Append(vector.Subspace.Tuples.DecodeLast(kvp.Key) + ":" + kvp.Value.ToAsciiOrHexaString()); diff --git a/FoundationDB.Tests/RangeQueryFacts.cs b/FoundationDB.Tests/RangeQueryFacts.cs index fcce3d1b0..b7ad5d1e9 100644 --- a/FoundationDB.Tests/RangeQueryFacts.cs +++ b/FoundationDB.Tests/RangeQueryFacts.cs @@ -147,7 +147,7 @@ await db.WriteAsync((tr) => // A: more then one item using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(a.ToRange()); + 
var query = tr.GetRange(a.Tuples.ToRange()); // should return the first one res = await query.FirstOrDefaultAsync(); @@ -179,7 +179,7 @@ await db.WriteAsync((tr) => // B: exactly one item using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(b.ToRange()); + var query = tr.GetRange(b.Tuples.ToRange()); // should return the first one res = await query.FirstOrDefaultAsync(); @@ -215,7 +215,7 @@ await db.WriteAsync((tr) => // C: no items using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(c.ToRange()); + var query = tr.GetRange(c.Tuples.ToRange()); // should return nothing res = await query.FirstOrDefaultAsync(); @@ -245,7 +245,7 @@ await db.WriteAsync((tr) => // A: with a size limit using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(a.ToRange()).Take(5); + var query = tr.GetRange(a.Tuples.ToRange()).Take(5); // should return the fifth one res = await query.LastOrDefaultAsync(); @@ -261,7 +261,7 @@ await db.WriteAsync((tr) => // A: with an offset using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(a.ToRange()).Skip(5); + var query = tr.GetRange(a.Tuples.ToRange()).Skip(5); // should return the fifth one res = await query.FirstOrDefaultAsync(); @@ -304,7 +304,7 @@ await db.WriteAsync((tr) => using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(a.ToRange()).Take(5); + var query = tr.GetRange(a.Tuples.ToRange()).Take(5); Assert.That(query, Is.Not.Null); Assert.That(query.Limit, Is.EqualTo(5)); @@ -322,7 +322,7 @@ await db.WriteAsync((tr) => using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(a.ToRange()).Take(12); + var query = tr.GetRange(a.Tuples.ToRange()).Take(12); Assert.That(query, Is.Not.Null); Assert.That(query.Limit, Is.EqualTo(12)); @@ -340,7 +340,7 @@ await db.WriteAsync((tr) => using (var tr = 
db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(a.ToRange()).Take(0); + var query = tr.GetRange(a.Tuples.ToRange()).Take(0); Assert.That(query, Is.Not.Null); Assert.That(query.Limit, Is.EqualTo(0)); @@ -367,7 +367,7 @@ public async Task Test_Can_Skip() // from the start using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(location.ToRange()); + var query = tr.GetRange(location.Tuples.ToRange()); // |>>>>>>>>>>>>(50---------->99)| var res = await query.Skip(50).ToListAsync(); @@ -394,7 +394,7 @@ public async Task Test_Can_Skip() // from the end using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(location.ToRange()); + var query = tr.GetRange(location.Tuples.ToRange()); // |(0 <--------- 49)<<<<<<<<<<<<<| var res = await query.Reverse().Skip(50).ToListAsync(); @@ -421,7 +421,7 @@ public async Task Test_Can_Skip() // from both sides using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(location.ToRange()); + var query = tr.GetRange(location.Tuples.ToRange()); // |>>>>>>>>>(25<------------74)<<<<<<<<| var res = await query.Skip(25).Reverse().Skip(25).ToListAsync(); @@ -514,7 +514,7 @@ public async Task Test_Can_MergeSort() using (var tr = db.BeginTransaction(this.Cancellation)) { var merge = tr.MergeSort( - lists.Select(list => list.ToSelectorPair()), + lists.Select(list => FdbKeySelectorPair.Create(list.Tuples.ToRange())), kvp => location.Tuples.DecodeLast(kvp.Key) ); @@ -587,7 +587,7 @@ public async Task Test_Range_Intersect() using (var tr = db.BeginTransaction(this.Cancellation)) { var merge = tr.Intersect( - lists.Select(list => list.ToSelectorPair()), + lists.Select(list => FdbKeySelectorPair.Create(list.Tuples.ToRange())), kvp => location.Tuples.DecodeLast(kvp.Key) ); @@ -659,7 +659,7 @@ public async Task Test_Range_Except() using (var tr = db.BeginTransaction(this.Cancellation)) { var merge = tr.Except( - 
lists.Select(list => list.ToSelectorPair()), + lists.Select(list => FdbKeySelectorPair.Create(list.Tuples.ToRange())), kvp => location.Tuples.DecodeLast(kvp.Key) ); @@ -714,7 +714,7 @@ await db.WriteAsync((tr) => var results = await db.QueryAsync((tr) => { var query = tr.Except( - new[] { locItems.ToRange(), locProcessed.ToRange() }, + new[] { locItems.Tuples.ToRange(), locProcessed.Tuples.ToRange() }, (kv) => FdbTuple.Unpack(kv.Key).Substring(-2), // note: keys come from any of the two ranges, so we must only keep the last 2 elements of the tuple FdbTupleComparisons.Composite() // compares t[0] as a string, and t[1] as an int ); @@ -737,11 +737,11 @@ await db.WriteAsync((tr) => results = await db.QueryAsync((tr) => { var items = tr - .GetRange(locItems.ToRange()) + .GetRange(locItems.Tuples.ToRange()) .Select(kv => locItems.Tuples.Unpack(kv.Key)); var processed = tr - .GetRange(locProcessed.ToRange()) + .GetRange(locProcessed.Tuples.ToRange()) .Select(kv => locProcessed.Tuples.Unpack(kv.Key)); // items and processed are lists of (string, int) tuples, we can compare them directly diff --git a/FoundationDB.Tests/TransactionFacts.cs b/FoundationDB.Tests/TransactionFacts.cs index 5f9fb199f..aceae2ace 100644 --- a/FoundationDB.Tests/TransactionFacts.cs +++ b/FoundationDB.Tests/TransactionFacts.cs @@ -1310,7 +1310,7 @@ await db.WriteAsync((tr) => { var data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("a")); - var res = await tr.GetRange(b.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); + var res = await tr.GetRange(b.Tuples.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); Assert.That(res, Is.EqualTo(new [] { "PRINT \"HELLO\"", "GOTO 10" })); tr.Set(a, Slice.FromString("aa")); @@ -1318,7 +1318,7 @@ await db.WriteAsync((tr) => data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("aa"), "The transaction own writes should be visible by default"); - res = await tr.GetRange(b.ToRange()).Select(kvp => 
kvp.Value.ToString()).ToArrayAsync(); + res = await tr.GetRange(b.Tuples.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); Assert.That(res, Is.EqualTo(new[] { "PRINT \"HELLO\"", "PRINT \"WORLD\"", "GOTO 10" }), "The transaction own writes should be visible by default"); //note: don't commit @@ -1336,7 +1336,7 @@ await db.WriteAsync((tr) => var data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("a")); - var res = await tr.GetRange(b.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); + var res = await tr.GetRange(b.Tuples.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); Assert.That(res, Is.EqualTo(new[] { "PRINT \"HELLO\"", "GOTO 10" })); tr.Set(a, Slice.FromString("aa")); @@ -1344,7 +1344,7 @@ await db.WriteAsync((tr) => data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("a"), "The transaction own writes should not be seen with ReadYourWritesDisable option enabled"); - res = await tr.GetRange(b.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); + res = await tr.GetRange(b.Tuples.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); Assert.That(res, Is.EqualTo(new[] { "PRINT \"HELLO\"", "GOTO 10" }), "The transaction own writes should not be seen with ReadYourWritesDisable option enabled"); //note: don't commit diff --git a/FoundationDB.Tests/TransactionalFacts.cs b/FoundationDB.Tests/TransactionalFacts.cs index cc0e8f75c..4ced3e7ea 100644 --- a/FoundationDB.Tests/TransactionalFacts.cs +++ b/FoundationDB.Tests/TransactionalFacts.cs @@ -172,7 +172,7 @@ public async Task Test_Transactionals_Retries_Do_Not_Leak_When_Reading_Too_Much( var result = await db.ReadAsync((tr) => { Console.WriteLine("Retry #" + tr.Context.Retries + " @ " + tr.Context.Duration.Elapsed); - return tr.GetRange(location.ToRange()).ToListAsync(); + return tr.GetRange(location.Tuples.ToRange()).ToListAsync(); }, this.Cancellation); Assert.Fail("Too fast! 
increase the amount of inserted data, or slow down the system!"); diff --git a/README.md b/README.md index 01601f865..0b6991e67 100644 --- a/README.md +++ b/README.md @@ -143,12 +143,12 @@ using (var db = await Fdb.OpenAsync()) { // do a range query on the list subspace, which should return all the pairs // in the subspace, one for each entry in the array. - // We exploit the fact that subspace.ToRange() usually does not include the - // subspace prefix itself, because we don't want our counter to be returned + // We exploit the fact that subspace.Tuples.ToRange() usually does not include + // the subspace prefix itself, because we don't want our counter to be returned // with the query itself. return trans // ask for all keys that are _inside_ our subspace - .GetRange(list.ToRange()) + .GetRange(list.Tuples.ToRange()) // transform the resultoing KeyValuePair into something // nicer to use, like a typed KeyValuePair .Select((kvp) => From 9582300806795bfb7ca591702d095b2623dd0e63 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Wed, 19 Nov 2014 21:20:46 +0100 Subject: [PATCH 10/63] Tuples: adding With(..) and OfSize(..) helpers - FdbTuple.Create(123, "abc").With((x, y) => ... ) will have x == 123 and y == "abc" - tuple.OfSize(3).Foo(...) will verify that tuple.Count == 3, and throw if this is not the case. - inspired by https://github.com/louthy/language-ext ! 
--- .../Encoders/KeyValueEncoders.cs | 14 +- .../Layers/Tuples/FdbTupleExtensions.cs | 195 ++++++++++- .../Layers/Tuples/FdbTuple`1.cs | 15 + .../Layers/Tuples/FdbTuple`2.cs | 17 +- .../Layers/Tuples/FdbTuple`3.cs | 15 + .../Layers/Tuples/FdbTuple`4.cs | 15 + .../Layers/Tuples/FdbTuple`5.cs | 15 + .../Formatters/FdbGenericTupleFormatter.cs | 6 +- FoundationDB.Tests/Layers/TupleFacts.cs | 302 ++++++++++++++++++ 9 files changed, 579 insertions(+), 15 deletions(-) diff --git a/FoundationDB.Client/Encoders/KeyValueEncoders.cs b/FoundationDB.Client/Encoders/KeyValueEncoders.cs index a4e7f9afc..5e44469e3 100644 --- a/FoundationDB.Client/Encoders/KeyValueEncoders.cs +++ b/FoundationDB.Client/Encoders/KeyValueEncoders.cs @@ -108,7 +108,7 @@ public T DecodeValue(Slice encoded) { return m_decoder(encoded); } - + } /// Wrapper for encoding and decoding a pair with lambda functions @@ -644,7 +644,7 @@ public T DecodeValue(Slice encoded) if (encoded.IsNullOrEmpty) return default(T); //BUGBUG return FdbTuple.DecodeKey(encoded); } - + } internal class TupleCompositeEncoder : CompositeKeyEncoder @@ -668,9 +668,8 @@ public override FdbTuple DecodeComposite(Slice encoded, int items) { if (items < 1 || items > 2) throw new ArgumentOutOfRangeException("items", items, "Item count must be either 1 or 2"); - var t = FdbTuple.Unpack(encoded); + var t = FdbTuple.Unpack(encoded).OfSize(items); Contract.Assert(t != null); - if (t.Count != items) throw new ArgumentException(String.Format("Was expected {0} items, but decoded tuple only has {1}", items, t.Count)); return FdbTuple.Create( t.Get(0), @@ -701,9 +700,8 @@ public override FdbTuple DecodeComposite(Slice encoded, int items) { if (items < 1 || items > 3) throw new ArgumentOutOfRangeException("items", items, "Item count must be between 1 and 3"); - var t = FdbTuple.Unpack(encoded); + var t = FdbTuple.Unpack(encoded).OfSize(items); Contract.Assert(t != null); - if (t.Count != items) throw new ArgumentException(String.Format("Was expected 
{0} items, but decoded tuple only has {1}", items, t.Count)); return FdbTuple.Create( t.Get(0), @@ -736,9 +734,7 @@ public override FdbTuple DecodeComposite(Slice encoded, int item { if (items < 1 || items > 4) throw new ArgumentOutOfRangeException("items", items, "Item count must be between 1 and 4"); - var t = FdbTuple.Unpack(encoded); - Contract.Assert(t != null); - if (t.Count != items) throw new ArgumentException(String.Format("Was expected {0} items, but decoded tuple only has {1}", items, t.Count)); + var t = FdbTuple.Unpack(encoded).OfSize(items); return FdbTuple.Create( t.Get(0), diff --git a/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs index 63d6127fd..ba5a0d5ec 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs @@ -79,7 +79,7 @@ public static T[] ToArray([NotNull] this IFdbTuple tuple) items[i] = tuple.Get(i); } } - return items; + return items; } /// Returns a byte array containing the packed version of a tuple @@ -281,6 +281,199 @@ public static FdbKeySelectorPair ToSelectorPair([NotNull] this IFdbTuple tuple) return FdbKeySelectorPair.StartsWith(tuple.ToSlice()); } + /// Verify that this tuple has the expected size + /// Tuple which must be of a specific size + /// Expected number of items in this tuple + /// The itself it it has the correct size; otherwise, an exception is thrown + /// If is null + /// If is smaller or larger than + [ContractAnnotation("halt <= tuple:null")] + [NotNull] + public static IFdbTuple OfSize(this IFdbTuple tuple, int size) + { + if (tuple == null || tuple.Count != size) ThrowInvalidTupleSize(tuple, size, 0); + return tuple; + } + + /// Verify that this tuple has at least a certain size + /// Tuple which must be of a specific size + /// Expected minimum number of items in this tuple + /// The itself it it has the correct size; otherwise, an exception is thrown + /// If is null 
+ /// If is smaller than + [ContractAnnotation("halt <= tuple:null")] + [NotNull] + public static IFdbTuple OfSizeAtLeast(this IFdbTuple tuple, int size) + { + if (tuple == null || tuple.Count < size) ThrowInvalidTupleSize(tuple, size, -1); + return tuple; + } + + /// Verify that this tuple has at most a certain size + /// Tuple which must be of a specific size + /// Expected maximum number of items in this tuple + /// The itself it it has the correct size; otherwise, an exception is thrown + /// If is null + /// If is larger than + [ContractAnnotation("halt <= tuple:null")] + [NotNull] + public static IFdbTuple OfSizeAtMost(this IFdbTuple tuple, int size) + { + if (tuple == null || tuple.Count > size) ThrowInvalidTupleSize(tuple, size, 1); + return tuple; + } + + [ContractAnnotation("=> halt")] + internal static void ThrowInvalidTupleSize(IFdbTuple tuple, int expected, int test) + { + if (tuple == null) + { + throw new ArgumentNullException("tuple"); + } + switch(test) + { + case 1: throw new InvalidOperationException(String.Format("This operation requires a tuple of size {0} or less, but this tuple has {1} elements", expected, tuple.Count)); + case -1: throw new InvalidOperationException(String.Format("This operation requires a tuple of size {0} or more, but this tuple has {1} elements", expected, tuple.Count)); + default: throw new InvalidOperationException(String.Format("This operation requires a tuple of size {0}, but this tuple has {1} elements", expected, tuple.Count)); + } + } + + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) + { + OfSize(tuple, 1); + lambda(tuple.Get(0)); + } + + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) 
+ { + OfSize(tuple, 2); + lambda(tuple.Get(0), tuple.Get(1)); + } + + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) + { + OfSize(tuple, 3); + lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2)); + } + + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) + { + OfSize(tuple, 4); + lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3)); + } + + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) + { + OfSize(tuple, 5); + lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3), tuple.Get(4)); + } + + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) + { + OfSize(tuple, 6); + lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3), tuple.Get(4), tuple.Get(5)); + } + + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) + { + OfSize(tuple, 7); + lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3), tuple.Get(4), tuple.Get(5), tuple.Get(6)); + } + + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) + { + OfSize(tuple, 8); + lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), 
tuple.Get(3), tuple.Get(4), tuple.Get(5), tuple.Get(6), tuple.Get(7)); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// Result of calling with the items of this tuple + public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + { + OfSize(tuple, 1); + return lambda(tuple.Get(0)); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// Result of calling with the items of this tuple + public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + { + OfSize(tuple, 2); + return lambda(tuple.Get(0), tuple.Get(1)); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// Result of calling with the items of this tuple + public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + { + OfSize(tuple, 3); + return lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2)); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// Result of calling with the items of this tuple + public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + { + OfSize(tuple, 4); + return lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3)); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// Result of calling with the items of this tuple + public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + { + OfSize(tuple, 5); + return lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3), tuple.Get(4)); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// 
Result of calling with the items of this tuple + public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + { + OfSize(tuple, 6); + return lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3), tuple.Get(4), tuple.Get(5)); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// Result of calling with the items of this tuple + public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + { + OfSize(tuple, 7); + return lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3), tuple.Get(4), tuple.Get(5), tuple.Get(6)); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// Result of calling with the items of this tuple + public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + { + OfSize(tuple, 8); + return lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3), tuple.Get(4), tuple.Get(5), tuple.Get(6), tuple.Get(7)); + } + #endregion } diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`1.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`1.cs index c4663aa28..4f42585e1 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`1.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`1.cs @@ -116,6 +116,21 @@ public void CopyTo([NotNull] object[] array, int offset) array[offset] = this.Item1; } + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public void With([NotNull] Action lambda) + { + lambda(this.Item1); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// Result of calling with the items of this tuple + public R With([NotNull] Func lambda) + { + return lambda(this.Item1); + } + public IEnumerator GetEnumerator() { yield return 
this.Item1; diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`2.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`2.cs index 2de75ecec..8bc12a5e2 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`2.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`2.cs @@ -86,7 +86,7 @@ public object this[int index] public R Get(int index) { switch(index) - { + { case 0: case -2: return FdbConverters.Convert(this.Item1); case 1: case -1: return FdbConverters.Convert(this.Item2); default: FdbTuple.FailIndexOutOfRange(index, 2); return default(R); @@ -133,6 +133,21 @@ public void CopyTo(object[] array, int offset) array[offset + 1] = this.Item2; } + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public void With([NotNull] Action lambda) + { + lambda(this.Item1, this.Item2); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// Result of calling with the items of this tuple + public R With([NotNull] Func lambda) + { + return lambda(this.Item1, this.Item2); + } + public IEnumerator GetEnumerator() { yield return this.Item1; diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`3.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`3.cs index e6c6c9689..31d2d57c1 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`3.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`3.cs @@ -152,6 +152,21 @@ public void CopyTo(object[] array, int offset) array[offset + 2] = this.Item3; } + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public void With([NotNull] Action lambda) + { + lambda(this.Item1, this.Item2, this.Item3); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// Result of calling with the items of this tuple + public 
R With([NotNull] Func lambda) + { + return lambda(this.Item1, this.Item2, this.Item3); + } + public IEnumerator GetEnumerator() { yield return this.Item1; diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`4.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`4.cs index 84a87536d..bd1d2cabf 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`4.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`4.cs @@ -152,6 +152,21 @@ public void CopyTo(object[] array, int offset) array[offset + 3] = this.Item4; } + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public void With([NotNull] Action lambda) + { + lambda(this.Item1, this.Item2, this.Item3, this.Item4); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// Result of calling with the items of this tuple + public R With([NotNull] Func lambda) + { + return lambda(this.Item1, this.Item2, this.Item3, this.Item4); + } + public IEnumerator GetEnumerator() { yield return this.Item1; diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple`5.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple`5.cs index f8d72e235..cad413598 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple`5.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple`5.cs @@ -160,6 +160,21 @@ public void CopyTo(object[] array, int offset) array[offset + 4] = this.Item5; } + /// Execute a lambda Action with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + public void With([NotNull] Action lambda) + { + lambda(this.Item1, this.Item2, this.Item3, this.Item4, this.Item5); + } + + /// Execute a lambda Function with the content of this tuple + /// Action that will be passed the content of this tuple as parameters + /// Result of calling with the items of this tuple + public R With([NotNull] Func lambda) + { + return lambda(this.Item1, 
this.Item2, this.Item3, this.Item4, this.Item5); + } + public IEnumerator GetEnumerator() { yield return this.Item1; diff --git a/FoundationDB.Client/Layers/Tuples/Formatters/FdbGenericTupleFormatter.cs b/FoundationDB.Client/Layers/Tuples/Formatters/FdbGenericTupleFormatter.cs index 8ddfc1e23..29edfb904 100644 --- a/FoundationDB.Client/Layers/Tuples/Formatters/FdbGenericTupleFormatter.cs +++ b/FoundationDB.Client/Layers/Tuples/Formatters/FdbGenericTupleFormatter.cs @@ -33,6 +33,7 @@ namespace FoundationDB.Layers.Tuples /// Simple key formatter that maps a value into a singleton tuple, and back internal sealed class FdbGenericTupleFormatter : ITupleFormatter { + public IFdbTuple ToTuple(T key) { return FdbTuple.Create(key); @@ -40,10 +41,7 @@ public IFdbTuple ToTuple(T key) public T FromTuple(IFdbTuple tuple) { - if (tuple == null) throw new ArgumentNullException("tuple"); - if (tuple.Count != 1) throw new ArgumentException("Tuple must have only one item", "tuple"); - - return tuple.Get(0); + return tuple.OfSize(1).Get(0); } } diff --git a/FoundationDB.Tests/Layers/TupleFacts.cs b/FoundationDB.Tests/Layers/TupleFacts.cs index 20c10cbd2..c4a03a49d 100644 --- a/FoundationDB.Tests/Layers/TupleFacts.cs +++ b/FoundationDB.Tests/Layers/TupleFacts.cs @@ -356,6 +356,308 @@ public void Test_FdbTuple_Embedded_Tuples() Assert.That(z.Count, Is.EqualTo(4)); } + [Test] + public void Test_FdbTuple_With() + { + //note: important to always cast to (IFdbTuple) to be sure that we don't call specialized instance methods (tested elsewhere) + IFdbTuple t; + + // Size 1 + + t = FdbTuple.Create(123); + t.With((int a) => + { + Assert.That(a, Is.EqualTo(123)); + }); + Assert.That(t.With((int a) => + { + Assert.That(a, Is.EqualTo(123)); + return 42; + }), Is.EqualTo(42)); + + // Size 2 + + t = t.Append("abc"); + t.With((int a, string b) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + }); + Assert.That(t.With((int a, string b) => + { + Assert.That(a, 
Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + return 42; + }), Is.EqualTo(42)); + + // Size 3 + + t = t.Append(3.14f); + t.With((int a, string b, float c) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + }); + Assert.That(t.With((int a, string b, float c) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + return 42; + }), Is.EqualTo(42)); + + // Size 4 + + t = t.Append(true); + t.With((int a, string b, float c, bool d) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + }); + Assert.That(t.With((int a, string b, float c, bool d) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + return 42; + }), Is.EqualTo(42)); + + // Size 5 + + t = t.Append('z'); + t.With((int a, string b, float c, bool d, char e) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + Assert.That(e, Is.EqualTo('z')); + }); + Assert.That(t.With((int a, string b, float c, bool d, char e) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + Assert.That(e, Is.EqualTo('z')); + return 42; + }), Is.EqualTo(42)); + + // Size 6 + + t = t.Append(Math.PI); + t.With((int a, string b, float c, bool d, char e, double f) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + Assert.That(e, Is.EqualTo('z')); + Assert.That(f, Is.EqualTo(Math.PI)); + }); + Assert.That(t.With((int a, string b, float c, bool d, char e, double f) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + 
Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + Assert.That(e, Is.EqualTo('z')); + Assert.That(f, Is.EqualTo(Math.PI)); + return 42; + }), Is.EqualTo(42)); + + // Size 7 + + t = t.Append(IPAddress.Loopback); + t.With((int a, string b, float c, bool d, char e, double f, IPAddress g) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + Assert.That(e, Is.EqualTo('z')); + Assert.That(f, Is.EqualTo(Math.PI)); + Assert.That(g, Is.EqualTo(IPAddress.Loopback)); + }); + Assert.That(t.With((int a, string b, float c, bool d, char e, double f, IPAddress g) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + Assert.That(e, Is.EqualTo('z')); + Assert.That(f, Is.EqualTo(Math.PI)); + Assert.That(g, Is.EqualTo(IPAddress.Loopback)); + return 42; + }), Is.EqualTo(42)); + + // Size 8 + + t = t.Append(DateTime.MaxValue); + t.With((int a, string b, float c, bool d, char e, double f, IPAddress g, DateTime h) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + Assert.That(e, Is.EqualTo('z')); + Assert.That(f, Is.EqualTo(Math.PI)); + Assert.That(g, Is.EqualTo(IPAddress.Loopback)); + Assert.That(h, Is.EqualTo(DateTime.MaxValue)); + }); + Assert.That(t.With((int a, string b, float c, bool d, char e, double f, IPAddress g, DateTime h) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + Assert.That(e, Is.EqualTo('z')); + Assert.That(f, Is.EqualTo(Math.PI)); + Assert.That(g, Is.EqualTo(IPAddress.Loopback)); + Assert.That(h, Is.EqualTo(DateTime.MaxValue)); + return 42; + }), Is.EqualTo(42)); + + } + + [Test] + public void Test_FdbTuple_With_Struct() + { + // calling With() on the structs is faster + + FdbTuple t1 = 
FdbTuple.Create(123); + t1.With((a) => + { + Assert.That(a, Is.EqualTo(123)); + }); + Assert.That(t1.With((a) => + { + Assert.That(a, Is.EqualTo(123)); + return 42; + }), Is.EqualTo(42)); + + FdbTuple t2 = FdbTuple.Create(123, "abc"); + t2.With((a, b) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + }); + Assert.That(t2.With((a, b) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + return 42; + }), Is.EqualTo(42)); + + FdbTuple t3 = FdbTuple.Create(123, "abc", 3.14f); + t3.With((a, b, c) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + }); + Assert.That(t3.With((a, b, c) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + return 42; + }), Is.EqualTo(42)); + + FdbTuple t4 = FdbTuple.Create(123, "abc", 3.14f, true); + t4.With((a, b, c, d) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + }); + Assert.That(t4.With((a, b, c, d) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + return 42; + }), Is.EqualTo(42)); + + FdbTuple t5 = FdbTuple.Create(123, "abc", 3.14f, true, 'z'); + t5.With((a, b, c, d, e) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + Assert.That(e, Is.EqualTo('z')); + }); + Assert.That(t5.With((a, b, c, d, e) => + { + Assert.That(a, Is.EqualTo(123)); + Assert.That(b, Is.EqualTo("abc")); + Assert.That(c, Is.EqualTo(3.14f)); + Assert.That(d, Is.True); + Assert.That(e, Is.EqualTo('z')); + return 42; + }), Is.EqualTo(42)); + + //TODO: add more if we ever add struct tuples with 6 or more items + } + + [Test] + public void Test_FdbTuple_Of_Size() + { + // OfSize(n) check the size and 
return the tuple if it passed + // VerifySize(n) only check the size + // Both should throw if tuple is null, or not the expected size + + Action verify = (t) => + { + for (int i = 0; i <= 10; i++) + { + if (t.Count > i) + { + Assert.That(() => t.OfSize(i), Throws.InstanceOf()); + Assert.That(t.OfSizeAtLeast(i), Is.SameAs(t)); + Assert.That(() => t.OfSizeAtMost(i), Throws.InstanceOf()); + } + else if (t.Count < i) + { + Assert.That(() => t.OfSize(i), Throws.InstanceOf()); + Assert.That(() => t.OfSizeAtLeast(i), Throws.InstanceOf()); + Assert.That(t.OfSizeAtMost(i), Is.SameAs(t)); + } + else + { + Assert.That(t.OfSize(i), Is.SameAs(t)); + Assert.That(t.OfSizeAtLeast(i), Is.SameAs(t)); + Assert.That(t.OfSizeAtMost(i), Is.SameAs(t)); + } + } + }; + + verify(FdbTuple.Empty); + verify(FdbTuple.Create(123)); + verify(FdbTuple.Create(123, "abc")); + verify(FdbTuple.Create(123, "abc", 3.14f)); + verify(FdbTuple.Create(123, "abc", 3.14f, true)); + verify(FdbTuple.Create(123, "abc", 3.14f, true, 'z')); + verify(FdbTuple.FromArray(new[] { "hello", "world", "!" })); + verify(FdbTuple.FromEnumerable(Enumerable.Range(0, 10))); + + verify(FdbTuple.Create(123, "abc", 3.14f, true, 'z')[0, 2]); + verify(FdbTuple.Create(123, "abc", 3.14f, true, 'z')[1, 4]); + verify(FdbTuple.FromEnumerable(Enumerable.Range(0, 50)).Substring(15, 6)); + + IFdbTuple none = null; + Assert.That(() => none.OfSize(0), Throws.InstanceOf()); + Assert.That(() => none.OfSizeAtLeast(0), Throws.InstanceOf()); + Assert.That(() => none.OfSizeAtMost(0), Throws.InstanceOf()); + } + #endregion #region Splicing... 
From 305281c66d12ec0fe350bd6443dbce8606197310 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Thu, 20 Nov 2014 15:06:30 +0100 Subject: [PATCH 11/63] FdbTuple.FromArray used struct tuples for size 0 to 3 --- FoundationDB.Client/Layers/Tuples/FdbTuple.cs | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs index 6cb58cc01..3ad8178e9 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs @@ -296,15 +296,20 @@ public static IFdbTuple FromArray(T[] items, int offset, int count) if (count < 0) throw new ArgumentOutOfRangeException("count", "Count cannot be less than zero"); if (offset + count > items.Length) throw new ArgumentOutOfRangeException("count", "Source array is too small"); - if (count == 0) return FdbTuple.Empty; - if (count == 1) return FdbTuple.Create(items[offset]); - if (count == 2) return FdbTuple.Create(items[offset], items[offset + 1]); - - // copy the items - var tmp = new object[count]; - Array.Copy(items, offset, tmp, 0, count); - //TODO: we would probably benefit from having an FdbListTuple here! - return new FdbListTuple(tmp, 0, count); + switch(count) + { + case 0: return FdbTuple.Empty; + case 1: return FdbTuple.Create(items[offset]); + case 2: return FdbTuple.Create(items[offset], items[offset + 1]); + case 3: return FdbTuple.Create(items[offset], items[offset + 1], items[offset + 2]); + default: + { // copy the items in a temp array + //TODO: we would probably benefit from having an FdbListTuple here! 
+ var tmp = new object[count]; + Array.Copy(items, offset, tmp, 0, count); + return new FdbListTuple(tmp, 0, count); + } + } } /// Create a new tuple from a sequence of typed items From 3e2700d621cf1cfbb3cbaf75ebc68a5fb3a913b0 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Thu, 20 Nov 2014 15:31:16 +0100 Subject: [PATCH 12/63] Tuples: added FdbTupleParser.BeginTuple() and EndTuple() - allows custom type codecs to output embedded tuples --- .../Layers/Tuples/FdbTuplePackers.cs | 12 ++++-------- .../Layers/Tuples/FdbTupleParser.cs | 14 ++++++++++++++ 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuplePackers.cs b/FoundationDB.Client/Layers/Tuples/FdbTuplePackers.cs index 0d14bce1b..28dcca720 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuplePackers.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuplePackers.cs @@ -486,11 +486,9 @@ public static void SerializeTupleTo(ref TupleWriter writer, TTuple tuple { Contract.Requires(tuple != null); - writer.Depth++; - writer.Output.WriteByte(FdbTupleTypes.TupleStart); + FdbTupleParser.BeginTuple(ref writer); tuple.PackTo(ref writer); - writer.Output.WriteByte(0x00); - writer.Depth--; + FdbTupleParser.EndTuple(ref writer); } public static void SerializeFormattableTo(ref TupleWriter writer, ITupleFormattable formattable) @@ -504,11 +502,9 @@ public static void SerializeFormattableTo(ref TupleWriter writer, ITupleFormatta var tuple = formattable.ToTuple(); if (tuple == null) throw new InvalidOperationException(String.Format("Custom formatter {0}.ToTuple() cannot return null", formattable.GetType().Name)); - writer.Depth++; - writer.Output.WriteByte(FdbTupleTypes.TupleStart); + FdbTupleParser.BeginTuple(ref writer); tuple.PackTo(ref writer); - writer.Output.WriteByte(0x00); - writer.Depth--; + FdbTupleParser.EndTuple(ref writer); } public static void SerializeFdbKeyTo(ref TupleWriter writer, IFdbKey key) diff --git 
a/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs index d2eeb3012..ba976354f 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs @@ -656,6 +656,20 @@ public static void WriteUuid64(ref TupleWriter writer, Uuid64 value) } } + /// Mark the start of a new embedded tuple + public static void BeginTuple(ref TupleWriter writer) + { + writer.Depth++; + writer.Output.WriteByte(FdbTupleTypes.TupleStart); + } + + /// Mark the end of an embedded tuple + public static void EndTuple(ref TupleWriter writer) + { + writer.Output.WriteByte(0x00); + writer.Depth--; + } + #endregion #region Deserialization... From dd66b077e82e05be8880cadc007f4944fb2773b7 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Fri, 21 Nov 2014 23:50:09 +0100 Subject: [PATCH 13/63] AsyncLinq: added Distinct(), MinAsync(), MaxAsync(), ElementAtAsync(), and ElementAtOrDefaultAsync() --- .../FoundationDB.Client.csproj | 1 + .../Linq/FdbAsyncEnumerable.Iterators.cs | 33 ++++ .../Linq/FdbAsyncEnumerable.cs | 126 +++++++++++++ .../Linq/FdbDistinctAsyncIterator.cs | 147 +++++++++++++++ .../Linq/FdbAsyncEnumerableFacts.cs | 168 ++++++++++++++++++ 5 files changed, 475 insertions(+) create mode 100644 FoundationDB.Client/Linq/FdbDistinctAsyncIterator.cs diff --git a/FoundationDB.Client/FoundationDB.Client.csproj b/FoundationDB.Client/FoundationDB.Client.csproj index 52445db96..f522a810d 100644 --- a/FoundationDB.Client/FoundationDB.Client.csproj +++ b/FoundationDB.Client/FoundationDB.Client.csproj @@ -76,6 +76,7 @@ + diff --git a/FoundationDB.Client/Linq/FdbAsyncEnumerable.Iterators.cs b/FoundationDB.Client/Linq/FdbAsyncEnumerable.Iterators.cs index 6cbb25a2d..04f39a593 100644 --- a/FoundationDB.Client/Linq/FdbAsyncEnumerable.Iterators.cs +++ b/FoundationDB.Client/Linq/FdbAsyncEnumerable.Iterators.cs @@ -333,6 +333,39 @@ internal static async Task Run(IFdbAsyncEnumerable sourc return 
count; } + /// Immediately execute an action on each element of an async sequence, with the possibility of stopping before the end + /// Type of elements of the async sequence + /// Source async sequence + /// If different than default, can be used to optimise the way the source will produce the items + /// Lambda called for each element as it arrives. If the return value is true, the next value will be processed. If the return value is false, the iterations will stop immediately. + /// Cancellation token that can be used to cancel the operation + /// Number of items that have been processed successfully + internal static async Task Run(IFdbAsyncEnumerable source, FdbAsyncMode mode, Func action, CancellationToken ct) + { + if (source == null) throw new ArgumentNullException("source"); + if (action == null) throw new ArgumentNullException("action"); + + ct.ThrowIfCancellationRequested(); + + //note: we should not use "ConfigureAwait(false)" here because we would like to execute the action in the original synchronization context if possible... + + long count = 0; + using (var iterator = source.GetEnumerator(mode)) + { + if (iterator == null) throw new InvalidOperationException("The underlying sequence returned a null async iterator"); + + while (await iterator.MoveNext(ct)) + { + if (!action(iterator.Current)) + { + break; + } + ++count; + } + } + return count; + } + /// Immediately execute an asunc action on each element of an async sequence /// Type of elements of the async sequence /// Source async sequence diff --git a/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs b/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs index 7d47bfcab..e16b25229 100644 --- a/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs +++ b/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs @@ -369,6 +369,24 @@ public static IFdbAsyncEnumerable SelectAsync(this IF #endregion + #region Distinct... 
+ + public static IFdbAsyncEnumerable Distinct(this IFdbAsyncEnumerable source, IEqualityComparer comparer = null) + { + if (source == null) throw new ArgumentNullException("count"); + comparer = comparer ?? EqualityComparer.Default; + + return new FdbDistinctAsyncIterator(source, comparer); + } + + #endregion + + // If you are bored, maybe consider adding: + // - DefaultIfEmpty + // - Zip + // - OrderBy and OrderBy + // - GroupBy + #endregion #region Leaving the Monad... @@ -659,6 +677,60 @@ public static IFdbAsyncEnumerable SelectAsync(this IF return found ? last : default(T); } + /// Returns the element at a specific location of an async sequence, or an exception if there are not enough elements + public static async Task ElementAtAsync(this IFdbAsyncEnumerable source, int index, CancellationToken ct = default(CancellationToken)) + { + if (source == null) throw new ArgumentNullException("source"); + if (index < 0) throw new ArgumentOutOfRangeException("index"); + ct.ThrowIfCancellationRequested(); + + var rq = source as FdbRangeQuery; + if (rq != null) return await rq.Skip(index).SingleAsync(); + + int counter = index; + T item = default(T); + await Run( + source, + FdbAsyncMode.All, + (x) => + { + if (counter-- == 0) { item = x; return false; } + return true; + }, + ct + ).ConfigureAwait(false); + + if (counter >= 0) throw new InvalidOperationException("The sequence was too small"); + return item; + } + + /// Returns the element at a specific location of an async sequence, or the default value for the type if it there are not enough elements + public static async Task ElementAtOrDefaultAsync(this IFdbAsyncEnumerable source, int index, CancellationToken ct = default(CancellationToken)) + { + if (source == null) throw new ArgumentNullException("source"); + if (index < 0) throw new ArgumentOutOfRangeException("index"); + ct.ThrowIfCancellationRequested(); + + var rq = source as FdbRangeQuery; + if (rq != null) return await rq.Skip(index).SingleAsync(); + + int 
counter = index; + T item = default(T); + await Run( + source, + FdbAsyncMode.All, + (x) => + { + if (counter-- == 0) { item = x; return false; } + return true; + }, + ct + ).ConfigureAwait(false); + + if (counter >= 0) return default(T); + return item; + } + /// Returns the number of elements in an async sequence. public static async Task CountAsync(this IFdbAsyncEnumerable source, CancellationToken ct = default(CancellationToken)) { @@ -723,6 +795,60 @@ public static IFdbAsyncEnumerable SelectAsync(this IF return sum; } + /// Returns the smallest value in the specified async sequence + public static async Task MinAsync(this IFdbAsyncEnumerable source, IComparer comparer = null, CancellationToken ct = default(CancellationToken)) + { + if (source == null) throw new ArgumentNullException("source"); + comparer = comparer ?? Comparer.Default; + + bool found = false; + T min = default(T); + + await Run( + source, + FdbAsyncMode.All, + (x) => + { + if (!found || comparer.Compare(x, min) < 0) + { + min = x; + found = true; + } + }, + ct + ).ConfigureAwait(false); + + if (!found) throw new InvalidOperationException("The sequence was empty"); + return min; + } + + /// Returns the largest value in the specified async sequence + public static async Task MaxAsync(this IFdbAsyncEnumerable source, IComparer comparer = null, CancellationToken ct = default(CancellationToken)) + { + if (source == null) throw new ArgumentNullException("source"); + comparer = comparer ?? Comparer.Default; + + bool found = false; + T max = default(T); + + await Run( + source, + FdbAsyncMode.All, + (x) => + { + if (!found || comparer.Compare(x, max) > 0) + { + max = x; + found = true; + } + }, + ct + ).ConfigureAwait(false); + + if (!found) throw new InvalidOperationException("The sequence was empty"); + return max; + } + /// Determines whether an async sequence contains any elements. 
/// This is the logical equivalent to "source.Count() > 0" but can be better optimized by some providers public static async Task AnyAsync(this IFdbAsyncEnumerable source, CancellationToken ct = default(CancellationToken)) diff --git a/FoundationDB.Client/Linq/FdbDistinctAsyncIterator.cs b/FoundationDB.Client/Linq/FdbDistinctAsyncIterator.cs new file mode 100644 index 000000000..687ba1097 --- /dev/null +++ b/FoundationDB.Client/Linq/FdbDistinctAsyncIterator.cs @@ -0,0 +1,147 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +namespace FoundationDB.Linq +{ + using FoundationDB.Client.Utils; + using JetBrains.Annotations; + using System; + using System.Collections.Generic; + using System.Threading; + using System.Threading.Tasks; + + /// Filters duplicate items from an async sequence + /// Type of elements of the async sequence + internal sealed class FdbDistinctAsyncIterator : FdbAsyncFilter + { + + private readonly IEqualityComparer m_comparer; + private HashSet m_set; + + public FdbDistinctAsyncIterator([NotNull] IFdbAsyncEnumerable source, IEqualityComparer comparer) + : base(source) + { + Contract.Requires(comparer != null); + + m_comparer = comparer; + } + + protected override FdbAsyncIterator Clone() + { + return new FdbDistinctAsyncIterator(m_source, m_comparer); + } + + protected override Task OnFirstAsync(CancellationToken ct) + { + // we start with an empty set... + m_set = new HashSet(m_comparer); + + return base.OnFirstAsync(ct); + } + + protected override async Task OnNextAsync(CancellationToken cancellationToken) + { + while (!cancellationToken.IsCancellationRequested) + { + if (!await m_iterator.MoveNext(cancellationToken).ConfigureAwait(false)) + { // completed + m_set = null; + return Completed(); + } + + if (cancellationToken.IsCancellationRequested) break; + + TSource current = m_iterator.Current; + if (!m_set.Add(current)) + { // this item has already been seen + continue; + } + + return Publish(current); + } + + m_set = null; + return Canceled(cancellationToken); + } + + public override async Task ExecuteAsync(Action handler, CancellationToken ct) + { + if (handler == null) throw new ArgumentNullException("handler"); + + if (ct.IsCancellationRequested) ct.ThrowIfCancellationRequested(); + + var mode = m_mode; + if (mode == FdbAsyncMode.Head) mode = FdbAsyncMode.Iterator; + + using (var iter = m_source.GetEnumerator(mode)) + { + var set = new HashSet(m_comparer); + + while (!ct.IsCancellationRequested && (await 
iter.MoveNext(ct).ConfigureAwait(false))) + { + var current = iter.Current; + if (set.Add(current)) + { // first occurrence of this item + handler(current); + } + } + } + + if (ct.IsCancellationRequested) ct.ThrowIfCancellationRequested(); + + } + + public override async Task ExecuteAsync(Func asyncHandler, CancellationToken ct) + { + if (asyncHandler == null) throw new ArgumentNullException("asyncHandler"); + + if (ct.IsCancellationRequested) ct.ThrowIfCancellationRequested(); + + var mode = m_mode; + if (mode == FdbAsyncMode.Head) mode = FdbAsyncMode.Iterator; + + using (var iter = m_source.GetEnumerator(mode)) + { + var set = new HashSet(m_comparer); + + while (!ct.IsCancellationRequested && (await iter.MoveNext(ct).ConfigureAwait(false))) + { + var current = iter.Current; + if (set.Add(current)) + { // first occurence of this item + await asyncHandler(current, ct).ConfigureAwait(false); + } + } + } + + if (ct.IsCancellationRequested) ct.ThrowIfCancellationRequested(); + } + + } + +} diff --git a/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs b/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs index 337742cc2..d23e988a4 100644 --- a/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs +++ b/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs @@ -328,6 +328,7 @@ public async Task Test_Can_Where_And_Skip() var results = await query.ToListAsync(); Assert.That(results, Is.EqualTo(new int[] { 31, 33, 35, 37, 39, 41 })); } + [Test] public async Task Test_Can_SelectMany() { @@ -420,6 +421,83 @@ public async Task Test_Can_Get_LastOrDefault() } + [Test] + public async Task Test_Can_Get_ElementAt() + { + var source = Enumerable.Range(42, 10).ToAsyncEnumerable(); + + Assert.That(() => source.ElementAtAsync(-1).GetAwaiter().GetResult(), Throws.InstanceOf()); + + int item = await source.ElementAtAsync(0); + Assert.That(item, Is.EqualTo(42)); + + item = await source.ElementAtAsync(5); + Assert.That(item, Is.EqualTo(47)); + + item = await source.ElementAtAsync(9); + 
Assert.That(item, Is.EqualTo(51)); + + Assert.That(() => source.ElementAtAsync(10).GetAwaiter().GetResult(), Throws.InstanceOf()); + + source = FdbAsyncEnumerable.Empty(); + Assert.That(() => source.ElementAtAsync(0).GetAwaiter().GetResult(), Throws.InstanceOf()); + } + + [Test] + public async Task Test_Can_Get_ElementAtOrDefault() + { + var source = Enumerable.Range(42, 10).ToAsyncEnumerable(); + + Assert.That(() => source.ElementAtOrDefaultAsync(-1).GetAwaiter().GetResult(), Throws.InstanceOf()); + + int item = await source.ElementAtOrDefaultAsync(0); + Assert.That(item, Is.EqualTo(42)); + + item = await source.ElementAtOrDefaultAsync(5); + Assert.That(item, Is.EqualTo(47)); + + item = await source.ElementAtOrDefaultAsync(9); + Assert.That(item, Is.EqualTo(51)); + + item = await source.ElementAtOrDefaultAsync(10); + Assert.That(item, Is.EqualTo(0)); + + source = FdbAsyncEnumerable.Empty(); + item = await source.ElementAtOrDefaultAsync(0); + Assert.That(item, Is.EqualTo(0)); + item = await source.ElementAtOrDefaultAsync(42); + Assert.That(item, Is.EqualTo(0)); + } + + [Test] + public async Task Test_Can_Distinct() + { + var items = new int[] { 1, 42, 7, 42, 9, 13, 7, 66 }; + var source = items.ToAsyncEnumerable(); + + var distincts = await source.Distinct().ToListAsync(); + Assert.That(distincts, Is.Not.Null.And.EqualTo(items.Distinct().ToList())); + + var sequence = Enumerable.Range(0, 100).Select(x => (x * 1049) % 43); + source = sequence.ToAsyncEnumerable(); + distincts = await source.Distinct().ToListAsync(); + Assert.That(distincts, Is.Not.Null.And.EqualTo(sequence.Distinct().ToList())); + } + + [Test] + public async Task Test_Can_Distinct_With_Comparer() + { + var items = new string[] { "World", "hello", "Hello", "world", "World!", "FileNotFound" }; + + var source = items.ToAsyncEnumerable(); + + var distincts = await source.Distinct(StringComparer.Ordinal).ToListAsync(); + Assert.That(distincts, 
Is.Not.Null.And.EqualTo(items.Distinct(StringComparer.Ordinal).ToList())); + + distincts = await source.Distinct(StringComparer.OrdinalIgnoreCase).ToListAsync(); + Assert.That(distincts, Is.Not.Null.And.EqualTo(items.Distinct(StringComparer.OrdinalIgnoreCase).ToList())); + } + [Test] public async Task Test_Can_ForEach() { @@ -535,6 +613,96 @@ public async Task Test_Can_Count_With_Predicate() Assert.That(count, Is.EqualTo(5)); } + [Test] + public async Task Test_Can_Min() + { + var rnd = new Random(1234); + var items = Enumerable.Range(0, 100).Select(_ => rnd.Next()).ToList(); + + var source = items.ToAsyncEnumerable(); + int min = await source.MinAsync(); + Assert.That(min, Is.EqualTo(items.Min())); + + // if min is the first + items[0] = min - 1; + source = items.ToAsyncEnumerable(); + min = await source.MinAsync(); + Assert.That(min, Is.EqualTo(items.Min())); + + // if min is the last + items[items.Count - 1] = min - 1; + source = items.ToAsyncEnumerable(); + min = await source.MinAsync(); + Assert.That(min, Is.EqualTo(items.Min())); + + // empty should fail + source = FdbAsyncEnumerable.Empty(); + Assert.That(() => source.MinAsync().GetAwaiter().GetResult(), Throws.InstanceOf()); + } + + [Test] + public async Task Test_Can_Max() + { + var rnd = new Random(1234); + var items = Enumerable.Range(0, 100).Select(_ => rnd.Next()).ToList(); + + var source = items.ToAsyncEnumerable(); + int max = await source.MaxAsync(); + Assert.That(max, Is.EqualTo(items.Max())); + + // if max is the first + items[0] = max + 1; + source = items.ToAsyncEnumerable(); + max = await source.MaxAsync(); + Assert.That(max, Is.EqualTo(items.Max())); + + // if max is the last + items[items.Count - 1] = max + 1; + source = items.ToAsyncEnumerable(); + max = await source.MaxAsync(); + Assert.That(max, Is.EqualTo(items.Max())); + + // empty should fail + source = FdbAsyncEnumerable.Empty(); + Assert.That(() => source.MaxAsync().GetAwaiter().GetResult(), Throws.InstanceOf()); + } + + [Test] + 
public async Task Test_Can_Sum_Signed() + { + var rnd = new Random(1234); + var items = Enumerable.Range(0, 100).Select(_ => (long)rnd.Next()).ToList(); + + var source = items.ToAsyncEnumerable(); + long sum = await source.SumAsync(); + long expected = 0; + foreach (var x in items) expected = checked(expected + x); + Assert.That(sum, Is.EqualTo(expected)); + + // empty should return 0 + source = FdbAsyncEnumerable.Empty(); + sum = await source.SumAsync(); + Assert.That(sum, Is.EqualTo(0)); + } + + [Test] + public async Task Test_Can_Sum_Unsigned() + { + var rnd = new Random(1234); + var items = Enumerable.Range(0, 100).Select(_ => (ulong)rnd.Next()).ToList(); + + var source = items.ToAsyncEnumerable(); + ulong sum = await source.SumAsync(); + ulong expected = 0; + foreach (var x in items) expected = checked(expected + x); + Assert.That(sum, Is.EqualTo(expected)); + + // empty should return 0 + source = FdbAsyncEnumerable.Empty(); + sum = await source.SumAsync(); + Assert.That(sum, Is.EqualTo(0)); + } + [Test] public async Task Test_Can_Select_Anonymous_Types() { From 5c0607dc34dca21f5c1012a164c3cebaf6cc8a21 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Fri, 21 Nov 2014 23:51:40 +0100 Subject: [PATCH 14/63] AsyncLinq: added some [NotNull] and cleaned up spaces --- FoundationDB.Client/FdbRangeQuery.cs | 6 +++--- .../Linq/FdbAsyncEnumerable.Iterators.cs | 6 ++++++ FoundationDB.Client/Linq/FdbAsyncEnumerable.cs | 2 +- FoundationDB.Client/Linq/FdbAsyncIterator.cs | 17 +++++++++++++++-- FoundationDB.Client/Utils/Slice.cs | 9 +++++---- 5 files changed, 30 insertions(+), 10 deletions(-) diff --git a/FoundationDB.Client/FdbRangeQuery.cs b/FoundationDB.Client/FdbRangeQuery.cs index b76baa6ef..5d2765fdd 100644 --- a/FoundationDB.Client/FdbRangeQuery.cs +++ b/FoundationDB.Client/FdbRangeQuery.cs @@ -148,15 +148,15 @@ public FdbRangeQuery Skip(int count) // Take(N).Skip(k) ? 
if (limit.HasValue) - { + { // If k >= N, then the result will be empty // If k < N, then we need to update the begin key, and limit accordingly if (count >= limit.Value) - { + { limit = 0; // hopefully this would be optimized an runtime? } else - { + { limit -= count; } } diff --git a/FoundationDB.Client/Linq/FdbAsyncEnumerable.Iterators.cs b/FoundationDB.Client/Linq/FdbAsyncEnumerable.Iterators.cs index 04f39a593..80d0f94ed 100644 --- a/FoundationDB.Client/Linq/FdbAsyncEnumerable.Iterators.cs +++ b/FoundationDB.Client/Linq/FdbAsyncEnumerable.Iterators.cs @@ -29,6 +29,7 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY namespace FoundationDB.Linq { using FoundationDB.Async; + using JetBrains.Annotations; using System; using System.Collections.Generic; using System.Diagnostics; @@ -142,11 +143,13 @@ internal static FdbWhereSelectAsyncIterator Map Filter(IFdbAsyncEnumerable source, Func predicate) { return new FdbWhereAsyncIterator(source, predicate, null); } + [NotNull] internal static FdbWhereAsyncIterator Filter(IFdbAsyncEnumerable source, Func> asyncPredicate) { return new FdbWhereAsyncIterator(source, null, asyncPredicate); @@ -156,6 +159,7 @@ internal static FdbWhereAsyncIterator Filter(IFdbAsyncEnumerab #region Offset... + [NotNull] internal static FdbWhereSelectAsyncIterator Offset(IFdbAsyncEnumerable source, int offset) { return new FdbWhereSelectAsyncIterator(source, filter: null, asyncFilter: null, transform: TaskHelpers.Cache.Identity, asyncTransform: null, limit: null, offset: offset); @@ -165,11 +169,13 @@ internal static FdbWhereSelectAsyncIterator Offset(IF #region Limit... 
+ [NotNull] internal static FdbWhereSelectAsyncIterator Limit(IFdbAsyncEnumerable source, int limit) { return new FdbWhereSelectAsyncIterator(source, filter: null, asyncFilter: null, transform: TaskHelpers.Cache.Identity, asyncTransform: null, limit: limit, offset: null); } + [NotNull] internal static FdbTakeWhileAsyncIterator Limit(IFdbAsyncEnumerable source, Func condition) { return new FdbTakeWhileAsyncIterator(source, condition); diff --git a/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs b/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs index e16b25229..3214d97e7 100644 --- a/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs +++ b/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs @@ -40,7 +40,7 @@ namespace FoundationDB.Linq /// Provides a set of static methods for querying objects that implement . public static partial class FdbAsyncEnumerable { - // Welcome to the wonderful world of the Monads! + // Welcome to the wonderful world of the Monads! #region Entering the Monad... diff --git a/FoundationDB.Client/Linq/FdbAsyncIterator.cs b/FoundationDB.Client/Linq/FdbAsyncIterator.cs index e98de6764..81117db00 100644 --- a/FoundationDB.Client/Linq/FdbAsyncIterator.cs +++ b/FoundationDB.Client/Linq/FdbAsyncIterator.cs @@ -136,6 +136,7 @@ public async Task MoveNext(CancellationToken ct) #region LINQ... 
+ [NotNull] public virtual FdbAsyncIterator Where([NotNull] Func predicate) { if (predicate == null) throw new ArgumentNullException("predicate"); @@ -143,6 +144,7 @@ public virtual FdbAsyncIterator Where([NotNull] Func pre return FdbAsyncEnumerable.Filter(this, predicate); } + [NotNull] public virtual FdbAsyncIterator Where([NotNull] Func> asyncPredicate) { if (asyncPredicate == null) throw new ArgumentNullException("asyncPredicate"); @@ -150,6 +152,7 @@ public virtual FdbAsyncIterator Where([NotNull] Func(this, asyncPredicate); } + [NotNull] public virtual FdbAsyncIterator Select([NotNull] Func selector) { if (selector == null) throw new ArgumentNullException("selector"); @@ -157,6 +160,7 @@ public virtual FdbAsyncIterator Select([NotNull] Func return FdbAsyncEnumerable.Map(this, selector); } + [NotNull] public virtual FdbAsyncIterator Select([NotNull] Func> asyncSelector) { if (asyncSelector == null) throw new ArgumentNullException("asyncSelector"); @@ -164,6 +168,7 @@ public virtual FdbAsyncIterator Select([NotNull] Func(this, asyncSelector); } + [NotNull] public virtual FdbAsyncIterator SelectMany([NotNull] Func> selector) { if (selector == null) throw new ArgumentNullException("selector"); @@ -171,6 +176,7 @@ public virtual FdbAsyncIterator SelectMany([NotNull] Func(this, selector); } + [NotNull] public virtual FdbAsyncIterator SelectMany([NotNull] Func>> asyncSelector) { if (asyncSelector == null) throw new ArgumentNullException("asyncSelector"); @@ -178,6 +184,7 @@ public virtual FdbAsyncIterator SelectMany([NotNull] Func(this, asyncSelector); } + [NotNull] public virtual FdbAsyncIterator SelectMany([NotNull] Func> collectionSelector, [NotNull] Func resultSelector) { if (collectionSelector == null) throw new ArgumentNullException("collectionSelector"); @@ -186,6 +193,7 @@ public virtual FdbAsyncIterator SelectMany([NotNull] Fu return FdbAsyncEnumerable.Flatten(this, collectionSelector, resultSelector); } + [NotNull] public virtual FdbAsyncIterator 
SelectMany([NotNull] Func>> asyncCollectionSelector, [NotNull] Func resultSelector) { if (asyncCollectionSelector == null) throw new ArgumentNullException("asyncCollectionSelector"); @@ -194,26 +202,31 @@ public virtual FdbAsyncIterator SelectMany([NotNull] Fu return FdbAsyncEnumerable.Flatten(this, asyncCollectionSelector, resultSelector); } + [NotNull] public virtual FdbAsyncIterator Take(int count) { return FdbAsyncEnumerable.Limit(this, count); } + [NotNull] public virtual FdbAsyncIterator TakeWhile([NotNull] Func condition) { return FdbAsyncEnumerable.Limit(this, condition); } + [NotNull] public virtual FdbAsyncIterator Skip(int count) { return FdbAsyncEnumerable.Offset(this, count); } + [NotNull] public virtual Task ExecuteAsync([NotNull] Action action, CancellationToken ct) { return FdbAsyncEnumerable.Run(this, FdbAsyncMode.All, action, ct); } + [NotNull] public virtual Task ExecuteAsync([NotNull] Func asyncAction, CancellationToken ct) { return FdbAsyncEnumerable.Run(this, FdbAsyncMode.All, asyncAction, ct); @@ -251,7 +264,7 @@ protected bool Completed() } [ContractAnnotation("=> halt")] - protected bool Failed(Exception e) + protected bool Failed([NotNull] Exception e) { this.Dispose(); //return false; @@ -260,7 +273,7 @@ protected bool Failed(Exception e) #if !NET_4_0 [ContractAnnotation("=> halt")] - protected bool Failed(ExceptionDispatchInfo e) + protected bool Failed([NotNull] ExceptionDispatchInfo e) { this.Dispose(); e.Throw(); diff --git a/FoundationDB.Client/Utils/Slice.cs b/FoundationDB.Client/Utils/Slice.cs index ebd4c0bb6..a825e4b7b 100644 --- a/FoundationDB.Client/Utils/Slice.cs +++ b/FoundationDB.Client/Utils/Slice.cs @@ -489,7 +489,7 @@ public static Slice FromFixed16(short value) { return new Slice( new byte[] - { + { (byte)value, (byte)(value >> 8) }, @@ -517,7 +517,7 @@ public static Slice FromFixedU16(ushort value) { return new Slice( new byte[] - { + { (byte)value, (byte)(value >> 8) }, @@ -532,7 +532,7 @@ public static Slice 
FromFixedU16BE(ushort value) { return new Slice( new byte[] - { + { (byte)(value >> 8), (byte)value }, @@ -571,6 +571,7 @@ public static Slice FromInt32(int value) } if (value <= 65535) { + //TODO: possible micro optimization is for values like 0x100, 0x201, 0x1413 or 0x4342, where we could use 2 consecutive bytes in the ByteSprite, return new Slice(new byte[] { (byte)value, (byte)(value >> 8) }, 0, 2); } } @@ -583,7 +584,7 @@ public static Slice FromFixed32(int value) { return new Slice( new byte[] - { + { (byte)value, (byte)(value >> 8), (byte)(value >> 16), From 567a34fd1081dda56affc697342748d124ae8427 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Tue, 25 Nov 2014 10:43:10 +0100 Subject: [PATCH 15/63] Refactored the way time is measured in FdbOperationContext - the Stopwatch is now private, and we only expose TimeSpans - tr.Context.Elapsed is the time since the last reset/retry - tr.Context.ElapsedTotal is the time since the creation of the transaction --- FoundationDB.Client/FdbOperationContext.cs | 63 ++++++++++++++++------ FoundationDB.Tests/TransactionalFacts.cs | 2 +- 2 files changed, 47 insertions(+), 18 deletions(-) diff --git a/FoundationDB.Client/FdbOperationContext.cs b/FoundationDB.Client/FdbOperationContext.cs index a1d555f16..973b7ea92 100644 --- a/FoundationDB.Client/FdbOperationContext.cs +++ b/FoundationDB.Client/FdbOperationContext.cs @@ -38,19 +38,22 @@ namespace FoundationDB.Client using System.Threading.Tasks; /// - /// Represents the context of a retryable transactional function wich accept a read-only or read-write transaction. + /// Represents the context of a retryable transactional function which accepts a read-only or read-write transaction. /// [DebuggerDisplay("Retries={Retries}, Committed={Committed}, Elapsed={Duration.Elapsed}")] public sealed class FdbOperationContext : IDisposable { + //REVIEW: maybe we should find a way to reduce the size of this class? (it's already almost at 100 bytes !) 
+ /// The database used by the operation - public IFdbDatabase Database { [NotNull] get; private set; } + public IFdbDatabase Database { [NotNull] get; private set; /*readonly*/ } /// Result of the operation (or null) public object Result { get; set; } + //REVIEW: should we force using a "SetResult()/TrySetResult()" method for this ? /// Cancellation token associated with the operation - public CancellationToken Cancellation { get; internal set; } + public CancellationToken Cancellation { get; private set; /*readonly*/ } /// If set to true, will abort and not commit the transaction. If false, will try to commit the transaction (and retry on failure) public bool Abort { get; set; } @@ -61,8 +64,19 @@ public sealed class FdbOperationContext : IDisposable /// Date at wich the operation was first started public DateTime StartedUtc { get; private set; } - /// Time spent since the start of the first attempt - public Stopwatch Duration { [NotNull] get; private set; } + /// Stopwatch that is started at the creation of the transaction, and stopped when it commits or gets disposed + internal Stopwatch Clock { [NotNull] get; private set; /*readonly*/ } + + /// Duration of all the previous attemps before the current one (starts at 0, and gets updated at each reset/retry) + internal TimeSpan BaseDuration { get; private set; } + + /// Time elapsed since the start of the first attempt + public TimeSpan ElapsedTotal { get { return this.Clock.Elapsed; } } + + /// Time elapsed since the start of the current attempt + /// This value is reset to zero every time the transation fails and is retried. + /// Note that this may not represent the actual lifetime of the transaction with the database itself, which starts at the first read operation. 
+ public TimeSpan Elapsed { get { return this.Clock.Elapsed.Subtract(this.BaseDuration); } } /// If true, the transaction has been committed successfully public bool Committed { get; private set; } @@ -71,22 +85,28 @@ public sealed class FdbOperationContext : IDisposable internal bool Shared { get { return (this.Mode & FdbTransactionMode.InsideRetryLoop) != 0; } } /// Mode of the transaction - public FdbTransactionMode Mode { get; private set; } + public FdbTransactionMode Mode { get; private set; /*readonly*/ } /// Internal source of cancellation, able to abort any pending IO operations attached to this transaction - internal CancellationTokenSource TokenSource { get; private set; } + internal CancellationTokenSource TokenSource { [CanBeNull] get; private set; /*readonly*/ } + /// Create a new retry loop operation context + /// Database that will be used by the retry loop + /// Operation mode of the retry loop + /// Optional cancellation token that will abort the retry loop if triggered. public FdbOperationContext([NotNull] IFdbDatabase db, FdbTransactionMode mode, CancellationToken cancellationToken) { if (db == null) throw new ArgumentNullException("db"); this.Database = db; this.Mode = mode; - this.Duration = new Stopwatch(); + this.Clock = new Stopwatch(); + // note: we don't start the clock yet, only when the context starts executing... - // by default, we hook ourselves on the db's CancellationToken + // by default, we hook ourselves to the db's CancellationToken, but we may need to also + // hook with a different, caller-provided, token and respond to cancellation from both sites. 
var token = db.Cancellation; - if (cancellationToken.CanBeCanceled && cancellationToken != token) + if (cancellationToken.CanBeCanceled && !cancellationToken.Equals(token)) { this.TokenSource = CancellationTokenSource.CreateLinkedTokenSource(token, cancellationToken); token = this.TokenSource.Token; @@ -94,6 +114,7 @@ public FdbOperationContext([NotNull] IFdbDatabase db, FdbTransactionMode mode, C this.Cancellation = token; } + /// Execute a retry loop on this context internal static async Task ExecuteInternal([NotNull] IFdbDatabase db, [NotNull] FdbOperationContext context, [NotNull] Delegate handler, Delegate onDone) { Contract.Requires(db != null && context != null && handler != null); @@ -103,10 +124,15 @@ internal static async Task ExecuteInternal([NotNull] IFdbDatabase db, [NotNull] try { + // make sure to reset everything (in case a context is reused multiple times) context.Committed = false; context.Retries = 0; + context.BaseDuration = TimeSpan.Zero; context.StartedUtc = DateTime.UtcNow; - context.Duration.Start(); + context.Clock.Start(); + //note: we start the clock immediately, but the transaction's 5 seconde max lifetime is actually measured from the first read operation (Get, GetRange, GetReadVersion, etc...) + // => algorithms that monitor the elapsed duration to rate limit themselves may think that the trans is older than it really is... + // => we would need to plug into the transaction handler itself to be notified when exactly a read op starts... using (var trans = db.BeginTransaction(context.Mode, CancellationToken.None, context)) { @@ -172,6 +198,7 @@ internal static async Task ExecuteInternal([NotNull] IFdbDatabase db, [NotNull] } catch (FdbException x) { + //TODO: will be able to await in catch block in C# 6 ! 
e = x; } @@ -182,9 +209,12 @@ internal static async Task ExecuteInternal([NotNull] IFdbDatabase db, [NotNull] if (Logging.On && Logging.IsVerbose) Logging.Verbose(String.Format(CultureInfo.InvariantCulture, "fdb: transaction {0} can be safely retried", trans.Id)); } - if (context.Duration.Elapsed.TotalSeconds >= 1) + // update the base time for the next attempt + context.BaseDuration = context.ElapsedTotal; + if (context.BaseDuration.TotalSeconds >= 10) { - if (Logging.On) Logging.Info(String.Format(CultureInfo.InvariantCulture, "fdb WARNING: long transaction ({0:N1} sec elapsed in transaction lambda function ({1} retries, {2})", context.Duration.Elapsed.TotalSeconds, context.Retries, context.Committed ? "committed" : "not yet committed")); + //REVIEW: this may not be a goot idea to spam the logs with long running transactions?? + if (Logging.On) Logging.Info(String.Format(CultureInfo.InvariantCulture, "fdb WARNING: long transaction ({0:N1} sec elapsed in transaction lambda function ({1} retries, {2})", context.BaseDuration.TotalSeconds, context.Retries, context.Committed ? 
"committed" : "not yet committed")); } context.Retries++; @@ -200,7 +230,7 @@ internal static async Task ExecuteInternal([NotNull] IFdbDatabase db, [NotNull] } finally { - context.Duration.Stop(); + context.Clock.Stop(); context.Dispose(); } } @@ -278,15 +308,14 @@ public static async Task RunWriteWithResultAsync([NotNull] IFdbDatabase db if (asyncHandler == null) throw new ArgumentNullException("asyncHandler"); cancellationToken.ThrowIfCancellationRequested(); - R result = default(R); Func handler = async (tr) => { - result = await asyncHandler(tr).ConfigureAwait(false); + tr.Context.Result = await asyncHandler(tr).ConfigureAwait(false); }; var context = new FdbOperationContext(db, FdbTransactionMode.Default | FdbTransactionMode.InsideRetryLoop, cancellationToken); await ExecuteInternal(db, context, handler, onDone).ConfigureAwait(false); - return result; + return (R)context.Result; } #endregion diff --git a/FoundationDB.Tests/TransactionalFacts.cs b/FoundationDB.Tests/TransactionalFacts.cs index 4ced3e7ea..b1674c1a1 100644 --- a/FoundationDB.Tests/TransactionalFacts.cs +++ b/FoundationDB.Tests/TransactionalFacts.cs @@ -171,7 +171,7 @@ public async Task Test_Transactionals_Retries_Do_Not_Leak_When_Reading_Too_Much( { var result = await db.ReadAsync((tr) => { - Console.WriteLine("Retry #" + tr.Context.Retries + " @ " + tr.Context.Duration.Elapsed); + Console.WriteLine("Retry #" + tr.Context.Retries + " @ " + tr.Context.ElapsedTotal); return tr.GetRange(location.Tuples.ToRange()).ToListAsync(); }, this.Cancellation); From 713b793821ad38708b98f32fdfbf03cd4283777f Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Tue, 25 Nov 2014 10:47:39 +0100 Subject: [PATCH 16/63] AsyncLINQ: refactoring to better handle composition of filters and transforms, added observers and initial implementation for sorting - AsyncFilterExpression and AsyncTransformExpression help combine filters and selector that could be either sync or async. 
- Fixed bug where in a foo.Select(...).Where(...) the Where could be evaluated before the Select - Added .Observe((x) => { ...}) that can execute actions on each element of the sequence, but without changing the result - Added the notion of QueryStatistics that can be used to measure what flows through some part of the query - Added TakeWhile(.., out QueryStatistics stopped), and other like WithCountStatistics(..) or WithSizeStatistics(...) - Fixed most aggregate methods to call into FdbAsyncIterator.ExecuteAsync(..) whenever possible, instead of calling MoveNext() manually. - Added initial support for sorting (not complete) --- .../FoundationDB.Client.csproj | 25 +- .../Linq/Expressions/AsyncFilterExpression.cs | 173 ++++++++ .../Expressions/AsyncObserverExpression.cs | 131 +++++++ .../Expressions/AsyncTransformExpression.cs | 170 ++++++++ .../Linq/FdbAsyncEnumerable.Iterators.cs | 160 ++++---- .../FdbAsyncEnumerable.OrderedSequence.cs | 256 ++++++++++++ .../Linq/FdbAsyncEnumerable.Sorters.cs | 156 ++++++++ .../Linq/FdbAsyncEnumerable.cs | 368 +++++++++++++----- .../Linq/IFdbAsyncOrderedEnumerable.cs | 45 +++ .../FdbAsyncFilterIterator.cs} | 22 +- .../Linq/{ => Iterators}/FdbAsyncIterator.cs | 17 +- .../{ => Iterators}/FdbAsyncIteratorPump.cs | 0 .../FdbDistinctAsyncIterator.cs | 2 +- .../Linq/Iterators/FdbObserverIterator.cs | 86 ++++ .../FdbParallelSelectAsyncIterator.cs | 2 +- .../FdbSelectManyAsyncIterator.cs | 39 +- .../FdbTakeWhileAsyncIterator.cs | 2 +- .../{ => Iterators}/FdbWhereAsyncIterator.cs | 92 ++--- .../FdbWhereSelectAsyncIterator.cs | 327 ++++++++++------ .../Linq/FdbAsyncEnumerableFacts.cs | 38 +- 20 files changed, 1693 insertions(+), 418 deletions(-) create mode 100644 FoundationDB.Client/Linq/Expressions/AsyncFilterExpression.cs create mode 100644 FoundationDB.Client/Linq/Expressions/AsyncObserverExpression.cs create mode 100644 FoundationDB.Client/Linq/Expressions/AsyncTransformExpression.cs create mode 100644 
FoundationDB.Client/Linq/FdbAsyncEnumerable.OrderedSequence.cs create mode 100644 FoundationDB.Client/Linq/FdbAsyncEnumerable.Sorters.cs create mode 100644 FoundationDB.Client/Linq/IFdbAsyncOrderedEnumerable.cs rename FoundationDB.Client/Linq/{FdbAsyncFilter.cs => Iterators/FdbAsyncFilterIterator.cs} (79%) rename FoundationDB.Client/Linq/{ => Iterators}/FdbAsyncIterator.cs (90%) rename FoundationDB.Client/Linq/{ => Iterators}/FdbAsyncIteratorPump.cs (100%) rename FoundationDB.Client/Linq/{ => Iterators}/FdbDistinctAsyncIterator.cs (99%) create mode 100644 FoundationDB.Client/Linq/Iterators/FdbObserverIterator.cs rename FoundationDB.Client/Linq/{ => Iterators}/FdbParallelSelectAsyncIterator.cs (99%) rename FoundationDB.Client/Linq/{ => Iterators}/FdbSelectManyAsyncIterator.cs (79%) rename FoundationDB.Client/Linq/{ => Iterators}/FdbTakeWhileAsyncIterator.cs (98%) rename FoundationDB.Client/Linq/{ => Iterators}/FdbWhereAsyncIterator.cs (70%) rename FoundationDB.Client/Linq/{ => Iterators}/FdbWhereSelectAsyncIterator.cs (53%) diff --git a/FoundationDB.Client/FoundationDB.Client.csproj b/FoundationDB.Client/FoundationDB.Client.csproj index f522a810d..0447f1d75 100644 --- a/FoundationDB.Client/FoundationDB.Client.csproj +++ b/FoundationDB.Client/FoundationDB.Client.csproj @@ -76,7 +76,14 @@ - + + + + + + + + @@ -168,9 +175,9 @@ - + - + @@ -178,14 +185,14 @@ - + - - - - - + + + + + diff --git a/FoundationDB.Client/Linq/Expressions/AsyncFilterExpression.cs b/FoundationDB.Client/Linq/Expressions/AsyncFilterExpression.cs new file mode 100644 index 000000000..e3a9a407f --- /dev/null +++ b/FoundationDB.Client/Linq/Expressions/AsyncFilterExpression.cs @@ -0,0 +1,173 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +namespace FoundationDB.Linq +{ + using FoundationDB.Async; + using JetBrains.Annotations; + using System; + using System.Threading; + using System.Threading.Tasks; + + /// Expression that evalute a condition on each item + /// Type of the filtered elements + internal sealed class AsyncFilterExpression + { + private readonly Func m_filter; + private readonly Func> m_asyncFilter; + + public AsyncFilterExpression(Func filter) + { + if (filter == null) throw new ArgumentNullException("filter"); + m_filter = filter; + } + + public AsyncFilterExpression(Func> asyncFilter) + { + if (asyncFilter == null) throw new ArgumentNullException("asyncFilter"); + m_asyncFilter = asyncFilter; + } + + public bool Async { get { return m_asyncFilter != null; } } + + public bool Invoke(TSource item) + { + if (m_filter == null) FailInvalidOperation(); + return m_filter(item); + } + + public Task InvokeAsync(TSource item, CancellationToken ct) + { + if (m_asyncFilter != null) + { + return m_asyncFilter(item, ct); + } + else + { + return Task.FromResult(m_filter(item)); + } + } + + [ContractAnnotation("=> halt")] + private static void FailInvalidOperation() + { + throw new InvalidOperationException("Cannot invoke asynchronous filter synchronously"); + } + + [NotNull] + public AsyncFilterExpression AndAlso([NotNull] AsyncFilterExpression expr) + { + return AndAlso(this, expr); + } + + [NotNull] + public AsyncFilterExpression OrElse([NotNull] AsyncFilterExpression expr) + { + return OrElse(this, expr); + } + + [NotNull] + public static AsyncFilterExpression AndAlso([NotNull] AsyncFilterExpression left, [NotNull] AsyncFilterExpression right) + { + if (left == null) throw new ArgumentNullException("left"); + if (right == null) throw new ArgumentNullException("epxr"); + + // combine two expressions into a logical AND expression. 
+ // Note: if the first expression returns false, the second one will NOT be evaluated + + if (left.m_filter != null) + { // we are async + var f = left.m_filter; + if (right.m_filter != null) + { // so is the next one + var g = right.m_filter; + return new AsyncFilterExpression((x) => f(x) && g(x)); + } + else + { // next one is async + var g = right.m_asyncFilter; + return new AsyncFilterExpression((x, ct) => f(x) ? g(x, ct) : TaskHelpers.FalseTask); + } + } + else + { // we are async + var f = left.m_asyncFilter; + if (right.m_asyncFilter != null) + { // so is the next one + var g = left.m_asyncFilter; + return new AsyncFilterExpression(async (x, ct) => (await f(x, ct).ConfigureAwait(false)) && (await g(x, ct).ConfigureAwait(false))); + } + else + { + var g = left.m_filter; + return new AsyncFilterExpression(async (x, ct) => (await f(x, ct).ConfigureAwait(false)) && g(x)); + } + } + } + + [NotNull] + public static AsyncFilterExpression OrElse([NotNull] AsyncFilterExpression left, [NotNull] AsyncFilterExpression right) + { + if (left == null) throw new ArgumentNullException("left"); + if (right == null) throw new ArgumentNullException("epxr"); + + // combine two expressions into a logical OR expression. + // Note: if the first expression returns true, the second one will NOT be evaluated + + if (left.m_filter != null) + { // we are async + var f = left.m_filter; + if (right.m_filter != null) + { // so is the next one + var g = right.m_filter; + return new AsyncFilterExpression((x) => f(x) || g(x)); + } + else + { // next one is async + var g = right.m_asyncFilter; + return new AsyncFilterExpression((x, ct) => f(x) ? 
TaskHelpers.TrueTask : g(x, ct)); + } + } + else + { // we are async + var f = left.m_asyncFilter; + if (right.m_asyncFilter != null) + { // so is the next one + var g = left.m_asyncFilter; + return new AsyncFilterExpression(async (x, ct) => (await f(x, ct).ConfigureAwait(false)) || (await g(x, ct).ConfigureAwait(false))); + } + else + { + var g = left.m_filter; + return new AsyncFilterExpression(async (x, ct) => (await f(x, ct).ConfigureAwait(false)) || g(x)); + } + } + } + } + +} diff --git a/FoundationDB.Client/Linq/Expressions/AsyncObserverExpression.cs b/FoundationDB.Client/Linq/Expressions/AsyncObserverExpression.cs new file mode 100644 index 000000000..9cbf71362 --- /dev/null +++ b/FoundationDB.Client/Linq/Expressions/AsyncObserverExpression.cs @@ -0,0 +1,131 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +namespace FoundationDB.Linq +{ + using JetBrains.Annotations; + using System; + using System.Threading; + using System.Threading.Tasks; + + /// Expression that execute an action on each item, but does not change the source expression in anyway + /// Type of observed items + internal sealed class AsyncObserverExpression + { + private readonly Action m_handler; + private readonly Func m_asyncHandler; + + public AsyncObserverExpression(Action handler) + { + if (handler == null) throw new ArgumentNullException("record"); + m_handler = handler; + } + + public AsyncObserverExpression(Func asyncHandler) + { + if (asyncHandler == null) throw new ArgumentNullException("asyncObserver"); + m_asyncHandler = asyncHandler; + } + + public bool Async + { + get { return m_handler != null; } + } + + public TSource Invoke(TSource item) + { + if (m_handler == null) FailInvalidOperation(); + m_handler(item); + return item; + } + + public async Task InvokeAsync(TSource item, CancellationToken ct) + { + if (m_asyncHandler != null) + { + await m_asyncHandler(item, ct).ConfigureAwait(false); + } + else + { + m_handler(item); + } + + return item; + } + + [ContractAnnotation("=> halt")] + private static void FailInvalidOperation() + { + throw new InvalidOperationException("Cannot invoke asynchronous observer synchronously"); + } + + [NotNull] + public AsyncObserverExpression Then([NotNull] AsyncObserverExpression expr) + { + return Then(this, expr); + } + + [NotNull] + public static 
AsyncObserverExpression Then([NotNull] AsyncObserverExpression left, [NotNull] AsyncObserverExpression right) + { + if (left == null) throw new ArgumentNullException("left"); + if (right == null) throw new ArgumentNullException("right"); + + if (left.m_handler != null) + { + var f = left.m_handler; + if (right.m_handler != null) + { + var g = right.m_handler; + return new AsyncObserverExpression((x) => { f(x); g(x); }); + } + else + { + var g = right.m_asyncHandler; + return new AsyncObserverExpression((x, ct) => { f(x); return g(x, ct); }); + } + } + else + { + var f = left.m_asyncHandler; + if (right.m_asyncHandler != null) + { + var g = right.m_asyncHandler; + return new AsyncObserverExpression(async (x, ct) => { await f(x, ct).ConfigureAwait(false); await g(x, ct).ConfigureAwait(false); }); + } + else + { + var g = right.m_handler; + return new AsyncObserverExpression(async (x, ct) => { await f(x, ct).ConfigureAwait(false); g(x); }); + } + } + } + + } + +} diff --git a/FoundationDB.Client/Linq/Expressions/AsyncTransformExpression.cs b/FoundationDB.Client/Linq/Expressions/AsyncTransformExpression.cs new file mode 100644 index 000000000..e35ac589c --- /dev/null +++ b/FoundationDB.Client/Linq/Expressions/AsyncTransformExpression.cs @@ -0,0 +1,170 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +namespace FoundationDB.Linq +{ + using FoundationDB.Async; + using JetBrains.Annotations; + using System; + using System.Threading; + using System.Threading.Tasks; + + /// Expression that applies a transformation on each item + /// Type of the source items + /// Type of the transformed items + internal sealed class AsyncTransformExpression + { + private readonly Func m_transform; + private readonly Func> m_asyncTransform; + + public AsyncTransformExpression(Func transform) + { + if (transform == null) throw new ArgumentNullException("transform"); + m_transform = transform; + } + + public AsyncTransformExpression(Func> asyncTransform) + { + if (asyncTransform == null) throw new ArgumentNullException("asyncTransform"); + m_asyncTransform = asyncTransform; + } + + public bool Async + { + get { return m_asyncTransform != null; } + } + + public bool IsIdentity() + { + //note: Identity Function is not async, and is only possible if TSource == TResult, so we can skip checking the types ourselves... 
+ return m_transform != null && object.ReferenceEquals(m_transform, TaskHelpers.Cache.Identity); + } + + public TResult Invoke(TSource item) + { + if (m_transform == null) FailInvalidOperation(); + return m_transform(item); + } + + public Task InvokeAsync(TSource item, CancellationToken ct) + { + if (m_asyncTransform != null) + { + return m_asyncTransform(item, ct); + } + else + { + return Task.FromResult(m_transform(item)); + } + } + + [ContractAnnotation("=> halt")] + private static void FailInvalidOperation() + { + throw new InvalidOperationException("Cannot invoke asynchronous transform synchronously"); + } + + [NotNull] + public AsyncTransformExpression Cast() + { + if (typeof(TCasted) == typeof(TResult)) + { // we are alredy of the correct type, we just need to fool the compiler into believing it! + return (AsyncTransformExpression)(object)this; + } + else + { + //note: if TCasted and TResult are not compatible, this will just blow up at execution time. + if (m_transform != null) + { + var f = m_transform; + return new AsyncTransformExpression((x) => (TCasted)(object)f(x)); + } + else + { + var f = m_asyncTransform; + return new AsyncTransformExpression(async (x, ct) => (TCasted)(object)(await f(x, ct).ConfigureAwait(false))); + } + } + } + + [NotNull] + public AsyncTransformExpression Then([NotNull] AsyncTransformExpression expr) + { + return Then(this, expr); + } + + [NotNull] + public static AsyncTransformExpression Then([NotNull] AsyncTransformExpression left, [NotNull] AsyncTransformExpression right) + { + if (left == null) throw new ArgumentNullException("left"); + if (right == null) throw new ArgumentNullException("right"); + + if (left.IsIdentity()) + { // we can optimize the left expression away, since we know that TSource == TResult ! 
+ //note: fool the compiler into believing that TSource == TResult + return (AsyncTransformExpression)(object)right; + } + + if (right.IsIdentity()) + { // we can optimize the right expression away, since we know that TResult == TOuter ! + return (AsyncTransformExpression)(object)left; + } + + if (left.m_transform != null) + { + var f = left.m_transform; + if (right.m_transform != null) + { + var g = right.m_transform; + return new AsyncTransformExpression((x) => g(f(x))); + } + else + { + var g = right.m_asyncTransform; + return new AsyncTransformExpression((x, ct) => g(f(x), ct)); + } + } + else + { + var f = left.m_asyncTransform; + if (right.m_asyncTransform != null) + { + var g = right.m_asyncTransform; + return new AsyncTransformExpression(async (x, ct) => await g(await f(x, ct).ConfigureAwait(false), ct).ConfigureAwait(false)); + } + else + { + var g = right.m_transform; + return new AsyncTransformExpression(async (x, ct) => g(await f(x, ct).ConfigureAwait(false))); + } + } + } + + } + +} diff --git a/FoundationDB.Client/Linq/FdbAsyncEnumerable.Iterators.cs b/FoundationDB.Client/Linq/FdbAsyncEnumerable.Iterators.cs index 80d0f94ed..d4c6ae4cd 100644 --- a/FoundationDB.Client/Linq/FdbAsyncEnumerable.Iterators.cs +++ b/FoundationDB.Client/Linq/FdbAsyncEnumerable.Iterators.cs @@ -47,7 +47,10 @@ public static partial class FdbAsyncEnumerable /// Source async sequence that will be wrapped /// Factory method called when the outer sequence starts iterating. Must return an async enumerator /// New async sequence - internal static FdbAsyncSequence Create(IFdbAsyncEnumerable source, Func, IFdbAsyncEnumerator> factory) + internal static FdbAsyncSequence Create( + IFdbAsyncEnumerable source, + Func, + IFdbAsyncEnumerator> factory) { return new FdbAsyncSequence(source, factory); } @@ -58,13 +61,18 @@ internal static FdbAsyncSequence Create(IFdb /// Source sequence that will be wrapped /// Factory method called when the outer sequence starts iterating. 
Must return an async enumerator /// New async sequence - internal static EnumerableSequence Create(IEnumerable source, Func, IFdbAsyncEnumerator> factory) + internal static EnumerableSequence Create( + IEnumerable source, + Func, + IFdbAsyncEnumerator> factory) { return new EnumerableSequence(source, factory); } /// Create a new async sequence from a factory method - public static IFdbAsyncEnumerable Create(Func> factory, object state = null) + public static IFdbAsyncEnumerable Create( + Func> factory, + object state = null) { return new AnonymousIterable(factory, state); } @@ -94,89 +102,64 @@ public IFdbAsyncEnumerator GetEnumerator(FdbAsyncMode _) #endregion - #region Flatten... + #region Helpers... - internal static FdbSelectManyAsyncIterator Flatten(IFdbAsyncEnumerable source, Func> selector) + internal static FdbSelectManyAsyncIterator Flatten( + IFdbAsyncEnumerable source, + AsyncTransformExpression> selector) { - return new FdbSelectManyAsyncIterator(source, selector, null); + return new FdbSelectManyAsyncIterator(source, selector); } - internal static FdbSelectManyAsyncIterator Flatten(IFdbAsyncEnumerable source, Func>> asyncSelector) - { - return new FdbSelectManyAsyncIterator(source, null, asyncSelector); - } - - internal static FdbSelectManyAsyncIterator Flatten(IFdbAsyncEnumerable source, Func> collectionSelector, Func resultSelector) + internal static FdbSelectManyAsyncIterator Flatten( + IFdbAsyncEnumerable source, AsyncTransformExpression> collectionSelector, + Func resultSelector) { return new FdbSelectManyAsyncIterator( source, collectionSelector, - null, - resultSelector - ); - } - - internal static FdbSelectManyAsyncIterator Flatten(IFdbAsyncEnumerable source, Func>> asyncCollectionSelector, Func resultSelector) - { - return new FdbSelectManyAsyncIterator( - source, - null, - asyncCollectionSelector, resultSelector ); } - #endregion - - #region Map... 
- - internal static FdbWhereSelectAsyncIterator Map(IFdbAsyncEnumerable source, Func selector, int? limit = null, int? offset = null) - { - return new FdbWhereSelectAsyncIterator(source, filter: null, asyncFilter: null, transform: selector, asyncTransform: null, limit: limit, offset: offset); - } - internal static FdbWhereSelectAsyncIterator Map(IFdbAsyncEnumerable source, Func> asyncSelector, int? limit = null, int? offset = null) - { - return new FdbWhereSelectAsyncIterator(source, filter: null, asyncFilter: null, transform: null, asyncTransform: asyncSelector, limit: limit, offset: offset); - } - - #endregion - - #region Filter... - - [NotNull] - internal static FdbWhereAsyncIterator Filter(IFdbAsyncEnumerable source, Func predicate) + internal static FdbWhereSelectAsyncIterator Map( + IFdbAsyncEnumerable source, AsyncTransformExpression selector, + int? limit = null, int? + offset = null) { - return new FdbWhereAsyncIterator(source, predicate, null); + return new FdbWhereSelectAsyncIterator(source, filter: null, transform: selector, limit: limit, offset: offset); } [NotNull] - internal static FdbWhereAsyncIterator Filter(IFdbAsyncEnumerable source, Func> asyncPredicate) + internal static FdbWhereAsyncIterator Filter( + [NotNull] IFdbAsyncEnumerable source, + [NotNull] AsyncFilterExpression filter) { - return new FdbWhereAsyncIterator(source, null, asyncPredicate); + return new FdbWhereAsyncIterator(source, filter); } - #endregion - - #region Offset... 
- [NotNull] - internal static FdbWhereSelectAsyncIterator Offset(IFdbAsyncEnumerable source, int offset) + internal static FdbWhereSelectAsyncIterator Offset( + IFdbAsyncEnumerable source, + int offset) { - return new FdbWhereSelectAsyncIterator(source, filter: null, asyncFilter: null, transform: TaskHelpers.Cache.Identity, asyncTransform: null, limit: null, offset: offset); + return new FdbWhereSelectAsyncIterator(source, filter: null, transform: new AsyncTransformExpression(TaskHelpers.Cache.Identity), limit: null, offset: offset); } - #endregion - - #region Limit... - [NotNull] - internal static FdbWhereSelectAsyncIterator Limit(IFdbAsyncEnumerable source, int limit) + internal static FdbWhereSelectAsyncIterator Limit( + IFdbAsyncEnumerable source, + int limit) { - return new FdbWhereSelectAsyncIterator(source, filter: null, asyncFilter: null, transform: TaskHelpers.Cache.Identity, asyncTransform: null, limit: limit, offset: null); + return new FdbWhereSelectAsyncIterator(source, filter: null, transform: new AsyncTransformExpression(TaskHelpers.Cache.Identity), limit: limit, offset: null); } [NotNull] - internal static FdbTakeWhileAsyncIterator Limit(IFdbAsyncEnumerable source, Func condition) + internal static FdbTakeWhileAsyncIterator Limit( + IFdbAsyncEnumerable source, + Func condition) { return new FdbTakeWhileAsyncIterator(source, condition); } @@ -245,6 +228,47 @@ private void Grow() this.Index = 0; } + private T[] MergeChunks() + { + var tmp = new T[this.Count]; + int count = this.Count; + int index = 0; + for (int i = 0; i < this.Chunks.Length - 1; i++) + { + var chunk = this.Chunks[i]; + Array.Copy(chunk, 0, tmp, index, chunk.Length); + index += chunk.Length; + count -= chunk.Length; + } + Array.Copy(this.Current, 0, tmp, index, count); + return tmp; + } + + /// Return a buffer containing all of the items + /// Buffer that contains all the items, and may be larger than required + /// This is equivalent to calling ToArray(), except that if the buffer is 
empty, or if it consists of a single page, then no new allocations will be performed. + public T[] GetBuffer() + { + //note: this is called by internal operator like OrderBy + // In this case we want to reduce the copying as much as possible, + // and we can suppose that the buffer won't be exposed to the application + + if (this.Count == 0) + { // empty + return new T[0]; + } + else if (this.Chunks.Length == 1) + { // everything fits in a single chunk + return this.Current; + } + else + { // we need to stitch all the buffers together + return MergeChunks(); + } + } + + /// Return the content of the buffer + /// Array of size containing all the items in this buffer public T[] ToArray() { if (this.Count == 0) @@ -252,26 +276,17 @@ public T[] ToArray() return new T[0]; } else if (this.Chunks.Length == 1 && this.Current.Length == this.Count) - { // we are really lucky + { // a single buffer page was used return this.Current; } else - { // concatenate all the small buffers into one big array - var tmp = new T[this.Count]; - int count = this.Count; - int index = 0; - for (int i = 0; i < this.Chunks.Length - 1;i++) - { - var chunk = this.Chunks[i]; - Array.Copy(chunk, 0, tmp, index, chunk.Length); - index += chunk.Length; - count -= chunk.Length; - } - Array.Copy(this.Current, 0, tmp, index, count); - return tmp; + { // concatenate all the buffer pages into one big array + return MergeChunks(); } } + /// Return the content of the buffer + /// List of size containing all the items in this buffer public List ToList() { int count = this.Count; @@ -307,6 +322,7 @@ public List ToList() return list; } + } /// Immediately execute an action on each element of an async sequence diff --git a/FoundationDB.Client/Linq/FdbAsyncEnumerable.OrderedSequence.cs b/FoundationDB.Client/Linq/FdbAsyncEnumerable.OrderedSequence.cs new file mode 100644 index 000000000..af7e1ef94 --- /dev/null +++ b/FoundationDB.Client/Linq/FdbAsyncEnumerable.OrderedSequence.cs @@ -0,0 +1,256 @@ +#region BSD 
Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +namespace FoundationDB.Linq +{ + using FoundationDB.Async; + using FoundationDB.Client.Utils; + using JetBrains.Annotations; + using System; + using System.Collections.Generic; + using System.Threading; + using System.Threading.Tasks; + + public static partial class FdbAsyncEnumerable + { + + /// Represent an async sequence that returns its elements according to a specific sort order + /// Type of the elements of the sequence + internal class OrderedSequence : IFdbAsyncOrderedEnumerable + { + // If an instance of the base class is constructed, it will sort by the items themselves (using a Comparer) + // If an instance of the derived class is constructed, then it will sort the a key extracted the each item (sing a Comparer) + + protected readonly IFdbAsyncEnumerable m_source; + private readonly IComparer m_comparer; // null if comparing using keys + protected readonly bool m_descending; + protected readonly OrderedSequence m_parent;// null if primary sort key + + public OrderedSequence(IFdbAsyncEnumerable source, IComparer comparer, bool descending, OrderedSequence parent) + { + Contract.Requires(source != null && comparer != null); + + m_source = source; + m_descending = descending; + m_comparer = comparer ?? 
Comparer.Default; + m_parent = parent; + } + + protected OrderedSequence(IFdbAsyncEnumerable source, bool descending, OrderedSequence parent) + { + Contract.Requires(source != null); + + m_source = source; + m_descending = descending; + m_parent = parent; + } + + [NotNull] + internal virtual SequenceSorter GetEnumerableSorter(SequenceSorter next) + { + var sorter = new SequenceByElementSorter(m_comparer, m_descending, next); + if (m_parent == null) return sorter; + return m_parent.GetEnumerableSorter(sorter); + } + + [NotNull] + public IFdbAsyncEnumerator GetEnumerator(FdbAsyncMode mode = FdbAsyncMode.Default) + { + var sorter = GetEnumerableSorter(null); + var enumerator = default(IFdbAsyncEnumerator); + try + { + enumerator = m_source.GetEnumerator(mode); + return new OrderedEnumerator(enumerator, sorter); + } + catch (Exception) + { + if (enumerator != null) enumerator.Dispose(); + throw; + } + } + + IAsyncEnumerator IAsyncEnumerable.GetEnumerator() + { + return GetEnumerator(FdbAsyncMode.All); + } + + [NotNull] + public IFdbAsyncOrderedEnumerable ThenBy([NotNull] Func keySelector, IComparer keyComparer = null) + { + if (keySelector == null) throw new ArgumentNullException("keySelector"); + + return new OrderedSequence(this, keySelector, keyComparer, false, this); + } + + [NotNull] + public IFdbAsyncOrderedEnumerable ThenByDescending([NotNull] Func keySelector, IComparer keyComparer = null) + { + if (keySelector == null) throw new ArgumentNullException("keySelector"); + + return new OrderedSequence(this, keySelector, keyComparer, true, this); + } + + } + + /// Represent an async sequence that returns its elements according to a specific sort order + /// Type of the elements of the sequence + /// Type of the keys used to sort the elements + internal sealed class OrderedSequence : OrderedSequence + { + private readonly Func m_keySelector; + private readonly IComparer m_keyComparer; + + public OrderedSequence(IFdbAsyncEnumerable source, Func keySelector, IComparer 
comparer, bool descending, OrderedSequence parent) + : base(source, descending, parent) + { + m_keySelector = keySelector; + m_keyComparer = comparer ?? Comparer.Default; + } + + [NotNull] + internal override SequenceSorter GetEnumerableSorter(SequenceSorter next) + { + var sorter = new SequenceByKeySorter(m_keySelector, m_keyComparer, m_descending, next); + if (m_parent == null) return sorter; + return m_parent.GetEnumerableSorter(sorter); + } + + } + + /// Iterator that will sort all the items produced by an inner iterator, before outputting the results all at once + internal sealed class OrderedEnumerator : IFdbAsyncEnumerator + { + // This iterator must first before EVERY items of the source in memory, before being able to sort them. + // The first MoveNext() will return only once the inner sequence has finished (succesfully), which can take some time! + // Ordering is done in-memory using QuickSort + + private readonly IFdbAsyncEnumerator m_inner; + private readonly SequenceSorter m_sorter; + + private TSource[] m_items; + private int[] m_map; + private int m_offset; + private TSource m_current; + + public OrderedEnumerator(IFdbAsyncEnumerator enumerator, SequenceSorter sorter) + { + Contract.Requires(enumerator != null && sorter != null); + m_inner = enumerator; + m_sorter = sorter; + } + + private async Task ReadAllThenSort(CancellationToken ct) + { + if (m_offset == -1) return false; // already EOF or Disposed + + // first we need to spool everything from the inner iterator into memory + var buffer = new FdbAsyncEnumerable.Buffer(); + + var inner = m_inner; + var iterator = inner as FdbAsyncIterator; + if (iterator != null) + { + await iterator.ExecuteAsync((x) => buffer.Add(x), ct).ConfigureAwait(false); + } + else + { + while (await inner.MoveNext(ct).ConfigureAwait(false)) + { + buffer.Add(inner.Current); + } + } + + if (buffer.Count == 0) + { // the list was empty! + Completed(); + return false; + } + + // then we need to sort everything... 
+ m_items = buffer.GetBuffer(); + m_map = m_sorter.Sort(m_items, buffer.Count); + + // and only then we can start outputing the first value + // (after that, all MoveNext operations will be non-async + Publish(0); + return true; + } + + public Task MoveNext(CancellationToken cancellationToken) + { + // Firt call will be slow (and async), but the rest of the calls will use the results already sorted in memory, and should be as fast as possible! + + if (m_map == null) + { + return ReadAllThenSort(cancellationToken); + } + + int pos = checked(m_offset + 1); + if (pos < m_map.Length) + { + Publish(pos); + return TaskHelpers.TrueTask; + } + else + { + Completed(); + return TaskHelpers.FalseTask; + } + } + + private void Publish(int offset) + { + Contract.Requires(m_items != null && m_map != null && offset >= 0 && offset < m_map.Length); + m_current = m_items[m_map[offset]]; + m_offset = offset; + } + + private void Completed() + { + m_current = default(TSource); + m_offset = -1; + m_items = null; + m_map = null; + + } + + public TSource Current + { + get { throw new NotImplementedException(); } + } + + public void Dispose() + { + Completed(); + } + } + + } + +} diff --git a/FoundationDB.Client/Linq/FdbAsyncEnumerable.Sorters.cs b/FoundationDB.Client/Linq/FdbAsyncEnumerable.Sorters.cs new file mode 100644 index 000000000..fd1cecabc --- /dev/null +++ b/FoundationDB.Client/Linq/FdbAsyncEnumerable.Sorters.cs @@ -0,0 +1,156 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +namespace FoundationDB.Linq +{ + using FoundationDB.Client.Utils; + using JetBrains.Annotations; + using System; + using System.Collections.Generic; + + public static partial class FdbAsyncEnumerable + { + // These classes contain the logic to sort items (by themselves or by keys) + // They are single-use and constructed at runtime, when an ordered sequence starts enumerating. 
+ // In case of multiple sort keys, each sorter is linked to the next (from primary key, to the last key) + + /// Helper class for sorting a sequence of + /// Type of the sorted elements + internal abstract class SequenceSorter + { + /// Fill the array with all the keys extracted from the source + /// + /// + internal abstract void ComputeKeys([NotNull] TSource[] items, int count); + + internal abstract int CompareKeys(int index1, int index2); + + [NotNull] + internal int[] Sort([NotNull] TSource[] items, int count) + { + ComputeKeys(items, count); + var map = new int[count]; + for (int i = 0; i < map.Length; i++) map[i] = i; + QuickSort(map, 0, count - 1); + return map; + } + + private void QuickSort([NotNull] int[] map, int left, int right) + { + throw new NotImplementedException(); + } + + } + + /// Helper class for sorting a sequence of + /// Type of the sorted elements + internal sealed class SequenceByElementSorter : SequenceSorter + { + private readonly IComparer m_comparer; + private readonly bool m_descending; + + private SequenceSorter m_next; + private TSource[] m_items; + + public SequenceByElementSorter(IComparer comparer, bool descending, SequenceSorter next) + { + Contract.Requires(comparer != null); + + m_comparer = comparer; + m_descending = descending; + m_next = next; + } + + internal override void ComputeKeys([NotNull] TSource[] items, int count) + { + m_items = items; + } + + internal override int CompareKeys(int index1, int index2) + { + var items = m_items; + int c = m_comparer.Compare(items[index1], items[index2]); + if (c == 0) + { + SequenceSorter next; + return (next = m_next) == null ? (index1 - index2) : next.CompareKeys(index1, index2); + } + return !m_descending ? 
c : -c; + } + + } + + /// Helper class for sorting a sequence of using a key of + /// Type of the sorted elements + /// Type of the keys used to sort the elements + internal sealed class SequenceByKeySorter : SequenceSorter + { + private readonly Func m_keySelector; + private readonly IComparer m_comparer; + private readonly bool m_descending; + + private SequenceSorter m_next; + private TKey[] m_keys; + + public SequenceByKeySorter(Func keySelector, IComparer comparer, bool descending, SequenceSorter next) + { + Contract.Requires(keySelector != null && comparer != null); + + m_keySelector = keySelector; + m_comparer = comparer; + m_descending = descending; + m_next = next; + } + + internal override void ComputeKeys([NotNull] TSource[] items, int count) + { + var selector = m_keySelector; + var keys = new TKey[count]; + for (int i = 0; i < keys.Length; i++) + { + keys[i] = selector(items[i]); + } + m_keys = keys; + } + + internal override int CompareKeys(int index1, int index2) + { + var keys = m_keys; + int c = m_comparer.Compare(keys[index1], keys[index2]); + if (c == 0) + { + SequenceSorter next; + return (next = m_next) == null ? (index1 - index2) : next.CompareKeys(index1, index2); + } + return !m_descending ? c : -c; + } + + } + + } +} diff --git a/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs b/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs index 3214d97e7..682d72b7f 100644 --- a/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs +++ b/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs @@ -127,7 +127,7 @@ public static IFdbAsyncEnumerable SelectMany(this IFd return iterator.SelectMany(selector); } - return Flatten(source, selector); + return Flatten(source, new AsyncTransformExpression>(selector)); } /// Projects each element of an async sequence to an and flattens the resulting sequences into one async sequence. 
@@ -153,7 +153,7 @@ public static IFdbAsyncEnumerable SelectMany(this IFd return iterator.SelectMany(asyncSelector); } - return Flatten(source, asyncSelector); + return Flatten(source, new AsyncTransformExpression>(asyncSelector)); } /// Projects each element of an async sequence to an flattens the resulting sequences into one async sequence, and invokes a result selector function on each element therein. @@ -170,7 +170,7 @@ public static IFdbAsyncEnumerable SelectMany(collectionSelector, resultSelector); } - return Flatten(source, collectionSelector, resultSelector); + return Flatten(source, new AsyncTransformExpression>(collectionSelector), resultSelector); } /// Projects each element of an async sequence to an flattens the resulting sequences into one async sequence, and invokes a result selector function on each element therein. @@ -198,7 +198,7 @@ public static IFdbAsyncEnumerable SelectMany(asyncCollectionSelector, resultSelector); } - return Flatten(source, asyncCollectionSelector, resultSelector); + return Flatten(source, new AsyncTransformExpression>(asyncCollectionSelector), resultSelector); } #endregion @@ -218,7 +218,7 @@ public static IFdbAsyncEnumerable Select(this IFdbAsy return iterator.Select(selector); } - return Map(source, selector); + return Map(source, new AsyncTransformExpression(selector)); } /// Projects each element of an async sequence into a new form. @@ -244,7 +244,7 @@ public static IFdbAsyncEnumerable Select(this IFdbAsy return iterator.Select(asyncSelector); } - return Map(source, asyncSelector); + return Map(source, new AsyncTransformExpression(asyncSelector)); } #endregion @@ -264,7 +264,7 @@ public static IFdbAsyncEnumerable Where(this IFdbAsyncEnumerab return iterator.Where(predicate); } - return Filter(source, predicate); + return Filter(source, new AsyncFilterExpression(predicate)); } /// Filters an async sequence of values based on a predicate. 
@@ -290,7 +290,7 @@ public static IFdbAsyncEnumerable Where(this IFdbAsyncEnumerab return iterator.Where(asyncPredicate); } - return Filter(source, asyncPredicate); + return Filter(source, new AsyncFilterExpression(asyncPredicate)); } #endregion @@ -333,6 +333,22 @@ public static IFdbAsyncEnumerable TakeWhile(this IFdbAsyncEnum return FdbAsyncEnumerable.Limit(source, condition); } + public static IFdbAsyncEnumerable TakeWhile(this IFdbAsyncEnumerable source, [NotNull] Func condition, out QueryStatistics stopped) + { + var signal = new QueryStatistics(false); + stopped = signal; + + // to trigger the signal, we just intercept the condition returning false (which only happen once!) + Func wrapped = (x) => + { + if (condition(x)) return true; + signal.Update(true); + return false; + }; + + return TakeWhile(source, wrapped); + } + #endregion #region Skip... @@ -402,8 +418,10 @@ public static IFdbAsyncEnumerable Distinct(this IFdbAsyncEnume { return iterator.ExecuteAsync(action, ct); } - - return Run(source, FdbAsyncMode.All, action, ct); + else + { + return Run(source, FdbAsyncMode.All, action, ct); + } } /// Execute an async action for each element of an async sequence @@ -411,7 +429,15 @@ public static IFdbAsyncEnumerable Distinct(this IFdbAsyncEnume { if (asyncAction == null) throw new ArgumentNullException("asyncAction"); - return ForEachAsync(source, TaskHelpers.WithCancellation(asyncAction), ct); + var iterator = source as FdbAsyncIterator; + if (iterator != null) + { + return iterator.ExecuteAsync(TaskHelpers.WithCancellation(asyncAction), ct); + } + else + { + return ForEachAsync(source, TaskHelpers.WithCancellation(asyncAction), ct); + } } /// Execute an async action for each element of an async sequence @@ -425,105 +451,95 @@ public static IFdbAsyncEnumerable Distinct(this IFdbAsyncEnume { return iterator.ExecuteAsync(asyncAction, ct); } - - return Run(source, FdbAsyncMode.All, asyncAction, ct); + else + { + return Run(source, FdbAsyncMode.All, asyncAction, 
ct); + } } /// Create a list from an async sequence. - public static async Task> ToListAsync(this IFdbAsyncEnumerable source, CancellationToken ct = default(CancellationToken)) + public static Task> ToListAsync(this IFdbAsyncEnumerable source, CancellationToken ct = default(CancellationToken)) { Contract.Requires(source != null); - var buffer = new Buffer(); - - await ForEachAsync(source, (x) => buffer.Add(x), ct).ConfigureAwait(false); - - return buffer.ToList(); + return AggregateAsync( + source, + new Buffer(), + (buffer, x) => buffer.Add(x), + (buffer) => buffer.ToList(), + ct + ); } /// Create an array from an async sequence. - public static async Task ToArrayAsync(this IFdbAsyncEnumerable source, CancellationToken ct = default(CancellationToken)) + public static Task ToArrayAsync(this IFdbAsyncEnumerable source, CancellationToken cancellationToken = default(CancellationToken)) { Contract.Requires(source != null); - var buffer = new Buffer(); - - await ForEachAsync(source, (x) => buffer.Add(x), ct).ConfigureAwait(false); - - return buffer.ToArray(); + return AggregateAsync( + source, + new Buffer(), + (buffer, x) => buffer.Add(x), + (buffer) => buffer.ToArray(), + cancellationToken + ); } /// Create an array from an async sequence, knowing a rough estimation of the number of elements. 
- internal static async Task ToArrayAsync(this IFdbAsyncEnumerable source, int estimatedSize, CancellationToken ct = default(CancellationToken)) + internal static Task ToArrayAsync(this IFdbAsyncEnumerable source, int estimatedSize, CancellationToken cancellationToken = default(CancellationToken)) { Contract.Requires(source != null && estimatedSize >= 0); - var list = new List(estimatedSize); - await ForEachAsync(source, (x) => list.Add(x), ct).ConfigureAwait(false); - return list.ToArray(); + return AggregateAsync( + source, + new List(estimatedSize), + (buffer, x) => buffer.Add(x), + (buffer) => buffer.ToArray(), + cancellationToken + ); } /// Creates a Dictionary from an async sequence according to a specified key selector function and key comparer. - public static async Task> ToDictionaryAsync(this IFdbAsyncEnumerable source, [NotNull] Func keySelector, IEqualityComparer comparer = null, CancellationToken cancellationToken = default(CancellationToken)) + public static Task> ToDictionaryAsync(this IFdbAsyncEnumerable source, [NotNull] Func keySelector, IEqualityComparer comparer = null, CancellationToken cancellationToken = default(CancellationToken)) { if (source == null) throw new ArgumentNullException("source"); if (keySelector == null) throw new ArgumentNullException("keySelector"); - cancellationToken.ThrowIfCancellationRequested(); - var results = new Dictionary(comparer ?? EqualityComparer.Default); - using (var iterator = source.GetEnumerator(FdbAsyncMode.All)) - { - Contract.Assert(iterator != null, "The sequence returned a null async iterator"); - - while (await iterator.MoveNext(cancellationToken).ConfigureAwait(false)) - { - results[keySelector(iterator.Current)] = iterator.Current; - } - } - - return results; + return AggregateAsync( + source, + new Dictionary(comparer ?? 
EqualityComparer.Default), + (results, x) => { results[keySelector(x)] = x; }, + cancellationToken + ); } /// Creates a Dictionary from an async sequence according to a specified key selector function, a comparer, and an element selector function. - public static async Task> ToDictionaryAsync(this IFdbAsyncEnumerable source, [NotNull] Func keySelector, [NotNull] Func elementSelector, IEqualityComparer comparer = null, CancellationToken cancellationToken = default(CancellationToken)) + public static Task> ToDictionaryAsync(this IFdbAsyncEnumerable source, [NotNull] Func keySelector, [NotNull] Func elementSelector, IEqualityComparer comparer = null, CancellationToken cancellationToken = default(CancellationToken)) { if (source == null) throw new ArgumentNullException("source"); if (keySelector == null) throw new ArgumentNullException("keySelector"); if (elementSelector == null) throw new ArgumentNullException("elementSelector"); - cancellationToken.ThrowIfCancellationRequested(); - var results = new Dictionary(comparer ?? EqualityComparer.Default); - using (var iterator = source.GetEnumerator(FdbAsyncMode.All)) - { - Contract.Assert(iterator != null, "The sequence returned a null async iterator"); - - while (await iterator.MoveNext(cancellationToken).ConfigureAwait(false)) - { - results[keySelector(iterator.Current)] = elementSelector(iterator.Current); - } - } - - return results; + return AggregateAsync( + source, + new Dictionary(comparer ?? EqualityComparer.Default), + (results, x) => { results[keySelector(x)] = elementSelector(x); }, + cancellationToken + ); } /// Creates a Dictionary from an async sequence of pairs of keys and values. 
- public static async Task> ToDictionaryAsync(this IFdbAsyncEnumerable> source, IEqualityComparer comparer = null, CancellationToken cancellationToken = default(CancellationToken)) + public static Task> ToDictionaryAsync(this IFdbAsyncEnumerable> source, IEqualityComparer comparer = null, CancellationToken cancellationToken = default(CancellationToken)) { if (source == null) throw new ArgumentNullException("source"); cancellationToken.ThrowIfCancellationRequested(); - var results = new Dictionary(comparer ?? EqualityComparer.Default); - using (var iterator = source.GetEnumerator(FdbAsyncMode.All)) - { - Contract.Assert(iterator != null, "The sequence returned a null async iterator"); - - while (await iterator.MoveNext(cancellationToken).ConfigureAwait(false)) - { - results[iterator.Current.Key] = iterator.Current.Value; - } - } - - return results; + return AggregateAsync( + source, + new Dictionary(comparer ?? EqualityComparer.Default), + (results, x) => { results[x.Key] = x.Value; }, + cancellationToken + ); } /// Applies an accumulator function over an async sequence. @@ -558,18 +574,20 @@ public static IFdbAsyncEnumerable Distinct(this IFdbAsyncEnume if (source == null) throw new ArgumentNullException("source"); if (aggregator == null) throw new ArgumentNullException("aggregator"); - cancellationToken.ThrowIfCancellationRequested(); - using (var iterator = source.GetEnumerator(FdbAsyncMode.All)) - { - Contract.Assert(iterator != null, "The sequence returned a null async iterator"); + var accumulate = seed; + await ForEachAsync(source, (x) => { accumulate = aggregator(accumulate, x); }, cancellationToken).ConfigureAwait(false); + return accumulate; + } - var accumulate = seed; - while (await iterator.MoveNext(cancellationToken).ConfigureAwait(false)) - { - accumulate = aggregator(accumulate, iterator.Current); - } - return accumulate; - } + /// Applies an accumulator function over an async sequence. 
+ public static async Task AggregateAsync(this IFdbAsyncEnumerable source, TAccumulate seed, [NotNull] Action aggregator, CancellationToken cancellationToken = default(CancellationToken)) + { + if (source == null) throw new ArgumentNullException("source"); + if (aggregator == null) throw new ArgumentNullException("aggregator"); + + var accumulate = seed; + await ForEachAsync(source, (x) => { aggregator(accumulate, x); }, cancellationToken).ConfigureAwait(false); + return accumulate; } /// Applies an accumulator function over an async sequence. @@ -579,18 +597,20 @@ public static IFdbAsyncEnumerable Distinct(this IFdbAsyncEnume if (aggregator == null) throw new ArgumentNullException("aggregator"); if (resultSelector == null) throw new ArgumentNullException("resultSelector"); - cancellationToken.ThrowIfCancellationRequested(); var accumulate = seed; - using (var iterator = source.GetEnumerator(FdbAsyncMode.All)) - { - Contract.Assert(iterator != null, "The sequence returned a null async iterator"); + await ForEachAsync(source, (x) => { accumulate = aggregator(accumulate, x); }, cancellationToken).ConfigureAwait(false); + return resultSelector(accumulate); + } - while (await iterator.MoveNext(cancellationToken).ConfigureAwait(false)) - { - accumulate = aggregator(accumulate, iterator.Current); - } - } + /// Applies an accumulator function over an async sequence. 
+ public static async Task AggregateAsync(this IFdbAsyncEnumerable source, TAccumulate seed, [NotNull] Action aggregator, [NotNull] Func resultSelector, CancellationToken cancellationToken = default(CancellationToken)) + { + if (source == null) throw new ArgumentNullException("source"); + if (aggregator == null) throw new ArgumentNullException("aggregator"); + if (resultSelector == null) throw new ArgumentNullException("resultSelector"); + var accumulate = seed; + await ForEachAsync(source, (x) => aggregator(accumulate, x), cancellationToken); return resultSelector(accumulate); } @@ -656,7 +676,7 @@ public static IFdbAsyncEnumerable Distinct(this IFdbAsyncEnume bool found = false; T last = default(T); - await Run(source, FdbAsyncMode.All, (x) => { found = true; last = x; }, ct).ConfigureAwait(false); + await ForEachAsync(source, (x) => { found = true; last = x; }, ct).ConfigureAwait(false); if (!found) throw new InvalidOperationException("The sequence was empty"); return last; @@ -673,7 +693,9 @@ public static IFdbAsyncEnumerable Distinct(this IFdbAsyncEnume bool found = false; T last = default(T); - await Run(source, FdbAsyncMode.All, (x) => { found = true; last = x; }, ct).ConfigureAwait(false); + + await ForEachAsync(source, (x) => { found = true; last = x; }, ct).ConfigureAwait(false); + return found ? last : default(T); } @@ -716,6 +738,8 @@ await Run( int counter = index; T item = default(T); + + //TODO: use ExecuteAsync() if the source is an Iterator! 
await Run( source, FdbAsyncMode.All, @@ -738,7 +762,9 @@ await Run( ct.ThrowIfCancellationRequested(); int count = 0; - await Run(source, FdbAsyncMode.All, (_) => { ++count; }, ct).ConfigureAwait(false); + + await ForEachAsync(source, (_) => { ++count; }, ct).ConfigureAwait(false); + return count; } @@ -749,7 +775,9 @@ await Run( if (predicate == null) throw new ArgumentNullException("predicate"); int count = 0; - await Run(source, FdbAsyncMode.All, (x) => { if (predicate(x)) ++count; }, ct).ConfigureAwait(false); + + await ForEachAsync(source, (x) => { if (predicate(x)) ++count; }, ct).ConfigureAwait(false); + return count; } @@ -759,7 +787,9 @@ await Run( if (source == null) throw new ArgumentNullException("source"); ulong sum = 0; - await Run(source, FdbAsyncMode.All, (x) => { sum += x; }, ct).ConfigureAwait(false); + + await ForEachAsync(source, (x) => { sum += x; }, ct).ConfigureAwait(false); + return sum; } @@ -770,7 +800,9 @@ await Run( if (predicate == null) throw new ArgumentNullException("predicate"); ulong sum = 0; - await Run(source, FdbAsyncMode.All, (x) => { if (predicate(x)) sum += x; }, ct).ConfigureAwait(false); + + await ForEachAsync(source, (x) => { if (predicate(x)) sum += x; }, ct).ConfigureAwait(false); + return sum; } @@ -780,7 +812,9 @@ await Run( if (source == null) throw new ArgumentNullException("source"); long sum = 0; - await Run(source, FdbAsyncMode.All, (x) => { sum += x; }, ct).ConfigureAwait(false); + + await ForEachAsync(source, (x) => { sum += x; }, ct).ConfigureAwait(false); + return sum; } @@ -791,7 +825,9 @@ await Run( if (predicate == null) throw new ArgumentNullException("predicate"); long sum = 0; - await Run(source, FdbAsyncMode.All, (x) => { if (predicate(x)) sum += x; }, ct).ConfigureAwait(false); + + await ForEachAsync(source, (x) => { if (predicate(x)) sum += x; }, ct).ConfigureAwait(false); + return sum; } @@ -804,9 +840,8 @@ await Run( bool found = false; T min = default(T); - await Run( + await ForEachAsync( source, 
- FdbAsyncMode.All, (x) => { if (!found || comparer.Compare(x, min) < 0) @@ -831,9 +866,8 @@ await Run( bool found = false; T max = default(T); - await Run( + await ForEachAsync( source, - FdbAsyncMode.All, (x) => { if (!found || comparer.Compare(x, max) > 0) @@ -911,6 +945,136 @@ await Run( #endregion + #region Query Statistics... + + //TODO: move this somewhere else? + + public class QueryStatistics + { + public QueryStatistics() + { } + + public QueryStatistics(TData value) + { + this.Value = value; + } + + public TData Value { get; protected set; } + + public void Update(TData newValue) + { + this.Value = newValue; + } + } + + public class KeyValueSize + { + /// Total number of pairs of keys and values that have flowed through this point + public long Count { get; private set; } + + /// Total size of all keys and values combined + public long Size { get { return checked(this.KeySize + this.ValueSize); } } + + /// Total size of all keys combined + public long KeySize { get; private set; } + + /// Total size of all values combined + public long ValueSize { get; private set; } + + public void Add(int keySize, int valueSize) + { + this.Count++; + this.KeySize = checked(keySize + this.KeySize); + this.ValueSize = checked(valueSize + this.ValueSize); + } + } + + public class DataSize + { + /// Total number of items that have flowed through this point + public long Count { get; private set; } + + /// Total size of all items that have flowed through this point + public long Size { get; private set; } + + public void Add(int size) + { + this.Count++; + this.Size = checked(size + this.Size); + } + } + + /// Measure the number of items that pass through this point of the query + /// The values returned in are only safe to read once the query has ended + public static IFdbAsyncEnumerable WithCountStatistics(this IFdbAsyncEnumerable source, out QueryStatistics counter) + { + var signal = new QueryStatistics(0); + counter = signal; + + // to count, we just increment the 
signal each type a value flows through here + Func wrapped = (x) => + { + signal.Update(checked(signal.Value + 1)); + return x; + }; + + return Select(source, wrapped); + } + + /// Measure the number and size of slices that pass through this point of the query + /// The values returned in are only safe to read once the query has ended + public static IFdbAsyncEnumerable> WithSizeStatistics(this IFdbAsyncEnumerable> source, out QueryStatistics statistics) + { + var data = new KeyValueSize(); + statistics = new QueryStatistics(data); + + // to count, we just increment the signal each type a value flows through here + Func, KeyValuePair> wrapped = (kvp) => + { + data.Add(kvp.Key.Count, kvp.Value.Count); + return kvp; + }; + + return Select(source, wrapped); + } + + /// Measure the number and sizes of the keys and values that pass through this point of the query + /// The values returned in are only safe to read once the query has ended + public static IFdbAsyncEnumerable WithSizeStatistics(this IFdbAsyncEnumerable source, out QueryStatistics statistics) + { + var data = new DataSize(); + statistics = new QueryStatistics(data); + + // to count, we just increment the signal each type a value flows through here + Func wrapped = (x) => + { + data.Add(x.Count); + return x; + }; + + return Select(source, wrapped); + } + + /// Execute an action on each item passing through the sequence, without modifying the original sequence + /// The is execute inline before passing the item down the line, and should not block + public static IFdbAsyncEnumerable Observe(this IFdbAsyncEnumerable source, [NotNull] Action handler) + { + if (handler == null) throw new ArgumentNullException("handler"); + + return new FdbObserverIterator(source, new AsyncObserverExpression(handler)); + } + + /// Execute an action on each item passing through the sequence, without modifying the original sequence + /// The is execute inline before passing the item down the line, and should not block + public 
static IFdbAsyncEnumerable Observe(this IFdbAsyncEnumerable source, [NotNull] Func asyncHandler) + { + if (asyncHandler == null) throw new ArgumentNullException("asyncHandler"); + + return new FdbObserverIterator(source, new AsyncObserverExpression(asyncHandler)); + } + + #endregion + } } diff --git a/FoundationDB.Client/Linq/IFdbAsyncOrderedEnumerable.cs b/FoundationDB.Client/Linq/IFdbAsyncOrderedEnumerable.cs new file mode 100644 index 000000000..28267c5ea --- /dev/null +++ b/FoundationDB.Client/Linq/IFdbAsyncOrderedEnumerable.cs @@ -0,0 +1,45 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +namespace FoundationDB.Linq +{ + using FoundationDB.Async; + using FoundationDB.Client.Utils; + using JetBrains.Annotations; + using System; + using System.Collections.Generic; + using System.Threading; + using System.Threading.Tasks; + + public interface IFdbAsyncOrderedEnumerable : IFdbAsyncEnumerable + { + IFdbAsyncOrderedEnumerable ThenBy(Func keySelector, IComparer comparer = null); + IFdbAsyncOrderedEnumerable ThenByDescending(Func keySelector, IComparer comparer = null); + } + +} diff --git a/FoundationDB.Client/Linq/FdbAsyncFilter.cs b/FoundationDB.Client/Linq/Iterators/FdbAsyncFilterIterator.cs similarity index 79% rename from FoundationDB.Client/Linq/FdbAsyncFilter.cs rename to FoundationDB.Client/Linq/Iterators/FdbAsyncFilterIterator.cs index 73751368c..3debce980 100644 --- a/FoundationDB.Client/Linq/FdbAsyncFilter.cs +++ b/FoundationDB.Client/Linq/Iterators/FdbAsyncFilterIterator.cs @@ -35,7 +35,7 @@ namespace FoundationDB.Linq using System.Threading; using System.Threading.Tasks; - internal abstract class FdbAsyncFilter : FdbAsyncIterator + internal abstract class FdbAsyncFilterIterator : FdbAsyncIterator { /// Source sequence (when in iterable mode) protected IFdbAsyncEnumerable m_source; @@ -43,12 +43,23 @@ internal abstract class FdbAsyncFilter : FdbAsyncIteratorActive iterator on the source (when in iterator mode) protected IFdbAsyncEnumerator m_iterator; - protected FdbAsyncFilter([NotNull] IFdbAsyncEnumerable source) + protected 
FdbAsyncFilterIterator([NotNull] IFdbAsyncEnumerable source) { Contract.Requires(source != null); m_source = source; } + /// Start the inner iterator + protected virtual IFdbAsyncEnumerator StartInner() + { + // filtering changes the number of items, so that means that, even if the underlying caller wants one item, we may need to read more. + // => change all "Head" requests into "Iterator" to prevent any wrong optimizations by the underlying source (ex: using a too small batch size) + var mode = m_mode; + if (mode == FdbAsyncMode.Head) mode = FdbAsyncMode.Iterator; + + return m_source.GetEnumerator(mode); + } + protected override Task OnFirstAsync(CancellationToken ct) { // on the first call to MoveNext, we have to hook up with the source iterator @@ -56,12 +67,7 @@ protected override Task OnFirstAsync(CancellationToken ct) IFdbAsyncEnumerator iterator = null; try { - // filtering changes the number of items, so that means that, even if the underlying caller wants one item, we may need to read more. - // => change all "Head" requests into "Iterator" to prevent any wrong optimizations by the underlying source (ex: using a too small batch size) - var mode = m_mode; - if (mode == FdbAsyncMode.Head) mode = FdbAsyncMode.Iterator; - - iterator = m_source.GetEnumerator(mode); + iterator = StartInner(); return iterator != null ? 
TaskHelpers.TrueTask : TaskHelpers.FalseTask; } catch (Exception) diff --git a/FoundationDB.Client/Linq/FdbAsyncIterator.cs b/FoundationDB.Client/Linq/Iterators/FdbAsyncIterator.cs similarity index 90% rename from FoundationDB.Client/Linq/FdbAsyncIterator.cs rename to FoundationDB.Client/Linq/Iterators/FdbAsyncIterator.cs index 81117db00..d2a5359a5 100644 --- a/FoundationDB.Client/Linq/FdbAsyncIterator.cs +++ b/FoundationDB.Client/Linq/Iterators/FdbAsyncIterator.cs @@ -141,7 +141,7 @@ public virtual FdbAsyncIterator Where([NotNull] Func pre { if (predicate == null) throw new ArgumentNullException("predicate"); - return FdbAsyncEnumerable.Filter(this, predicate); + return FdbAsyncEnumerable.Filter(this, new AsyncFilterExpression(predicate)); } [NotNull] @@ -149,7 +149,7 @@ public virtual FdbAsyncIterator Where([NotNull] Func(this, asyncPredicate); + return FdbAsyncEnumerable.Filter(this, new AsyncFilterExpression(asyncPredicate)); } [NotNull] @@ -157,7 +157,7 @@ public virtual FdbAsyncIterator Select([NotNull] Func { if (selector == null) throw new ArgumentNullException("selector"); - return FdbAsyncEnumerable.Map(this, selector); + return FdbAsyncEnumerable.Map(this, new AsyncTransformExpression(selector)); } [NotNull] @@ -165,7 +165,7 @@ public virtual FdbAsyncIterator Select([NotNull] Func(this, asyncSelector); + return FdbAsyncEnumerable.Map(this, new AsyncTransformExpression(asyncSelector)); } [NotNull] @@ -173,7 +173,7 @@ public virtual FdbAsyncIterator SelectMany([NotNull] Func(this, selector); + return FdbAsyncEnumerable.Flatten(this, new AsyncTransformExpression>(selector)); } [NotNull] @@ -181,7 +181,7 @@ public virtual FdbAsyncIterator SelectMany([NotNull] Func(this, asyncSelector); + return FdbAsyncEnumerable.Flatten(this, new AsyncTransformExpression>(asyncSelector)); } [NotNull] @@ -190,7 +190,7 @@ public virtual FdbAsyncIterator SelectMany([NotNull] Fu if (collectionSelector == null) throw new ArgumentNullException("collectionSelector"); if 
(resultSelector == null) throw new ArgumentNullException("resultSelector"); - return FdbAsyncEnumerable.Flatten(this, collectionSelector, resultSelector); + return FdbAsyncEnumerable.Flatten(this, new AsyncTransformExpression>(collectionSelector), resultSelector); } [NotNull] @@ -199,7 +199,7 @@ public virtual FdbAsyncIterator SelectMany([NotNull] Fu if (asyncCollectionSelector == null) throw new ArgumentNullException("asyncCollectionSelector"); if (resultSelector == null) throw new ArgumentNullException("resultSelector"); - return FdbAsyncEnumerable.Flatten(this, asyncCollectionSelector, resultSelector); + return FdbAsyncEnumerable.Flatten(this, new AsyncTransformExpression>(asyncCollectionSelector), resultSelector); } [NotNull] @@ -220,6 +220,7 @@ public virtual FdbAsyncIterator Skip(int count) return FdbAsyncEnumerable.Offset(this, count); } + /// Execute an action on the result of this async sequence [NotNull] public virtual Task ExecuteAsync([NotNull] Action action, CancellationToken ct) { diff --git a/FoundationDB.Client/Linq/FdbAsyncIteratorPump.cs b/FoundationDB.Client/Linq/Iterators/FdbAsyncIteratorPump.cs similarity index 100% rename from FoundationDB.Client/Linq/FdbAsyncIteratorPump.cs rename to FoundationDB.Client/Linq/Iterators/FdbAsyncIteratorPump.cs diff --git a/FoundationDB.Client/Linq/FdbDistinctAsyncIterator.cs b/FoundationDB.Client/Linq/Iterators/FdbDistinctAsyncIterator.cs similarity index 99% rename from FoundationDB.Client/Linq/FdbDistinctAsyncIterator.cs rename to FoundationDB.Client/Linq/Iterators/FdbDistinctAsyncIterator.cs index 687ba1097..57678dba7 100644 --- a/FoundationDB.Client/Linq/FdbDistinctAsyncIterator.cs +++ b/FoundationDB.Client/Linq/Iterators/FdbDistinctAsyncIterator.cs @@ -37,7 +37,7 @@ namespace FoundationDB.Linq /// Filters duplicate items from an async sequence /// Type of elements of the async sequence - internal sealed class FdbDistinctAsyncIterator : FdbAsyncFilter + internal sealed class FdbDistinctAsyncIterator : 
FdbAsyncFilterIterator { private readonly IEqualityComparer m_comparer; diff --git a/FoundationDB.Client/Linq/Iterators/FdbObserverIterator.cs b/FoundationDB.Client/Linq/Iterators/FdbObserverIterator.cs new file mode 100644 index 000000000..ba5c4d6f0 --- /dev/null +++ b/FoundationDB.Client/Linq/Iterators/FdbObserverIterator.cs @@ -0,0 +1,86 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +namespace FoundationDB.Linq +{ + using FoundationDB.Async; + using FoundationDB.Client.Utils; + using JetBrains.Annotations; + using System; + using System.Threading; + using System.Threading.Tasks; + + + /// Obsere the items of an async sequence + /// Type of the observed elements + internal sealed class FdbObserverIterator : FdbAsyncFilterIterator + { + + private readonly AsyncObserverExpression m_observer; + + public FdbObserverIterator(IFdbAsyncEnumerable source, AsyncObserverExpression observer) + : base(source) + { + if (observer == null) throw new ArgumentNullException("observer"); + m_observer = observer; + } + + protected override FdbAsyncIterator Clone() + { + return new FdbObserverIterator(m_source, m_observer); + } + + protected override async Task OnNextAsync(CancellationToken cancellationToken) + { + while (!cancellationToken.IsCancellationRequested) + { + if (!await m_iterator.MoveNext(cancellationToken).ConfigureAwait(false)) + { // completed + return Completed(); + } + + if (cancellationToken.IsCancellationRequested) break; + + TSource current = m_iterator.Current; + if (!m_observer.Async) + { + m_observer.Invoke(current); + } + else + { + await m_observer.InvokeAsync(current, cancellationToken).ConfigureAwait(false); + } + + return Publish(current); + } + + return Canceled(cancellationToken); + } + } + +} diff --git a/FoundationDB.Client/Linq/FdbParallelSelectAsyncIterator.cs b/FoundationDB.Client/Linq/Iterators/FdbParallelSelectAsyncIterator.cs similarity index 99% rename from FoundationDB.Client/Linq/FdbParallelSelectAsyncIterator.cs rename to FoundationDB.Client/Linq/Iterators/FdbParallelSelectAsyncIterator.cs index 0391d41b5..7f7c6ed1b 100644 --- a/FoundationDB.Client/Linq/FdbParallelSelectAsyncIterator.cs +++ b/FoundationDB.Client/Linq/Iterators/FdbParallelSelectAsyncIterator.cs @@ -42,7 +42,7 @@ namespace FoundationDB.Linq /// [EXPERIMENTAL] Iterates over an async sequence of items, kick off an async task in parallel, and 
returning the results in order /// Type of elements of the inner async sequence /// Type of elements of the outer async sequence - internal sealed class FdbParallelSelectAsyncIterator : FdbAsyncFilter + internal sealed class FdbParallelSelectAsyncIterator : FdbAsyncFilterIterator { /// Default max concurrency when doing batch queries /// TODO: this is a placeholder value ! diff --git a/FoundationDB.Client/Linq/FdbSelectManyAsyncIterator.cs b/FoundationDB.Client/Linq/Iterators/FdbSelectManyAsyncIterator.cs similarity index 79% rename from FoundationDB.Client/Linq/FdbSelectManyAsyncIterator.cs rename to FoundationDB.Client/Linq/Iterators/FdbSelectManyAsyncIterator.cs index 4fa84e5d2..fbf8ef6ce 100644 --- a/FoundationDB.Client/Linq/FdbSelectManyAsyncIterator.cs +++ b/FoundationDB.Client/Linq/Iterators/FdbSelectManyAsyncIterator.cs @@ -38,25 +38,23 @@ namespace FoundationDB.Linq /// Iterates over an async sequence of items /// Type of elements of the inner async sequence /// Type of elements of the outer async sequence - internal sealed class FdbSelectManyAsyncIterator : FdbAsyncFilter + internal sealed class FdbSelectManyAsyncIterator : FdbAsyncFilterIterator { - private readonly Func> m_selector; - private readonly Func>> m_asyncSelector; + private readonly AsyncTransformExpression> m_selector; private IEnumerator m_batch; - public FdbSelectManyAsyncIterator([NotNull] IFdbAsyncEnumerable source, Func> selector, Func>> asyncSelector) + public FdbSelectManyAsyncIterator([NotNull] IFdbAsyncEnumerable source, AsyncTransformExpression> selector) : base(source) { // Must have at least one, but not both - Contract.Requires(selector != null ^ asyncSelector != null); + Contract.Requires(selector != null); m_selector = selector; - m_asyncSelector = asyncSelector; } protected override FdbAsyncIterator Clone() { - return new FdbSelectManyAsyncIterator(m_source, m_selector, m_asyncSelector); + return new FdbSelectManyAsyncIterator(m_source, m_selector); } protected override async 
Task OnNextAsync(CancellationToken cancellationToken) @@ -78,13 +76,13 @@ protected override async Task OnNextAsync(CancellationToken cancellationTo if (cancellationToken.IsCancellationRequested) break; IEnumerable sequence; - if (m_selector != null) + if (!m_selector.Async) { - sequence = m_selector(m_iterator.Current); + sequence = m_selector.Invoke(m_iterator.Current); } else { - sequence = await m_asyncSelector(m_iterator.Current, cancellationToken).ConfigureAwait(false); + sequence = await m_selector.InvokeAsync(m_iterator.Current, cancellationToken).ConfigureAwait(false); } if (sequence == null) throw new InvalidOperationException("The inner sequence returned a null collection"); @@ -126,34 +124,29 @@ protected override void Cleanup() /// Type of elements of the inner async sequence /// Type of the elements of the sequences produced from each elements /// Type of elements of the outer async sequence - internal sealed class FdbSelectManyAsyncIterator : FdbAsyncFilter + internal sealed class FdbSelectManyAsyncIterator : FdbAsyncFilterIterator { - private readonly Func> m_collectionSelector; - private readonly Func>> m_asyncCollectionSelector; + private readonly AsyncTransformExpression> m_collectionSelector; private readonly Func m_resultSelector; private TSource m_sourceCurrent; private IEnumerator m_batch; public FdbSelectManyAsyncIterator( [NotNull] IFdbAsyncEnumerable source, - Func> collectionSelector, - Func>> asyncCollectionSelector, + AsyncTransformExpression> collectionSelector, [NotNull] Func resultSelector ) : base(source) { - // must have at least one but not both - Contract.Requires(collectionSelector != null ^ asyncCollectionSelector != null); - Contract.Requires(resultSelector != null); + Contract.Requires(collectionSelector != null && resultSelector != null); m_collectionSelector = collectionSelector; - m_asyncCollectionSelector = asyncCollectionSelector; m_resultSelector = resultSelector; } protected override FdbAsyncIterator Clone() { - return 
new FdbSelectManyAsyncIterator(m_source, m_collectionSelector, m_asyncCollectionSelector, m_resultSelector); + return new FdbSelectManyAsyncIterator(m_source, m_collectionSelector, m_resultSelector); } protected override async Task OnNextAsync(CancellationToken cancellationToken) @@ -178,13 +171,13 @@ protected override async Task OnNextAsync(CancellationToken cancellationTo IEnumerable sequence; - if (m_collectionSelector != null) + if (!m_collectionSelector.Async) { - sequence = m_collectionSelector(m_sourceCurrent); + sequence = m_collectionSelector.Invoke(m_sourceCurrent); } else { - sequence = await m_asyncCollectionSelector(m_sourceCurrent, cancellationToken).ConfigureAwait(false); + sequence = await m_collectionSelector.InvokeAsync(m_sourceCurrent, cancellationToken).ConfigureAwait(false); } if (sequence == null) throw new InvalidOperationException("The inner sequence returned a null collection"); diff --git a/FoundationDB.Client/Linq/FdbTakeWhileAsyncIterator.cs b/FoundationDB.Client/Linq/Iterators/FdbTakeWhileAsyncIterator.cs similarity index 98% rename from FoundationDB.Client/Linq/FdbTakeWhileAsyncIterator.cs rename to FoundationDB.Client/Linq/Iterators/FdbTakeWhileAsyncIterator.cs index fb8531380..efb9dd840 100644 --- a/FoundationDB.Client/Linq/FdbTakeWhileAsyncIterator.cs +++ b/FoundationDB.Client/Linq/Iterators/FdbTakeWhileAsyncIterator.cs @@ -36,7 +36,7 @@ namespace FoundationDB.Linq /// Reads an async sequence of items until a condition becomes false /// Type of elements of the async sequence - internal sealed class FdbTakeWhileAsyncIterator : FdbAsyncFilter + internal sealed class FdbTakeWhileAsyncIterator : FdbAsyncFilterIterator { private readonly Func m_condition; //TODO: also accept a Func> ? 
diff --git a/FoundationDB.Client/Linq/FdbWhereAsyncIterator.cs b/FoundationDB.Client/Linq/Iterators/FdbWhereAsyncIterator.cs similarity index 70% rename from FoundationDB.Client/Linq/FdbWhereAsyncIterator.cs rename to FoundationDB.Client/Linq/Iterators/FdbWhereAsyncIterator.cs index 1310b0b0f..acafcb712 100644 --- a/FoundationDB.Client/Linq/FdbWhereAsyncIterator.cs +++ b/FoundationDB.Client/Linq/Iterators/FdbWhereAsyncIterator.cs @@ -37,28 +37,25 @@ namespace FoundationDB.Linq /// Filters an async sequence of items /// Type of elements of the async sequence - internal sealed class FdbWhereAsyncIterator : FdbAsyncFilter + internal sealed class FdbWhereAsyncIterator : FdbAsyncFilterIterator { - private readonly Func m_filter; - private readonly Func> m_asyncFilter; + private readonly AsyncFilterExpression m_filter; - public FdbWhereAsyncIterator([NotNull] IFdbAsyncEnumerable source, Func filter, Func> asyncFilter) + public FdbWhereAsyncIterator([NotNull] IFdbAsyncEnumerable source, AsyncFilterExpression filter) : base(source) { - Contract.Requires(filter != null ^ asyncFilter != null, "there can be only one kind of filter specified"); + Contract.Requires(filter != null, "there can be only one kind of filter specified"); m_filter = filter; - m_asyncFilter = asyncFilter; } protected override FdbAsyncIterator Clone() { - return new FdbWhereAsyncIterator(m_source, m_filter, m_asyncFilter); + return new FdbWhereAsyncIterator(m_source, m_filter); } protected override async Task OnNextAsync(CancellationToken cancellationToken) { - while (!cancellationToken.IsCancellationRequested) { if (!await m_iterator.MoveNext(cancellationToken).ConfigureAwait(false)) @@ -69,16 +66,16 @@ protected override async Task OnNextAsync(CancellationToken cancellationTo if (cancellationToken.IsCancellationRequested) break; TSource current = m_iterator.Current; - if (m_filter != null) + if (!m_filter.Async) { - if (!m_filter(current)) + if (!m_filter.Invoke(current)) { continue; } } else { - if 
(!await m_asyncFilter(current, cancellationToken).ConfigureAwait(false)) + if (!await m_filter.InvokeAsync(current, cancellationToken).ConfigureAwait(false)) { continue; } @@ -92,38 +89,18 @@ protected override async Task OnNextAsync(CancellationToken cancellationTo public override FdbAsyncIterator Where(Func predicate) { - if (m_asyncFilter != null) - { - return FdbAsyncEnumerable.Filter( - m_source, - async (x, ct) => (await m_asyncFilter(x, ct).ConfigureAwait(false)) && predicate(x) - ); - } - else - { - return FdbAsyncEnumerable.Filter( - m_source, - (x) => m_filter(x) && predicate(x) - ); - } + return FdbAsyncEnumerable.Filter( + m_source, + m_filter.AndAlso(new AsyncFilterExpression(predicate)) + ); } public override FdbAsyncIterator Where(Func> asyncPredicate) { - if (m_asyncFilter != null) - { - return FdbAsyncEnumerable.Filter( - m_source, - async (x, ct) => (await m_asyncFilter(x, ct).ConfigureAwait(false)) && (await asyncPredicate(x, ct).ConfigureAwait(false)) - ); - } - else - { - return FdbAsyncEnumerable.Filter( - m_source, - async (x, ct) => m_filter(x) && (await asyncPredicate(x, ct).ConfigureAwait(false)) - ); - } + return FdbAsyncEnumerable.Filter( + m_source, + m_filter.AndAlso(new AsyncFilterExpression(asyncPredicate)) + ); } public override FdbAsyncIterator Select(Func selector) @@ -131,9 +108,7 @@ public override FdbAsyncIterator Select(Func selector return new FdbWhereSelectAsyncIterator( m_source, m_filter, - m_asyncFilter, - transform: selector, - asyncTransform: null, + new AsyncTransformExpression(selector), limit: null, offset: null ); @@ -144,9 +119,7 @@ public override FdbAsyncIterator Select(Func( m_source, m_filter, - m_asyncFilter, - transform: null, - asyncTransform: asyncSelector, + new AsyncTransformExpression(asyncSelector), limit: null, offset: null ); @@ -159,9 +132,7 @@ public override FdbAsyncIterator Take(int limit) return new FdbWhereSelectAsyncIterator( m_source, m_filter, - m_asyncFilter, - transform: 
TaskHelpers.Cache.Identity, - asyncTransform: null, + new AsyncTransformExpression(TaskHelpers.Cache.Identity), limit: limit, offset: null ); @@ -173,17 +144,14 @@ public override async Task ExecuteAsync(Action handler, CancellationTok if (ct.IsCancellationRequested) ct.ThrowIfCancellationRequested(); - var mode = m_mode; - if (mode == FdbAsyncMode.Head) mode = FdbAsyncMode.Iterator; - - using (var iter = m_source.GetEnumerator(mode)) + using (var iter = StartInner()) { - if (m_filter != null) + if (!m_filter.Async) { while (!ct.IsCancellationRequested && (await iter.MoveNext(ct).ConfigureAwait(false))) { var current = iter.Current; - if (m_filter(current)) + if (m_filter.Invoke(current)) { handler(current); } @@ -194,7 +162,7 @@ public override async Task ExecuteAsync(Action handler, CancellationTok while (!ct.IsCancellationRequested && (await iter.MoveNext(ct).ConfigureAwait(false))) { var current = iter.Current; - if (await m_asyncFilter(current, ct).ConfigureAwait(false)) + if (await m_filter.InvokeAsync(current, ct).ConfigureAwait(false)) { handler(current); } @@ -202,8 +170,7 @@ public override async Task ExecuteAsync(Action handler, CancellationTok } } - if (ct.IsCancellationRequested) ct.ThrowIfCancellationRequested(); - + ct.ThrowIfCancellationRequested(); } public override async Task ExecuteAsync(Func asyncHandler, CancellationToken ct) @@ -212,17 +179,14 @@ public override async Task ExecuteAsync(Func a if (ct.IsCancellationRequested) ct.ThrowIfCancellationRequested(); - var mode = m_mode; - if (mode == FdbAsyncMode.Head) mode = FdbAsyncMode.Iterator; - - using (var iter = m_source.GetEnumerator(mode)) + using (var iter = StartInner()) { - if (m_filter != null) + if (!m_filter.Async) { while (!ct.IsCancellationRequested && (await iter.MoveNext(ct).ConfigureAwait(false))) { var current = iter.Current; - if (m_filter(current)) + if (m_filter.Invoke(current)) { await asyncHandler(current, ct).ConfigureAwait(false); } @@ -233,7 +197,7 @@ public override 
async Task ExecuteAsync(Func a while (!ct.IsCancellationRequested && (await iter.MoveNext(ct).ConfigureAwait(false))) { var current = iter.Current; - if (await m_asyncFilter(current, ct).ConfigureAwait(false)) + if (await m_filter.InvokeAsync(current, ct).ConfigureAwait(false)) { await asyncHandler(current, ct).ConfigureAwait(false); } diff --git a/FoundationDB.Client/Linq/FdbWhereSelectAsyncIterator.cs b/FoundationDB.Client/Linq/Iterators/FdbWhereSelectAsyncIterator.cs similarity index 53% rename from FoundationDB.Client/Linq/FdbWhereSelectAsyncIterator.cs rename to FoundationDB.Client/Linq/Iterators/FdbWhereSelectAsyncIterator.cs index 2fb523970..403aaebc8 100644 --- a/FoundationDB.Client/Linq/FdbWhereSelectAsyncIterator.cs +++ b/FoundationDB.Client/Linq/Iterators/FdbWhereSelectAsyncIterator.cs @@ -28,6 +28,7 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY namespace FoundationDB.Linq { + using FoundationDB.Async; using FoundationDB.Client.Utils; using JetBrains.Annotations; using System; @@ -38,13 +39,10 @@ namespace FoundationDB.Linq /// Iterates over an async sequence of items /// Type of elements of the inner async sequence /// Type of elements of the outer async sequence - internal sealed class FdbWhereSelectAsyncIterator : FdbAsyncFilter + internal sealed class FdbWhereSelectAsyncIterator : FdbAsyncFilterIterator { - private readonly Func m_filter; - private readonly Func> m_asyncFilter; - - private readonly Func m_transform; - private readonly Func> m_asyncTransform; + private readonly AsyncFilterExpression m_filter; + private readonly AsyncTransformExpression m_transform; //note: both limit and offset are applied AFTER filtering! private readonly int? 
m_limit; @@ -55,31 +53,25 @@ internal sealed class FdbWhereSelectAsyncIterator : FdbAsyncFi public FdbWhereSelectAsyncIterator( [NotNull] IFdbAsyncEnumerable source, - Func filter, - Func> asyncFilter, - Func transform, - Func> asyncTransform, + AsyncFilterExpression filter, + AsyncTransformExpression transform, int? limit, int? offset ) : base(source) { - Contract.Requires(transform != null ^ asyncTransform != null); // at least one but not both - Contract.Requires(filter == null || asyncFilter == null); // can have none, but not both - Contract.Requires(limit == null || limit.Value >= 0); - Contract.Requires(offset == null || offset.Value >= 0); + Contract.Requires(transform != null); // must do at least something + Contract.Requires((limit ?? 0) >= 0 && (offset ?? 0) >= 0); // bounds cannot be negative m_filter = filter; - m_asyncFilter = asyncFilter; m_transform = transform; - m_asyncTransform = asyncTransform; m_limit = limit; m_offset = offset; } protected override FdbAsyncIterator Clone() { - return new FdbWhereSelectAsyncIterator(m_source, m_filter, m_asyncFilter, m_transform, m_asyncTransform, m_limit, m_offset); + return new FdbWhereSelectAsyncIterator(m_source, m_filter, m_transform, m_limit, m_offset); } protected override Task OnFirstAsync(CancellationToken ct) @@ -109,11 +101,14 @@ protected override async Task OnNextAsync(CancellationToken cancellationTo TSource current = m_iterator.Current; if (m_filter != null) { - if (!m_filter(current)) continue; - } - else if (m_asyncFilter != null) - { - if (!await m_asyncFilter(current, cancellationToken).ConfigureAwait(false)) continue; + if (!m_filter.Async) + { + if (!m_filter.Invoke(current)) continue; + } + else + { + if (!await m_filter.InvokeAsync(current, cancellationToken).ConfigureAwait(false)) continue; + } } #endregion @@ -136,13 +131,13 @@ protected override async Task OnNextAsync(CancellationToken cancellationTo #region Transforming... 
TResult result; - if (m_transform != null) + if (!m_transform.Async) { - result = m_transform(current); + result = m_transform.Invoke(current); } else { - result = await m_asyncTransform(current, cancellationToken).ConfigureAwait(false); + result = await m_transform.InvokeAsync(current, cancellationToken).ConfigureAwait(false); } #endregion @@ -155,7 +150,7 @@ protected override async Task OnNextAsync(CancellationToken cancellationTo } return Publish(result); - + #endregion } @@ -166,84 +161,38 @@ public override FdbAsyncIterator Select(Func selector { if (selector == null) throw new ArgumentNullException("selector"); - if (m_transform != null) - { - return new FdbWhereSelectAsyncIterator( - m_source, - m_filter, - m_asyncFilter, - (x) => selector(m_transform(x)), - null, - m_limit, - m_offset - ); - } - else - { - return new FdbWhereSelectAsyncIterator( - m_source, - m_filter, - m_asyncFilter, - null, - async (x, ct) => selector(await m_asyncTransform(x, ct).ConfigureAwait(false)), - m_limit, - m_offset - ); - } + return new FdbWhereSelectAsyncIterator( + m_source, + m_filter, + m_transform.Then(new AsyncTransformExpression(selector)), + m_limit, + m_offset + ); } public override FdbAsyncIterator Select(Func> asyncSelector) { if (asyncSelector == null) throw new ArgumentNullException("asyncSelector"); - if (m_transform != null) - { - return new FdbWhereSelectAsyncIterator( - m_source, - m_filter, - m_asyncFilter, - null, - (x, ct) => asyncSelector(m_transform(x), ct), - m_limit, - m_offset - ); - } - else - { - return new FdbWhereSelectAsyncIterator( - m_source, - m_filter, - m_asyncFilter, - null, - async (x, ct) => await asyncSelector(await m_asyncTransform(x, ct).ConfigureAwait(false), ct).ConfigureAwait(false), - m_limit, - m_offset - ); - } + return new FdbWhereSelectAsyncIterator( + m_source, + m_filter, + m_transform.Then(new AsyncTransformExpression(asyncSelector)), + m_limit, + m_offset + ); } public override FdbAsyncIterator SelectMany(Func> selector) { 
if (selector == null) throw new ArgumentNullException("selector"); - if (m_filter == null && m_asyncFilter == null && m_limit == null && m_offset == null) + if (m_filter == null && m_limit == null && m_offset == null) { - if (m_transform != null) - { - return new FdbSelectManyAsyncIterator( - m_source, - selector: (x) => selector(m_transform(x)), - asyncSelector: null - ); - } - else - { - return new FdbSelectManyAsyncIterator( - m_source, - selector: null, - asyncSelector: async (x, ct) => selector(await m_asyncTransform(x, ct).ConfigureAwait(false)) - ); - } + return new FdbSelectManyAsyncIterator( + m_source, + m_transform.Then(new AsyncTransformExpression>(selector)) + ); } // other cases are too complex :( @@ -254,24 +203,12 @@ public override FdbAsyncIterator SelectMany(Func( - m_source, - selector: null, - asyncSelector: (x, ct) => asyncSelector(m_transform(x), ct) - ); - } - else - { - return new FdbSelectManyAsyncIterator( - m_source, - selector: null, - asyncSelector: async (x, ct) => await asyncSelector(await m_asyncTransform(x, ct).ConfigureAwait(false), ct).ConfigureAwait(false) - ); - } + return new FdbSelectManyAsyncIterator( + m_source, + m_transform.Then(new AsyncTransformExpression>(asyncSelector)) + ); } // other cases are too complex :( @@ -291,9 +228,7 @@ public override FdbAsyncIterator Take(int limit) return new FdbWhereSelectAsyncIterator( m_source, m_filter, - m_asyncFilter, m_transform, - m_asyncTransform, limit, m_offset ); @@ -310,9 +245,7 @@ public override FdbAsyncIterator Skip(int offset) return new FdbWhereSelectAsyncIterator( m_source, m_filter, - m_asyncFilter, m_transform, - m_asyncTransform, m_limit, offset ); @@ -324,19 +257,22 @@ public override FdbAsyncIterator Where(Func predicate) // note: the only possible optimization here is if TSource == TResult, then we can combine both predicates // remember: limit/offset are applied AFTER the filtering, so can only combine if they are null - if (m_asyncFilter == null && m_limit == 
null && (m_offset == null || m_offset.Value == 0) && typeof(TSource) == typeof(TResult)) + // also, since transform is done after filtering, we can only optimize if transform is null (not allowed!) or the identity function + if (m_limit == null + && (m_offset == null || m_offset.Value == 0) + && typeof(TSource) == typeof(TResult) //BUGBUG: type comparison maybe should check derived classes also ? + && m_transform.IsIdentity()) { - var func = (Func)(Delegate)predicate; - var filter = m_filter == null - ? func - : (x) => m_filter(x) && func(x); + var filter = new AsyncFilterExpression((Func)(Delegate)predicate); + if (m_filter != null) filter = m_filter.AndAlso(filter); + + //BUGBUG: if the query already has a select, it should be evaluated BEFORE the new filter, + // but currently FdbWhereSelectAsyncIterator<> filters before transformations ! return new FdbWhereSelectAsyncIterator( m_source, filter, - null, m_transform, - m_asyncTransform, m_limit, m_offset ); @@ -351,19 +287,19 @@ public override FdbAsyncIterator Where(Func>)(Delegate)asyncPredicate; - var asyncFilter = m_asyncFilter == null - ? func - : async (x, ct) => await m_asyncFilter(x, ct) && await func(x, ct); + var asyncFilter = new AsyncFilterExpression((Func>)(Delegate)asyncPredicate); + if (m_filter != null) asyncFilter = m_filter.AndAlso(asyncFilter); + + //BUGBUG: if the query already has a select, it should be evaluated BEFORE the new filter, + // but currently FdbWhereSelectAsyncIterator<> filters before transformations ! return new FdbWhereSelectAsyncIterator( m_source, - null, asyncFilter, m_transform, - m_asyncTransform, m_limit, m_offset ); @@ -373,6 +309,143 @@ public override FdbAsyncIterator Where(Func action, CancellationToken ct) + { + if (action == null) throw new ArgumentNullException("action"); + + int? remaining = m_limit; + int? 
skipped = m_offset; + + using (var iterator = StartInner()) + { + while (remaining == null || remaining.Value > 0) + { + if (!await iterator.MoveNext(ct).ConfigureAwait(false)) + { // completed + break; + } + + // Filter... + + TSource current = iterator.Current; + if (m_filter != null) + { + if (!m_filter.Async) + { + if (!m_filter.Invoke(current)) continue; + } + else + { + if (!await m_filter.InvokeAsync(current, ct).ConfigureAwait(false)) continue; + } + } + + // Skip... + + if (skipped != null) + { + if (skipped.Value > 0) + { // skip this result + skipped = skipped.Value - 1; + continue; + } + // we can now start outputing results... + skipped = null; + } + + // Transform... + + TResult result; + if (!m_transform.Async) + { + result = m_transform.Invoke(current); + } + else + { + result = await m_transform.InvokeAsync(current, ct).ConfigureAwait(false); + } + + // Publish... + + if (remaining != null) + { // decrement remaining quota + remaining = remaining.Value - 1; + } + action(result); + } + + ct.ThrowIfCancellationRequested(); + } + } + + public override async Task ExecuteAsync(Func asyncAction, CancellationToken ct) + { + if (asyncAction == null) throw new ArgumentNullException("asyncAction"); + + int? remaining = m_limit; + int? skipped = m_offset; + + using (var iterator = StartInner()) + { + while (remaining == null || remaining.Value > 0) + { + if (!await iterator.MoveNext(ct).ConfigureAwait(false)) + { // completed + break; + } + + // Filter... + + TSource current = iterator.Current; + if (m_filter != null) + { + if (!m_filter.Async) + { + if (!m_filter.Invoke(current)) continue; + } + else + { + if (!await m_filter.InvokeAsync(current, ct).ConfigureAwait(false)) continue; + } + } + + // Skip... + + if (skipped != null) + { + if (skipped.Value > 0) + { // skip this result + skipped = skipped.Value - 1; + continue; + } + // we can now start outputing results... + skipped = null; + } + + // Transform... 
+ + TResult result; + if (!m_transform.Async) + { + result = m_transform.Invoke(current); + } + else + { + result = await m_transform.InvokeAsync(current, ct).ConfigureAwait(false); + } + + // Publish... + + if (remaining != null) + { // decrement remaining quota + remaining = remaining.Value - 1; + } + await asyncAction(result, ct).ConfigureAwait(false); + } + + ct.ThrowIfCancellationRequested(); + } + } } -} +} \ No newline at end of file diff --git a/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs b/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs index d23e988a4..22c10bfdd 100644 --- a/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs +++ b/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs @@ -1102,7 +1102,7 @@ public async Task Test_AsyncLinq_vs_LinqToObject() // note: we will also create a third LINQ query using lambda expressions, just to be able to have a nicer ToString() in case of errors int[] SourceOfInts = new int[] { 1, 7, 42, -456, 123, int.MaxValue, -1, 1023, 0, short.MinValue, 5, 13, -273, 2013, 4534, -999 }; - + const int N = 1000; var rnd = new Random(); // new Random(1234) @@ -1282,10 +1282,44 @@ await VerifyResult( ); } - } + [Test] + public async Task Test_Record_Items() + { + + var items = Enumerable.Range(0, 10); + var source = items.ToAsyncEnumerable(); + + var before = new List(); + var after = new List(); + + var a = source.Observe((x) => before.Add(x)); + var b = a.Where((x) => x % 2 == 1); + var c = b.Observe((x) => after.Add(x)); + var d = c.Select((x) => x + 1); + + var query = source + .Observe((x) => before.Add(x)) + .Where((x) => x % 2 == 1) + .Observe((x) => after.Add(x)) + .Select((x) => x + 1); + + Console.WriteLine("query: " + query); + + var results = await query.ToListAsync(); + + Console.WriteLine("input : " + String.Join(", ", items)); + Console.WriteLine("before: " + String.Join(", ", before)); + Console.WriteLine("after : " + String.Join(", ", after)); + Console.WriteLine("output: " + String.Join(", ", results)); + 
+ Assert.That(before, Is.EqualTo(Enumerable.Range(0, 10).ToList())); + Assert.That(after, Is.EqualTo(Enumerable.Range(0, 10).Where(x => x % 2 == 1).ToList())); + Assert.That(results, Is.EqualTo(Enumerable.Range(1, 5).Select(x => x * 2).ToList())); + + } } } From 8e18a5ac719c0cd506df486f8e994adbf24076c8 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Wed, 26 Nov 2014 09:57:59 +0100 Subject: [PATCH 17/63] Fixed crash in FDBShell command 'count' if directory path not found --- FdbShell/Commands/BasicCommands.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/FdbShell/Commands/BasicCommands.cs b/FdbShell/Commands/BasicCommands.cs index 5a4813d6f..8d2f83c5a 100644 --- a/FdbShell/Commands/BasicCommands.cs +++ b/FdbShell/Commands/BasicCommands.cs @@ -212,7 +212,7 @@ public static async Task Count(string[] path, IFdbTuple extras, IFdbDatabase db, var folder = (await TryOpenCurrentDirectoryAsync(path, db, ct)) as FdbDirectorySubspace; if (folder == null) { - log.WriteLine("# Directory {0} does not exist", path); + log.WriteLine("# Directory {0} does not exist", String.Join("/", path)); return; } From 0e2165637dd69438b199a102babb7b3732bf7389 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Fri, 28 Nov 2014 14:03:14 +0100 Subject: [PATCH 18/63] AsyncLinq: better callstack - try to preserve the callstack if a lambda in a Select(..) or Where(..) 
throws --- .../Linq/Iterators/FdbAsyncIterator.cs | 23 ++++++------------- .../FdbParallelSelectAsyncIterator.cs | 6 ++++- 2 files changed, 12 insertions(+), 17 deletions(-) diff --git a/FoundationDB.Client/Linq/Iterators/FdbAsyncIterator.cs b/FoundationDB.Client/Linq/Iterators/FdbAsyncIterator.cs index d2a5359a5..f75849ae4 100644 --- a/FoundationDB.Client/Linq/Iterators/FdbAsyncIterator.cs +++ b/FoundationDB.Client/Linq/Iterators/FdbAsyncIterator.cs @@ -126,9 +126,10 @@ public async Task MoveNext(CancellationToken ct) return await OnNextAsync(ct).ConfigureAwait(false); } - catch (Exception e) + catch (Exception) { - return Failed(e); + MarkAsFailed(); + throw; } } @@ -264,26 +265,16 @@ protected bool Completed() return false; } - [ContractAnnotation("=> halt")] - protected bool Failed([NotNull] Exception e) + /// Mark the current iterator as failed, and clean up the state + protected void MarkAsFailed() { + //TODO: store the state "failed" somewhere? this.Dispose(); - //return false; - throw e; } -#if !NET_4_0 - [ContractAnnotation("=> halt")] - protected bool Failed([NotNull] ExceptionDispatchInfo e) - { - this.Dispose(); - e.Throw(); - return false; - } -#endif - protected bool Canceled(CancellationToken cancellationToken) { + //TODO: store the state "canceled" somewhere? 
this.Dispose(); cancellationToken.ThrowIfCancellationRequested(); return false; diff --git a/FoundationDB.Client/Linq/Iterators/FdbParallelSelectAsyncIterator.cs b/FoundationDB.Client/Linq/Iterators/FdbParallelSelectAsyncIterator.cs index 7f7c6ed1b..a4e326b52 100644 --- a/FoundationDB.Client/Linq/Iterators/FdbParallelSelectAsyncIterator.cs +++ b/FoundationDB.Client/Linq/Iterators/FdbParallelSelectAsyncIterator.cs @@ -133,7 +133,11 @@ protected override async Task OnNextAsync(CancellationToken cancellationTo if (next.HasFailed) { LogDebug("[OnNextAsync] received failure"); - return Failed(next.Error); + // we want to make sure that the exception callstack is as clean as possible, + // so we rely on Maybe.ThrowIfFailed() to do the correct thing! + MarkAsFailed(); + next.ThrowIfFailed(); + return false; } else { From aa3a3996874bd39f65c5b2011874db377e9198fd Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Fri, 28 Nov 2014 17:12:58 +0100 Subject: [PATCH 19/63] Renamed FdbEncoderSubspace<...>.EncodeKeyRange(...) to EncodeKeys(...) 
--- .../Encoders/FdbEncoderSubspace`1.cs | 19 +++++++++---------- .../Documents/FdbDocumentCollection.cs | 2 +- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs index 09c835aae..90347facb 100644 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs +++ b/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs @@ -75,12 +75,12 @@ public Task GetAsync([NotNull] IFdbReadOnlyTransaction trans, T key) public Task GetValuesAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] T[] keys) { - return trans.GetValuesAsync(EncodeKeyRange(keys)); + return trans.GetValuesAsync(EncodeKeys(keys)); } public Task GetValuesAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IEnumerable keys) { - return trans.GetValuesAsync(EncodeKeyRange(keys)); + return trans.GetValuesAsync(EncodeKeys(keys)); } #endregion @@ -93,19 +93,19 @@ public Slice EncodeKey(T key) } [NotNull] - public Slice[] EncodeKeyRange([NotNull] IEnumerable keys) + public Slice[] EncodeKeys([NotNull] IEnumerable keys) { return ConcatKeys(m_encoder.EncodeRange(keys)); } [NotNull] - public Slice[] EncodeKeyRange([NotNull] params T[] keys) + public Slice[] EncodeKeys([NotNull] params T[] keys) { return ConcatKeys(m_encoder.EncodeRange(keys)); } [NotNull] - public Slice[] EncodeKeyRange([NotNull] TElement[] elements, Func selector) + public Slice[] EncodeKeys([NotNull] TElement[] elements, Func selector) { return ConcatKeys(m_encoder.EncodeRange(elements, selector)); } @@ -116,13 +116,13 @@ public T DecodeKey(Slice encoded) } [NotNull] - public T[] DecodeKeyRange([NotNull] IEnumerable encoded) + public T[] DecodeKeys([NotNull] IEnumerable encoded) { return m_encoder.DecodeRange(ExtractKeys(encoded, boundCheck: true)); } [NotNull] - public T[] DecodeKeyRange([NotNull] params Slice[] encoded) + public T[] DecodeKeys([NotNull] params Slice[] encoded) { return 
m_encoder.DecodeRange(ExtractKeys(encoded, boundCheck: true)); } @@ -135,7 +135,7 @@ public virtual FdbKeyRange ToRange(T key) [NotNull] public FdbKeyRange[] ToRange([NotNull] T[] keys) { - var packed = EncodeKeyRange(keys); + var packed = EncodeKeys(keys); var ranges = new FdbKeyRange[keys.Length]; for (int i = 0; i < ranges.Length; i++) @@ -147,7 +147,6 @@ public FdbKeyRange[] ToRange([NotNull] T[] keys) #endregion - } - + } diff --git a/FoundationDB.Layers.Experimental/Documents/FdbDocumentCollection.cs b/FoundationDB.Layers.Experimental/Documents/FdbDocumentCollection.cs index b3d085891..b2810bc5b 100644 --- a/FoundationDB.Layers.Experimental/Documents/FdbDocumentCollection.cs +++ b/FoundationDB.Layers.Experimental/Documents/FdbDocumentCollection.cs @@ -184,7 +184,7 @@ public void DeleteMultiple(IFdbTransaction trans, IEnumerable ids) if (trans == null) throw new ArgumentNullException("trans"); if (ids == null) throw new ArgumentNullException("ids"); - foreach (var key in this.Location.Partial.EncodeKeyRange(ids)) + foreach (var key in this.Location.Partial.EncodeKeys(ids)) { trans.ClearRange(FdbKeyRange.StartsWith(key)); } From 1ae59961b75edb1a05e6802a4052591f3b0c00d7 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Sat, 29 Nov 2014 15:14:43 +0100 Subject: [PATCH 20/63] Tupels: added an API for custom decoders to parse packed tuple segments - Added enum FdbTupleSegmentType - Made all the FdbTupleParser.ParseXYZ public - Added FdbTupleParser.VisitNext(...) helper to decode one segment at a time - Added FdbTupleParser.Skip(...) to skip N first tuple segments (ie: decoder wants to decode only the 3rd segment...) 
--- .../Layers/Tuples/FdbTupleParser.cs | 66 +++++++++++++++---- .../Layers/Tuples/FdbTupleTypes.cs | 42 ++++++++++++ 2 files changed, 94 insertions(+), 14 deletions(-) diff --git a/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs index ba976354f..662a0f829 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs @@ -674,7 +674,9 @@ public static void EndTuple(ref TupleWriter writer) #region Deserialization... - internal static long ParseInt64(int type, Slice slice) + /// Parse a tuple segment containing a signed 64-bit integer + /// This method should only be used by custom decoders. + public static long ParseInt64(int type, Slice slice) { int bytes = type - FdbTupleTypes.IntBase; if (bytes == 0) return 0L; @@ -749,9 +751,10 @@ internal static ArraySegment UnescapeByteStringSlow(byte[] buffer, int off return new ArraySegment(tmp, 0, i); } - internal static Slice ParseBytes(Slice slice) + /// Parse a tuple segment containing a byte array + public static Slice ParseBytes(Slice slice) { - Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.Bytes && slice[slice.Count - 1] == 0); + Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.Bytes && slice[-1] == 0); if (slice.Count <= 2) return Slice.Empty; var decoded = UnescapeByteString(slice.Array, slice.Offset + 1, slice.Count - 2); @@ -759,9 +762,10 @@ internal static Slice ParseBytes(Slice slice) return new Slice(decoded.Array, decoded.Offset, decoded.Count); } - internal static string ParseAscii(Slice slice) + /// Parse a tuple segment containing an ASCII string stored as a byte array + public static string ParseAscii(Slice slice) { - Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.Bytes && slice[slice.Count - 1] == 0); + Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.Bytes && slice[-1] == 0); if (slice.Count <= 2) return String.Empty; @@ -770,9 
+774,10 @@ internal static string ParseAscii(Slice slice) return Encoding.Default.GetString(decoded.Array, decoded.Offset, decoded.Count); } - internal static string ParseUnicode(Slice slice) + /// Parse a tuple segment containing a unicode string + public static string ParseUnicode(Slice slice) { - Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.Utf8); + Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.Utf8 && slice[-1] == 0); if (slice.Count <= 2) return String.Empty; //TODO: check args @@ -780,15 +785,17 @@ internal static string ParseUnicode(Slice slice) return Encoding.UTF8.GetString(decoded.Array, decoded.Offset, decoded.Count); } - internal static IFdbTuple ParseTuple(Slice slice) + /// Parse a tuple segment containing an embedded tuple + public static IFdbTuple ParseTuple(Slice slice) { - Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.TupleStart); + Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.TupleStart && slice[-1] == 0); if (slice.Count <= 2) return FdbTuple.Empty; return FdbTuplePackers.Unpack(slice.Substring(1, slice.Count - 2), true); } - internal static float ParseSingle(Slice slice) + /// Parse a tuple segment containing a single precision number (float32) + public static float ParseSingle(Slice slice) { Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.Single); @@ -817,7 +824,8 @@ internal static float ParseSingle(Slice slice) return value; } - internal static double ParseDouble(Slice slice) + /// Parse a tuple segment containing a double precision number (float64) + public static double ParseDouble(Slice slice) { Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.Double); @@ -847,7 +855,8 @@ internal static double ParseDouble(Slice slice) return value; } - internal static Guid ParseGuid(Slice slice) + /// Parse a tuple segment containing a 128-bit GUID + public static Guid ParseGuid(Slice slice) { Contract.Requires(slice.HasValue && slice[0] == 
FdbTupleTypes.Uuid128); @@ -860,7 +869,8 @@ internal static Guid ParseGuid(Slice slice) return Uuid128.Convert(new Slice(slice.Array, slice.Offset + 1, 16)); } - internal static Uuid128 ParseUuid128(Slice slice) + /// Parse a tuple segment containing a 128-bit UUID + public static Uuid128 ParseUuid128(Slice slice) { Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.Uuid128); @@ -872,7 +882,8 @@ internal static Uuid128 ParseUuid128(Slice slice) return new Uuid128(new Slice(slice.Array, slice.Offset + 1, 16)); } - internal static Uuid64 ParseUuid64(Slice slice) + /// Parse a tuple segment containing a 64-bit UUID + public static Uuid64 ParseUuid64(Slice slice) { Contract.Requires(slice.HasValue && slice[0] == FdbTupleTypes.Uuid64); @@ -1006,6 +1017,33 @@ internal static Slice ReadEmbeddedTupleBytes(ref TupleReader reader) throw new FormatException(String.Format("Truncated embedded tuple started at index {0}/{1}", start, reader.Input.Buffer.Count)); } + /// Skip a number of tokens + /// Cursor in the packed tuple to decode + /// Number of tokens to skip + /// True if there was tokens, false if the reader was too small. + /// Even if this method return true, you need to check that the reader has not reached the end before reading more token! + public static bool Skip(ref TupleReader reader, int count) + { + while (count-- > 0) + { + if (!reader.Input.HasMore) return false; + var token = FdbTupleParser.ParseNext(ref reader); + if (token.IsNull) return false; + } + return true; + } + + /// Visit the different tokens of a packed tuple + /// Reader positionned at the start of a packed tuple + /// Lambda called for each segment of a tuple. Returns true to continue parsing, or false to stop + /// Number of tokens that have been visited until either returned false, or reached the end. 
+ public static T VisitNext(ref TupleReader reader, Func visitor) + { + if (!reader.Input.HasMore) throw new InvalidOperationException("The reader has already reached the end"); + var token = FdbTupleParser.ParseNext(ref reader); + return visitor(token, FdbTupleTypes.DecodeSegmentType(ref token)); + } + #endregion #region Bits Twiddling... diff --git a/FoundationDB.Client/Layers/Tuples/FdbTupleTypes.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleTypes.cs index d37163b23..3fd25bc45 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleTypes.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleTypes.cs @@ -28,6 +28,7 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY namespace FoundationDB.Layers.Tuples { + using FoundationDB.Client; using System; /// @@ -85,6 +86,47 @@ internal static class FdbTupleTypes /// Standard prefix of the System keys, or frequent suffix with key ranges /// This is not a part of the tuple encoding itself, but helps the tuple decoder pretty-print End keys from ranges, that would otherwise be unparsable. 
internal const byte AliasSystem = 255; + + /// Return the type of a tuple segment, from its header + public static FdbTupleSegmentType DecodeSegmentType(ref Slice segment) + { + if (segment.Count == 0) return FdbTupleSegmentType.Nil; + + int type = segment[0]; + switch(type) + { + case Nil: return FdbTupleSegmentType.Nil; + case Bytes: return FdbTupleSegmentType.ByteString; + case Utf8: return FdbTupleSegmentType.UnicodeString; + case TupleStart: return FdbTupleSegmentType.Tuple; + case Single: return FdbTupleSegmentType.Single; + case Double: return FdbTupleSegmentType.Double; + case Uuid128: return FdbTupleSegmentType.Uuid128; + case Uuid64: return FdbTupleSegmentType.Uuid64; + } + + if (type <= IntPos8 && type >= IntNeg8) + { + return FdbTupleSegmentType.Integer; + } + + return FdbTupleSegmentType.Invalid; + } + } + + /// Logical type of packed element of a tuple + public enum FdbTupleSegmentType + { + Invalid = -1, + Nil = 0, + ByteString = 1, + UnicodeString = 2, + Tuple = 3, + Integer = 20, + Single = 32, + Double = 33, + Uuid128 = 48, + Uuid64 = 49, } } From e731b98cc089c5f15547f4713d7084b49e1c1e5c Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Sat, 29 Nov 2014 15:17:12 +0100 Subject: [PATCH 21/63] AsyncHelpers: made some change to Maybe and IAsyncTarget to be able to build on .NET 4.0 - ExceptionDispatchInfo does not exist in NET40 so we use regular Exceptions - renamed Maybe.ThrowIfFailed() to ThrowForNonSuccess() so that it matches TaskAwaiter.ThrowForNonSuccess() --- FoundationDB.Client/Async/AsyncBuffer.cs | 15 +++++ FoundationDB.Client/Async/AsyncHelpers.cs | 60 +++++++++++++++++-- .../Async/AsyncProducerConsumerQueue.cs | 4 ++ FoundationDB.Client/Async/AsyncPump.cs | 2 +- FoundationDB.Client/Async/AsyncTaskBuffer.cs | 16 +++++ FoundationDB.Client/Async/AsyncTransform.cs | 14 +++++ FoundationDB.Client/Async/IAsyncTarget.cs | 4 ++ FoundationDB.Client/Async/Maybe.cs | 31 ++++++---- .../FdbParallelSelectAsyncIterator.cs | 2 +- 
.../Linq/FdbAsyncEnumerableFacts.cs | 2 +- 10 files changed, 129 insertions(+), 21 deletions(-) diff --git a/FoundationDB.Client/Async/AsyncBuffer.cs b/FoundationDB.Client/Async/AsyncBuffer.cs index 526ea98e0..638a1142b 100644 --- a/FoundationDB.Client/Async/AsyncBuffer.cs +++ b/FoundationDB.Client/Async/AsyncBuffer.cs @@ -108,6 +108,20 @@ public override void OnCompleted() } } +#if NET_4_0 + public override void OnError(Exception error) + { + lock (m_lock) + { + if (!m_done) + { + LogProducer("Error received: " + error.Message); + m_queue.Enqueue(Maybe.Error(error)); + WakeUpBlockedConsumer_NeedsLocking(); + } + } + } +#else public override void OnError(ExceptionDispatchInfo error) { lock (m_lock) @@ -120,6 +134,7 @@ public override void OnError(ExceptionDispatchInfo error) } } } +#endif private void Enqueue_NeedsLocking(Maybe value) { diff --git a/FoundationDB.Client/Async/AsyncHelpers.cs b/FoundationDB.Client/Async/AsyncHelpers.cs index a1b4c6d36..9bc4dd899 100644 --- a/FoundationDB.Client/Async/AsyncHelpers.cs +++ b/FoundationDB.Client/Async/AsyncHelpers.cs @@ -35,35 +35,49 @@ namespace FoundationDB.Async using System.Threading; using System.Threading.Tasks; - /// - /// Helper methods for creating and manipulating async sequences. - /// + /// Helper methods for creating and manipulating async sequences. public static class AsyncHelpers { internal static readonly Action NoOpCompletion = () => { }; +#if NET_4_0 + internal static readonly Action NoOpError = (e) => { }; + internal static readonly Action RethrowError = (e) => { throw e; }; +#else internal static readonly Action NoOpError = (e) => { }; internal static readonly Action RethrowError = (e) => { e.Throw(); }; +#endif #region Targets... 
+ /// Create a new async target from a set of callbacks public static IAsyncTarget CreateTarget( Func onNextAsync, - Action onCompleted, - Action onError + Action onCompleted = null, +#if NET_4_0 + Action onError = null +#else + Action onError = null +#endif ) { return new AnonymousAsyncTarget(onNextAsync, onCompleted, onError); } + /// Create a new async target from a set of callbacks public static IAsyncTarget CreateTarget( Action onNext, Action onCompleted = null, +#if NET_4_0 + Action onError = null +#else Action onError = null +#endif ) { return new AnonymousTarget(onNext, onCompleted, onError); } + /// Publish a new result on this async target, by correclty handling success, termination and failure public static Task Publish(this IAsyncTarget target, Maybe result, CancellationToken ct) { Contract.Requires(target != null); @@ -76,7 +90,11 @@ public static Task Publish(this IAsyncTarget target, Maybe result, Canc } else if (result.HasFailed) { +#if NET_4_0 + target.OnError(result.Error); +#else target.OnError(result.CapturedError); +#endif return TaskHelpers.CompletedTask; } else @@ -86,6 +104,7 @@ public static Task Publish(this IAsyncTarget target, Maybe result, Canc } } + /// Wrapper class for use with async lambda callbacks internal sealed class AnonymousAsyncTarget : IAsyncTarget { @@ -93,12 +112,20 @@ internal sealed class AnonymousAsyncTarget : IAsyncTarget private readonly Action m_onCompleted; +#if NET_4_0 + private readonly Action m_onError; +#else private readonly Action m_onError; +#endif public AnonymousAsyncTarget( Func onNextAsync, Action onCompleted, +#if NET_4_0 + Action onError +#else Action onError +#endif ) { m_onNextAsync = onNextAsync; @@ -116,12 +143,17 @@ public void OnCompleted() m_onCompleted(); } +#if NET_4_0 + public void OnError(Exception error) +#else public void OnError(ExceptionDispatchInfo error) +#endif { m_onError(error); } } + /// Wrapper class for use with non-async lambda callbacks internal sealed class AnonymousTarget : 
IAsyncTarget { @@ -129,12 +161,20 @@ internal sealed class AnonymousTarget : IAsyncTarget private readonly Action m_onCompleted; +#if NET_4_0 + private readonly Action m_onError; +#else private readonly Action m_onError; +#endif public AnonymousTarget( Action onNext, Action onCompleted, +#if NET_4_0 + Action onError +#else Action onError +#endif ) { if (onNext == null) throw new ArgumentNullException("onNext"); @@ -157,6 +197,15 @@ public void OnCompleted() } } +#if NET_4_0 + public void OnError(Exception error) + { + if (m_onError != null) + m_onError(error); + else + throw error; + } +#else public void OnError(ExceptionDispatchInfo error) { if (m_onError != null) @@ -164,6 +213,7 @@ public void OnError(ExceptionDispatchInfo error) else error.Throw(); } +#endif } #endregion diff --git a/FoundationDB.Client/Async/AsyncProducerConsumerQueue.cs b/FoundationDB.Client/Async/AsyncProducerConsumerQueue.cs index 7457965e5..aba21f517 100644 --- a/FoundationDB.Client/Async/AsyncProducerConsumerQueue.cs +++ b/FoundationDB.Client/Async/AsyncProducerConsumerQueue.cs @@ -70,7 +70,11 @@ protected AsyncProducerConsumerQueue(int capacity) public abstract void OnCompleted(); +#if NET_4_0 + public abstract void OnError(Exception error); +#else public abstract void OnError(ExceptionDispatchInfo error); +#endif /// Delcare the producer as beeing blocked on a full queue /// diff --git a/FoundationDB.Client/Async/AsyncPump.cs b/FoundationDB.Client/Async/AsyncPump.cs index 8ea40d875..7cf52dedb 100644 --- a/FoundationDB.Client/Async/AsyncPump.cs +++ b/FoundationDB.Client/Async/AsyncPump.cs @@ -131,7 +131,7 @@ public async Task PumpAsync(bool stopOnFirstError, CancellationToken cancellatio { m_state = STATE_FAILED; LogPump("Stopping after this error"); - current.ThrowIfFailed(); + current.ThrowForNonSuccess(); } else if (current.IsEmpty) { diff --git a/FoundationDB.Client/Async/AsyncTaskBuffer.cs b/FoundationDB.Client/Async/AsyncTaskBuffer.cs index 576cffe93..2ab910137 100644 --- 
a/FoundationDB.Client/Async/AsyncTaskBuffer.cs +++ b/FoundationDB.Client/Async/AsyncTaskBuffer.cs @@ -154,6 +154,21 @@ public override void OnCompleted() } } +#if NET_4_0 + public override void OnError(Exception error) + { + lock (m_lock) + { + if (!m_done) + { + LogProducer("Error received: " + error.Message); + m_queue.AddLast(new LinkedListNode>(TaskHelpers.FromException(error))); + WakeUpBlockedConsumer_NeedsLocking(); + if (m_mode == AsyncOrderingMode.CompletionOrder) NotifyConsumerOfTaskCompletion_NeedsLocking(); + } + } + } +#else public override void OnError(ExceptionDispatchInfo error) { lock (m_lock) @@ -167,6 +182,7 @@ public override void OnError(ExceptionDispatchInfo error) } } } +#endif private void Enqueue_NeedsLocking(Task task) { diff --git a/FoundationDB.Client/Async/AsyncTransform.cs b/FoundationDB.Client/Async/AsyncTransform.cs index 63da928e7..2eb9fbd8b 100644 --- a/FoundationDB.Client/Async/AsyncTransform.cs +++ b/FoundationDB.Client/Async/AsyncTransform.cs @@ -98,7 +98,11 @@ public Task OnNextAsync(T value, CancellationToken cancellationToken) } catch(Exception e) { +#if NET_4_0 + m_target.OnError(e); +#else m_target.OnError(ExceptionDispatchInfo.Capture(e)); +#endif return TaskHelpers.FromException(e); } } @@ -112,6 +116,15 @@ public void OnCompleted() } } +#if NET_4_0 + public void OnError(Exception e) + { + if (!m_done) + { + m_target.OnError(e); + } + } +#else public void OnError(ExceptionDispatchInfo e) { if (!m_done) @@ -119,6 +132,7 @@ public void OnError(ExceptionDispatchInfo e) m_target.OnError(e); } } +#endif #endregion diff --git a/FoundationDB.Client/Async/IAsyncTarget.cs b/FoundationDB.Client/Async/IAsyncTarget.cs index 3cc1bac42..9142ac5c0 100644 --- a/FoundationDB.Client/Async/IAsyncTarget.cs +++ b/FoundationDB.Client/Async/IAsyncTarget.cs @@ -51,7 +51,11 @@ public interface IAsyncTarget /// Notifies the target that tere was an exception, and that no more values will be published /// The error that occurred +#if NET_4_0 + void 
OnError(Exception error); +#else void OnError(ExceptionDispatchInfo error); +#endif } } diff --git a/FoundationDB.Client/Async/Maybe.cs b/FoundationDB.Client/Async/Maybe.cs index 86c15e2a1..19cf5be00 100644 --- a/FoundationDB.Client/Async/Maybe.cs +++ b/FoundationDB.Client/Async/Maybe.cs @@ -47,10 +47,16 @@ public struct Maybe : IEquatable>, IEquatable public readonly T Value; /// If HasValue is false optinally holds an error that was captured - private readonly object m_errorContainer; + private readonly object m_errorContainer; // either an Exception, or an ExceptionDispatchInfo internal Maybe(bool hasValue, T value, object errorContainer) { +#if NET_4_0 + Contract.Requires(errorContainer == null || (errorContainer is Exception)); +#else + Contract.Requires(errorContainer == null || (errorContainer is Exception) || (errorContainer is ExceptionDispatchInfo)); +#endif + this.HasValue = hasValue; this.Value = value; m_errorContainer = errorContainer; @@ -66,7 +72,7 @@ internal Maybe(bool hasValue, T value, object errorContainer) /// public T GetValueOrDefault() { - ThrowIfFailed(); + ThrowForNonSuccess(); return this.Value; } @@ -81,16 +87,15 @@ public Exception Error { get { - var exception = m_errorContainer as Exception; - if (exception != null) return exception; - +#if !NET_4_0 var edi = m_errorContainer as ExceptionDispatchInfo; if (edi != null) return edi.SourceException; - - return null; +#endif + return m_errorContainer as Exception; } } +#if !NET_4_0 /// Return the captured error context, or null if there wasn't any public ExceptionDispatchInfo CapturedError { @@ -105,21 +110,21 @@ public ExceptionDispatchInfo CapturedError return null; } } +#endif /// Rethrows any captured error, if there was one. 
- public void ThrowIfFailed() + public void ThrowForNonSuccess() { if (m_errorContainer != null) { var exception = m_errorContainer as Exception; - if (exception != null) - { - throw exception; - } - else +#if !NET_4_0 + if (exception == null) { ((ExceptionDispatchInfo)m_errorContainer).Throw(); } +#endif + throw exception; } } diff --git a/FoundationDB.Client/Linq/Iterators/FdbParallelSelectAsyncIterator.cs b/FoundationDB.Client/Linq/Iterators/FdbParallelSelectAsyncIterator.cs index a4e326b52..664f212b9 100644 --- a/FoundationDB.Client/Linq/Iterators/FdbParallelSelectAsyncIterator.cs +++ b/FoundationDB.Client/Linq/Iterators/FdbParallelSelectAsyncIterator.cs @@ -136,7 +136,7 @@ protected override async Task OnNextAsync(CancellationToken cancellationTo // we want to make sure that the exception callstack is as clean as possible, // so we rely on Maybe.ThrowIfFailed() to do the correct thing! MarkAsFailed(); - next.ThrowIfFailed(); + next.ThrowForNonSuccess(); return false; } else diff --git a/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs b/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs index 22c10bfdd..c7fba65bd 100644 --- a/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs +++ b/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs @@ -887,7 +887,7 @@ public async Task Test_FdbAsyncBuffer() else if (msg.HasValue) { Console.WriteLine("[consumer] Got error: " + msg.Error); - msg.ThrowIfFailed(); + msg.ThrowForNonSuccess(); break; } else From b81821acf42f466502950eff3203db2a720b544f Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Sat, 29 Nov 2014 15:18:16 +0100 Subject: [PATCH 22/63] KeyValueEncoders: renamed all EncodeRange/DecodeRange to EncodeKeys/DecodeKeys and EncodeValues/DecodeValues - Matches with the names for tuples, and also remove ambiguity between encoding keys and values --- .../Encoders/FdbEncoderSubspace`1.cs | 10 +++--- .../Encoders/KeyValueEncoders.cs | 32 +++++++++---------- .../FdbTransactionExtensions.cs | 2 +- 
.../Collections/FdbQueue`1.cs | 4 +-- 4 files changed, 24 insertions(+), 24 deletions(-) diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs index 90347facb..36df37878 100644 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs +++ b/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs @@ -95,19 +95,19 @@ public Slice EncodeKey(T key) [NotNull] public Slice[] EncodeKeys([NotNull] IEnumerable keys) { - return ConcatKeys(m_encoder.EncodeRange(keys)); + return ConcatKeys(m_encoder.EncodeKeys(keys)); } [NotNull] public Slice[] EncodeKeys([NotNull] params T[] keys) { - return ConcatKeys(m_encoder.EncodeRange(keys)); + return ConcatKeys(m_encoder.EncodeKeys(keys)); } [NotNull] public Slice[] EncodeKeys([NotNull] TElement[] elements, Func selector) { - return ConcatKeys(m_encoder.EncodeRange(elements, selector)); + return ConcatKeys(m_encoder.EncodeKeys(elements, selector)); } public T DecodeKey(Slice encoded) @@ -118,13 +118,13 @@ public T DecodeKey(Slice encoded) [NotNull] public T[] DecodeKeys([NotNull] IEnumerable encoded) { - return m_encoder.DecodeRange(ExtractKeys(encoded, boundCheck: true)); + return m_encoder.DecodeKeys(ExtractKeys(encoded, boundCheck: true)); } [NotNull] public T[] DecodeKeys([NotNull] params Slice[] encoded) { - return m_encoder.DecodeRange(ExtractKeys(encoded, boundCheck: true)); + return m_encoder.DecodeKeys(ExtractKeys(encoded, boundCheck: true)); } public virtual FdbKeyRange ToRange(T key) diff --git a/FoundationDB.Client/Encoders/KeyValueEncoders.cs b/FoundationDB.Client/Encoders/KeyValueEncoders.cs index 5e44469e3..75ae52c82 100644 --- a/FoundationDB.Client/Encoders/KeyValueEncoders.cs +++ b/FoundationDB.Client/Encoders/KeyValueEncoders.cs @@ -796,7 +796,7 @@ public static IKeyEncoder Bind([NotNull] Func encoder, [NotNull] /// Convert an array of s into an array of slices, using a serializer (or the default serializer if none is provided) [NotNull] - public static 
Slice[] EncodeRange(this IKeyEncoder encoder, [NotNull] T[] values) + public static Slice[] EncodeKeys([NotNull] this IKeyEncoder encoder, [NotNull] params T[] values) { if (encoder == null) throw new ArgumentNullException("encoder"); if (values == null) throw new ArgumentNullException("values"); @@ -811,7 +811,7 @@ public static Slice[] EncodeRange(this IKeyEncoder encoder, [NotNull] T[] /// Convert an array of s into an array of slices, using a serializer (or the default serializer if none is provided) [NotNull] - public static Slice[] EncodeRange(this IKeyEncoder encoder, [NotNull] TElement[] elements, Func selector) + public static Slice[] EncodeKeys([NotNull] this IKeyEncoder encoder, [NotNull] TElement[] elements, Func selector) { if (encoder == null) throw new ArgumentNullException("encoder"); if (elements == null) throw new ArgumentNullException("elements"); @@ -827,7 +827,7 @@ public static Slice[] EncodeRange(this IKeyEncoder encoder /// Transform a sequence of s into a sequence of slices, using a serializer (or the default serializer if none is provided) [NotNull] - public static IEnumerable EncodeRange(this IKeyEncoder encoder, [NotNull] IEnumerable values) + public static IEnumerable EncodeKeys([NotNull] this IKeyEncoder encoder, [NotNull] IEnumerable values) { if (encoder == null) throw new ArgumentNullException("encoder"); if (values == null) throw new ArgumentNullException("values"); @@ -837,7 +837,7 @@ public static IEnumerable EncodeRange(this IKeyEncoder encoder, [No var array = values as T[]; if (array != null) { // optimized path for arrays - return EncodeRange(encoder, array); + return EncodeKeys(encoder, array); } var coll = values as ICollection; @@ -857,7 +857,7 @@ public static IEnumerable EncodeRange(this IKeyEncoder encoder, [No /// Convert an array of slices back into an array of s, using a serializer (or the default serializer if none is provided) [NotNull] - public static T[] DecodeRange(this IKeyEncoder encoder, [NotNull] Slice[] 
slices) + public static T[] DecodeKeys([NotNull] this IKeyEncoder encoder, [NotNull] params Slice[] slices) { if (encoder == null) throw new ArgumentNullException("encoder"); if (slices == null) throw new ArgumentNullException("slices"); @@ -872,7 +872,7 @@ public static T[] DecodeRange(this IKeyEncoder encoder, [NotNull] Slice[] /// Convert the keys of an array of key value pairs of slices back into an array of s, using a serializer (or the default serializer if none is provided) [NotNull] - public static T[] DecodeRange(this IKeyEncoder encoder, [NotNull] KeyValuePair[] items) + public static T[] DecodeKeys([NotNull] this IKeyEncoder encoder, [NotNull] KeyValuePair[] items) { if (encoder == null) throw new ArgumentNullException("encoder"); if (items == null) throw new ArgumentNullException("items"); @@ -887,7 +887,7 @@ public static T[] DecodeRange(this IKeyEncoder encoder, [NotNull] KeyValue /// Transform a sequence of slices back into a sequence of s, using a serializer (or the default serializer if none is provided) [NotNull] - public static IEnumerable DecodeRange(this IKeyEncoder encoder, [NotNull] IEnumerable slices) + public static IEnumerable DecodeKeys([NotNull] this IKeyEncoder encoder, [NotNull] IEnumerable slices) { if (encoder == null) throw new ArgumentNullException("encoder"); if (slices == null) throw new ArgumentNullException("slices"); @@ -898,14 +898,14 @@ public static IEnumerable DecodeRange(this IKeyEncoder encoder, [NotNul } /// Returns a partial encoder that will only encode the first element - public static HeadEncoder Head(this ICompositeKeyEncoder encoder) + public static HeadEncoder Head([NotNull] this ICompositeKeyEncoder encoder) { if (encoder == null) throw new ArgumentNullException("encoder"); return new HeadEncoder(encoder); } /// Returns a partial encoder that will only encode the first element - public static HeadEncoder Head(this ICompositeKeyEncoder encoder) + public static HeadEncoder Head([NotNull] this ICompositeKeyEncoder 
encoder) { if (encoder == null) throw new ArgumentNullException("encoder"); @@ -913,7 +913,7 @@ public static HeadEncoder Head(this ICompositeKeyEncoder } /// Returns a partial encoder that will only encode the first and second elements - public static PairEncoder Pair(this ICompositeKeyEncoder encoder) + public static PairEncoder Pair([NotNull] this ICompositeKeyEncoder encoder) { if (encoder == null) throw new ArgumentNullException("encoder"); @@ -926,7 +926,7 @@ public static PairEncoder Pair(this ICompositeKeyEncoder /// Convert an array of s into an array of slices, using a serializer (or the default serializer if none is provided) [NotNull] - public static Slice[] EncodeRange(this IValueEncoder encoder, [NotNull] T[] values) + public static Slice[] EncodeValues([NotNull] this IValueEncoder encoder, [NotNull] params T[] values) { if (encoder == null) throw new ArgumentNullException("encoder"); if (values == null) throw new ArgumentNullException("values"); @@ -942,7 +942,7 @@ public static Slice[] EncodeRange(this IValueEncoder encoder, [NotNull] T[ /// Transform a sequence of s into a sequence of slices, using a serializer (or the default serializer if none is provided) [NotNull] - public static IEnumerable EncodeRange(this IValueEncoder encoder, [NotNull] IEnumerable values) + public static IEnumerable EncodeValues([NotNull] this IValueEncoder encoder, [NotNull] IEnumerable values) { if (encoder == null) throw new ArgumentNullException("encoder"); if (values == null) throw new ArgumentNullException("values"); @@ -952,7 +952,7 @@ public static IEnumerable EncodeRange(this IValueEncoder encoder, [ var array = values as T[]; if (array != null) { // optimized path for arrays - return EncodeRange(encoder, array); + return EncodeValues(encoder, array); } var coll = values as ICollection; @@ -971,7 +971,7 @@ public static IEnumerable EncodeRange(this IValueEncoder encoder, [ /// Convert an array of slices back into an array of s, using a serializer (or the default 
serializer if none is provided) [NotNull] - public static T[] DecodeRange(this IValueEncoder encoder, Slice[] slices) + public static T[] DecodeValues([NotNull] this IValueEncoder encoder, [NotNull] params Slice[] slices) { if (encoder == null) throw new ArgumentNullException("encoder"); if (slices == null) throw new ArgumentNullException("slices"); @@ -987,7 +987,7 @@ public static T[] DecodeRange(this IValueEncoder encoder, Slice[] slices) /// Convert the values of an array of key value pairs of slices back into an array of s, using a serializer (or the default serializer if none is provided) [NotNull] - public static T[] DecodeRange(this IValueEncoder encoder, KeyValuePair[] items) + public static T[] DecodeValues([NotNull] this IValueEncoder encoder, [NotNull] KeyValuePair[] items) { if (encoder == null) throw new ArgumentNullException("encoder"); if (items == null) throw new ArgumentNullException("items"); @@ -1003,7 +1003,7 @@ public static T[] DecodeRange(this IValueEncoder encoder, KeyValuePairTransform a sequence of slices back into a sequence of s, using a serializer (or the default serializer if none is provided) [NotNull] - public static IEnumerable DecodeRange(this IValueEncoder encoder, [NotNull] IEnumerable slices) + public static IEnumerable DecodeValues([NotNull] this IValueEncoder encoder, [NotNull] IEnumerable slices) { if (encoder == null) throw new ArgumentNullException("encoder"); if (slices == null) throw new ArgumentNullException("slices"); diff --git a/FoundationDB.Client/FdbTransactionExtensions.cs b/FoundationDB.Client/FdbTransactionExtensions.cs index fe225295b..cff5ccb3e 100644 --- a/FoundationDB.Client/FdbTransactionExtensions.cs +++ b/FoundationDB.Client/FdbTransactionExtensions.cs @@ -858,7 +858,7 @@ public static async Task GetValuesAsync(this IFdbReadOnlyTrans { if (decoder == null) throw new ArgumentNullException("decoder"); - return decoder.DecodeRange(await GetValuesAsync(trans, keys).ConfigureAwait(false)); + return 
decoder.DecodeValues(await GetValuesAsync(trans, keys).ConfigureAwait(false)); } /// diff --git a/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs b/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs index aa403ee33..124a5b847 100644 --- a/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs +++ b/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs @@ -226,7 +226,7 @@ public Task ExportAsync(IFdbDatabase db, Action handler, Cancellation this.QueueItem.Tuples.ToRange(), (kvs, offset, ct) => { - handler(this.Encoder.DecodeRange(kvs), offset); + handler(this.Encoder.DecodeValues(kvs), offset); return TaskHelpers.CompletedTask; }, cancellationToken @@ -243,7 +243,7 @@ public Task ExportAsync(IFdbDatabase db, Func handler, Cancella return Fdb.Bulk.ExportAsync( db, this.QueueItem.Tuples.ToRange(), - (kvs, offset, ct) => handler(this.Encoder.DecodeRange(kvs), offset), + (kvs, offset, ct) => handler(this.Encoder.DecodeValues(kvs), offset), cancellationToken ); } From 4c56b4c54391b1f06824973205e7562ee37e3cc0 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Mon, 1 Dec 2014 11:43:26 +0100 Subject: [PATCH 23/63] Maybe: more changes to keep the original error callstack untouched, and some fixes for building on .NET 4.0 --- FoundationDB.Client/Async/AsyncTaskBuffer.cs | 4 ++ .../Async/AsyncTransformQueue.cs | 8 ++++ FoundationDB.Client/Async/Maybe.cs | 40 ++++++++++++++++--- .../Filters/Logging/FdbLoggedTransaction.cs | 8 ++++ 4 files changed, 55 insertions(+), 5 deletions(-) diff --git a/FoundationDB.Client/Async/AsyncTaskBuffer.cs b/FoundationDB.Client/Async/AsyncTaskBuffer.cs index 2ab910137..7952885ee 100644 --- a/FoundationDB.Client/Async/AsyncTaskBuffer.cs +++ b/FoundationDB.Client/Async/AsyncTaskBuffer.cs @@ -312,7 +312,11 @@ private async Task> WaitForTaskToCompleteAsync([NotNull] Task task, catch(Exception e) { LogConsumer("Notified that task #" + task + " failed"); +#if NET_4_0 return Maybe.Error(e); +#else + return
Maybe.Error(ExceptionDispatchInfo.Capture(e)); +#endif } } diff --git a/FoundationDB.Client/Async/AsyncTransformQueue.cs b/FoundationDB.Client/Async/AsyncTransformQueue.cs index 862c8acb0..1f232908d 100644 --- a/FoundationDB.Client/Async/AsyncTransformQueue.cs +++ b/FoundationDB.Client/Async/AsyncTransformQueue.cs @@ -131,7 +131,11 @@ private static async Task> ProcessItemHandler(object state) } catch (Exception e) { +#if NET_4_0 return Maybe.Error(e); +#else + return Maybe.Error(ExceptionDispatchInfo.Capture(e)); +#endif } } @@ -297,7 +301,11 @@ private async Task> ReceiveWhenDoneAsync(Task> tas } catch(Exception e) { +#if NET_4_0 return Maybe.Error(e); +#else + return Maybe.Error(ExceptionDispatchInfo.Capture(e)); +#endif } finally { diff --git a/FoundationDB.Client/Async/Maybe.cs b/FoundationDB.Client/Async/Maybe.cs index 19cf5be00..4e68f4cef 100644 --- a/FoundationDB.Client/Async/Maybe.cs +++ b/FoundationDB.Client/Async/Maybe.cs @@ -229,11 +229,13 @@ public static Maybe Error(Exception e) return new Maybe(false, default(T), e); } +#if !NET_4_0 /// Capture an exception into a public static Maybe Error(ExceptionDispatchInfo e) { return new Maybe(false, default(T), e); } +#endif /// Immediately apply a function to a value, and capture the result into a public static Maybe Apply(T value, Func lambda) @@ -245,7 +247,11 @@ public static Maybe Apply(T value, Func lambda) } catch (Exception e) { +#if NET_4_0 return Error(e); +#else + return Error(ExceptionDispatchInfo.Capture(e)); +#endif } } @@ -259,7 +265,11 @@ public static Maybe Apply(T value, [NotNull] Func> lambda) } catch (Exception e) { +#if NET_4_0 return Error(e); +#else + return Error(ExceptionDispatchInfo.Capture(e)); +#endif } } @@ -269,7 +279,11 @@ public static Maybe Apply(Maybe value, [NotNull] Func lambda) Contract.Requires(lambda != null); if (!value.HasValue) { - if (value.HasFailed) return Error(value.Error); + if (value.HasFailed) + { + // keep the original error untouched + return new Maybe(false, 
default(R), value.ErrorContainer); + } return Nothing(); } try @@ -288,7 +302,11 @@ public static Maybe Apply(Maybe value, [NotNull] Func> l Contract.Requires(lambda != null); if (!value.HasValue) { - if (value.HasFailed) return Error(value.Error); + if (value.HasFailed) + { + // keep the original error untouched + return new Maybe(false, default(R), value.ErrorContainer); + } return Nothing(); } try @@ -313,7 +331,7 @@ public static Maybe FromTask([NotNull] Task task) } case TaskStatus.Faulted: { - // note: we want to have a nice stack, so we unwrap the aggregate exception + //TODO: pass the failed task itself as the error container? (we would keep the original callstack that way...) var aggEx = task.Exception.Flatten(); if (aggEx.InnerExceptions.Count == 1) { @@ -344,7 +362,13 @@ public static Maybe FromTask([NotNull] Task> task) } case TaskStatus.Faulted: { - return Error(task.Exception); + //TODO: pass the failed task itself as the error container? (we would keep the original callstack that way...) + var aggEx = task.Exception.Flatten(); + if (aggEx.InnerExceptions.Count == 1) + { + return Maybe.Error(aggEx.InnerException); + } + return Maybe.Error(aggEx); } case TaskStatus.Canceled: { @@ -369,7 +393,13 @@ public static Task> Unwrap([NotNull] Task> task) } case TaskStatus.Faulted: { - return Task.FromResult(Error(task.Exception)); + //TODO: pass the failed task itself as the error container? (we would keep the original callstack that way...) 
+ var aggEx = task.Exception.Flatten(); + if (aggEx.InnerExceptions.Count == 1) + { + return Task.FromResult(Maybe.Error(aggEx.InnerException)); + } + return Task.FromResult(Maybe.Error(aggEx)); } case TaskStatus.Canceled: { diff --git a/FoundationDB.Client/Filters/Logging/FdbLoggedTransaction.cs b/FoundationDB.Client/Filters/Logging/FdbLoggedTransaction.cs index 66a078921..b49bf19a7 100644 --- a/FoundationDB.Client/Filters/Logging/FdbLoggedTransaction.cs +++ b/FoundationDB.Client/Filters/Logging/FdbLoggedTransaction.cs @@ -219,7 +219,11 @@ private async Task ExecuteAsync(TCommand cmd, Func(e); +#else + cmd.Result = Maybe.Error(System.Runtime.ExceptionServices.ExceptionDispatchInfo.Capture(e)); +#endif throw; } finally @@ -419,7 +423,11 @@ private async Task ExecuteAsync(TCommand cmd, Func(e); +#else + cmd.Result = Maybe.Error(System.Runtime.ExceptionServices.ExceptionDispatchInfo.Capture(e)); +#endif throw; } finally From abf2574e750848037865966670c6cb7b2b549230 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Tue, 2 Dec 2014 14:24:55 +0100 Subject: [PATCH 24/63] Removed IFdbTransactional overrides on test layers - IFdbTransactional don't compose very well with each other - It is better to only expose methods that take IFdb[ReadOnly]Transaction arguments, and use the various retry loops (db.ReadWriteAsync, etc...) to compose one or more layer calls inside the same transaction. 
--- .../Documents/FdbDocumentCollection.cs | 58 ------------------- .../Layers/DocumentCollectionFacts.cs | 12 ++-- 2 files changed, 6 insertions(+), 64 deletions(-) diff --git a/FoundationDB.Layers.Experimental/Documents/FdbDocumentCollection.cs b/FoundationDB.Layers.Experimental/Documents/FdbDocumentCollection.cs index b2810bc5b..49681f970 100644 --- a/FoundationDB.Layers.Experimental/Documents/FdbDocumentCollection.cs +++ b/FoundationDB.Layers.Experimental/Documents/FdbDocumentCollection.cs @@ -215,64 +215,6 @@ public void DeleteMultiple(IFdbTransaction trans, IEnumerable documen DeleteMultiple(trans, documents.Select(document => this.IdSelector(document))); } - #region Transactional... - - public async Task InsertAsync(IFdbTransactional db, TDocument document, CancellationToken cancellationToken) - { - if (db == null) throw new ArgumentNullException("db"); - - await db.WriteAsync((tr) => this.Insert(tr, document), cancellationToken); - - } - - public Task LoadAsync(IFdbReadOnlyTransactional db, TId id, CancellationToken cancellationToken) - { - if (db == null) throw new ArgumentNullException("db"); - if (id == null) throw new ArgumentNullException("id"); - - return db.ReadAsync((tr) => LoadAsync(tr, id), cancellationToken); - } - - public Task> LoadMultipleAsync(IFdbReadOnlyTransactional db, IEnumerable ids, CancellationToken cancellationToken) - { - if (db == null) throw new ArgumentNullException("db"); - if (ids == null) throw new ArgumentNullException("ids"); - - //note: if the source is not already a collection, we have to assume that it is not safe to read it multiple times (it may be a LINQ query or an iterator) - var coll = ids as ICollection ?? 
ids.ToList(); - - return db.ReadAsync((tr) => LoadMultipleAsync(tr, coll), cancellationToken); - } - - public Task DeleteAsync(IFdbTransactional db, TId id, CancellationToken cancellationToken) - { - if (db == null) throw new ArgumentNullException("db"); - if (id == null) throw new ArgumentNullException("id"); - - return db.WriteAsync((tr) => this.Delete(tr, id), cancellationToken); - } - - public Task DeleteMultipleAsync(IFdbTransactional db, IEnumerable ids, CancellationToken cancellationToken) - { - if (db == null) throw new ArgumentNullException("db"); - if (ids == null) throw new ArgumentNullException("ids"); - - //note: if the source is not already a collection, we have to assume that it is not safe to read it multiple times (it may be a LINQ query or an iterator) - var coll = ids as ICollection ?? ids.ToList(); - - return db.WriteAsync((tr) => this.DeleteMultiple(tr, coll), cancellationToken); - } - - public Task DeleteAsync(IFdbTransactional db, TDocument document, CancellationToken cancellationToken) - { - if (db == null) throw new ArgumentNullException("db"); - if (document == null) throw new ArgumentNullException("document"); - - return db.WriteAsync((tr) => this.Delete(tr, document), cancellationToken); - } - - #endregion - } } diff --git a/FoundationDB.Tests/Layers/DocumentCollectionFacts.cs b/FoundationDB.Tests/Layers/DocumentCollectionFacts.cs index 0d9c28c18..43505b7af 100644 --- a/FoundationDB.Tests/Layers/DocumentCollectionFacts.cs +++ b/FoundationDB.Tests/Layers/DocumentCollectionFacts.cs @@ -88,13 +88,13 @@ public async Task Test_Can_Insert_And_Retrieve_Json_Documents() // store a document var book1 = books[0]; - await docs.InsertAsync(db, book1, this.Cancellation); + await db.WriteAsync((tr) => docs.Insert(tr, book1), this.Cancellation); #if DEBUG await DumpSubspace(db, location); #endif // retrieve the document - var copy = await docs.LoadAsync(db, book1.Id, this.Cancellation); + var copy = await db.ReadAsync((tr) =>docs.LoadAsync(tr, 
book1.Id), this.Cancellation); Assert.That(copy, Is.Not.Null); Assert.That(copy.Id, Is.EqualTo(book1.Id)); @@ -105,7 +105,7 @@ public async Task Test_Can_Insert_And_Retrieve_Json_Documents() // store another document var book2 = books[1]; - await docs.InsertAsync(db, book2, this.Cancellation); + await db.WriteAsync((tr) => docs.Insert(tr, book2), this.Cancellation); #if DEBUG await DumpSubspace(db, location); #endif @@ -134,13 +134,13 @@ public async Task Test_Can_Insert_And_Retrieve_ProtoBuf_Documents() // store a document var book1 = books[0]; - await docs.InsertAsync(db, book1, this.Cancellation); + await db.WriteAsync((tr) => docs.Insert(tr, book1), this.Cancellation); #if DEBUG await DumpSubspace(db, location); #endif // retrieve the document - var copy = await docs.LoadAsync(db, 42, this.Cancellation); + var copy = await db.ReadAsync((tr) => docs.LoadAsync(tr, 42), this.Cancellation); Assert.That(copy, Is.Not.Null); Assert.That(copy.Id, Is.EqualTo(book1.Id)); @@ -151,7 +151,7 @@ public async Task Test_Can_Insert_And_Retrieve_ProtoBuf_Documents() // store another document var book2 = books[1]; - await docs.InsertAsync(db, book2, this.Cancellation); + await db.WriteAsync((tr) => docs.Insert(tr, book2), this.Cancellation); #if DEBUG await DumpSubspace(db, location); #endif From c53bc563fec4f84085430a414d812ed6cb7d8b02 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Tue, 2 Dec 2014 17:19:13 +0100 Subject: [PATCH 25/63] Renamed IFdbTransactional into IFdbRetryable and remove unneeded overrides in the various experimental layers - See Issue #45 for rationale behind the change --- FoundationDB.Client/FdbDatabase.cs | 2 +- FoundationDB.Client/FdbTransaction.cs | 3 +- .../FdbTransactionExtensions.cs | 2 +- .../FoundationDB.Client.csproj | 6 +- FoundationDB.Client/IFdbDatabase.cs | 2 +- FoundationDB.Client/IFdbReadOnlyRetryable.cs | 93 +++++++++++++ .../IFdbReadOnlyTransaction.cs | 7 +- .../IFdbReadOnlyTransactional.cs | 59 --------- ...{IFdbTransactional.cs => 
IFdbRetryable.cs} | 38 ++++- ...actionals.cs => FdbDirectoryExtensions.cs} | 85 ++++++------ .../Subspaces/FdbSubspaceExtensions.cs | 2 +- .../Blobs/FdbBlobTransactionals.cs | 109 --------------- .../Collections/FdbMapTransactionals.cs | 109 --------------- .../Collections/FdbQueueTransactionals.cs | 78 ----------- .../Collections/FdbQueue`1.cs | 2 +- .../Collections/FdbVectorTransactionals.cs | 130 ------------------ .../Collections/FdbVector`1.cs | 8 +- .../FoundationDB.Layers.Common.csproj | 4 - .../Interning/FdbStringIntern.cs | 25 ---- FoundationDB.Layers.Common/Optional`1.cs | 13 +- .../Messaging/FdbWorkerPool.cs | 2 +- FoundationDB.Tests/Layers/BlobFacts.cs | 2 +- FoundationDB.Tests/Layers/QueuesFacts.cs | 60 +++++--- 23 files changed, 233 insertions(+), 608 deletions(-) create mode 100644 FoundationDB.Client/IFdbReadOnlyRetryable.cs delete mode 100644 FoundationDB.Client/IFdbReadOnlyTransactional.cs rename FoundationDB.Client/{IFdbTransactional.cs => IFdbRetryable.cs} (56%) rename FoundationDB.Client/Layers/Directories/{FdbDirectoryTransactionals.cs => FdbDirectoryExtensions.cs} (89%) delete mode 100644 FoundationDB.Layers.Common/Blobs/FdbBlobTransactionals.cs delete mode 100644 FoundationDB.Layers.Common/Collections/FdbMapTransactionals.cs delete mode 100644 FoundationDB.Layers.Common/Collections/FdbQueueTransactionals.cs delete mode 100644 FoundationDB.Layers.Common/Collections/FdbVectorTransactionals.cs diff --git a/FoundationDB.Client/FdbDatabase.cs b/FoundationDB.Client/FdbDatabase.cs index f269f2d68..c99fe66fc 100644 --- a/FoundationDB.Client/FdbDatabase.cs +++ b/FoundationDB.Client/FdbDatabase.cs @@ -44,7 +44,7 @@ namespace FoundationDB.Client /// FoundationDB database session handle /// An instance of this class can be used to create any number of concurrent transactions that will read and/or write to this particular database. 
[DebuggerDisplay("Name={m_name}, GlobalSpace={m_globalSpace}")] - public class FdbDatabase : IFdbDatabase, IFdbTransactional, IDisposable + public class FdbDatabase : IFdbDatabase, IFdbRetryable, IDisposable { #region Private Fields... diff --git a/FoundationDB.Client/FdbTransaction.cs b/FoundationDB.Client/FdbTransaction.cs index 613f1a1e7..bdd1d2526 100644 --- a/FoundationDB.Client/FdbTransaction.cs +++ b/FoundationDB.Client/FdbTransaction.cs @@ -44,9 +44,10 @@ namespace FoundationDB.Client /// FounrationDB transaction handle. /// An instance of this class can be used to read from and/or write to a snapshot of a FoundationDB database. - [DebuggerDisplay("Id={Id}, StillAlive={StillAlive}")] + [DebuggerDisplay("Id={Id}, StillAlive={StillAlive}, Size={Size}")] public sealed partial class FdbTransaction : IFdbTransaction, IFdbReadOnlyTransaction, IDisposable { + #region Private Members... internal const int STATE_INIT = 0; diff --git a/FoundationDB.Client/FdbTransactionExtensions.cs b/FoundationDB.Client/FdbTransactionExtensions.cs index cff5ccb3e..4c59c0663 100644 --- a/FoundationDB.Client/FdbTransactionExtensions.cs +++ b/FoundationDB.Client/FdbTransactionExtensions.cs @@ -1013,7 +1013,7 @@ public static Task[]> GetBatchAsync(th /// Lambda function that returns an async enumerable. The function may be called multiple times if the transaction conflicts. /// Token used to cancel the operation /// Task returning the list of all the elements of the async enumerable returned by the last successfull call to . 
- public static Task> QueryAsync(this IFdbReadOnlyTransactional db, [NotNull] Func> handler, CancellationToken cancellationToken) + public static Task> QueryAsync(this IFdbReadOnlyRetryable db, [NotNull] Func> handler, CancellationToken cancellationToken) { if (db == null) throw new ArgumentNullException("db"); if (handler == null) throw new ArgumentNullException("handler"); diff --git a/FoundationDB.Client/FoundationDB.Client.csproj b/FoundationDB.Client/FoundationDB.Client.csproj index 0447f1d75..5fad9bd9c 100644 --- a/FoundationDB.Client/FoundationDB.Client.csproj +++ b/FoundationDB.Client/FoundationDB.Client.csproj @@ -104,9 +104,9 @@ - + - + @@ -141,7 +141,7 @@ - + diff --git a/FoundationDB.Client/IFdbDatabase.cs b/FoundationDB.Client/IFdbDatabase.cs index 4459884e5..398047db2 100644 --- a/FoundationDB.Client/IFdbDatabase.cs +++ b/FoundationDB.Client/IFdbDatabase.cs @@ -33,7 +33,7 @@ namespace FoundationDB.Client using System.Threading; /// Database connection context. - public interface IFdbDatabase : IFdbReadOnlyTransactional, IFdbTransactional, IFdbSubspace, IFdbKey, IDisposable + public interface IFdbDatabase : IFdbReadOnlyRetryable, IFdbRetryable, IFdbSubspace, IFdbKey, IDisposable { /// Name of the database string Name { [NotNull] get; } diff --git a/FoundationDB.Client/IFdbReadOnlyRetryable.cs b/FoundationDB.Client/IFdbReadOnlyRetryable.cs new file mode 100644 index 000000000..887aa8f7e --- /dev/null +++ b/FoundationDB.Client/IFdbReadOnlyRetryable.cs @@ -0,0 +1,93 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +namespace FoundationDB.Client +{ + using JetBrains.Annotations; + using System; + using System.Threading; + using System.Threading.Tasks; + + /// Transactional context that can execute, inside a retry loop, idempotent actions using read-only transactions. + public interface IFdbReadOnlyRetryable + { + #region Important Note: Differences with Python's @transactional and Java's TransactionContext + + // This interface is supposed to be the equivalent of the @transactional Python attribute, and the TransactionContext base class in Java, + // but it has one MAJOR difference with the other bindings! 
+ // + // In the other bindings, the notion of @transactional is a way to hide, from the caller, the behaviour of the object in case of failures: + // - sometimes the errors will bubble up (if the instance is a Transaction), and the caller has to deal with it + // - sometimes the errors will be retried under the hood an unspecified number of times, and may or may not still blow up at some point. + + // I think that this is a very dangerous thing, for the following reasons: + // 1. This can easily create race conditions and weird bugs: since your lambda may or may not be called multiple times, the code must be aware of this fact! + // => the most common bug is to update a global cache or state inside the lambda, BEFORE the transaction is committed. This can either fill the case with invalid value (commit fails), + // or worse you could update multiple times a global value if the transaction is retried. + // 2. This is not composable: even though the actions themselves could be composable, the code that must deal with errors and cancellation is NOT. + // If you want to write robust code you HAVE to know the exact behavior of the instance, which would force you to have different code path depending on the KIND of transactional instance you got. + // => If you have to check the actual type (Transaction or Database) anyway, why not explicitly ask for one or the other? + // 3. Since .NET cannot simulate the @transactional Python attribute behavior easily, you are forced to add multiple version of the methods, ones that takes IFdbTransactions and ones that take an IFdbTransactional + // This creates a lot of code duplication, and you end up with alias methods that are simply doing "FooAsync(IFdbTransaction dbOrTrans, ...) { return db.ReadWriteAsync((tr) => FooAsync(tr, ...), ...); } + // => the caller of the layer could easily write the same thing, and now would be in a position to compose multiple calls to the layer (and other layers) in the same retry loop.
+ + // For these reasons, regular IFdbTransaction DO NOT implement this interface, and the interface is called "IFdbRetryable" instead of "IFdbTransactional" (which used to be the name of this interface before the design change) + + #endregion + + //note: since there are no non-async read methods on transactions, there is no need for an override that takes an Action<....> + + /// Runs a transactional lambda function inside a read-only transaction, which can be executed more than once if any retryable error occurs. + /// Asynchronous handler that will be retried until it succeeds, or a non-recoverable error occurs. + /// Token used to cancel the operation + /// + /// Since the handler can run more than once, and that there is no guarantee that the transaction commits once it returns, you MAY NOT mutate any global state (counters, cache, global dictionary) inside this lambda! + /// You must wait for the Task to complete successfully before updating the global state of the application. + /// + Task ReadAsync([NotNull][InstantHandle] Func asyncHandler, CancellationToken cancellationToken); + + /// Runs a transactional lambda function inside a read-only transaction, which can be executed more than once if any retryable error occurs. + /// Asynchronous handler that will be retried until it succeeds, or a non-recoverable error occurs. + /// Token used to cancel the operation + /// + /// Since the handler can run more than once, and that there is no guarantee that the transaction commits once it returns, you MAY NOT mutate any global state (counters, cache, global dictionary) inside this lambda! + /// You must wait for the Task to complete successfully before updating the global state of the application. + /// + Task ReadAsync([NotNull][InstantHandle] Func> asyncHandler, CancellationToken cancellationToken); + + //REVIEW: should we keep these ? + + /// [EXPERIMENTAL] do not use yet!.
+ Task ReadAsync([NotNull][InstantHandle] Func asyncHandler, [InstantHandle] Action onDone, CancellationToken cancellationToken); + + /// [EXPERIMENTAL] do not use yet!. + Task ReadAsync([NotNull][InstantHandle] Func> asyncHandler, [InstantHandle] Action onDone, CancellationToken cancellationToken); + + } + +} diff --git a/FoundationDB.Client/IFdbReadOnlyTransaction.cs b/FoundationDB.Client/IFdbReadOnlyTransaction.cs index 6f8a981ba..c1728be1f 100644 --- a/FoundationDB.Client/IFdbReadOnlyTransaction.cs +++ b/FoundationDB.Client/IFdbReadOnlyTransaction.cs @@ -37,6 +37,7 @@ namespace FoundationDB.Client /// Transaction that allows read operations public interface IFdbReadOnlyTransaction : IDisposable { + /// Local id of the transaction /// This id is only guaranteed unique inside the current AppDomain or process and is reset on every restart. It should only be used for diagnostics and/or logging. int Id { get; } @@ -98,7 +99,7 @@ public interface IFdbReadOnlyTransaction : IDisposable /// If streaming mode is FdbStreamingMode.Iterator, this parameter should start at 1 and be incremented by 1 for each successive call while reading this range. In all other cases it is ignored. /// Task GetRangeAsync(FdbKeySelector beginInclusive, FdbKeySelector endExclusive, FdbRangeOptions options = null, int iteration = 0); - + /// /// Create a new range query that will read all key-value pairs in the database snapshot represented by the transaction /// @@ -137,7 +138,7 @@ public interface IFdbReadOnlyTransaction : IDisposable /// /// Implements the recommended retry and backoff behavior for a transaction. - /// + /// /// This function knows which of the error codes generated by other query functions represent temporary error conditions and which represent application errors that should be handled by the application. 
/// It also implements an exponential backoff strategy to avoid swamping the database cluster with excessive retries when there is a high level of conflict between transactions. /// @@ -148,7 +149,7 @@ public interface IFdbReadOnlyTransaction : IDisposable /// Set an option on this transaction that does not take any parameter /// Option to set void SetOption(FdbTransactionOption option); - + /// Set an option on this transaction that takes a string value /// Option to set /// Value of the parameter (can be null) diff --git a/FoundationDB.Client/IFdbReadOnlyTransactional.cs b/FoundationDB.Client/IFdbReadOnlyTransactional.cs deleted file mode 100644 index a4cb706f8..000000000 --- a/FoundationDB.Client/IFdbReadOnlyTransactional.cs +++ /dev/null @@ -1,59 +0,0 @@ -#region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Doxense nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#endregion - -namespace FoundationDB.Client -{ - using JetBrains.Annotations; - using System; - using System.Threading; - using System.Threading.Tasks; - - /// Transactional context that can execute read-only transactions - public interface IFdbReadOnlyTransactional - { - //note: since there are no non-async read methods on transactions, there is no need for an override that takes an Action<....> - - /// - /// Runs a transactional lambda function inside a read-only transaction context, with optional retry-logic. - /// - Task ReadAsync([NotNull][InstantHandle] Func asyncHandler, CancellationToken cancellationToken); - - //REVIEW: should we keep this? - Task ReadAsync([NotNull][InstantHandle] Func asyncHandler, [InstantHandle] Action onDone, CancellationToken cancellationToken); - - /// - /// Runs a transactional lambda function inside a read-only transaction context, with optional retry-logic. - /// - Task ReadAsync([NotNull][InstantHandle] Func> asyncHandler, CancellationToken cancellationToken); - - //REVIEW: should we keep this? 
- Task ReadAsync([NotNull][InstantHandle] Func> asyncHandler, [InstantHandle] Action onDone, CancellationToken cancellationToken); - - } - -} diff --git a/FoundationDB.Client/IFdbTransactional.cs b/FoundationDB.Client/IFdbRetryable.cs similarity index 56% rename from FoundationDB.Client/IFdbTransactional.cs rename to FoundationDB.Client/IFdbRetryable.cs index cb0ea3cc7..7a1145e99 100644 --- a/FoundationDB.Client/IFdbTransactional.cs +++ b/FoundationDB.Client/IFdbRetryable.cs @@ -33,30 +33,54 @@ namespace FoundationDB.Client using System.Threading; using System.Threading.Tasks; - /// Transactional context that can execute read and/or write transactions - public interface IFdbTransactional : IFdbReadOnlyTransactional + /// Transactional context that can execute, inside a retry loop, idempotent actions using read and/or write transactions. + public interface IFdbRetryable : IFdbReadOnlyRetryable { - /// Runs an idempotent transactional block inside a write-only transaction context, with optional retry logic. - /// Idempotent handler that will be retried until the transaction commits, or a non-recoverable error occurs. + // note: see IFdbReadOnlyRetryable for comments about the differences between the .NET binding and other binding regarding the design of Transactionals + + /// Run an idempotent transaction block inside a write-only transaction, which can be executed more than once if any retryable error occurs. + /// Idempotent handler that should only call write methods on the transation, and may be retried until the transaction commits, or a non-recoverable error occurs. /// Token used to cancel the operation + /// + /// You do not need to commit the transaction inside the handler, it will be done automatically. + /// Since the handler can run more than once, and that there is no guarantee that the transaction commits once it returns, you MAY NOT mutate any global state (counters, cache, global dictionary) inside this lambda! 
+ /// You must wait for the Task to complete successfully before updating the global state of the application. + /// Task WriteAsync([NotNull][InstantHandle] Action handler, CancellationToken cancellationToken); - /// Runs an idempotent transactional block inside a write-only transaction context, with optional retry logic. + /// Run an idempotent transactional block inside a write-only transaction, which can be executed more than once if any retryable error occurs. /// Idempotent async handler that will be retried until the transaction commits, or a non-recoverable error occurs. /// Token used to cancel the operation + /// + /// You do not need to commit the transaction inside the handler, it will be done automatically. + /// Since the handler can run more than once, and that there is no guarantee that the transaction commits once it returns, you MAY NOT mutate any global state (counters, cache, global dictionary) inside this lambda! + /// You must wait for the Task to complete successfully before updating the global state of the application. + /// Task WriteAsync([NotNull][InstantHandle] Func handler, CancellationToken cancellationToken); - /// Runs an idempotent transactional block inside a read-write transaction context, with optional retry logic. + /// Run an idempotent transactional block inside a read-write transaction, which can be executed more than once if any retryable error occurs. /// Idempotent asynchronous handler that will be retried until the transaction commits, or a non-recoverable error occurs. /// Token used to cancel the operation + /// + /// You do not need to commit the transaction inside the handler, it will be done automatically. + /// Since the handler can run more than once, and that there is no guarantee that the transaction commits once it returns, you MAY NOT mutate any global state (counters, cache, global dictionary) inside this lambda! 
+ /// You must wait for the Task to complete successfully before updating the global state of the application. + /// Task ReadWriteAsync([NotNull][InstantHandle] Func asyncHandler, CancellationToken cancellationToken); - /// Runs an idempotent transactional block that returns a value, inside a read-write transaction context, with optional retry logic. + /// Run an idempotent transactional block that returns a value, inside a read-write transaction, which can be executed more than once if any retryable error occurs. /// Idempotent asynchronous lambda function that will be retried until the transaction commits, or a non-recoverable error occurs. The returned value of the last call will be the result of the operation. /// Token used to cancel the operation /// Result of the lambda function if the transaction committed sucessfully. + /// + /// You do not need to commit the transaction inside the handler, it will be done automatically. + /// Since the handler can run more than once, and that there is no guarantee that the transaction commits once it returns, you MAY NOT mutate any global state (counters, cache, global dictionary) inside this lambda! + /// You must wait for the Task to complete successfully before updating the global state of the application. + /// Task ReadWriteAsync([NotNull][InstantHandle] Func> asyncHandler, CancellationToken cancellationToken); + //REVIEW: should we keep these ? + /// [EXPERIMENTAL] do not use yet!. 
Task WriteAsync([NotNull][InstantHandle] Action handler, [NotNull][InstantHandle] Action onDone, CancellationToken cancellationToken); diff --git a/FoundationDB.Client/Layers/Directories/FdbDirectoryTransactionals.cs b/FoundationDB.Client/Layers/Directories/FdbDirectoryExtensions.cs similarity index 89% rename from FoundationDB.Client/Layers/Directories/FdbDirectoryTransactionals.cs rename to FoundationDB.Client/Layers/Directories/FdbDirectoryExtensions.cs index e432ecc5f..4e8687287 100644 --- a/FoundationDB.Client/Layers/Directories/FdbDirectoryTransactionals.cs +++ b/FoundationDB.Client/Layers/Directories/FdbDirectoryExtensions.cs @@ -35,8 +35,9 @@ namespace FoundationDB.Layers.Directories using System.Threading; using System.Threading.Tasks; - public static class FdbDirectoryTransactionals + public static class FdbDirectoryExtensions { + // this helper class contain extension methods to help deal with IFdbDatabase vs IFdbTransaction #region CreateOrOpen... @@ -44,7 +45,7 @@ public static class FdbDirectoryTransactionals /// If the directory does not exist, it is created (creating parent directories if necessary). /// If layer is specified, it is checked against the layer of an existing directory or set as the layer of a new directory. /// - public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbTransactional db, IEnumerable path, CancellationToken cancellationToken) + public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -56,7 +57,7 @@ public static Task CreateOrOpenAsync(this IFdbDirectory di /// If the directory does not exist, it is created (creating parent directories if necessary). /// If layer is specified, it is checked against the layer of an existing directory or set as the layer of a new directory. 
/// - public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbTransactional db, IEnumerable path, Slice layer, CancellationToken cancellationToken) + public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -68,7 +69,7 @@ public static Task CreateOrOpenAsync(this IFdbDirectory di /// If the directory does not exist, it is created (creating parent directories if necessary). /// If layer is specified, it is checked against the layer of an existing directory or set as the layer of a new directory. /// - public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbTransactional db, string name, CancellationToken cancellationToken) + public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbRetryable db, string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -80,7 +81,7 @@ public static Task CreateOrOpenAsync(this IFdbDirectory di /// If the directory does not exist, it is created (creating parent directories if necessary). /// If layer is specified, it is checked against the layer of an existing directory or set as the layer of a new directory. 
/// - public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbTransactional db, string name, Slice layer, CancellationToken cancellationToken) + public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbRetryable db, string name, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -146,7 +147,7 @@ public static Task CreateOrOpenAsync(this IFdbDirectory di /// Creates a directory with the given (creating parent directories if necessary). /// An error is raised if the given directory already exists. /// - public static Task CreateAsync(this IFdbDirectory directory, IFdbTransactional db, IEnumerable path, CancellationToken cancellationToken) + public static Task CreateAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -158,7 +159,7 @@ public static Task CreateAsync(this IFdbDirectory director /// An error is raised if the given directory already exists. /// If is specified, it is recorded with the directory and will be checked by future calls to open. /// - public static Task CreateAsync(this IFdbDirectory directory, IFdbTransactional db, IEnumerable path, Slice layer, CancellationToken cancellationToken) + public static Task CreateAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -169,7 +170,7 @@ public static Task CreateAsync(this IFdbDirectory director /// Creates a directory with the given . /// An error is raised if the given directory already exists. 
/// - public static Task CreateAsync(this IFdbDirectory directory, IFdbTransactional db, string name, CancellationToken cancellationToken) + public static Task CreateAsync(this IFdbDirectory directory, IFdbRetryable db, string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -181,7 +182,7 @@ public static Task CreateAsync(this IFdbDirectory director /// An error is raised if the given directory already exists. /// If is specified, it is recorded with the directory and will be checked by future calls to open. /// - public static Task CreateAsync(this IFdbDirectory directory, IFdbTransactional db, string name, Slice layer, CancellationToken cancellationToken) + public static Task CreateAsync(this IFdbDirectory directory, IFdbRetryable db, string name, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -202,7 +203,7 @@ public static Task CreateAsync(this IFdbDirectory director } /// Attempts to create a directory with the given (creating parent directories if necessary). - public static Task TryCreateAsync(this IFdbDirectory directory, IFdbTransactional db, IEnumerable path, CancellationToken cancellationToken) + public static Task TryCreateAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -213,7 +214,7 @@ public static Task TryCreateAsync(this IFdbDirectory direc /// Attempts to create a directory with the given (creating parent directories if necessary). /// If is specified, it is recorded with the directory and will be checked by future calls to open. 
/// - public static Task TryCreateAsync(this IFdbDirectory directory, IFdbTransactional db, IEnumerable path, Slice layer, CancellationToken cancellationToken) + public static Task TryCreateAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -222,7 +223,7 @@ public static Task TryCreateAsync(this IFdbDirectory direc } /// Attempts to create a directory with the given . - public static Task TryCreateAsync(this IFdbDirectory directory, IFdbTransactional db, string name, CancellationToken cancellationToken) + public static Task TryCreateAsync(this IFdbDirectory directory, IFdbRetryable db, string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -233,7 +234,7 @@ public static Task TryCreateAsync(this IFdbDirectory direc /// Attempts to create a directory with the given . /// If is specified, it is recorded with the directory and will be checked by future calls to open. /// - public static Task TryCreateAsync(this IFdbDirectory directory, IFdbTransactional db, string name, Slice layer, CancellationToken cancellationToken) + public static Task TryCreateAsync(this IFdbDirectory directory, IFdbRetryable db, string name, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -259,7 +260,7 @@ public static Task TryCreateAsync(this IFdbDirectory direc /// Opens the directory with the given . /// An error is raised if the directory does not exist. 
/// - public static Task OpenAsync(this IFdbDirectory directory, IFdbTransactional db, IEnumerable path, CancellationToken cancellationToken) + public static Task OpenAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -270,7 +271,7 @@ public static Task OpenAsync(this IFdbDirectory directory, /// Opens the directory with the given . /// An error is raised if the directory does not exist, or if a layer is specified and a different layer was specified when the directory was created. /// - public static Task OpenAsync(this IFdbDirectory directory, IFdbTransactional db, IEnumerable path, Slice layer, CancellationToken cancellationToken) + public static Task OpenAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -281,7 +282,7 @@ public static Task OpenAsync(this IFdbDirectory directory, /// Opens the sub-directory with the given . /// An error is raised if the directory does not exist. /// - public static Task OpenAsync(this IFdbDirectory directory, IFdbTransactional db, string name, CancellationToken cancellationToken) + public static Task OpenAsync(this IFdbDirectory directory, IFdbRetryable db, string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -292,7 +293,7 @@ public static Task OpenAsync(this IFdbDirectory directory, /// Opens the sub-directory with the given . /// An error is raised if the directory does not exist, or if a layer is specified and a different layer was specified when the directory was created. 
/// - public static Task OpenAsync(this IFdbDirectory directory, IFdbTransactional db, string name, Slice layer, CancellationToken cancellationToken) + public static Task OpenAsync(this IFdbDirectory directory, IFdbRetryable db, string name, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -312,7 +313,7 @@ public static Task OpenAsync(this IFdbDirectory directory, } /// Attempts to open the directory with the given . - public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyTransactional db, IEnumerable path, CancellationToken cancellationToken) + public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -321,7 +322,7 @@ public static Task TryOpenAsync(this IFdbDirectory directo } /// Attempts to open the directory with the given . - public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyTransactional db, IEnumerable path, Slice layer, CancellationToken cancellationToken) + public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, IEnumerable path, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -330,7 +331,7 @@ public static Task TryOpenAsync(this IFdbDirectory directo } /// Attempts to open the directory with the given . 
- public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyTransactional db, string name, CancellationToken cancellationToken) + public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -339,7 +340,7 @@ public static Task TryOpenAsync(this IFdbDirectory directo } /// Attempts to open the directory with the given . - public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyTransactional db, string name, Slice layer, CancellationToken cancellationToken) + public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, string name, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -373,7 +374,7 @@ public static Task TryOpenAsync(this IFdbDirectory directo /// There is no effect on the physical prefix of the given directory, or on clients that already have the directory open. /// An error is raised if the old directory does not exist, a directory already exists at `new_path`, or the parent directory of `new_path` does not exist. /// - public static Task MoveAsync(this IFdbDirectory directory, IFdbTransactional db, IEnumerable oldPath, IEnumerable newPath, CancellationToken cancellationToken) + public static Task MoveAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable oldPath, IEnumerable newPath, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -385,7 +386,7 @@ public static Task MoveAsync(this IFdbDirectory directory, /// Attempts to move the directory found at to . 
/// There is no effect on the physical prefix of the given directory, or on clients that already have the directory open. /// - public static Task TryMoveAsync(this IFdbDirectory directory, IFdbTransactional db, IEnumerable oldPath, IEnumerable newPath, CancellationToken cancellationToken) + public static Task TryMoveAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable oldPath, IEnumerable newPath, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -402,7 +403,7 @@ public static Task TryMoveAsync(this IFdbDirectory directo /// There is no effect on the physical prefix of the given directory, or on clients that already have the directory open. /// An error is raised if a directory already exists at `new_path`, or if the new path points to a child of the current directory. /// - public static Task MoveToAsync(this FdbDirectorySubspace subspace, IFdbTransactional db, IEnumerable newPath, CancellationToken cancellationToken) + public static Task MoveToAsync(this FdbDirectorySubspace subspace, IFdbRetryable db, IEnumerable newPath, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); if (db == null) throw new ArgumentNullException("db"); @@ -413,7 +414,7 @@ public static Task MoveToAsync(this FdbDirectorySubspace s /// Attempts to move the current directory to . /// There is no effect on the physical prefix of the given directory, or on clients that already have the directory open. 
/// - public static Task TryMoveToAsync(this FdbDirectorySubspace subspace, IFdbTransactional db, IEnumerable newPath, CancellationToken cancellationToken) + public static Task TryMoveToAsync(this FdbDirectorySubspace subspace, IFdbRetryable db, IEnumerable newPath, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); if (db == null) throw new ArgumentNullException("db"); @@ -428,7 +429,7 @@ public static Task TryMoveToAsync(this FdbDirectorySubspac /// Removes the directory, its contents, and all subdirectories. /// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. /// - public static Task RemoveAsync(this IFdbDirectory directory, IFdbTransactional db, IEnumerable path, CancellationToken cancellationToken) + public static Task RemoveAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -438,7 +439,7 @@ public static Task RemoveAsync(this IFdbDirectory directory, IFdbTransactional d /// Removes the directory, its contents, and all subdirectories. /// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. /// - public static Task RemoveAsync(this IFdbDirectory directory, IFdbTransactional db, string name, CancellationToken cancellationToken) + public static Task RemoveAsync(this IFdbDirectory directory, IFdbRetryable db, string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -449,7 +450,7 @@ public static Task RemoveAsync(this IFdbDirectory directory, IFdbTransactional d /// Removes the directory, its contents, and all subdirectories. 
/// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. /// - public static Task RemoveAsync(this IFdbDirectory directory, IFdbTransactional db, CancellationToken cancellationToken) + public static Task RemoveAsync(this IFdbDirectory directory, IFdbRetryable db, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -470,7 +471,7 @@ public static Task RemoveAsync(this IFdbDirectory directory, IFdbTransaction tra /// Removes the directory, its contents, and all subdirectories. /// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. /// - public static Task TryRemoveAsync(this IFdbDirectory directory, IFdbTransactional db, IEnumerable path, CancellationToken cancellationToken) + public static Task TryRemoveAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -480,7 +481,7 @@ public static Task TryRemoveAsync(this IFdbDirectory directory, IFdbTransa /// Removes the directory, its contents, and all subdirectories. /// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. 
/// - public static Task TryRemoveAsync(this IFdbDirectory directory, IFdbTransactional db, string name, CancellationToken cancellationToken) + public static Task TryRemoveAsync(this IFdbDirectory directory, IFdbRetryable db, string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -505,7 +506,7 @@ public static Task TryRemoveAsync(this IFdbDirectory directory, IFdbTransa /// Checks if a directory already exists /// Returns true if the directory exists, otherwise false. - public static Task ExistsAsync(this IFdbDirectory directory, IFdbReadOnlyTransactional db, IEnumerable path, CancellationToken cancellationToken) + public static Task ExistsAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -514,7 +515,7 @@ public static Task ExistsAsync(this IFdbDirectory directory, IFdbReadOnlyT /// Checks if a directory already exists /// Returns true if the directory exists, otherwise false. - public static Task ExistsAsync(this IFdbDirectory directory, IFdbReadOnlyTransactional db, string name, CancellationToken cancellationToken) + public static Task ExistsAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -534,7 +535,7 @@ public static Task ExistsAsync(this IFdbDirectory directory, IFdbReadOnlyT /// Checks if this directory exists /// Returns true if the directory exists, otherwise false. 
- public static Task ExistsAsync(this FdbDirectorySubspace subspace, IFdbReadOnlyTransactional db, CancellationToken cancellationToken) + public static Task ExistsAsync(this FdbDirectorySubspace subspace, IFdbReadOnlyRetryable db, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); if (db == null) throw new ArgumentNullException("db"); @@ -546,7 +547,7 @@ public static Task ExistsAsync(this FdbDirectorySubspace subspace, IFdbRea #region List / TryList... /// Returns the list of subdirectories of directory at . - public static Task> ListAsync(this IFdbDirectory directory, IFdbReadOnlyTransactional db, IEnumerable path, CancellationToken cancellationToken) + public static Task> ListAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -555,7 +556,7 @@ public static Task> ListAsync(this IFdbDirectory directory, IFdbRea } /// Returns the list of subdirectories of the sub-directory with the given . - public static Task> ListAsync(this IFdbDirectory directory, IFdbReadOnlyTransactional db, string name, CancellationToken cancellationToken) + public static Task> ListAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -564,7 +565,7 @@ public static Task> ListAsync(this IFdbDirectory directory, IFdbRea } /// Returns the list of subdirectories of the current directory. 
- public static Task> ListAsync(this IFdbDirectory directory, IFdbReadOnlyTransactional db, CancellationToken cancellationToken) + public static Task> ListAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -589,7 +590,7 @@ public static Task> ListAsync(this IFdbDirectory directory, IFdbRea } /// Returns the list of subdirectories of directory at , if it exists - public static Task> TryListAsync(this IFdbDirectory directory, IFdbReadOnlyTransactional db, IEnumerable path, CancellationToken cancellationToken) + public static Task> TryListAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -598,7 +599,7 @@ public static Task> TryListAsync(this IFdbDirectory directory, IFdb } /// Returns the list of subdirectories of the sub-directory with the given , if it exists - public static Task> TryListAsync(this IFdbDirectory directory, IFdbReadOnlyTransactional db, string name, CancellationToken cancellationToken) + public static Task> TryListAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -616,7 +617,7 @@ public static Task> TryListAsync(this IFdbDirectory directory, IFdb } /// Returns the list of all the subdirectories of the current directory. 
- public static Task> ListAsync(this FdbDirectorySubspace subspace, IFdbReadOnlyTransactional db, CancellationToken cancellationToken) + public static Task> ListAsync(this FdbDirectorySubspace subspace, IFdbReadOnlyRetryable db, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); if (db == null) throw new ArgumentNullException("db"); @@ -624,7 +625,7 @@ public static Task> ListAsync(this FdbDirectorySubspace subspace, I } /// Returns the list of all the subdirectories of the current directory, it it exists. - public static Task> TryListAsync(this FdbDirectorySubspace subspace, IFdbReadOnlyTransactional db, CancellationToken cancellationToken) + public static Task> TryListAsync(this FdbDirectorySubspace subspace, IFdbReadOnlyRetryable db, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); if (db == null) throw new ArgumentNullException("db"); @@ -636,7 +637,7 @@ public static Task> TryListAsync(this FdbDirectorySubspace subspace #region Metadata /// Change the layer id of the directory at - public static Task ChangeLayerAsync(this FdbDirectoryLayer directory, IFdbTransactional db, IEnumerable path, Slice newLayer, CancellationToken cancellationToken) + public static Task ChangeLayerAsync(this FdbDirectoryLayer directory, IFdbRetryable db, IEnumerable path, Slice newLayer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -645,7 +646,7 @@ public static Task ChangeLayerAsync(this FdbDirectoryLayer } /// Change the layer id of this directory - public static Task ChangeLayerAsync(this FdbDirectorySubspace subspace, IFdbTransactional db, Slice newLayer, CancellationToken cancellationToken) + public static Task ChangeLayerAsync(this FdbDirectorySubspace subspace, IFdbRetryable db, Slice newLayer, CancellationToken cancellationToken) { if (subspace == 
null) throw new ArgumentNullException("subspace"); if (db == null) throw new ArgumentNullException("db"); diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs b/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs index ecde905d5..1e3126afa 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs @@ -51,7 +51,7 @@ public static void ClearRange(this IFdbTransaction trans, [NotNull] IFdbSubspace } /// Clear the entire content of a subspace - public static Task ClearRangeAsync(this IFdbTransactional db, [NotNull] IFdbSubspace subspace, CancellationToken cancellationToken) + public static Task ClearRangeAsync(this IFdbRetryable db, [NotNull] IFdbSubspace subspace, CancellationToken cancellationToken) { if (db == null) throw new ArgumentNullException("db"); if (subspace == null) throw new ArgumentNullException("subspace"); diff --git a/FoundationDB.Layers.Common/Blobs/FdbBlobTransactionals.cs b/FoundationDB.Layers.Common/Blobs/FdbBlobTransactionals.cs deleted file mode 100644 index f38e3dea4..000000000 --- a/FoundationDB.Layers.Common/Blobs/FdbBlobTransactionals.cs +++ /dev/null @@ -1,109 +0,0 @@ -#region BSD Licence -/* Copyright (c) 2013, Doxense SARL -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Doxense nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#endregion - -namespace FoundationDB.Layers.Blobs -{ - using FoundationDB.Client; - using System; - using System.Threading; - using System.Threading.Tasks; - - /// - /// Transactional methods for the FdbBlob class - /// - public static class FdbBlobTransactionals - { - - /// - /// Delete all key-value pairs associated with the blob. - /// - public static Task DeleteAsync(this FdbBlob blob, IFdbTransactional db, CancellationToken cancellationToken) - { - if (blob == null) throw new ArgumentNullException("blob"); - if (db == null) throw new ArgumentNullException("db"); - - return db.WriteAsync((tr) => blob.Delete(tr), cancellationToken); - } - - /// - /// Get the size (in bytes) of the blob. - /// - public static Task GetSizeAsync(this FdbBlob blob, IFdbReadOnlyTransactional db, CancellationToken cancellationToken) - { - if (blob == null) throw new ArgumentNullException("blob"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadAsync((tr) => blob.GetSizeAsync(tr), cancellationToken); - } - - /// - /// Read from the blob, starting at , retrieving up to bytes (fewer then n bytes are returned when the end of the blob is reached). 
- /// - public static Task ReadAsync(this FdbBlob blob, IFdbReadOnlyTransactional db, long offset, int n, CancellationToken cancellationToken) - { - if (blob == null) throw new ArgumentNullException("blob"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadAsync((tr) => blob.ReadAsync(tr, offset, n), cancellationToken); - } - - /// - /// Write to the blob, starting at and overwriting any existing data at that location. The length of the blob is increased if necessary. - /// - public static Task WriteAsync(this FdbBlob blob, IFdbTransactional db, long offset, Slice data, CancellationToken cancellationToken) - { - if (blob == null) throw new ArgumentNullException("blob"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadWriteAsync((tr) => blob.WriteAsync(tr, offset, data), cancellationToken); - } - - /// - /// Append the contents of onto the end of the blob. - /// - public static Task AppendAsync(this FdbBlob blob, IFdbTransactional db, Slice data, CancellationToken cancellationToken) - { - if (blob == null) throw new ArgumentNullException("blob"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadWriteAsync((tr) => blob.AppendAsync(tr, data), cancellationToken); - } - - /// - /// Change the blob length to , erasing any data when shrinking, and filling new bytes with 0 when growing. 
- /// - public static Task TruncateAsync(this FdbBlob blob, IFdbTransactional db, long newLength, CancellationToken cancellationToken) - { - if (blob == null) throw new ArgumentNullException("blob"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadWriteAsync((tr) => blob.TruncateAsync(tr, newLength), cancellationToken); - } - } - -} diff --git a/FoundationDB.Layers.Common/Collections/FdbMapTransactionals.cs b/FoundationDB.Layers.Common/Collections/FdbMapTransactionals.cs deleted file mode 100644 index 96e96a1f3..000000000 --- a/FoundationDB.Layers.Common/Collections/FdbMapTransactionals.cs +++ /dev/null @@ -1,109 +0,0 @@ -#region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Doxense nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#endregion - -namespace FoundationDB.Layers.Collections -{ - using FoundationDB.Client; - using JetBrains.Annotations; - using System; - using System.Threading; - using System.Threading.Tasks; - - public static class FdbMapExtensions - { - - /// Returns the value of an existing entry in the map - /// Transactional used for the operation - /// Key of the entry to read from the map - /// Token used to cancel the operation - /// Value of the entry if it exists; otherwise, throws an exception - /// If either or is null. - /// If the map does not contain an entry with this key. - public static Task GetAsync(this FdbMap map, [NotNull] IFdbReadOnlyTransactional db, TKey id, CancellationToken cancellationToken) - { - if (map == null) throw new ArgumentNullException("map"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadAsync((tr) => map.GetAsync(tr, id), cancellationToken); - } - - /// Returns the value of an entry in the map if it exists. - /// Transactional used for the operation - /// Key of the entry to read from the map - /// Token used to cancel the operation - /// Optional with the value of the entry it it exists, or an empty result if it is not present in the map. 
- public static Task> TryGetAsync(this FdbMap map, [NotNull] IFdbReadOnlyTransactional db, TKey id, CancellationToken cancellationToken) - { - if (map == null) throw new ArgumentNullException("map"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadAsync((tr) => map.TryGetAsync(tr, id), cancellationToken); - } - - /// Add or update an entry in the map - /// Transactional used for the operation - /// Key of the entry to add or update - /// New value of the entry - /// Token used to cancel the operation - /// If the entry did not exist, it will be created. If not, its value will be replace with . - public static Task SetAsync(this FdbMap map, [NotNull] IFdbTransactional db, TKey id, TValue value, CancellationToken cancellationToken) - { - if (map == null) throw new ArgumentNullException("map"); - if (db == null) throw new ArgumentNullException("db"); - - return db.WriteAsync((tr) => map.Set(tr, id, value), cancellationToken); - } - - /// Remove a single entry from the map - /// Transactional used for the operation - /// Key of the entry to remove - /// Token used to cancel the operation - /// If the entry did not exist, the operation will not do anything. - public static Task RemoveAsync(this FdbMap map, [NotNull] IFdbTransactional db, TKey id, CancellationToken cancellationToken) - { - if (map == null) throw new ArgumentNullException("map"); - if (db == null) throw new ArgumentNullException("db"); - - return db.WriteAsync((tr) => map.Remove(tr, id), cancellationToken); - } - - /// Clear all the entries in the map - /// Transactional used for the operation - /// Transaction used for the operation - /// This will delete EVERYTHING in the map! 
- public static Task ClearAsync(this FdbMap map, [NotNull] IFdbTransactional db, CancellationToken cancellationToken) - { - if (map == null) throw new ArgumentNullException("map"); - if (db == null) throw new ArgumentNullException("db"); - - return db.WriteAsync((tr) => map.Clear(tr), cancellationToken); - } - - } - -} diff --git a/FoundationDB.Layers.Common/Collections/FdbQueueTransactionals.cs b/FoundationDB.Layers.Common/Collections/FdbQueueTransactionals.cs deleted file mode 100644 index 194063b26..000000000 --- a/FoundationDB.Layers.Common/Collections/FdbQueueTransactionals.cs +++ /dev/null @@ -1,78 +0,0 @@ -#region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Doxense nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#endregion - -namespace FoundationDB.Layers.Collections -{ - using FoundationDB.Client; - using JetBrains.Annotations; - using System; - using System.Threading; - using System.Threading.Tasks; - - public static class FdbQueueTransactionals - { - - /// Remove all items from the queue. - public static Task ClearAsync(this FdbQueue queue, [NotNull] IFdbTransactional db, CancellationToken cancellationToken) - { - if (queue == null) throw new ArgumentNullException("queue"); - if (db == null) throw new ArgumentNullException("db"); - - return db.WriteAsync((tr) => queue.ClearAsync(tr), cancellationToken); - } - - /// Test whether the queue is empty. - public static Task EmptyAsync(this FdbQueue queue, [NotNull] IFdbReadOnlyTransactional db, CancellationToken cancellationToken) - { - if (queue == null) throw new ArgumentNullException("queue"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadAsync((tr) => queue.EmptyAsync(tr), cancellationToken); - } - - /// Push a single item onto the queue. - public static Task PushAsync(this FdbQueue queue, [NotNull] IFdbTransactional db, T value, CancellationToken cancellationToken) - { - if (queue == null) throw new ArgumentNullException("queue"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadWriteAsync((tr) => queue.PushAsync(tr, value), cancellationToken); - } - - /// Get the value of the next item in the queue without popping it. 
- public static Task> PeekAsync(this FdbQueue queue, [NotNull] IFdbTransactional db, CancellationToken cancellationToken) - { - if (queue == null) throw new ArgumentNullException("queue"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadWriteAsync((tr) => queue.PeekAsync(tr), cancellationToken); - } - - } - -} diff --git a/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs b/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs index 124a5b847..871412c4d 100644 --- a/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs +++ b/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs @@ -98,7 +98,7 @@ public FdbQueue([NotNull] IFdbSubspace subspace, bool highContention, [NotNull] internal IFdbSubspace QueueItem { get; private set; } /// Remove all items from the queue. - public void ClearAsync([NotNull] IFdbTransaction trans) + public void Clear([NotNull] IFdbTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); diff --git a/FoundationDB.Layers.Common/Collections/FdbVectorTransactionals.cs b/FoundationDB.Layers.Common/Collections/FdbVectorTransactionals.cs deleted file mode 100644 index de1eedb4f..000000000 --- a/FoundationDB.Layers.Common/Collections/FdbVectorTransactionals.cs +++ /dev/null @@ -1,130 +0,0 @@ -#region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. 
- * Neither the name of Doxense nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#endregion - -namespace FoundationDB.Layers.Collections -{ - using FoundationDB.Client; - using JetBrains.Annotations; - using System; - using System.Threading; - using System.Threading.Tasks; - - public static class FdbVectorTransactionals - { - - #region Empty / Size - - /// Remove all items from the Vector. - public static Task EmptyAsync(this FdbVector vector, [NotNull] IFdbReadOnlyTransactional db, CancellationToken cancellationToken) - { - if (vector == null) throw new ArgumentNullException("vector"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadAsync((tr) => vector.EmptyAsync(tr), cancellationToken); - } - - /// Get the number of items in the Vector. This number includes the sparsely represented items. 
- public static Task SizeAsync(this FdbVector vector, [NotNull] IFdbReadOnlyTransactional db, CancellationToken cancellationToken) - { - if (vector == null) throw new ArgumentNullException("vector"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadAsync((tr) => vector.SizeAsync(tr), cancellationToken); - } - - #endregion - - #region Clear / Resize - - /// Remove all items from the Vector. - public static Task ClearAsync(this FdbVector vector, [NotNull] IFdbTransactional db, CancellationToken cancellationToken) - { - if (vector == null) throw new ArgumentNullException("vector"); - if (db == null) throw new ArgumentNullException("db"); - - return db.WriteAsync((tr) => vector.Clear(tr), cancellationToken); - } - - /// Grow or shrink the size of the Vector. - public static Task ResizeAsync(this FdbVector vector, [NotNull] IFdbTransactional db, long length, CancellationToken cancellationToken) - { - if (vector == null) throw new ArgumentNullException("vector"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadWriteAsync((tr) => vector.ResizeAsync(tr, length), cancellationToken); - } - - #endregion - - #region Push / Pop - - /// Get and pops the last item off the Vector. - public static Task> PopAsync(this FdbVector vector, [NotNull] IFdbTransactional db, CancellationToken cancellationToken) - { - if (vector == null) throw new ArgumentNullException("vector"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadWriteAsync((tr) => vector.PopAsync(tr), cancellationToken); - } - - /// Push a single item onto the end of the Vector. 
- public static Task PushAsync(this FdbVector vector, [NotNull] IFdbTransactional db, T value, CancellationToken cancellationToken) - { - if (vector == null) throw new ArgumentNullException("vector"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadWriteAsync((tr) => vector.PushAsync(tr, value), cancellationToken); - } - - #endregion - - #region Get / Set - - /// Get the item at the specified index. - public static Task GetAsync(this FdbVector vector, [NotNull] IFdbReadOnlyTransactional db, long index, CancellationToken cancellationToken) - { - if (vector == null) throw new ArgumentNullException("vector"); - if (db == null) throw new ArgumentNullException("db"); - - return db.ReadAsync((tr) => vector.GetAsync(tr, index), cancellationToken); - } - - /// Set the value at a particular index in the Vector. - public static Task SetAsync(this FdbVector vector, [NotNull] IFdbTransactional db, long index, T value, CancellationToken cancellationToken) - { - if (vector == null) throw new ArgumentNullException("vector"); - if (db == null) throw new ArgumentNullException("db"); - - return db.WriteAsync((tr) => vector.Set(tr, index, value), cancellationToken); - } - - #endregion - - } - -} diff --git a/FoundationDB.Layers.Common/Collections/FdbVector`1.cs b/FoundationDB.Layers.Common/Collections/FdbVector`1.cs index cf387cde6..8cd034581 100644 --- a/FoundationDB.Layers.Common/Collections/FdbVector`1.cs +++ b/FoundationDB.Layers.Common/Collections/FdbVector`1.cs @@ -43,14 +43,14 @@ public class FdbVector // Vector stores each of its values using its index as the key. // The size of a vector is equal to the index of its last key + 1. - // + // // For indexes smaller than the vector's size that have no associated key // in the database, the value will be the specified defaultValue. - // + // // If the last value in the vector has the default value, its key will // always be set so that size can be determined. 
- // - // By creating Vector with a Subspace, all kv pairs modified by the + // + // By creating Vector with a Subspace, all kv pairs modified by the // layer will have keys that start within that Subspace. // Implementation note: diff --git a/FoundationDB.Layers.Common/FoundationDB.Layers.Common.csproj b/FoundationDB.Layers.Common/FoundationDB.Layers.Common.csproj index 8ad86e4b7..d0b6420c4 100644 --- a/FoundationDB.Layers.Common/FoundationDB.Layers.Common.csproj +++ b/FoundationDB.Layers.Common/FoundationDB.Layers.Common.csproj @@ -52,21 +52,17 @@ Properties\VersionInfo.cs - - - - diff --git a/FoundationDB.Layers.Common/Interning/FdbStringIntern.cs b/FoundationDB.Layers.Common/Interning/FdbStringIntern.cs index 5dc4186ed..f7e4a01c5 100644 --- a/FoundationDB.Layers.Common/Interning/FdbStringIntern.cs +++ b/FoundationDB.Layers.Common/Interning/FdbStringIntern.cs @@ -329,31 +329,6 @@ private async Task LookupSlowAsync(IFdbReadOnlyTransaction trans, Slice #endregion - #region Transactionals... - - /// Look up string in the intern database and return its normalized representation. If value already exists, intern returns the existing representation. 
- /// Fdb database - /// String to intern - /// Normalized representation of the string - /// The length of the string must not exceed the maximum FoundationDB value size - public Task InternAsync(IFdbTransactional db, string value, CancellationToken cancellationToken) - { - if (db == null) throw new ArgumentNullException("db"); - return db.ReadWriteAsync((tr) => this.InternAsync(tr, value), cancellationToken); - } - - /// Return the long string associated with the normalized representation - /// Fdb database - /// Interned uid of the string - /// Original value of the interned string, or an exception if it does it does not exist - public Task LookupAsync(IFdbReadOnlyTransactional db, Slice uid, CancellationToken cancellationToken) - { - if (db == null) throw new ArgumentNullException("db"); - return db.ReadAsync((tr) => this.LookupAsync(tr, uid), cancellationToken); - } - - #endregion - public void Dispose() { Dispose(true); diff --git a/FoundationDB.Layers.Common/Optional`1.cs b/FoundationDB.Layers.Common/Optional`1.cs index c1c2d6dad..6cef1c4b6 100644 --- a/FoundationDB.Layers.Common/Optional`1.cs +++ b/FoundationDB.Layers.Common/Optional`1.cs @@ -253,6 +253,8 @@ public struct Optional : IEquatable>, IEquatable // The main difference is that, 'null' is a legal value for reference types, which is distinct from "no value" // i.e.: new Optional(null).HasValue == true + //REVIEW: this looks very similar to Maybe, except without the handling of errors. Maybe we could merge both? 
+ private readonly bool m_hasValue; private readonly T m_value; @@ -301,12 +303,12 @@ public override string ToString() public bool Equals(Optional value) { - return m_hasValue == value.m_hasValue && object.Equals(m_value, value.m_value); + return m_hasValue == value.m_hasValue && EqualityComparer.Default.Equals(m_value, value.m_value); } public bool Equals(T value) { - return m_hasValue && object.Equals(m_value, value); + return m_hasValue && EqualityComparer.Default.Equals(m_value, value); } public override int GetHashCode() @@ -318,8 +320,9 @@ public override int GetHashCode() /// Indicates whether the current object is equal to a specified object. public override bool Equals(object obj) { - if (!m_hasValue) return obj == null; - return object.Equals(m_value, obj); + if (obj is T) return Equals((T)obj); + if (obj is Optional) return Equals((Optional)obj); + return m_hasValue ? object.Equals(m_value, obj) : object.ReferenceEquals(obj, null); } public static bool operator ==(Optional a, Optional b) @@ -363,7 +366,7 @@ public override bool Equals(object obj) // Needed to be able to write stuff like "if (optional != null)", the compiler will automatically lift "foo != null" to nullables if foo is a struct implements the '!=' operator return !a.GetValueOrDefault().Equals(b.GetValueOrDefault()); } - + public static explicit operator T(Optional value) { return value.Value; diff --git a/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs b/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs index a4d36e3f0..213b63d55 100644 --- a/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs +++ b/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs @@ -199,7 +199,7 @@ private void ClearTask(IFdbTransaction tr, Slice taskId) /// /// /// - public async Task ScheduleTaskAsync(IFdbTransactional db, Slice taskId, Slice taskBody, CancellationToken ct = default(CancellationToken)) + public async Task ScheduleTaskAsync(IFdbRetryable db, Slice taskId, Slice 
taskBody, CancellationToken ct = default(CancellationToken)) { if (db == null) throw new ArgumentNullException("db"); var now = DateTime.UtcNow; diff --git a/FoundationDB.Tests/Layers/BlobFacts.cs b/FoundationDB.Tests/Layers/BlobFacts.cs index cec7fcba9..1451328fd 100644 --- a/FoundationDB.Tests/Layers/BlobFacts.cs +++ b/FoundationDB.Tests/Layers/BlobFacts.cs @@ -59,7 +59,7 @@ public async Task Test_FdbBlob_NotFound_Blob_Is_Empty() Assert.That(size, Is.Null, "Non existing blob should have no size"); } - size = await blob.GetSizeAsync(db, this.Cancellation); + size = await db.ReadAsync((tr) => blob.GetSizeAsync(tr), this.Cancellation); Assert.That(size, Is.Null, "Non existing blob should have no size"); } diff --git a/FoundationDB.Tests/Layers/QueuesFacts.cs b/FoundationDB.Tests/Layers/QueuesFacts.cs index 4b849dcfd..ab39ff736 100644 --- a/FoundationDB.Tests/Layers/QueuesFacts.cs +++ b/FoundationDB.Tests/Layers/QueuesFacts.cs @@ -57,53 +57,67 @@ public async Task Test_Queue_Fast() var queue = new FdbQueue(location, highContention: false); Console.WriteLine("Clear Queue"); - await queue.ClearAsync(db, this.Cancellation); + await db.WriteAsync((tr) => queue.Clear(tr), this.Cancellation); - Console.WriteLine("Empty? " + await queue.EmptyAsync(db, this.Cancellation)); + Console.WriteLine("Empty? " + await db.ReadAsync((tr) => queue.EmptyAsync(tr), this.Cancellation)); Console.WriteLine("Push 10, 8, 6"); - await queue.PushAsync(db, 10, this.Cancellation); - await queue.PushAsync(db, 8, this.Cancellation); - await queue.PushAsync(db, 6, this.Cancellation); + await db.ReadWriteAsync((tr) => queue.PushAsync(tr, 10), this.Cancellation); + await db.ReadWriteAsync((tr) => queue.PushAsync(tr, 8), this.Cancellation); + await db.ReadWriteAsync((tr) => queue.PushAsync(tr, 6), this.Cancellation); #if DEBUG await DumpSubspace(db, location); #endif - Console.WriteLine("Empty? 
" + await queue.EmptyAsync(db, this.Cancellation)); - - Console.WriteLine("Pop item: " + await queue.PopAsync(db, this.Cancellation)); - Console.WriteLine("Next item: " + await queue.PeekAsync(db, this.Cancellation)); + // Empty? + bool empty = await db.ReadAsync((tr) => queue.EmptyAsync(tr), this.Cancellation); + Console.WriteLine("Empty? " + empty); + Assert.That(empty, Is.False); + + Optional item = await queue.PopAsync(db, this.Cancellation); + Console.WriteLine("Pop item: " + item); + Assert.That((int)item, Is.EqualTo(10)); + item = await db.ReadWriteAsync((tr) => queue.PeekAsync(tr), this.Cancellation); + Console.WriteLine("Next item: " + item); + Assert.That((int)item, Is.EqualTo(8)); #if DEBUG await DumpSubspace(db, location); #endif - Console.WriteLine("Pop item: " + await queue.PopAsync(db, this.Cancellation)); + item = await queue.PopAsync(db, this.Cancellation); + Console.WriteLine("Pop item: " + item); + Assert.That((int)item, Is.EqualTo(8)); #if DEBUG await DumpSubspace(db, location); #endif - Console.WriteLine("Pop item: " + await queue.PopAsync(db, this.Cancellation)); + item = await queue.PopAsync(db, this.Cancellation); + Console.WriteLine("Pop item: " + item); + Assert.That((int)item, Is.EqualTo(6)); #if DEBUG await DumpSubspace(db, location); #endif - - Console.WriteLine("Empty? " + await queue.EmptyAsync(db, this.Cancellation)); + empty = await db.ReadAsync((tr) => queue.EmptyAsync(tr), this.Cancellation); + Console.WriteLine("Empty? " + empty); + Assert.That(empty, Is.True); Console.WriteLine("Push 5"); - await queue.PushAsync(db, 5, this.Cancellation); + await db.ReadWriteAsync((tr) => queue.PushAsync(tr, 5), this.Cancellation); #if DEBUG await DumpSubspace(db, location); #endif Console.WriteLine("Clear Queue"); - await queue.ClearAsync(db, this.Cancellation); + await db.WriteAsync((tr) => queue.Clear(tr), this.Cancellation); #if DEBUG await DumpSubspace(db, location); #endif - Console.WriteLine("Empty? 
" + await queue.EmptyAsync(db, this.Cancellation)); + empty = await db.ReadAsync((tr) => queue.EmptyAsync(tr), this.Cancellation); + Console.WriteLine("Empty? " + empty); + Assert.That(empty, Is.True); } } @@ -116,11 +130,11 @@ public async Task Test_Single_Client() var queue = new FdbQueue(location, highContention: false); - await queue.ClearAsync(db, this.Cancellation); + await db.WriteAsync((tr) => queue.Clear(tr), this.Cancellation); for (int i = 0; i < 10; i++) { - await queue.PushAsync(db, i, this.Cancellation); + await db.ReadWriteAsync((tr) => queue.PushAsync(tr, i), this.Cancellation); } for (int i = 0; i < 10; i++) @@ -130,7 +144,8 @@ public async Task Test_Single_Client() Assert.That(r.Value, Is.EqualTo(i)); } - Assert.That(await queue.EmptyAsync(db, this.Cancellation), Is.True); + bool empty = await db.ReadAsync((tr) => queue.EmptyAsync(tr), this.Cancellation); + Assert.That(empty, Is.True); } } @@ -140,7 +155,7 @@ private static async Task RunMultiClientTest(IFdbDatabase db, FdbSubspace locati Console.WriteLine("Starting {0} test with {1} threads and {2} iterations", desc, K, NUM); var queue = new FdbQueue(location, highContention); - await queue.ClearAsync(db, ct); + await db.WriteAsync((tr) => queue.Clear(tr), ct); // use a CTS to ensure that everything will stop in case of problems... using (var go = new CancellationTokenSource(TimeSpan.FromSeconds(30))) @@ -165,7 +180,7 @@ private static async Task RunMultiClientTest(IFdbDatabase db, FdbSubspace locati for (int i = 0; i < NUM; i++) { var item = id.ToString() + "." 
+ i.ToString(); - await queue.PushAsync(db, item, tok).ConfigureAwait(false); + await db.ReadWriteAsync((tr) => queue.PushAsync(tr, item), tok).ConfigureAwait(false); Interlocked.Increment(ref pushCount); res.Add(item); @@ -232,7 +247,8 @@ private static async Task RunMultiClientTest(IFdbDatabase db, FdbSubspace locati Assert.That(poppedItems, Is.EquivalentTo(pushedItems)); // the queue should be empty - Assert.That(await queue.EmptyAsync(db, ct), Is.True); + bool empty = await db.ReadAsync((tr) => queue.EmptyAsync(tr), ct); + Assert.That(empty, Is.True); } } From f2652f8005a25cc9cf13cf6285fb9ee97b85d4c3 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Thu, 4 Dec 2014 10:57:49 +0100 Subject: [PATCH 26/63] Cleaned up the FdbEncoderSubspace - added the "Base" property that returns the untyped original subspace --- .../Encoders/FdbEncoderSubspace`1.cs | 36 +++++++++++++++++-- .../Encoders/FdbEncoderSubspace`2.cs | 26 ++++++++++---- .../Encoders/FdbEncoderSubspace`3.cs | 36 ++++++++++++++----- .../Encoders/KeyValueEncoders.cs | 34 ++++++++++++++++++ 4 files changed, 113 insertions(+), 19 deletions(-) diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs index 36df37878..da1000003 100644 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs +++ b/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs @@ -36,20 +36,35 @@ namespace FoundationDB.Client using System.Linq; using System.Threading.Tasks; + /// Subspace that knows how to encode and decode its key + /// Type of the key handled by this subspace public class FdbEncoderSubspace : FdbSubspace, IKeyEncoder { - protected readonly IFdbSubspace m_parent; - protected readonly IKeyEncoder m_encoder; + /// Reference to the wrapped subspace + private readonly IFdbSubspace m_base; + /// Encoder used to handle keys + private readonly IKeyEncoder m_encoder; + + /// Wrap an existing subspace with a specific key encoder + /// Original subspace + /// 
Key encoder public FdbEncoderSubspace([NotNull] IFdbSubspace subspace, [NotNull] IKeyEncoder encoder) : base(subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); if (encoder == null) throw new ArgumentNullException("encoder"); - m_parent = subspace; + m_base = subspace; m_encoder = encoder; } + /// Untyped version of this subspace + public IFdbSubspace Base + { + get { return m_base; } + } + + /// Encoder used by this subpsace to format keys public IKeyEncoder Encoder { [NotNull] @@ -60,9 +75,18 @@ public IKeyEncoder Encoder public void Set([NotNull] IFdbTransaction trans, T key, Slice value) { + if (trans == null) throw new ArgumentNullException("trans"); trans.Set(EncodeKey(key), value); } + public void SetValues([NotNull] IFdbTransaction trans, [NotNull] IEnumerable> items) + { + if (trans == null) throw new ArgumentNullException("trans"); + if (items == null) throw new ArgumentNullException("items"); + //TODO: find a way to mass convert all the keys using the same buffer? 
+ trans.SetValues(items.Select(item => new KeyValuePair(EncodeKey(item.Key), item.Value))); + } + public void Clear([NotNull] IFdbTransaction trans, T key) { trans.Clear(EncodeKey(key)); @@ -104,6 +128,12 @@ public Slice[] EncodeKeys([NotNull] params T[] keys) return ConcatKeys(m_encoder.EncodeKeys(keys)); } + [NotNull] + public Slice[] EncodeKeys([NotNull] IEnumerable elements, Func selector) + { + return ConcatKeys(m_encoder.EncodeKeys(elements, selector)); + } + [NotNull] public Slice[] EncodeKeys([NotNull] TElement[] elements, Func selector) { diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs index 434e2d330..cf978f73f 100644 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs +++ b/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs @@ -31,25 +31,37 @@ namespace FoundationDB.Client using FoundationDB.Layers.Tuples; using JetBrains.Annotations; using System; - using System.Collections.Generic; - using System.Linq; using System.Threading.Tasks; + /// Subspace that knows how to encode and decode its key + /// Type of the first item of the keys handled by this subspace + /// Type of the second item of the keys handled by this subspace public class FdbEncoderSubspace : FdbSubspace, ICompositeKeyEncoder { - protected readonly IFdbSubspace m_parent; - protected readonly ICompositeKeyEncoder m_encoder; - protected volatile FdbEncoderSubspace m_head; + /// Reference to the wrapped subspace + private readonly IFdbSubspace m_base; + + /// Encoder used to handle keys + private readonly ICompositeKeyEncoder m_encoder; + + /// Version of this subspace that encodes only the first key + private volatile FdbEncoderSubspace m_head; public FdbEncoderSubspace([NotNull] IFdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) : base(subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); if (encoder == null) throw new ArgumentNullException("encoder"); - m_parent = subspace; 
+ m_base = subspace; m_encoder = encoder; } + /// Untyped version of this subspace + public IFdbSubspace Base + { + get { return m_base; } + } + /// Gets the key encoder public ICompositeKeyEncoder Encoder { @@ -61,7 +73,7 @@ public ICompositeKeyEncoder Encoder public FdbEncoderSubspace Partial { [NotNull] - get { return m_head ?? (m_head = new FdbEncoderSubspace(m_parent, KeyValueEncoders.Head(m_encoder))); } + get { return m_head ?? (m_head = new FdbEncoderSubspace(m_base, KeyValueEncoders.Head(m_encoder))); } } #region Transaction Helpers... diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs index 62ce21e5c..2078962ab 100644 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs +++ b/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs @@ -31,42 +31,60 @@ namespace FoundationDB.Client using FoundationDB.Layers.Tuples; using JetBrains.Annotations; using System; - using System.Collections.Generic; - using System.Linq; using System.Threading.Tasks; + /// Subspace that knows how to encode and decode its key + /// Type of the first item of the keys handled by this subspace + /// Type of the second item of the keys handled by this subspace + /// Type of the thrid item of the keys handled by this subspace public class FdbEncoderSubspace : FdbSubspace, ICompositeKeyEncoder { - protected readonly IFdbSubspace m_parent; - protected readonly ICompositeKeyEncoder m_encoder; - protected volatile FdbEncoderSubspace m_head; - protected volatile FdbEncoderSubspace m_partial; + /// Reference to the wrapped subspace + private readonly IFdbSubspace m_base; + + /// Encoder used to handle keys + private readonly ICompositeKeyEncoder m_encoder; + + /// Version of this subspace that encodes only the first key + private volatile FdbEncoderSubspace m_head; + + /// Version of this subspace that encodes only the first and second keys + private volatile FdbEncoderSubspace m_partial; public 
FdbEncoderSubspace([NotNull] IFdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) : base(subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); if (encoder == null) throw new ArgumentNullException("encoder"); - m_parent = subspace; + m_base = subspace; m_encoder = encoder; } + /// Untyped version of this subspace + public IFdbSubspace Base + { + get { return m_base; } + } + + /// Gets the key encoder public ICompositeKeyEncoder Encoder { [NotNull] get { return m_encoder; } } + /// Returns a partial encoder for (T1,) public FdbEncoderSubspace Head { [NotNull] - get { return m_head ?? (m_head = new FdbEncoderSubspace(m_parent, KeyValueEncoders.Head(m_encoder))); } + get { return m_head ?? (m_head = new FdbEncoderSubspace(m_base, KeyValueEncoders.Head(m_encoder))); } } + /// Returns a partial encoder for (T1,T2) public FdbEncoderSubspace Partial { [NotNull] - get { return m_partial ?? (m_partial = new FdbEncoderSubspace(m_parent, KeyValueEncoders.Pair(m_encoder))); } + get { return m_partial ?? (m_partial = new FdbEncoderSubspace(m_base, KeyValueEncoders.Pair(m_encoder))); } } #region Transaction Helpers... 
diff --git a/FoundationDB.Client/Encoders/KeyValueEncoders.cs b/FoundationDB.Client/Encoders/KeyValueEncoders.cs index 75ae52c82..afc24ae15 100644 --- a/FoundationDB.Client/Encoders/KeyValueEncoders.cs +++ b/FoundationDB.Client/Encoders/KeyValueEncoders.cs @@ -809,6 +809,40 @@ public static Slice[] EncodeKeys([NotNull] this IKeyEncoder encoder, [NotN return slices; } + /// Convert an array of s into an array of slices, using a serializer (or the default serializer if none is provided) + [NotNull] + public static Slice[] EncodeKeys([NotNull] this IKeyEncoder encoder, [NotNull] IEnumerable elements, Func selector) + { + if (encoder == null) throw new ArgumentNullException("encoder"); + if (elements == null) throw new ArgumentNullException("elements"); + if (selector == null) throw new ArgumentNullException("selector"); + + TElement[] arr; + ICollection coll; + + if ((arr = elements as TElement[]) != null) + { // fast path for arrays + return EncodeKeys(encoder, arr, selector); + } + else if ((coll = elements as ICollection) != null) + { // we can pre-allocate the result array + var slices = new Slice[coll.Count]; + int p = 0; + foreach(var item in coll) + { + slices[p++] = encoder.EncodeKey(selector(item)); + } + return slices; + } + else + { // slow path + return elements + .Select((item) => encoder.EncodeKey(selector(item))) + .ToArray(); + } + + } + /// Convert an array of s into an array of slices, using a serializer (or the default serializer if none is provided) [NotNull] public static Slice[] EncodeKeys([NotNull] this IKeyEncoder encoder, [NotNull] TElement[] elements, Func selector) From 94ddab05d4bffda71a339ec2485359ba425ba364 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Fri, 5 Dec 2014 19:26:20 +0100 Subject: [PATCH 27/63] Tuples: fixed pretty print of Guids inside tuples - add {...} around Guid, Uuid128 and Uuid64 when converting a tuple into a display string --- FoundationDB.Client/Layers/Tuples/FdbTuple.cs | 6 +++++- 
FoundationDB.Client/Utils/Uuid64.cs | 6 +++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs index 1dcfccb26..079b9e2f1 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs @@ -1110,7 +1110,7 @@ internal static string Stringify(object item) if (item is int) return ((int)item).ToString(null, CultureInfo.InvariantCulture); if (item is long) return ((long)item).ToString(null, CultureInfo.InvariantCulture); - if (item is char) return TokenSingleQuote + (char)item + TokenSingleQuote; /* 'X' */ + if (item is char) return TokenSingleQuote + (char)item + TokenSingleQuote; /* 'X' */ if (item is Slice) return ((Slice)item).ToAsciiOrHexaString(); if (item is byte[]) return Slice.Create(item as byte[]).ToAsciiOrHexaString(); @@ -1121,6 +1121,10 @@ internal static string Stringify(object item) if (item is double) return ((double)item).ToString("R", CultureInfo.InvariantCulture); if (item is float) return ((float)item).ToString("R", CultureInfo.InvariantCulture); + if (item is Guid) return ((Guid)item).ToString("B", CultureInfo.InstalledUICulture); /* {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} */ + if (item is Uuid128) return ((Uuid128)item).ToString("B", CultureInfo.InstalledUICulture); /* {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} */ + if (item is Uuid64) return ((Uuid64)item).ToString("B", CultureInfo.InstalledUICulture); /* {xxxxxxxx-xxxxxxxx} */ + var f = item as IFormattable; if (f != null) return f.ToString(null, CultureInfo.InvariantCulture); diff --git a/FoundationDB.Client/Utils/Uuid64.cs b/FoundationDB.Client/Utils/Uuid64.cs index 8720cb617..0097f762c 100644 --- a/FoundationDB.Client/Utils/Uuid64.cs +++ b/FoundationDB.Client/Utils/Uuid64.cs @@ -258,7 +258,7 @@ public string ToString(string format, IFormatProvider formatProvider) { case "D": case "d": - { // Default format is "xxxxxxxx-xxxxxxxx" + { // Default 
format is "xxxxxxxx-xxxxxxxx" return Encode16(m_value, separator: true, quotes: false); } @@ -272,7 +272,7 @@ public string ToString(string format, IFormatProvider formatProvider) { // base 62, padded with '0' up to 11 chars return Encode62(m_value, padded: true); } - + case "R": case "r": { // Integer: "1234567890" @@ -323,7 +323,7 @@ public int CompareTo(Uuid64 other) } #endregion - + #region Base16 encoding... private static char HexToChar(int a) From e5876f754d3a7d90477aedd11f4bd5089890d976 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Sun, 7 Dec 2014 20:53:45 +0100 Subject: [PATCH 28/63] AsyncLINQ: finished port or OrderBy/ThenBy - added missing QuickSort method, and fixed a few bugs - added OrderBy(), OrderByDescending(), ThenBy() and ThenByDescending() extension methods - should behave identical to regular LINQ... - added unit tests --- .../FdbAsyncEnumerable.OrderedSequence.cs | 14 +--- .../Linq/FdbAsyncEnumerable.Sorters.cs | 45 ++++++++++- .../Linq/FdbAsyncEnumerable.cs | 40 +++++++++- .../Linq/IFdbAsyncOrderedEnumerable.cs | 3 +- .../Linq/FdbAsyncEnumerableFacts.cs | 77 ++++++++++++++++++- 5 files changed, 162 insertions(+), 17 deletions(-) diff --git a/FoundationDB.Client/Linq/FdbAsyncEnumerable.OrderedSequence.cs b/FoundationDB.Client/Linq/FdbAsyncEnumerable.OrderedSequence.cs index af7e1ef94..8cd72fc24 100644 --- a/FoundationDB.Client/Linq/FdbAsyncEnumerable.OrderedSequence.cs +++ b/FoundationDB.Client/Linq/FdbAsyncEnumerable.OrderedSequence.cs @@ -101,19 +101,11 @@ IAsyncEnumerator IAsyncEnumerable.GetEnumerator() } [NotNull] - public IFdbAsyncOrderedEnumerable ThenBy([NotNull] Func keySelector, IComparer keyComparer = null) + public IFdbAsyncOrderedEnumerable CreateOrderedEnumerable([NotNull] Func keySelector, IComparer comparer, bool descending) { if (keySelector == null) throw new ArgumentNullException("keySelector"); - return new OrderedSequence(this, keySelector, keyComparer, false, this); - } - - [NotNull] - public 
IFdbAsyncOrderedEnumerable ThenByDescending([NotNull] Func keySelector, IComparer keyComparer = null) - { - if (keySelector == null) throw new ArgumentNullException("keySelector"); - - return new OrderedSequence(this, keySelector, keyComparer, true, this); + return new OrderedSequence(this, keySelector, comparer, descending, this); } } @@ -242,7 +234,7 @@ private void Completed() public TSource Current { - get { throw new NotImplementedException(); } + get { return m_current; } } public void Dispose() diff --git a/FoundationDB.Client/Linq/FdbAsyncEnumerable.Sorters.cs b/FoundationDB.Client/Linq/FdbAsyncEnumerable.Sorters.cs index fd1cecabc..6e4c603df 100644 --- a/FoundationDB.Client/Linq/FdbAsyncEnumerable.Sorters.cs +++ b/FoundationDB.Client/Linq/FdbAsyncEnumerable.Sorters.cs @@ -62,9 +62,44 @@ internal int[] Sort([NotNull] TSource[] items, int count) private void QuickSort([NotNull] int[] map, int left, int right) { - throw new NotImplementedException(); + do + { + int i = left; + int j = right; + int x = map[i + ((j - i) >> 1)]; + do + { + while (i < map.Length && CompareKeys(x, map[i]) > 0) + { + i++; + } + while (j >= 0 && CompareKeys(x, map[j]) < 0) + { + j--; + } + if (i > j) break; + if (i < j) + { + int temp = map[i]; + map[i] = map[j]; + map[j] = temp; + } + i++; + j--; + } while (i <= j); + + if (j - left <= right - i) + { + if (left < j) QuickSort(map, left, j); + left = i; + } + else + { + if (i < right) QuickSort(map, i, right); + right = j; + } + } while (left < right); } - } /// Helper class for sorting a sequence of @@ -136,10 +171,16 @@ internal override void ComputeKeys([NotNull] TSource[] items, int count) keys[i] = selector(items[i]); } m_keys = keys; + if (m_next != null) + { + m_next.ComputeKeys(items, count); + } } internal override int CompareKeys(int index1, int index2) { + Contract.Requires(m_keys != null); + Contract.Requires(m_comparer != null); var keys = m_keys; int c = m_comparer.Compare(keys[index1], keys[index2]); if (c == 0) diff 
--git a/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs b/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs index 682d72b7f..e071aae7f 100644 --- a/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs +++ b/FoundationDB.Client/Linq/FdbAsyncEnumerable.cs @@ -389,7 +389,7 @@ public static IFdbAsyncEnumerable SelectAsync(this IF public static IFdbAsyncEnumerable Distinct(this IFdbAsyncEnumerable source, IEqualityComparer comparer = null) { - if (source == null) throw new ArgumentNullException("count"); + if (source == null) throw new ArgumentNullException("source"); comparer = comparer ?? EqualityComparer.Default; return new FdbDistinctAsyncIterator(source, comparer); @@ -397,6 +397,44 @@ public static IFdbAsyncEnumerable Distinct(this IFdbAsyncEnume #endregion + #region OrderBy... + + [NotNull] + public static IFdbAsyncOrderedEnumerable OrderBy([NotNull] this IFdbAsyncEnumerable source, [NotNull] Func keySelector, IComparer comparer = null) + { + if (source == null) throw new ArgumentNullException("source"); + if (keySelector == null) throw new ArgumentNullException("keySelector"); + comparer = comparer ?? Comparer.Default; + + return new OrderedSequence(source, keySelector, comparer, descending: false, parent: null); + } + + [NotNull] + public static IFdbAsyncOrderedEnumerable OrderByDescending([NotNull] this IFdbAsyncEnumerable source, [NotNull] Func keySelector, IComparer comparer = null) + { + if (source == null) throw new ArgumentNullException("source"); + if (keySelector == null) throw new ArgumentNullException("keySelector"); + comparer = comparer ?? 
Comparer.Default; + + return new OrderedSequence(source, keySelector, comparer, descending: true, parent: null); + } + + [NotNull] + public static IFdbAsyncOrderedEnumerable ThenBy([NotNull] this IFdbAsyncOrderedEnumerable source, [NotNull] Func keySelector, IComparer comparer = null) + { + if (source == null) throw new ArgumentNullException("keySelector"); + return source.CreateOrderedEnumerable(keySelector, comparer, descending: false); + } + + [NotNull] + public static IFdbAsyncOrderedEnumerable ThenByDescending([NotNull] this IFdbAsyncOrderedEnumerable source, [NotNull] Func keySelector, IComparer comparer = null) + { + if (source == null) throw new ArgumentNullException("keySelector"); + return source.CreateOrderedEnumerable(keySelector, comparer, descending: true); + } + + #endregion + // If you are bored, maybe consider adding: // - DefaultIfEmpty // - Zip diff --git a/FoundationDB.Client/Linq/IFdbAsyncOrderedEnumerable.cs b/FoundationDB.Client/Linq/IFdbAsyncOrderedEnumerable.cs index 28267c5ea..47981d213 100644 --- a/FoundationDB.Client/Linq/IFdbAsyncOrderedEnumerable.cs +++ b/FoundationDB.Client/Linq/IFdbAsyncOrderedEnumerable.cs @@ -38,8 +38,7 @@ namespace FoundationDB.Linq public interface IFdbAsyncOrderedEnumerable : IFdbAsyncEnumerable { - IFdbAsyncOrderedEnumerable ThenBy(Func keySelector, IComparer comparer = null); - IFdbAsyncOrderedEnumerable ThenByDescending(Func keySelector, IComparer comparer = null); + IFdbAsyncOrderedEnumerable CreateOrderedEnumerable(Func keySelector, IComparer comparer, bool descending); } } diff --git a/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs b/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs index c7fba65bd..e9f470b48 100644 --- a/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs +++ b/FoundationDB.Tests/Linq/FdbAsyncEnumerableFacts.cs @@ -703,6 +703,81 @@ public async Task Test_Can_Sum_Unsigned() Assert.That(sum, Is.EqualTo(0)); } + [Test] + public async Task Test_Can_OrderBy() + { + var rnd = new 
Random(1234); + var items = Enumerable.Range(0, 100).Select(_ => rnd.Next()).ToList(); + + var source = items.ToAsyncEnumerable(); + + var query = source.OrderBy((x) => x); + Assert.That(query, Is.Not.Null); + var res = await query.ToListAsync(); + Assert.That(res, Is.Not.Null); + Assert.That(res, Is.EqualTo(items.OrderBy((x) => x).ToList())); + + query = source.OrderByDescending((x) => x); + Assert.That(query, Is.Not.Null); + res = await query.ToListAsync(); + Assert.That(res, Is.Not.Null); + Assert.That(res, Is.EqualTo(items.OrderByDescending((x) => x).ToList())); + } + + [Test] + public async Task Test_Can_OrderBy_With_Custom_Comparer() + { + var items = new[] { "c", "B", "a", "D" }; + + var source = items.ToAsyncEnumerable(); + + // ordinal should put upper before lower + var query = source.OrderBy((x) => x, StringComparer.Ordinal); + Assert.That(query, Is.Not.Null); + var res = await query.ToListAsync(); + Assert.That(res, Is.Not.Null); + Assert.That(res, Is.EqualTo(new [] { "B", "D", "a", "c" })); + + // ordinal ingore case should mixe upper and lower + query = source.OrderBy((x) => x, StringComparer.OrdinalIgnoreCase); + Assert.That(query, Is.Not.Null); + res = await query.ToListAsync(); + Assert.That(res, Is.Not.Null); + Assert.That(res, Is.EqualTo(new[] { "a", "B", "c", "D" })); + } + + [Test] + public async Task Test_Can_ThenBy() + { + var rnd = new Random(1234); + var pairs = Enumerable.Range(0, 100).Select(_ => new KeyValuePair(rnd.Next(10), rnd.Next())).ToList(); + var source = pairs.ToAsyncEnumerable(); + + var query = source.OrderBy(kvp => kvp.Key).ThenBy(kvp => kvp.Value); + Assert.That(query, Is.Not.Null); + var res = await query.ToListAsync(); + Assert.That(res, Is.Not.Null); + Assert.That(res, Is.EqualTo(pairs.OrderBy(kvp => kvp.Key).ThenBy(kvp => kvp.Value).ToList())); + + query = source.OrderBy(kvp => kvp.Key).ThenByDescending(kvp => kvp.Value); + Assert.That(query, Is.Not.Null); + res = await query.ToListAsync(); + Assert.That(res, 
Is.Not.Null); + Assert.That(res, Is.EqualTo(pairs.OrderBy(kvp => kvp.Key).ThenByDescending(kvp => kvp.Value).ToList())); + + query = source.OrderByDescending(kvp => kvp.Key).ThenBy(kvp => kvp.Value); + Assert.That(query, Is.Not.Null); + res = await query.ToListAsync(); + Assert.That(res, Is.Not.Null); + Assert.That(res, Is.EqualTo(pairs.OrderByDescending(kvp => kvp.Key).ThenBy(kvp => kvp.Value).ToList())); + + query = source.OrderByDescending(kvp => kvp.Key).ThenByDescending(kvp => kvp.Value); + Assert.That(query, Is.Not.Null); + res = await query.ToListAsync(); + Assert.That(res, Is.Not.Null); + Assert.That(res, Is.EqualTo(pairs.OrderByDescending(kvp => kvp.Key).ThenByDescending(kvp => kvp.Value).ToList())); + } + [Test] public async Task Test_Can_Select_Anonymous_Types() { @@ -727,7 +802,7 @@ public async Task Test_Can_Select_With_LINQ_Syntax() { // ensure that we can also use the "from ... select ... where" syntax - var results = await + var results = await (from x in Enumerable.Range(0, 10).ToAsyncEnumerable() let t = new { Value = x, Square = x * x, Root = Math.Sqrt(x), Odd = x % 2 == 1 } where t.Odd From 6f2ac8e85cac2252c872f18424685ca80f132326 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Sun, 7 Dec 2014 21:06:28 +0100 Subject: [PATCH 29/63] Fix unit test after changes to Guid pretty printing in tuples --- FoundationDB.Tests/KeyFacts.cs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/FoundationDB.Tests/KeyFacts.cs b/FoundationDB.Tests/KeyFacts.cs index 58b0c3029..80a77704e 100644 --- a/FoundationDB.Tests/KeyFacts.cs +++ b/FoundationDB.Tests/KeyFacts.cs @@ -353,11 +353,11 @@ public void Test_FdbKey_PrettyPrint() Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(1.0d, Math.PI, Math.E)), Is.EqualTo("(1, 3.1415926535897931, 2.7182818284590451)"), "Doubles should used dot and have full precision (17 digits)"); Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(1.0f, (float)Math.PI, (float)Math.E)), Is.EqualTo("(1, 3.14159274, 2.71828175)"), 
"Singles should used dot and have full precision (10 digits)"); var guid = Guid.NewGuid(); - Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(guid)), Is.EqualTo(String.Format("({0},)", guid.ToString("D"))), "GUIDs should be displayed as a string literal, without quotes"); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(guid)), Is.EqualTo(String.Format("({0},)", guid.ToString("B"))), "GUIDs should be displayed as a string literal, surrounded by {...}, and without quotes"); var uuid128 = Uuid128.NewUuid(); - Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(uuid128)), Is.EqualTo(String.Format("({0},)", uuid128.ToString("D"))), "Uuid128s should be displayed as a string literal, without quotes"); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(uuid128)), Is.EqualTo(String.Format("({0},)", uuid128.ToString("B"))), "Uuid128s should be displayed as a string literal, surrounded by {...}, and without quotes"); var uuid64 = Uuid64.NewUuid(); - Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(uuid64)), Is.EqualTo(String.Format("({0},)", uuid64.ToString("D"))), "Uuid64s should be displayed as a string literal, without quotes"); + Assert.That(FdbKey.Dump(FdbTuple.EncodeKey(uuid64)), Is.EqualTo(String.Format("({0},)", uuid64.ToString("B"))), "Uuid64s should be displayed as a string literal, surrounded by {...}, and without quotes"); // ranges should be decoded when possible var key = FdbTuple.ToRange(FdbTuple.Create("hello")); From c1e314d53988ec129f45509a95291d1bf8750939 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gr=C3=A9goire=20Castre?= Date: Mon, 8 Dec 2014 12:25:00 +0100 Subject: [PATCH 30/63] AsyncLINQ: fixed error handling when an interator is disposed at the same time that the transaction fails --- FoundationDB.Client/FdbRangeQuery.ResultIterator.cs | 9 +++++++-- FoundationDB.Client/Linq/Iterators/FdbAsyncIterator.cs | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/FoundationDB.Client/FdbRangeQuery.ResultIterator.cs b/FoundationDB.Client/FdbRangeQuery.ResultIterator.cs index 
abfedfc2d..c2fe6bd36 100644 --- a/FoundationDB.Client/FdbRangeQuery.ResultIterator.cs +++ b/FoundationDB.Client/FdbRangeQuery.ResultIterator.cs @@ -124,11 +124,16 @@ private async Task ReadAnotherBatchAsync(CancellationToken cancellationTok { Contract.Requires(m_itemsRemainingInChunk == 0 && m_currentOffsetInChunk == -1 && !m_outOfChunks); + var iterator = m_chunkIterator; + // start reading the next batch - if (await m_chunkIterator.MoveNext(cancellationToken).ConfigureAwait(false)) + if (await iterator.MoveNext(cancellationToken).ConfigureAwait(false)) { // we got a new chunk ! - var chunk = m_chunkIterator.Current; + //note: Dispose() or Cleanup() maybe have been called concurrently! + ThrowInvalidState(); + + var chunk = iterator.Current; //note: if the range is empty, we may have an empty chunk, that is equivalent to no chunk if (chunk != null && chunk.Length > 0) diff --git a/FoundationDB.Client/Linq/Iterators/FdbAsyncIterator.cs b/FoundationDB.Client/Linq/Iterators/FdbAsyncIterator.cs index f75849ae4..10a96c375 100644 --- a/FoundationDB.Client/Linq/Iterators/FdbAsyncIterator.cs +++ b/FoundationDB.Client/Linq/Iterators/FdbAsyncIterator.cs @@ -285,7 +285,7 @@ protected void ThrowInvalidState() switch (Volatile.Read(ref m_state)) { case STATE_SEQ: - throw new InvalidOperationException("The async iterator should have been initiliazed with a called to GetEnumerator()"); + throw new InvalidOperationException("The async iterator should have been initiliazed with a call to GetEnumerator()"); case STATE_ITERATING: break; From 4ae6536092518e303f041e33d10282856c5b6dd4 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Mon, 8 Dec 2014 17:02:15 +0100 Subject: [PATCH 31/63] Fixed invalid names in DebuggerDisplay attribute for FdbRangeQuery.ResultIterator --- FoundationDB.Client/FdbRangeQuery.ResultIterator.cs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/FoundationDB.Client/FdbRangeQuery.ResultIterator.cs 
b/FoundationDB.Client/FdbRangeQuery.ResultIterator.cs index c2fe6bd36..dae8b08ce 100644 --- a/FoundationDB.Client/FdbRangeQuery.ResultIterator.cs +++ b/FoundationDB.Client/FdbRangeQuery.ResultIterator.cs @@ -45,7 +45,7 @@ public partial class FdbRangeQuery { /// Async iterator that fetches the results by batch, but return them one by one - [DebuggerDisplay("State={m_state}, Current={m_current}, RemainingInBatch={m_remainingInBatch}, ReadLastBatch={m_lastBatchRead}")] + [DebuggerDisplay("State={m_state}, Current={m_current}, RemainingInChunk={m_itemsRemainingInChunk}, OutOfChunks={m_outOfChunks}")] private sealed class ResultIterator : FdbAsyncIterator { @@ -56,7 +56,6 @@ private sealed class ResultIterator : FdbAsyncIterator /// Lambda used to transform pairs of key/value into the expected result private readonly Func, T> m_resultTransform; - /// Iterator used to read chunks from the database private IFdbAsyncEnumerator[]> m_chunkIterator; From 8abf3cfac2d833d2141d808adeb3720e702ead61 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Wed, 10 Dec 2014 22:58:27 +0100 Subject: [PATCH 32/63] XML comments and code annotations - Still not over 9000 --- .../Filters/Logging/FdbLoggedDatabase.cs | 6 + .../Filters/Logging/FdbLoggedTransaction.cs | 1 + .../Filters/Logging/FdbLoggingExtensions.cs | 29 +++- .../Logging/FdbTransactionLog.Commands.cs | 8 + .../Filters/Logging/FdbTransactionLog.cs | 39 ++++- .../Filters/ReadOnlyTransactionFilter.cs | 1 + .../Directories/FdbDirectoryExtensions.cs | 116 ++++++------- .../Layers/Directories/FdbDirectoryLayer.cs | 156 +++++++++++------- .../Directories/FdbDirectorySubspace.cs | 51 +++--- .../Directories/FdbHighContentionAllocator.cs | 11 +- .../Layers/Directories/IFdbDirectory.cs | 2 +- 11 files changed, 254 insertions(+), 166 deletions(-) diff --git a/FoundationDB.Client/Filters/Logging/FdbLoggedDatabase.cs b/FoundationDB.Client/Filters/Logging/FdbLoggedDatabase.cs index dede51fc6..01ee3a092 100644 --- 
a/FoundationDB.Client/Filters/Logging/FdbLoggedDatabase.cs +++ b/FoundationDB.Client/Filters/Logging/FdbLoggedDatabase.cs @@ -39,12 +39,18 @@ public sealed class FdbLoggedDatabase : FdbDatabaseFilter /// Handler called everytime a transaction is successfully committed public Action OnCommitted { get; private set; } + /// Wrap a database with a filter that will log the activity of all transactions + /// Wrapped database + /// If true, deny all write operations. + /// If true, also dispose the wrapped database if this instance is disposed. + /// Handler that will be called when a transaction is either committed succesfully, or disposed. The log can be accessed via the property. public FdbLoggedDatabase(IFdbDatabase database, bool forceReadOnly, bool ownsDatabase, Action onCommitted) : base(database, forceReadOnly, ownsDatabase) { this.OnCommitted = onCommitted; } + /// Create a new logged transaction public override IFdbTransaction BeginTransaction(FdbTransactionMode mode, CancellationToken cancellationToken = default(CancellationToken), FdbOperationContext context = null) { return new FdbLoggedTransaction( diff --git a/FoundationDB.Client/Filters/Logging/FdbLoggedTransaction.cs b/FoundationDB.Client/Filters/Logging/FdbLoggedTransaction.cs index b49bf19a7..8cc3055b6 100644 --- a/FoundationDB.Client/Filters/Logging/FdbLoggedTransaction.cs +++ b/FoundationDB.Client/Filters/Logging/FdbLoggedTransaction.cs @@ -45,6 +45,7 @@ public sealed class FdbLoggedTransaction : FdbTransactionFilter /// Handler that will be called when this transaction commits successfully public Action Committed { get; private set; } + /// Wrap an existing transaction and log all operations performed public FdbLoggedTransaction(IFdbTransaction trans, bool ownsTransaction, Action onCommitted) : base(trans, false, ownsTransaction) { diff --git a/FoundationDB.Client/Filters/Logging/FdbLoggingExtensions.cs b/FoundationDB.Client/Filters/Logging/FdbLoggingExtensions.cs index cfc6836c7..04e2e1fe0 100644 
--- a/FoundationDB.Client/Filters/Logging/FdbLoggingExtensions.cs +++ b/FoundationDB.Client/Filters/Logging/FdbLoggingExtensions.cs @@ -36,8 +36,12 @@ namespace FoundationDB.Filters.Logging public static class FdbLoggingExtensions { + /// Apply the Logging Filter to this database instance + /// Original database instance + /// Handler that will be called everytime a transaction commits successfully, or gets disposed. The log of all operations performed by the transaction can be accessed via the property. + /// Database filter, that will monitor all transactions initiated from it. Disposing this wrapper will NOT dispose the inner database. [NotNull] - public static FdbLoggedDatabase Logged(this IFdbDatabase database, [NotNull] Action handler) + public static FdbLoggedDatabase Logged([NotNull] this IFdbDatabase database, [NotNull] Action handler) { if (handler == null) throw new ArgumentNullException("handler"); @@ -50,15 +54,19 @@ public static FdbLoggedDatabase Logged(this IFdbDatabase database, [NotNull] Act /// Strip the logging behaviour of this database. Use this for boilerplate or test code that would pollute the logs otherwise. /// Database instance (that may or may not be logged) /// Either itself if it is not logged, or the inner database if it was. - public static IFdbDatabase WithoutLogging(this IFdbDatabase database) + [NotNull] + public static IFdbDatabase WithoutLogging([NotNull] this IFdbDatabase database) { + if (database == null) throw new ArgumentNullException("database"); + var logged = database as FdbLoggedDatabase; if (logged != null) return logged.GetInnerDatabase(); return database; } - internal static FdbLoggedTransaction GetLogger(IFdbReadOnlyTransaction trans) + [CanBeNull] + internal static FdbLoggedTransaction GetLogger([NotNull] IFdbReadOnlyTransaction trans) { //TODO: the logged transaction could also be wrapped in other filters. 
// => we need a recursive "FindFilter" method that would unwrap the filter onion looking for a specific one... @@ -67,7 +75,8 @@ internal static FdbLoggedTransaction GetLogger(IFdbReadOnlyTransaction trans) } /// Annotate a logged transaction - public static void Annotate(this IFdbReadOnlyTransaction trans, string message) + /// This method only applies to transactions created from a database instance. Calling this method on regular transaction is a no-op. + public static void Annotate([NotNull] this IFdbReadOnlyTransaction trans, [NotNull] string message) { var logged = GetLogger(trans); if (logged != null) @@ -77,32 +86,36 @@ public static void Annotate(this IFdbReadOnlyTransaction trans, string message) } /// Annotate a logged transaction + /// This method only applies to transactions created from a database instance. Calling this method on regular transaction is a no-op. [StringFormatMethod("format")] - public static void Annotate(this IFdbReadOnlyTransaction trans, string format, object arg0) + public static void Annotate([NotNull] this IFdbReadOnlyTransaction trans, [NotNull] string format, object arg0) { var logged = GetLogger(trans); if (logged != null) logged.Log.AddOperation(new FdbTransactionLog.LogCommand(String.Format(format, arg0)), countAsOperation: false); } /// Annotate a logged transaction + /// This method only applies to transactions created from a database instance. Calling this method on regular transaction is a no-op. 
[StringFormatMethod("format")] - public static void Annotate(this IFdbReadOnlyTransaction trans, string format, object arg0, object arg1) + public static void Annotate([NotNull] this IFdbReadOnlyTransaction trans, [NotNull] string format, object arg0, object arg1) { var logged = GetLogger(trans); if (logged != null) logged.Log.AddOperation(new FdbTransactionLog.LogCommand(String.Format(format, arg0, arg1)), countAsOperation: false); } /// Annotate a logged transaction + /// This method only applies to transactions created from a database instance. Calling this method on regular transaction is a no-op. [StringFormatMethod("format")] - public static void Annotate(this IFdbReadOnlyTransaction trans, string format, object arg0, object arg1, object arg2) + public static void Annotate([NotNull] this IFdbReadOnlyTransaction trans, [NotNull] string format, object arg0, object arg1, object arg2) { var logged = GetLogger(trans); if (logged != null) logged.Log.AddOperation(new FdbTransactionLog.LogCommand(String.Format(format, arg0, arg1, arg2)), countAsOperation: false); } /// Annotate a logged transaction + /// This method only applies to transactions created from a database instance. Calling this method on regular transaction is a no-op. 
[StringFormatMethod("format")] - public static void Annotate(this IFdbReadOnlyTransaction trans, string format, params object[] args) + public static void Annotate([NotNull] this IFdbReadOnlyTransaction trans, [NotNull] string format, params object[] args) { var logged = GetLogger(trans); if (logged != null) logged.Log.AddOperation(new FdbTransactionLog.LogCommand(String.Format(format, args)), countAsOperation: false); diff --git a/FoundationDB.Client/Filters/Logging/FdbTransactionLog.Commands.cs b/FoundationDB.Client/Filters/Logging/FdbTransactionLog.Commands.cs index b5d84c01c..466e7b4e7 100644 --- a/FoundationDB.Client/Filters/Logging/FdbTransactionLog.Commands.cs +++ b/FoundationDB.Client/Filters/Logging/FdbTransactionLog.Commands.cs @@ -401,6 +401,14 @@ public override string GetArguments() return String.Concat(FdbKey.Dump(this.Key), " ", this.Mutation.ToString(), " ", this.Param.ToAsciiOrHexaString()); } + public override string ToString() + { + var arg = this.GetArguments(); + var sb = new StringBuilder(); + if (this.Snapshot) sb.Append("Snapshot."); + sb.Append("Atomic_").Append(this.Mutation.ToString()).Append(' ').Append(FdbKey.Dump(this.Key)).Append(", <").Append(this.Param.ToHexaString(' ')).Append('>'); + return sb.ToString(); + } } public sealed class AddConflictRangeCommand : Command diff --git a/FoundationDB.Client/Filters/Logging/FdbTransactionLog.cs b/FoundationDB.Client/Filters/Logging/FdbTransactionLog.cs index 239c3728d..f541adc9a 100644 --- a/FoundationDB.Client/Filters/Logging/FdbTransactionLog.cs +++ b/FoundationDB.Client/Filters/Logging/FdbTransactionLog.cs @@ -29,14 +29,16 @@ DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY namespace FoundationDB.Filters.Logging { using FoundationDB.Client; + using FoundationDB.Client.Utils; + using JetBrains.Annotations; using System; using System.Collections.Concurrent; - using System.Collections.Generic; using System.Diagnostics; using System.Globalization; using System.Text; using System.Threading; + /// Container that logs all operations performed by a transaction public sealed partial class FdbTransactionLog { private int m_step; @@ -45,6 +47,8 @@ public sealed partial class FdbTransactionLog private int m_readSize; private int m_writeSize; + /// Create an empty log for a newly created transaction + /// public FdbTransactionLog(IFdbTransaction trans) { this.Commands = new ConcurrentQueue(); @@ -57,7 +61,7 @@ public FdbTransactionLog(IFdbTransaction trans) public int Operations { get { return m_operations; } } /// List of all commands processed by the transaction - public ConcurrentQueue Commands { get; private set; } + public ConcurrentQueue Commands { [NotNull] get; private set; } /// Timestamp of the start of transaction public long StartTimestamp { get; private set; } @@ -135,8 +139,10 @@ public TimeSpan TotalDuration /// Marks the start of the transaction /// - public void Start(IFdbTransaction trans) + public void Start([NotNull] IFdbTransaction trans) { + Contract.Requires(trans != null); + this.Id = trans.Id; this.StartedUtc = DateTimeOffset.UtcNow; this.StartTimestamp = GetTimestamp(); @@ -144,19 +150,24 @@ public void Start(IFdbTransaction trans) /// Marks the end of the transaction /// - public void Stop(IFdbTransaction trans) + public void Stop([NotNull] IFdbTransaction trans) { + Contract.Requires(trans != null); + + //TODO: verify that the trans is the same one that was passed to Start(..)? if (!this.Completed) { this.Completed = true; this.StopTimestamp = GetTimestamp(); - this.StoppedUtc = DateTimeOffset.UtcNow; + this.StoppedUtc = DateTimeOffset.UtcNow; //TODO: use a configurable clock? 
} } /// Adds a new already completed command to the log - public void AddOperation(Command cmd, bool countAsOperation = true) + public void AddOperation([NotNull] Command cmd, bool countAsOperation = true) { + Contract.Requires(cmd != null); + var ts = GetTimeOffset(); int step = Volatile.Read(ref m_step); @@ -169,8 +180,10 @@ public void AddOperation(Command cmd, bool countAsOperation = true) } /// Start tracking the execution of a new command - public void BeginOperation(Command cmd) + public void BeginOperation([NotNull] Command cmd) { + Contract.Requires(cmd != null); + var ts = GetTimeOffset(); int step = Volatile.Read(ref m_step); @@ -183,8 +196,10 @@ public void BeginOperation(Command cmd) } /// Mark the end of the execution of a command - public void EndOperation(Command cmd, Exception error = null) + public void EndOperation([NotNull] Command cmd, Exception error = null) { + Contract.Requires(cmd != null); + var ts = GetTimeOffset(); var step = Interlocked.Increment(ref m_step); @@ -220,6 +235,7 @@ public string GetCommandsReport() } /// Generate a full ASCII report with the detailed timeline of all the commands that were executed by the transaction + [NotNull] public string GetTimingsReport(bool showCommands = false) { var culture = CultureInfo.InvariantCulture; @@ -368,6 +384,7 @@ private static string GetFancyGraph(int width, long offset, long duration, long return new string(tmp); } + /// List of all operation types supported by a transaction public enum Operation { Invalid = 0, @@ -394,13 +411,19 @@ public enum Operation Log, } + /// Categories of operations supported by a transaction public enum Mode { Invalid = 0, + /// Operation that reads keys and/or values from the database Read, + /// Operation that writes or clears keys from the database Write, + /// Operation that changes the state or behavior of the transaction Meta, + /// Operation that watch changes performed in the database, outside of the transaction Watch, + /// Comments, annotations, 
debug output attached to the transaction Annotation } diff --git a/FoundationDB.Client/Filters/ReadOnlyTransactionFilter.cs b/FoundationDB.Client/Filters/ReadOnlyTransactionFilter.cs index 5c5b1589c..e74321045 100644 --- a/FoundationDB.Client/Filters/ReadOnlyTransactionFilter.cs +++ b/FoundationDB.Client/Filters/ReadOnlyTransactionFilter.cs @@ -31,6 +31,7 @@ namespace FoundationDB.Filters using FoundationDB.Client; using System; + /// Filter that forces a read/write transaction to be read-only public sealed class ReadOnlyTransactionFilter : FdbTransactionFilter { public ReadOnlyTransactionFilter(IFdbTransaction trans, bool ownsTransaction) diff --git a/FoundationDB.Client/Layers/Directories/FdbDirectoryExtensions.cs b/FoundationDB.Client/Layers/Directories/FdbDirectoryExtensions.cs index 4e8687287..43a113b47 100644 --- a/FoundationDB.Client/Layers/Directories/FdbDirectoryExtensions.cs +++ b/FoundationDB.Client/Layers/Directories/FdbDirectoryExtensions.cs @@ -29,15 +29,15 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY namespace FoundationDB.Layers.Directories { using FoundationDB.Client; - using FoundationDB.Layers.Tuples; + using JetBrains.Annotations; using System; using System.Collections.Generic; using System.Threading; using System.Threading.Tasks; + /// Helper methods related to the Directory Layer public static class FdbDirectoryExtensions { - // this helper class contain extension methods to help deal with IFdbDatabase vs IFdbTransaction #region CreateOrOpen... @@ -45,7 +45,7 @@ public static class FdbDirectoryExtensions /// If the directory does not exist, it is created (creating parent directories if necessary). /// If layer is specified, it is checked against the layer of an existing directory or set as the layer of a new directory. 
/// - public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) + public static Task CreateOrOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -57,7 +57,7 @@ public static Task CreateOrOpenAsync(this IFdbDirectory di /// If the directory does not exist, it is created (creating parent directories if necessary). /// If layer is specified, it is checked against the layer of an existing directory or set as the layer of a new directory. /// - public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, Slice layer, CancellationToken cancellationToken) + public static Task CreateOrOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] IEnumerable path, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -69,7 +69,7 @@ public static Task CreateOrOpenAsync(this IFdbDirectory di /// If the directory does not exist, it is created (creating parent directories if necessary). /// If layer is specified, it is checked against the layer of an existing directory or set as the layer of a new directory. 
/// - public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbRetryable db, string name, CancellationToken cancellationToken) + public static Task CreateOrOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -81,7 +81,7 @@ public static Task CreateOrOpenAsync(this IFdbDirectory di /// If the directory does not exist, it is created (creating parent directories if necessary). /// If layer is specified, it is checked against the layer of an existing directory or set as the layer of a new directory. /// - public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbRetryable db, string name, Slice layer, CancellationToken cancellationToken) + public static Task CreateOrOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] string name, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -93,7 +93,7 @@ public static Task CreateOrOpenAsync(this IFdbDirectory di /// If the directory does not exist, it is created (creating parent directories if necessary). /// If layer is specified, it is checked against the layer of an existing directory or set as the layer of a new directory. 
/// - public static Task CreateOrOpenAsync(this IFdbDirectory directory, IFdbTransaction trans, string name, Slice layer = default(Slice)) + public static Task CreateOrOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbTransaction trans, [NotNull] string name, Slice layer = default(Slice)) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -111,7 +111,7 @@ public static Task CreateOrOpenAsync(this IFdbDirectory di /// If true, do not make any modifications to the database, and return null if the directory does not exist. /// Optional layer ID that is checked with the opened directory. /// - public static Task TryCreateOrOpenAsync(this IFdbDirectory directory, IFdbTransaction trans, IEnumerable path, bool readOnly, Slice layer = default(Slice)) + public static Task TryCreateOrOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbTransaction trans, [NotNull] IEnumerable path, bool readOnly, Slice layer = default(Slice)) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -133,7 +133,7 @@ public static Task CreateOrOpenAsync(this IFdbDirectory di /// If true, do not make any modifications to the database, and return null if the directory does not exist. /// Optional layer ID that is checked with the opened directory. /// - public static Task TryCreateOrOpenAsync(this IFdbDirectory directory, IFdbTransaction trans, string name, bool readOnly, Slice layer = default(Slice)) + public static Task TryCreateOrOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbTransaction trans, [NotNull] string name, bool readOnly, Slice layer = default(Slice)) { if (name == null) throw new ArgumentNullException("name"); @@ -147,7 +147,7 @@ public static Task CreateOrOpenAsync(this IFdbDirectory di /// Creates a directory with the given (creating parent directories if necessary). 
/// An error is raised if the given directory already exists. /// - public static Task CreateAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) + public static Task CreateAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -159,7 +159,7 @@ public static Task CreateAsync(this IFdbDirectory director /// An error is raised if the given directory already exists. /// If is specified, it is recorded with the directory and will be checked by future calls to open. /// - public static Task CreateAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, Slice layer, CancellationToken cancellationToken) + public static Task CreateAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] IEnumerable path, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -170,7 +170,7 @@ public static Task CreateAsync(this IFdbDirectory director /// Creates a directory with the given . /// An error is raised if the given directory already exists. /// - public static Task CreateAsync(this IFdbDirectory directory, IFdbRetryable db, string name, CancellationToken cancellationToken) + public static Task CreateAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -182,7 +182,7 @@ public static Task CreateAsync(this IFdbDirectory director /// An error is raised if the given directory already exists. 
/// If is specified, it is recorded with the directory and will be checked by future calls to open. /// - public static Task CreateAsync(this IFdbDirectory directory, IFdbRetryable db, string name, Slice layer, CancellationToken cancellationToken) + public static Task CreateAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] string name, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -194,7 +194,7 @@ public static Task CreateAsync(this IFdbDirectory director /// An error is raised if the given directory already exists. /// If is specified, it is recorded with the directory and will be checked by future calls to open. /// - public static Task CreateAsync(this IFdbDirectory directory, IFdbTransaction trans, string name, Slice layer = default(Slice)) + public static Task CreateAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbTransaction trans, [NotNull] string name, Slice layer = default(Slice)) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -203,7 +203,7 @@ public static Task CreateAsync(this IFdbDirectory director } /// Attempts to create a directory with the given (creating parent directories if necessary). - public static Task TryCreateAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) + public static Task TryCreateAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -214,7 +214,7 @@ public static Task TryCreateAsync(this IFdbDirectory direc /// Attempts to create a directory with the given (creating parent directories if necessary). 
/// If is specified, it is recorded with the directory and will be checked by future calls to open. /// - public static Task TryCreateAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, Slice layer, CancellationToken cancellationToken) + public static Task TryCreateAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] IEnumerable path, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -223,7 +223,7 @@ public static Task TryCreateAsync(this IFdbDirectory direc } /// Attempts to create a directory with the given . - public static Task TryCreateAsync(this IFdbDirectory directory, IFdbRetryable db, string name, CancellationToken cancellationToken) + public static Task TryCreateAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -234,7 +234,7 @@ public static Task TryCreateAsync(this IFdbDirectory direc /// Attempts to create a directory with the given . /// If is specified, it is recorded with the directory and will be checked by future calls to open. /// - public static Task TryCreateAsync(this IFdbDirectory directory, IFdbRetryable db, string name, Slice layer, CancellationToken cancellationToken) + public static Task TryCreateAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] string name, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -245,7 +245,7 @@ public static Task TryCreateAsync(this IFdbDirectory direc /// Attempts to create a directory with the given . 
/// If is specified, it is recorded with the directory and will be checked by future calls to open. /// - public static Task TryCreateAsync(this IFdbDirectory directory, IFdbTransaction trans, string name, Slice layer = default(Slice)) + public static Task TryCreateAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbTransaction trans, [NotNull] string name, Slice layer = default(Slice)) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -260,7 +260,7 @@ public static Task TryCreateAsync(this IFdbDirectory direc /// Opens the directory with the given . /// An error is raised if the directory does not exist. /// - public static Task OpenAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) + public static Task OpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -271,7 +271,7 @@ public static Task OpenAsync(this IFdbDirectory directory, /// Opens the directory with the given . /// An error is raised if the directory does not exist, or if a layer is specified and a different layer was specified when the directory was created. /// - public static Task OpenAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, Slice layer, CancellationToken cancellationToken) + public static Task OpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] IEnumerable path, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -282,7 +282,7 @@ public static Task OpenAsync(this IFdbDirectory directory, /// Opens the sub-directory with the given . 
/// An error is raised if the directory does not exist. /// - public static Task OpenAsync(this IFdbDirectory directory, IFdbRetryable db, string name, CancellationToken cancellationToken) + public static Task OpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -293,7 +293,7 @@ public static Task OpenAsync(this IFdbDirectory directory, /// Opens the sub-directory with the given . /// An error is raised if the directory does not exist, or if a layer is specified and a different layer was specified when the directory was created. /// - public static Task OpenAsync(this IFdbDirectory directory, IFdbRetryable db, string name, Slice layer, CancellationToken cancellationToken) + public static Task OpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] string name, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -304,7 +304,7 @@ public static Task OpenAsync(this IFdbDirectory directory, /// Opens the sub-directory with the given . /// An error is raised if the directory does not exist, or if a layer is specified and a different layer was specified when the directory was created. 
/// - public static Task OpenAsync(this IFdbDirectory directory, IFdbReadOnlyTransaction trans, string name, Slice layer = default(Slice)) + public static Task OpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyTransaction trans, [NotNull] string name, Slice layer = default(Slice)) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -313,7 +313,7 @@ public static Task OpenAsync(this IFdbDirectory directory, } /// Attempts to open the directory with the given . - public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, IEnumerable path, CancellationToken cancellationToken) + public static Task TryOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyRetryable db, [NotNull] IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -322,7 +322,7 @@ public static Task TryOpenAsync(this IFdbDirectory directo } /// Attempts to open the directory with the given . - public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, IEnumerable path, Slice layer, CancellationToken cancellationToken) + public static Task TryOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyRetryable db, [NotNull] IEnumerable path, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -331,7 +331,7 @@ public static Task TryOpenAsync(this IFdbDirectory directo } /// Attempts to open the directory with the given . 
- public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, string name, CancellationToken cancellationToken) + public static Task TryOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyRetryable db, [NotNull] string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -340,7 +340,7 @@ public static Task TryOpenAsync(this IFdbDirectory directo } /// Attempts to open the directory with the given . - public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, string name, Slice layer, CancellationToken cancellationToken) + public static Task TryOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyRetryable db, [NotNull] string name, Slice layer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -349,7 +349,7 @@ public static Task TryOpenAsync(this IFdbDirectory directo } /// Attempts to open the directory with the given . - public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyTransaction trans, string name) + public static Task TryOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyTransaction trans, [NotNull] string name) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -358,7 +358,7 @@ public static Task TryOpenAsync(this IFdbDirectory directo } /// Attempts to open the directory with the given . 
- public static Task TryOpenAsync(this IFdbDirectory directory, IFdbReadOnlyTransaction trans, string name, Slice layer) + public static Task TryOpenAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyTransaction trans, [NotNull] string name, Slice layer) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -374,7 +374,7 @@ public static Task TryOpenAsync(this IFdbDirectory directo /// There is no effect on the physical prefix of the given directory, or on clients that already have the directory open. /// An error is raised if the old directory does not exist, a directory already exists at `new_path`, or the parent directory of `new_path` does not exist. /// - public static Task MoveAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable oldPath, IEnumerable newPath, CancellationToken cancellationToken) + public static Task MoveAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] IEnumerable oldPath, [NotNull] IEnumerable newPath, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -386,7 +386,7 @@ public static Task MoveAsync(this IFdbDirectory directory, /// Attempts to move the directory found at to . /// There is no effect on the physical prefix of the given directory, or on clients that already have the directory open. 
/// - public static Task TryMoveAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable oldPath, IEnumerable newPath, CancellationToken cancellationToken) + public static Task TryMoveAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] IEnumerable oldPath, [NotNull] IEnumerable newPath, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -403,7 +403,7 @@ public static Task TryMoveAsync(this IFdbDirectory directo /// There is no effect on the physical prefix of the given directory, or on clients that already have the directory open. /// An error is raised if a directory already exists at `new_path`, or if the new path points to a child of the current directory. /// - public static Task MoveToAsync(this FdbDirectorySubspace subspace, IFdbRetryable db, IEnumerable newPath, CancellationToken cancellationToken) + public static Task MoveToAsync([NotNull] this FdbDirectorySubspace subspace, [NotNull] IFdbRetryable db, [NotNull] IEnumerable newPath, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); if (db == null) throw new ArgumentNullException("db"); @@ -414,14 +414,14 @@ public static Task MoveToAsync(this FdbDirectorySubspace s /// Attempts to move the current directory to . /// There is no effect on the physical prefix of the given directory, or on clients that already have the directory open. 
/// - public static Task TryMoveToAsync(this FdbDirectorySubspace subspace, IFdbRetryable db, IEnumerable newPath, CancellationToken cancellationToken) + public static Task TryMoveToAsync([NotNull] this FdbDirectorySubspace subspace, [NotNull] IFdbRetryable db, [NotNull] IEnumerable newPath, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); if (db == null) throw new ArgumentNullException("db"); if (newPath == null) throw new ArgumentNullException("newPath"); return db.ReadWriteAsync((tr) => subspace.TryMoveToAsync(tr, newPath), cancellationToken); } - + #endregion #region Remove / TryRemove... @@ -429,7 +429,7 @@ public static Task TryMoveToAsync(this FdbDirectorySubspac /// Removes the directory, its contents, and all subdirectories. /// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. /// - public static Task RemoveAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) + public static Task RemoveAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -439,7 +439,7 @@ public static Task RemoveAsync(this IFdbDirectory directory, IFdbRetryable db, I /// Removes the directory, its contents, and all subdirectories. /// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. 
/// - public static Task RemoveAsync(this IFdbDirectory directory, IFdbRetryable db, string name, CancellationToken cancellationToken) + public static Task RemoveAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -450,7 +450,7 @@ public static Task RemoveAsync(this IFdbDirectory directory, IFdbRetryable db, s /// Removes the directory, its contents, and all subdirectories. /// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. /// - public static Task RemoveAsync(this IFdbDirectory directory, IFdbRetryable db, CancellationToken cancellationToken) + public static Task RemoveAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -460,7 +460,7 @@ public static Task RemoveAsync(this IFdbDirectory directory, IFdbRetryable db, C /// Removes the directory, its contents, and all subdirectories. /// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. /// - public static Task RemoveAsync(this IFdbDirectory directory, IFdbTransaction trans, string name) + public static Task RemoveAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbTransaction trans, [NotNull] string name) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -471,7 +471,7 @@ public static Task RemoveAsync(this IFdbDirectory directory, IFdbTransaction tra /// Removes the directory, its contents, and all subdirectories. 
/// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. /// - public static Task TryRemoveAsync(this IFdbDirectory directory, IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) + public static Task TryRemoveAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -481,7 +481,7 @@ public static Task TryRemoveAsync(this IFdbDirectory directory, IFdbRetrya /// Removes the directory, its contents, and all subdirectories. /// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. /// - public static Task TryRemoveAsync(this IFdbDirectory directory, IFdbRetryable db, string name, CancellationToken cancellationToken) + public static Task TryRemoveAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbRetryable db, [NotNull] string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -492,7 +492,7 @@ public static Task TryRemoveAsync(this IFdbDirectory directory, IFdbRetrya /// Removes the directory, its contents, and all subdirectories. /// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. 
/// - public static Task TryRemoveAsync(this IFdbDirectory directory, IFdbTransaction trans, string name) + public static Task TryRemoveAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbTransaction trans, [NotNull] string name) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -506,7 +506,7 @@ public static Task TryRemoveAsync(this IFdbDirectory directory, IFdbTransa /// Checks if a directory already exists /// Returns true if the directory exists, otherwise false. - public static Task ExistsAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, IEnumerable path, CancellationToken cancellationToken) + public static Task ExistsAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyRetryable db, IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -515,7 +515,7 @@ public static Task ExistsAsync(this IFdbDirectory directory, IFdbReadOnlyR /// Checks if a directory already exists /// Returns true if the directory exists, otherwise false. - public static Task ExistsAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, string name, CancellationToken cancellationToken) + public static Task ExistsAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyRetryable db, [NotNull] string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -525,7 +525,7 @@ public static Task ExistsAsync(this IFdbDirectory directory, IFdbReadOnlyR /// Checks if a directory already exists /// Returns true if the directory exists, otherwise false. 
- public static Task ExistsAsync(this IFdbDirectory directory, IFdbReadOnlyTransaction trans, string name) + public static Task ExistsAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyTransaction trans, [NotNull] string name) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -535,7 +535,7 @@ public static Task ExistsAsync(this IFdbDirectory directory, IFdbReadOnlyT /// Checks if this directory exists /// Returns true if the directory exists, otherwise false. - public static Task ExistsAsync(this FdbDirectorySubspace subspace, IFdbReadOnlyRetryable db, CancellationToken cancellationToken) + public static Task ExistsAsync([NotNull] this FdbDirectorySubspace subspace, [NotNull] IFdbReadOnlyRetryable db, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); if (db == null) throw new ArgumentNullException("db"); @@ -547,7 +547,7 @@ public static Task ExistsAsync(this FdbDirectorySubspace subspace, IFdbRea #region List / TryList... /// Returns the list of subdirectories of directory at . - public static Task> ListAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, IEnumerable path, CancellationToken cancellationToken) + public static Task> ListAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyRetryable db, [NotNull] IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -556,7 +556,7 @@ public static Task> ListAsync(this IFdbDirectory directory, IFdbRea } /// Returns the list of subdirectories of the sub-directory with the given . 
- public static Task> ListAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, string name, CancellationToken cancellationToken) + public static Task> ListAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyRetryable db, [NotNull] string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -565,7 +565,7 @@ public static Task> ListAsync(this IFdbDirectory directory, IFdbRea } /// Returns the list of subdirectories of the current directory. - public static Task> ListAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, CancellationToken cancellationToken) + public static Task> ListAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyRetryable db, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -573,7 +573,7 @@ public static Task> ListAsync(this IFdbDirectory directory, IFdbRea } /// Returns the list of subdirectories of the current directory. - public static Task> ListAsync(this IFdbDirectory directory, IFdbReadOnlyTransaction trans) + public static Task> ListAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyTransaction trans) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -581,7 +581,7 @@ public static Task> ListAsync(this IFdbDirectory directory, IFdbRea } /// Returns the list of subdirectories of the sub-directory with the given . 
- public static Task> ListAsync(this IFdbDirectory directory, IFdbReadOnlyTransaction trans, string name) + public static Task> ListAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyTransaction trans, [NotNull] string name) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -590,7 +590,7 @@ public static Task> ListAsync(this IFdbDirectory directory, IFdbRea } /// Returns the list of subdirectories of directory at , if it exists - public static Task> TryListAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, IEnumerable path, CancellationToken cancellationToken) + public static Task> TryListAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyRetryable db, [NotNull] IEnumerable path, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -599,7 +599,7 @@ public static Task> TryListAsync(this IFdbDirectory directory, IFdb } /// Returns the list of subdirectories of the sub-directory with the given , if it exists - public static Task> TryListAsync(this IFdbDirectory directory, IFdbReadOnlyRetryable db, string name, CancellationToken cancellationToken) + public static Task> TryListAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyRetryable db, [NotNull] string name, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -608,7 +608,7 @@ public static Task> TryListAsync(this IFdbDirectory directory, IFdb } /// Returns the list of subdirectories of the sub-directory with the given , if it exists - public static Task> TryListAsync(this IFdbDirectory directory, IFdbReadOnlyTransaction trans, string name) + public static Task> TryListAsync([NotNull] this IFdbDirectory directory, [NotNull] IFdbReadOnlyTransaction 
trans, [NotNull] string name) { if (directory == null) throw new ArgumentNullException("directory"); if (trans == null) throw new ArgumentNullException("trans"); @@ -617,7 +617,7 @@ public static Task> TryListAsync(this IFdbDirectory directory, IFdb } /// Returns the list of all the subdirectories of the current directory. - public static Task> ListAsync(this FdbDirectorySubspace subspace, IFdbReadOnlyRetryable db, CancellationToken cancellationToken) + public static Task> ListAsync([NotNull] this FdbDirectorySubspace subspace, [NotNull] IFdbReadOnlyRetryable db, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); if (db == null) throw new ArgumentNullException("db"); @@ -625,7 +625,7 @@ public static Task> ListAsync(this FdbDirectorySubspace subspace, I } /// Returns the list of all the subdirectories of the current directory, it it exists. - public static Task> TryListAsync(this FdbDirectorySubspace subspace, IFdbReadOnlyRetryable db, CancellationToken cancellationToken) + public static Task> TryListAsync([NotNull] this FdbDirectorySubspace subspace, [NotNull] IFdbReadOnlyRetryable db, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); if (db == null) throw new ArgumentNullException("db"); @@ -637,7 +637,7 @@ public static Task> TryListAsync(this FdbDirectorySubspace subspace #region Metadata /// Change the layer id of the directory at - public static Task ChangeLayerAsync(this FdbDirectoryLayer directory, IFdbRetryable db, IEnumerable path, Slice newLayer, CancellationToken cancellationToken) + public static Task ChangeLayerAsync([NotNull] this FdbDirectoryLayer directory, [NotNull] IFdbRetryable db, [NotNull] IEnumerable path, Slice newLayer, CancellationToken cancellationToken) { if (directory == null) throw new ArgumentNullException("directory"); if (db == null) throw new ArgumentNullException("db"); @@ -646,7 +646,7 @@ public static Task 
ChangeLayerAsync(this FdbDirectoryLayer } /// Change the layer id of this directory - public static Task ChangeLayerAsync(this FdbDirectorySubspace subspace, IFdbRetryable db, Slice newLayer, CancellationToken cancellationToken) + public static Task ChangeLayerAsync([NotNull] this FdbDirectorySubspace subspace, [NotNull] IFdbRetryable db, Slice newLayer, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); if (db == null) throw new ArgumentNullException("db"); diff --git a/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs b/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs index 57f512573..351f2af81 100644 --- a/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs +++ b/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs @@ -38,6 +38,7 @@ namespace FoundationDB.Layers.Directories using System.Diagnostics; using System.Linq; using System.Threading.Tasks; + using JetBrains.Annotations; /// Provides a FdbDirectoryLayer class for managing directories in FoundationDB. /// Directories are a recommended approach for administering layers and applications. Directories work in conjunction with subspaces. Each layer or application should create or open at least one directory with which to manage its subspace(s). 
@@ -62,30 +63,34 @@ public class FdbDirectoryLayer : IFdbDirectory public static bool AnnotateTransactions { get; set; } /// Subspace where the content of each folder will be stored - public IFdbSubspace ContentSubspace { get; private set; } + public IFdbSubspace ContentSubspace { [NotNull] get; private set; } /// Subspace where all the metadata nodes for each folder will be stored - public IFdbSubspace NodeSubspace { get; private set; } + public IFdbSubspace NodeSubspace { [NotNull] get; private set; } /// Root node of the directory - internal IFdbSubspace RootNode { get; private set; } + internal IFdbSubspace RootNode { [NotNull] get; private set; } /// Allocated used to generated prefix for new content - internal FdbHighContentionAllocator Allocator { get; private set; } + internal FdbHighContentionAllocator Allocator { [NotNull] get; private set; } /// Gets the path for the root node of this FdbDirectoryLayer. - internal IFdbTuple Location { get; private set; } + internal IFdbTuple Location { [NotNull] get; private set; } /// Name of root directory of this layer /// Returns String.Empty for the root Directory Layer, or the name of the partition - public string Name { get { return this.Path.Count == 0 ? String.Empty : this.Path[this.Path.Count - 1]; } } + public string Name + { + [NotNull] + get { return this.Path.Count == 0 ? String.Empty : this.Path[this.Path.Count - 1]; } + } /// Formatted path of the root directory of this layer - public string FullName { get { return string.Join("/", this.Path); } } + public string FullName { [NotNull] get { return string.Join("/", this.Path); } } /// Gets the path for the root node of this FdbDirectoryLayer /// Normally constructed DirectoryLayers have an empty path, but DirectoryLayers returned by for s inside of a could have non-empty paths. 
- public IReadOnlyList Path { get; private set; } + public IReadOnlyList Path { [NotNull] get; private set; } /// Returns the layer id for this FdbDirectoryLayer, which is always Slice.Empty. Slice IFdbDirectory.Layer { get { return Slice.Empty; } } @@ -94,6 +99,7 @@ public class FdbDirectoryLayer : IFdbDirectory FdbDirectoryLayer IFdbDirectory.DirectoryLayer { get { return this; } } /// Convert a relative path in this Directory Layer, into an absolute path from the root of partition of the database + [NotNull] internal IFdbTuple PartitionSubPath(IFdbTuple path = null) { // If the DL is the root, the path is already absolute @@ -146,6 +152,7 @@ internal FdbDirectoryLayer(IFdbSubspace nodeSubspace, IFdbSubspace contentSubspa } /// Create an instance of the default Directory Layer + [NotNull] public static FdbDirectoryLayer Create() { return new FdbDirectoryLayer(new FdbSubspace(FdbKey.Directory), FdbSubspace.Empty, null); @@ -154,6 +161,7 @@ public static FdbDirectoryLayer Create() /// Create an instance of a Directory Layer located under a specific prefix and path /// Prefix for the content. The nodes will be stored under + <FE> /// Optional path, if the Directory Layer is not located at the root of the database. + [NotNull] public static FdbDirectoryLayer Create(Slice prefix, IEnumerable path = null) { var subspace = FdbSubspace.Create(prefix); @@ -164,6 +172,7 @@ public static FdbDirectoryLayer Create(Slice prefix, IEnumerable path = /// Create an instance of a Directory Layer located under a specific subspace and path /// Subspace for the content. The nodes will be stored under .Key + <FE> /// Optional path, if the Directory Layer is not located at the root of the database. 
+ [NotNull] public static FdbDirectoryLayer Create(IFdbSubspace subspace, IEnumerable path = null) { if (subspace == null) throw new ArgumentNullException("subspace"); @@ -175,6 +184,7 @@ public static FdbDirectoryLayer Create(IFdbSubspace subspace, IEnumerableSubspace for the nodes of the Directory Layer. /// Subspace for the content of the Directory Layer. /// Optional path, if the Directory Layer is not located at the root of the database + [NotNull] public static FdbDirectoryLayer Create(IFdbSubspace nodeSubspace, IFdbSubspace contentSubspace, IEnumerable path = null) { if (nodeSubspace == null) throw new ArgumentNullException("nodeSubspace"); @@ -194,7 +204,7 @@ public static FdbDirectoryLayer Create(IFdbSubspace nodeSubspace, IFdbSubspace c /// Transaction to use for the operation /// Path of the directory to create or open /// If layer is specified, it is checked against the layer of an existing directory or set as the layer of a new directory. - public Task CreateOrOpenAsync(IFdbTransaction trans, IEnumerable path, Slice layer = default(Slice)) + public Task CreateOrOpenAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable path, Slice layer = default(Slice)) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -208,7 +218,7 @@ public static FdbDirectoryLayer Create(IFdbSubspace nodeSubspace, IFdbSubspace c /// Transaction to use for the operation /// Path of the directory to open. /// Optional layer id of the directory. If it is different than the layer specified when creating the directory, an exception will be thrown. 
- public Task OpenAsync(IFdbReadOnlyTransaction trans, IEnumerable path, Slice layer = default(Slice)) + public Task OpenAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IEnumerable path, Slice layer = default(Slice)) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -222,7 +232,7 @@ public static FdbDirectoryLayer Create(IFdbSubspace nodeSubspace, IFdbSubspace c /// Transaction to use for the operation /// Path of the directory to create /// If is specified, it is recorded with the directory and will be checked by future calls to open. - public Task CreateAsync(IFdbTransaction trans, IEnumerable path, Slice layer = default(Slice)) + public Task CreateAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable path, Slice layer = default(Slice)) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -234,7 +244,7 @@ public static FdbDirectoryLayer Create(IFdbSubspace nodeSubspace, IFdbSubspace c /// Transaction to use for the operation /// Path of the directory to open. /// Optional layer id of the directory. If it is different than the layer specified when creating the directory, an exception will be thrown. - public Task TryOpenAsync(IFdbReadOnlyTransaction trans, IEnumerable path, Slice layer = default(Slice)) + public Task TryOpenAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IEnumerable path, Slice layer = default(Slice)) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -246,7 +256,7 @@ public static FdbDirectoryLayer Create(IFdbSubspace nodeSubspace, IFdbSubspace c /// Transaction to use for the operation /// Path of the directory to create /// If is specified, it is recorded with the directory and will be checked by future calls to open. 
- public Task TryCreateAsync(IFdbTransaction trans, IEnumerable path, Slice layer = default(Slice)) + public Task TryCreateAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable path, Slice layer = default(Slice)) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -259,7 +269,7 @@ public static FdbDirectoryLayer Create(IFdbSubspace nodeSubspace, IFdbSubspace c /// Path of the directory to create /// If is specified, it is recorded with the directory and will be checked by future calls to open. /// The directory will be created with the given physical prefix; otherwise a prefix is allocated automatically. - public Task RegisterAsync(IFdbTransaction trans, IEnumerable path, Slice layer, Slice prefix) + public Task RegisterAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable path, Slice layer, Slice prefix) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -272,7 +282,7 @@ public Task RegisterAsync(IFdbTransaction trans, IEnumerab /// Path of the directory to create /// If is specified, it is recorded with the directory and will be checked by future calls to open. /// The directory will be created with the given physical prefix; otherwise a prefix is allocated automatically. 
- public Task TryRegisterAsync(IFdbTransaction trans, IEnumerable path, Slice layer, Slice prefix) + public Task TryRegisterAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable path, Slice layer, Slice prefix) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -291,7 +301,7 @@ public Task TryRegisterAsync(IFdbTransaction trans, IEnume /// Transaction to use for the operation /// Path of the directory to move /// New path of the directory - public Task MoveAsync(IFdbTransaction trans, IEnumerable oldPath, IEnumerable newPath) + public Task MoveAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable oldPath, [NotNull] IEnumerable newPath) { if (trans == null) throw new ArgumentNullException("trans"); if (oldPath == null) throw new ArgumentNullException("oldPath"); @@ -312,7 +322,7 @@ public Task MoveAsync(IFdbTransaction trans, IEnumerableTransaction to use for the operation /// Path of the directory to move /// New path of the directory - public Task TryMoveAsync(IFdbTransaction trans, IEnumerable oldPath, IEnumerable newPath) + public Task TryMoveAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable oldPath, [NotNull] IEnumerable newPath) { if (trans == null) throw new ArgumentNullException("trans"); if (oldPath == null) throw new ArgumentNullException("oldPath"); @@ -330,12 +340,12 @@ public Task TryMoveAsync(IFdbTransaction trans, IEnumerabl #region MoveTo / TryMoveTo - public Task MoveToAsync(IFdbTransaction trans, IEnumerable newAbsolutePath) + Task IFdbDirectory.MoveToAsync(IFdbTransaction trans, IEnumerable newAbsolutePath) { throw new NotSupportedException("The root directory cannot be moved"); } - public Task TryMoveToAsync(IFdbTransaction trans, IEnumerable newAbsolutePath) + Task IFdbDirectory.TryMoveToAsync(IFdbTransaction trans, IEnumerable newAbsolutePath) { throw new NotSupportedException("The root directory cannot be moved"); } @@ -349,7 +359,7 @@ public 
Task TryMoveToAsync(IFdbTransaction trans, IEnumera /// /// Transaction to use for the operation /// Path of the directory to remove (including any subdirectories) - public Task RemoveAsync(IFdbTransaction trans, IEnumerable path) + public Task RemoveAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable path) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -362,7 +372,7 @@ public Task RemoveAsync(IFdbTransaction trans, IEnumerable path) /// /// Transaction to use for the operation /// Path of the directory to remove (including any subdirectories) - public Task TryRemoveAsync(IFdbTransaction trans, IEnumerable path) + public Task TryRemoveAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable path) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -380,7 +390,7 @@ public Task TryRemoveAsync(IFdbTransaction trans, IEnumerable path /// Transaction to use for the operation /// Path of the directory to remove (including any subdirectories) /// Returns true if the directory exists, otherwise false. 
- public Task ExistsAsync(IFdbReadOnlyTransaction trans, IEnumerable path) + public Task ExistsAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IEnumerable path) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -399,7 +409,7 @@ public Task ExistsAsync(IFdbReadOnlyTransaction trans, IEnumerable /// Returns the list of subdirectories of directory at /// Transaction to use for the operation /// Path of the directory to list - public Task> ListAsync(IFdbReadOnlyTransaction trans, IEnumerable path) + public Task> ListAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IEnumerable path) { if (trans == null) throw new ArgumentNullException("trans"); @@ -408,23 +418,24 @@ public Task> ListAsync(IFdbReadOnlyTransaction trans, IEnumerableReturns the list of subdirectories of the root directory /// Transaction to use for the operation - public Task> ListAsync(IFdbReadOnlyTransaction trans) + public Task> ListAsync([NotNull] IFdbReadOnlyTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); + return ListInternalAsync(trans, FdbTuple.Empty, throwIfMissing: true); } /// Returns the list of subdirectories of directory at , if it exists. 
/// Transaction to use for the operation /// Path of the directory to list - public Task> TryListAsync(IFdbReadOnlyTransaction trans, IEnumerable path) + public Task> TryListAsync([NotNull] IFdbReadOnlyTransaction trans, IEnumerable path) { if (trans == null) throw new ArgumentNullException("trans"); return ListInternalAsync(trans, ParsePath(path), throwIfMissing: false); } - public Task> TryListAsync(IFdbReadOnlyTransaction trans) + public Task> TryListAsync([NotNull] IFdbReadOnlyTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); return ListInternalAsync(trans, FdbTuple.Empty, throwIfMissing: false); @@ -432,11 +443,11 @@ public Task> TryListAsync(IFdbReadOnlyTransaction trans) #endregion - /// Change the layer id of the directory at + /// Change the layer id of the directory at /// Transaction to use for the operation /// Path of the directory to change /// New layer id of the directory - public async Task ChangeLayerAsync(IFdbTransaction trans, IEnumerable path, Slice newLayer) + public async Task ChangeLayerAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable path, Slice newLayer) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -477,7 +488,7 @@ public Node(IFdbSubspace subspace, IFdbTuple path, IFdbTuple targetPath, Slice l public bool Exists { get { return this.Subspace != null; } } - public IFdbTuple PartitionSubPath { get { return this.TargetPath.Substring(this.Path.Count); } } + public IFdbTuple PartitionSubPath { [NotNull] get { return this.TargetPath.Substring(this.Path.Count); } } public bool IsInPartition(bool includeEmptySubPath) { @@ -486,12 +497,13 @@ public bool IsInPartition(bool includeEmptySubPath) } - private static void SetLayer(IFdbTransaction trans, IFdbSubspace subspace, Slice layer) + private static void SetLayer([NotNull] IFdbTransaction trans, [NotNull] IFdbSubspace subspace, Slice layer) { if (layer.IsNull) layer = 
Slice.Empty; trans.Set(subspace.Tuples.EncodeKey(LayerSuffix), layer); } + [NotNull] internal static IFdbTuple ParsePath(IEnumerable path, string argName = null) { if (path == null) return FdbTuple.Empty; @@ -507,19 +519,17 @@ internal static IFdbTuple ParsePath(IEnumerable path, string argName = n return FdbTuple.FromArray(pathCopy); } - internal static IFdbTuple ParsePath(string name, string argName = null) + [NotNull] + internal static IFdbTuple ParsePath([NotNull] string name, string argName = null) { - Contract.Requires(name != null); - if (name == null) throw new ArgumentNullException(argName ?? "name"); return FdbTuple.Create(name); } - internal static IFdbTuple VerifyPath(IFdbTuple path, string argName = null) + [NotNull] + internal static IFdbTuple VerifyPath([NotNull] IFdbTuple path, string argName = null) { - Contract.Requires(path != null); - // The path should not contain any null strings if (path == null) throw new ArgumentNullException(argName ?? "path"); int count = path.Count; @@ -533,7 +543,8 @@ internal static IFdbTuple VerifyPath(IFdbTuple path, string argName = null) return path; } - internal IReadOnlyList ToAbsolutePath(IFdbTuple path) + [NotNull] + internal IReadOnlyList ToAbsolutePath([NotNull] IFdbTuple path) { if (path.Count == 0) return this.Path; var converted = path.ToArray(); @@ -542,7 +553,8 @@ internal IReadOnlyList ToAbsolutePath(IFdbTuple path) } /// Maps an absolute path to a relative path within this directory layer - internal IFdbTuple ToRelativePath(IFdbTuple path) + [NotNull] + internal IFdbTuple ToRelativePath([NotNull] IFdbTuple path) { if (path == null) throw new ArgumentNullException("path"); @@ -656,7 +668,7 @@ internal async Task CreateOrOpenInternalAsync(IFdbReadOnly return ContentsOfNode(node, path, layer); } - internal async Task MoveInternalAsync(IFdbTransaction trans, IFdbTuple oldPath, IFdbTuple newPath, bool throwOnError) + internal async Task MoveInternalAsync([NotNull] IFdbTransaction trans, [NotNull] IFdbTuple 
oldPath, [NotNull] IFdbTuple newPath, bool throwOnError) { Contract.Requires(trans != null && oldPath != null && newPath != null); @@ -715,7 +727,7 @@ internal async Task MoveInternalAsync(IFdbTransaction tran return ContentsOfNode(oldNode.Subspace, newPath, oldNode.Layer); } - internal async Task RemoveInternalAsync(IFdbTransaction trans, IFdbTuple path, bool throwIfMissing) + internal async Task RemoveInternalAsync([NotNull] IFdbTransaction trans, [NotNull] IFdbTuple path, bool throwIfMissing) { Contract.Requires(trans != null && path != null); @@ -745,7 +757,7 @@ internal async Task RemoveInternalAsync(IFdbTransaction trans, IFdbTuple p return true; } - internal async Task> ListInternalAsync(IFdbReadOnlyTransaction trans, IFdbTuple path, bool throwIfMissing) + internal async Task> ListInternalAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IFdbTuple path, bool throwIfMissing) { Contract.Requires(trans != null && path != null); @@ -770,7 +782,7 @@ internal async Task> ListInternalAsync(IFdbReadOnlyTransaction tran .ConfigureAwait(false); } - internal async Task ExistsInternalAsync(IFdbReadOnlyTransaction trans, IFdbTuple path) + internal async Task ExistsInternalAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IFdbTuple path) { Contract.Requires(trans != null && path != null); @@ -788,7 +800,7 @@ internal async Task ExistsInternalAsync(IFdbReadOnlyTransaction trans, IFd return true; } - internal async Task ChangeLayerInternalAsync(IFdbTransaction trans, IFdbTuple path, Slice newLayer) + internal async Task ChangeLayerInternalAsync([NotNull] IFdbTransaction trans, [NotNull] IFdbTuple path, Slice newLayer) { Contract.Requires(trans != null && path != null); @@ -810,8 +822,10 @@ internal async Task ChangeLayerInternalAsync(IFdbTransaction trans, IFdbTuple pa SetLayer(trans, node.Subspace, newLayer); } - private async Task CheckReadVersionAsync(IFdbReadOnlyTransaction trans) + private async Task CheckReadVersionAsync([NotNull] IFdbReadOnlyTransaction 
trans) { + Contract.Requires(trans != null); + var value = await trans.GetAsync(this.RootNode.Tuples.EncodeKey(VersionKey)).ConfigureAwait(false); if (!value.IsNullOrEmpty) { @@ -819,8 +833,10 @@ private async Task CheckReadVersionAsync(IFdbReadOnlyTransaction trans) } } - private async Task CheckWriteVersionAsync(IFdbTransaction trans) + private async Task CheckWriteVersionAsync([NotNull] IFdbTransaction trans) { + Contract.Requires(trans != null); + var value = await trans.GetAsync(this.RootNode.Tuples.EncodeKey(VersionKey)).ConfigureAwait(false); if (value.IsNullOrEmpty) { @@ -844,8 +860,10 @@ private static void CheckVersion(Slice value, bool writeAccess) if (writeAccess && minor > LayerVersion.Minor) throw new InvalidOperationException(String.Format("Directory with version {0}.{1}.{2} is read-only when opened using directory layer {3}", major, minor, upgrade, LayerVersion)); } - private void InitializeDirectory(IFdbTransaction trans) + private void InitializeDirectory([NotNull] IFdbTransaction trans) { + Contract.Requires(trans != null); + // Set the version key var writer = new SliceWriter(3 * 4); writer.WriteFixed32((uint)LayerVersion.Major); @@ -854,7 +872,7 @@ private void InitializeDirectory(IFdbTransaction trans) trans.Set(this.RootNode.Tuples.EncodeKey(VersionKey), writer.ToSlice()); } - private async Task NodeContainingKey(IFdbReadOnlyTransaction tr, Slice key) + private async Task NodeContainingKey([NotNull] IFdbReadOnlyTransaction tr, Slice key) { Contract.Requires(tr != null); @@ -886,6 +904,7 @@ private async Task NodeContainingKey(IFdbReadOnlyTransaction tr, S } /// Returns the subspace to a node metadata, given its prefix + [CanBeNull] private IFdbSubspace NodeWithPrefix(Slice prefix) { if (prefix.IsNullOrEmpty) return null; @@ -893,7 +912,8 @@ private IFdbSubspace NodeWithPrefix(Slice prefix) } /// Returns a new Directory Subspace given its node subspace, path and layer id - private FdbDirectorySubspace ContentsOfNode(IFdbSubspace node, 
IFdbTuple relativePath, Slice layer) + [NotNull] + private FdbDirectorySubspace ContentsOfNode([NotNull] IFdbSubspace node, [NotNull] IFdbTuple relativePath, Slice layer) { Contract.Requires(node != null); @@ -909,6 +929,7 @@ private FdbDirectorySubspace ContentsOfNode(IFdbSubspace node, IFdbTuple relativ } } + [NotNull] private FdbDirectoryPartition GetPartitionForNode(Node node) { Contract.Requires(node.Subspace != null && node.Path != null && FdbDirectoryPartition.LayerId.Equals(node.Layer)); @@ -917,7 +938,7 @@ private FdbDirectoryPartition GetPartitionForNode(Node node) /// Finds a node subspace, given its path, by walking the tree from the root. /// Node if it was found, or null - private async Task FindAsync(IFdbReadOnlyTransaction tr, IFdbTuple path) + private async Task FindAsync([NotNull] IFdbReadOnlyTransaction tr, [NotNull] IFdbTuple path) { Contract.Requires(tr != null && path != null); @@ -948,7 +969,8 @@ private async Task FindAsync(IFdbReadOnlyTransaction tr, IFdbTuple path) } /// Returns the list of names and nodes of all children of the specified node - private IFdbAsyncEnumerable> SubdirNamesAndNodes(IFdbReadOnlyTransaction tr, IFdbSubspace node) + [NotNull] + private IFdbAsyncEnumerable> SubdirNamesAndNodes([NotNull] IFdbReadOnlyTransaction tr, [NotNull] IFdbSubspace node) { Contract.Requires(tr != null && node != null); @@ -963,7 +985,7 @@ private IFdbAsyncEnumerable> SubdirNamesAndNo /// Remove an existing node from its parents /// True if the parent node was found, otherwise false - private async Task RemoveFromParent(IFdbTransaction tr, IFdbTuple path) + private async Task RemoveFromParent([NotNull] IFdbTransaction tr, [NotNull] IFdbTuple path) { Contract.Requires(tr != null && path != null); @@ -978,7 +1000,7 @@ private async Task RemoveFromParent(IFdbTransaction tr, IFdbTuple path) } /// Resursively remove a node (including the content), all its children - private async Task RemoveRecursive(IFdbTransaction tr, IFdbSubspace node) + private 
async Task RemoveRecursive([NotNull] IFdbTransaction tr, [NotNull] IFdbSubspace node) { Contract.Requires(tr != null && node != null); @@ -993,7 +1015,7 @@ private async Task RemoveRecursive(IFdbTransaction tr, IFdbSubspace node) tr.ClearRange(node.Tuples.ToRange()); } - private async Task IsPrefixFree(IFdbReadOnlyTransaction tr, Slice prefix) + private async Task IsPrefixFree([NotNull] IFdbReadOnlyTransaction tr, Slice prefix) { Contract.Requires(tr != null); @@ -1013,7 +1035,7 @@ private async Task IsPrefixFree(IFdbReadOnlyTransaction tr, Slice prefix) .ConfigureAwait(false); } - private static Slice GetSubDirKey(IFdbSubspace parent, string path) + private static Slice GetSubDirKey([NotNull] IFdbSubspace parent, [NotNull] string path) { Contract.Requires(parent != null && path != null); @@ -1022,10 +1044,15 @@ private static Slice GetSubDirKey(IFdbSubspace parent, string path) return parent.Tuples.EncodeKey(SUBDIRS, path); } + #endregion + + #region Path Utils... + /// Convert a tuple representing a path, into a string array /// Tuple that should only contain strings /// Array of strings - public static string[] ParsePath(IFdbTuple path) + [NotNull] + public static string[] ParsePath([NotNull] IFdbTuple path) { if (path == null) throw new ArgumentNullException("path"); var tmp = new string[path.Count]; @@ -1036,16 +1063,14 @@ public static string[] ParsePath(IFdbTuple path) return tmp; } - #endregion - - #region Path Utils... 
- - public static string[] Combine(IEnumerable parent, string path) + [NotNull] + public static string[] Combine([NotNull] IEnumerable parent, string path) { if (parent == null) throw new ArgumentNullException("parent"); return parent.Concat(new[] { path }).ToArray(); } + [NotNull] public static string[] Combine(IEnumerable parent, params string[] paths) { if (parent == null) throw new ArgumentNullException("parent"); @@ -1053,13 +1078,15 @@ public static string[] Combine(IEnumerable parent, params string[] paths return parent.Concat(paths).ToArray(); } - public static string[] Combine(IEnumerable parent, IEnumerable paths) + [NotNull] + public static string[] Combine([NotNull] IEnumerable parent, [NotNull] IEnumerable paths) { if (parent == null) throw new ArgumentNullException("parent"); if (paths == null) throw new ArgumentNullException("paths"); return parent.Concat(paths).ToArray(); } + [NotNull] public static string[] Parse(string path) { if (string.IsNullOrEmpty(path)) return new string[0]; @@ -1106,8 +1133,9 @@ public static string[] Parse(string path) } return paths.ToArray(); } - - public static string FormatPath(IEnumerable paths) + + [NotNull] + public static string FormatPath([NotNull] IEnumerable paths) { if (paths == null) throw new ArgumentNullException("paths"); @@ -1124,7 +1152,7 @@ public static string FormatPath(IEnumerable paths) })); } - + #endregion } diff --git a/FoundationDB.Client/Layers/Directories/FdbDirectorySubspace.cs b/FoundationDB.Client/Layers/Directories/FdbDirectorySubspace.cs index e2eb68a36..fc035001a 100644 --- a/FoundationDB.Client/Layers/Directories/FdbDirectorySubspace.cs +++ b/FoundationDB.Client/Layers/Directories/FdbDirectorySubspace.cs @@ -31,6 +31,7 @@ namespace FoundationDB.Layers.Directories using FoundationDB.Client; using FoundationDB.Client.Utils; using FoundationDB.Layers.Tuples; + using JetBrains.Annotations; using System; using System.Collections.Generic; using System.Diagnostics; @@ -59,28 +60,30 @@ internal 
FdbDirectorySubspace(IFdbTuple location, IFdbTuple relativeLocation, Sl } /// Absolute location of the directory - protected IFdbTuple Location { get; set;} + protected IFdbTuple Location { [NotNull] get; private set; } /// Location of the directory relative to its parent Directory Layer - protected IFdbTuple RelativeLocation { get; set; } + protected IFdbTuple RelativeLocation { [NotNull] get; private set; } /// Absolute path of this directory - public IReadOnlyList Path { get; private set; } + public IReadOnlyList Path { [NotNull] get; private set; } /// Name of the directory public string Name { + [NotNull] get { return this.Path.Count == 0 ? String.Empty : this.Path[this.Path.Count - 1]; } } /// Formatted path of this directory public string FullName { + [NotNull] get { return String.Join("/", this.Path); } } /// Instance of the DirectoryLayer that was used to create or open this directory - public FdbDirectoryLayer DirectoryLayer { get; private set; } + public FdbDirectoryLayer DirectoryLayer { [NotNull] get; private set; } /// Layer id of this directory public Slice Layer { get; private set; } @@ -96,6 +99,7 @@ protected virtual FdbDirectoryLayer GetLayerForPath(IFdbTuple relativeLocation) /// Convert a path relative to this directory, into a path relative to the root of the current partition /// Path relative from this directory /// Path relative to the path of the current partition + [NotNull] protected virtual IFdbTuple ToRelativePath(IFdbTuple location) { return location == null ? this.RelativeLocation : this.RelativeLocation.Concat(location); @@ -104,6 +108,7 @@ protected virtual IFdbTuple ToRelativePath(IFdbTuple location) /// Convert a path relative to this directory, into a path relative to the root of the current partition /// Path relative from this directory /// Path relative to the path of the current partition + [NotNull] protected IFdbTuple ToRelativePath(IEnumerable path) { return ToRelativePath(path == null ? 
null : FdbTuple.FromEnumerable(path)); @@ -123,7 +128,7 @@ public void CheckLayer(Slice layer) /// Change the layer id of this directory /// Transaction to use for the operation /// New layer id of this directory - public async Task ChangeLayerAsync(IFdbTransaction trans, Slice newLayer) + public async Task ChangeLayerAsync([NotNull] IFdbTransaction trans, Slice newLayer) { if (trans == null) throw new ArgumentNullException("trans"); if (newLayer.IsNull) newLayer = Slice.Empty; @@ -155,7 +160,7 @@ public async Task ChangeLayerAsync(IFdbTransaction trans, /// Transaction to use for the operation /// Relative path of the subdirectory to create or open /// If is specified, it is checked against the layer of an existing subdirectory or set as the layer of a new subdirectory. - public Task CreateOrOpenAsync(IFdbTransaction trans, IEnumerable path, Slice layer = default(Slice)) + public Task CreateOrOpenAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable path, Slice layer = default(Slice)) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -169,7 +174,7 @@ public async Task ChangeLayerAsync(IFdbTransaction trans, /// Transaction to use for the operation /// Relative path of the subdirectory to open /// If specified, the opened directory must have the same layer id. - public Task OpenAsync(IFdbReadOnlyTransaction trans, IEnumerable path, Slice layer = default(Slice)) + public Task OpenAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IEnumerable path, Slice layer = default(Slice)) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -183,7 +188,7 @@ public async Task ChangeLayerAsync(IFdbTransaction trans, /// Relative path of the subdirectory to open /// If specified, the opened directory must have the same layer id. 
/// Returns the directory if it exists, or null if it was not found - public Task TryOpenAsync(IFdbReadOnlyTransaction trans, IEnumerable path, Slice layer = default(Slice)) + public Task TryOpenAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IEnumerable path, Slice layer = default(Slice)) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -196,7 +201,7 @@ public async Task ChangeLayerAsync(IFdbTransaction trans, /// Transaction to use for the operation /// Relative path of the subdirectory to create /// If is specified, it is recorded with the subdirectory and will be checked by future calls to open. - public Task CreateAsync(IFdbTransaction trans, IEnumerable path, Slice layer = default(Slice)) + public Task CreateAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable path, Slice layer = default(Slice)) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -209,7 +214,7 @@ public async Task ChangeLayerAsync(IFdbTransaction trans, /// Transaction to use for the operation /// Relative path of the subdirectory to create /// If is specified, it is recorded with the subdirectory and will be checked by future calls to open. - public Task TryCreateAsync(IFdbTransaction trans, IEnumerable path, Slice layer = default(Slice)) + public Task TryCreateAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable path, Slice layer = default(Slice)) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -221,7 +226,7 @@ public async Task ChangeLayerAsync(IFdbTransaction trans, /// Path of the directory to create /// If is specified, it is recorded with the directory and will be checked by future calls to open. /// The directory will be created with the given physical prefix; otherwise a prefix is allocated automatically. 
- public Task RegisterAsync(IFdbTransaction trans, IEnumerable path, Slice layer, Slice prefix) + public Task RegisterAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable path, Slice layer, Slice prefix) { if (trans == null) throw new ArgumentNullException("trans"); if (path == null) throw new ArgumentNullException("path"); @@ -234,7 +239,7 @@ public Task RegisterAsync(IFdbTransaction trans, IEnumerab /// /// Transaction to use for the operation /// Full path (from the root) where this directory will be moved - public Task MoveToAsync(IFdbTransaction trans, IEnumerable newAbsolutePath) + public Task MoveToAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable newAbsolutePath) { if (trans == null) throw new ArgumentNullException("trans"); if (newAbsolutePath == null) throw new ArgumentNullException("newAbsolutePath"); @@ -268,7 +273,7 @@ Task IFdbDirectory.MoveAsync(IFdbTransaction trans, IEnume /// /// Transaction to use for the operation /// Full path (from the root) where this directory will be moved - public Task TryMoveToAsync(IFdbTransaction trans, IEnumerable newPath) + public Task TryMoveToAsync([NotNull] IFdbTransaction trans, [NotNull] IEnumerable newPath) { if (trans == null) throw new ArgumentNullException("trans"); if (newPath == null) throw new ArgumentNullException("newPath"); @@ -300,7 +305,7 @@ Task IFdbDirectory.TryMoveAsync(IFdbTransaction trans, IEn /// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. 
/// /// Transaction to use for the operation - public Task RemoveAsync(IFdbTransaction trans) + public Task RemoveAsync([NotNull] IFdbTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); @@ -315,7 +320,7 @@ public Task RemoveAsync(IFdbTransaction trans) /// /// Transaction to use for the operation /// Path of the sub-directory to remove (relative to this directory) - public Task RemoveAsync(IFdbTransaction trans, IEnumerable path) + public Task RemoveAsync([NotNull] IFdbTransaction trans, IEnumerable path) { if (trans == null) throw new ArgumentNullException("trans"); @@ -333,7 +338,7 @@ public Task RemoveAsync(IFdbTransaction trans, IEnumerable path) /// Warning: Clients that have already opened the directory might still insert data into its contents after it is removed. /// /// Transaction to use for the operation - public Task TryRemoveAsync(IFdbTransaction trans) + public Task TryRemoveAsync([NotNull] IFdbTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); @@ -348,7 +353,7 @@ public Task TryRemoveAsync(IFdbTransaction trans) /// /// Transaction to use for the operation /// Path of the sub-directory to remove (relative to this directory) - public Task TryRemoveAsync(IFdbTransaction trans, IEnumerable path) + public Task TryRemoveAsync([NotNull] IFdbTransaction trans, IEnumerable path) { if (trans == null) throw new ArgumentNullException("trans"); @@ -364,7 +369,7 @@ public Task TryRemoveAsync(IFdbTransaction trans, IEnumerable path /// Checks if this directory exists /// Returns true if the directory exists, otherwise false. - public Task ExistsAsync(IFdbReadOnlyTransaction trans) + public Task ExistsAsync([NotNull] IFdbReadOnlyTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); @@ -376,7 +381,7 @@ public Task ExistsAsync(IFdbReadOnlyTransaction trans) /// Checks if a sub-directory exists /// Returns true if the directory exists, otherwise false. 
- public Task ExistsAsync(IFdbReadOnlyTransaction trans, IEnumerable path) + public Task ExistsAsync([NotNull] IFdbReadOnlyTransaction trans, IEnumerable path) { if (trans == null) throw new ArgumentNullException("trans"); @@ -391,28 +396,28 @@ public Task ExistsAsync(IFdbReadOnlyTransaction trans, IEnumerable } /// Returns the list of all the subdirectories of the current directory. - public Task> ListAsync(IFdbReadOnlyTransaction trans) + public Task> ListAsync([NotNull] IFdbReadOnlyTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); return this.DirectoryLayer.ListInternalAsync(trans, this.RelativeLocation, throwIfMissing: true); } /// Returns the list of all the subdirectories of a sub-directory. - public Task> ListAsync(IFdbReadOnlyTransaction trans, IEnumerable path) + public Task> ListAsync([NotNull] IFdbReadOnlyTransaction trans, IEnumerable path) { if (trans == null) throw new ArgumentNullException("trans"); return this.DirectoryLayer.ListInternalAsync(trans, ToRelativePath(path), throwIfMissing: true); } /// Returns the list of all the subdirectories of a sub-directory, it it exists. - public Task> TryListAsync(IFdbReadOnlyTransaction trans) + public Task> TryListAsync([NotNull] IFdbReadOnlyTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); return this.DirectoryLayer.ListInternalAsync(trans, this.RelativeLocation, throwIfMissing: false); } /// Returns the list of all the subdirectories of the current directory, it it exists. 
- public Task> TryListAsync(IFdbReadOnlyTransaction trans, IEnumerable path) + public Task> TryListAsync([NotNull] IFdbReadOnlyTransaction trans, IEnumerable path) { if (trans == null) throw new ArgumentNullException("trans"); return this.DirectoryLayer.ListInternalAsync(trans, ToRelativePath(path), throwIfMissing: false); diff --git a/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs b/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs index 1ec2e79eb..55fd4d8b6 100644 --- a/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs +++ b/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs @@ -30,6 +30,7 @@ namespace FoundationDB.Layers.Directories { using FoundationDB.Client; using FoundationDB.Filters.Logging; + using JetBrains.Annotations; using System; using System.Diagnostics; using System.Threading.Tasks; @@ -55,21 +56,23 @@ public FdbHighContentionAllocator(IFdbSubspace subspace) } /// Location of the allocator - public IFdbSubspace Subspace { get; private set; } + public IFdbSubspace Subspace { [NotNull] get; private set; } /// Subspace used to store the allocation count for the current window - private IFdbSubspace Counters { get; set; } + private IFdbSubspace Counters { [NotNull] get; set; } /// Subspace used to store the prefixes allocated in the current window - private IFdbSubspace Recent { get; set; } + private IFdbSubspace Recent { [NotNull] get; set; } /// Returns a 64-bit integer that /// 1) has never and will never be returned by another call to this /// method on the same subspace /// 2) is nearly as short as possible given the above /// - public async Task AllocateAsync(IFdbTransaction trans) + public async Task AllocateAsync([NotNull] IFdbTransaction trans) { + if (trans == null) throw new ArgumentNullException("trans"); + // find the current window size, by reading the last entry in the 'counters' subspace long start = 0, count = 0; var kv = await trans diff --git 
a/FoundationDB.Client/Layers/Directories/IFdbDirectory.cs b/FoundationDB.Client/Layers/Directories/IFdbDirectory.cs index 3930f7891..0e9dd5b0a 100644 --- a/FoundationDB.Client/Layers/Directories/IFdbDirectory.cs +++ b/FoundationDB.Client/Layers/Directories/IFdbDirectory.cs @@ -42,7 +42,7 @@ namespace FoundationDB.Layers.Directories public interface IFdbDirectory { /// Name of this Directory. - string Name { get; } + string Name { [NotNull] get; } /// Formatted path of this Directory string FullName { [NotNull] get; } From 36612cee5a2dc3367fc28d678652ea32341df9d2 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Wed, 10 Dec 2014 23:01:26 +0100 Subject: [PATCH 33/63] Logging Filter: tweaked formatting of command and timing reports - log remembers if transaction was read-only or not - detailed timing report uses the timestamp of the first read operation as the start of the bargraphs, to offset impact of patterns like: CreateTransaction, CPU, CPU, CPU, SOME MORE CPU, Set()/Get() and Commit() where the time at the start does not count for the actual duration of the trans - added detailed version of commands report that dumps timestamps and durations - fixed some missing or extra newlines --- .../Filters/Logging/FdbTransactionLog.cs | 83 +++++++++++++++---- 1 file changed, 68 insertions(+), 15 deletions(-) diff --git a/FoundationDB.Client/Filters/Logging/FdbTransactionLog.cs b/FoundationDB.Client/Filters/Logging/FdbTransactionLog.cs index f541adc9a..2088dda91 100644 --- a/FoundationDB.Client/Filters/Logging/FdbTransactionLog.cs +++ b/FoundationDB.Client/Filters/Logging/FdbTransactionLog.cs @@ -57,6 +57,9 @@ public FdbTransactionLog(IFdbTransaction trans) /// Id of the logged transaction public int Id { get; private set; } + /// True if the transaction is Read Only + public bool IsReadOnly { get; private set; } + /// Number of operations performed by the transaction public int Operations { get { return m_operations; } } @@ -144,7 +147,8 @@ public void 
Start([NotNull] IFdbTransaction trans) Contract.Requires(trans != null); this.Id = trans.Id; - this.StartedUtc = DateTimeOffset.UtcNow; + this.IsReadOnly = trans.IsReadOnly; + this.StartedUtc = DateTimeOffset.UtcNow; //TODO: use a configurable clock? this.StartTimestamp = GetTimestamp(); } @@ -210,18 +214,46 @@ public void EndOperation([NotNull] Command cmd, Exception error = null) } /// Generate an ASCII report with all the commands that were executed by the transaction - public string GetCommandsReport() + [NotNull] + public string GetCommandsReport(bool detailed = false) { var culture = CultureInfo.InvariantCulture; var sb = new StringBuilder(); - sb.AppendLine(String.Format(culture, "Transaction #{0} command log:", this.Id)); - int reads = 0, writes = 0; + var cmds = this.Commands.ToArray(); + sb.AppendFormat(culture, "Transaction #{0} ({3}, {1} operations, started {2}Z", this.Id, cmds.Length, this.StartedUtc.TimeOfDay, this.IsReadOnly ? "read-only" : "read/write"); + if (this.StoppedUtc.HasValue) + sb.AppendFormat(culture, ", ended {0}Z)", this.StoppedUtc.Value.TimeOfDay); + else + sb.Append(", did not finish)"); + sb.AppendLine(); + + int reads = 0, writes = 0; for (int i = 0; i < cmds.Length; i++) { var cmd = cmds[i]; - sb.AppendFormat(culture, "{0,3}/{1,3} : {2}", i + 1, cmds.Length, cmd); + if (detailed) + { + sb.AppendFormat( + culture, + "{0,3} - T+{1,7:##0.000} ({2,7:##,##0} µs) : {3}", + /* 0 */ cmd.Step, + /* 1 */ cmd.StartOffset.TotalMilliseconds, + /* 2 */ cmd.Duration.Ticks / 10.0, + /* 3 */ cmd.ToString() + ); + } + else + { + sb.AppendFormat( + culture, + "{0,3} : {2}{1}", + /* 0 */ cmd.Step, + /* 1 */ cmd.ToString(), + /* 2 */ cmd.Error != null ? 
"[FAILED] " : "" + ); + } sb.AppendLine(); switch (cmd.Mode) { @@ -229,7 +261,10 @@ public string GetCommandsReport() case FdbTransactionLog.Mode.Write: ++writes; break; } } - sb.AppendLine(String.Format(culture, "Stats: {0:N0} operations ({1:N0} reads, {2:N0} writes), {3:N0} bytes read, {4:N0} bytes committed", this.Operations, reads, writes, this.ReadSize, this.CommitSize)); + if (this.Completed) + { + sb.AppendLine(String.Format(culture, "Stats: {0:N0} operations, {1:N0} reads ({3:N0} bytes), {2:N0} writes ({4:N0} bytes), {5:N2} ms", this.Operations, reads, writes, this.ReadSize, this.CommitSize, this.TotalDuration.TotalMilliseconds)); + } sb.AppendLine(); return sb.ToString(); } @@ -246,7 +281,8 @@ public string GetTimingsReport(bool showCommands = false) double scale = 0.0005d; int width; bool flag = false; - while ((width = (int)(duration.TotalSeconds / scale)) > 80) + int maxWidth = showCommands ? 80 : 160; + while ((width = (int)(duration.TotalSeconds / scale)) > maxWidth) { if (flag) scale *= 5d; else scale *= 2d; flag = !flag; @@ -255,17 +291,34 @@ public string GetTimingsReport(bool showCommands = false) var cmds = this.Commands.ToArray(); // Header - sb.AppendFormat(culture, "Transaction #{0} ({1} operations, '#' = {2:N1} ms, started {3}Z", this.Id, cmds.Length, (scale * 1000d), this.StartedUtc.TimeOfDay); + sb.AppendFormat(culture, "Transaction #{0} ({4}, {1} operations, '#' = {2:N1} ms, started {3}Z", this.Id, cmds.Length, (scale * 1000d), this.StartedUtc.TimeOfDay, this.IsReadOnly ? "read-only" : "read/write"); if (this.StoppedUtc.HasValue) - sb.AppendFormat(culture, ", ended {0}Z)", this.StoppedUtc.Value.TimeOfDay); + sb.AppendFormat(culture, ", ended {0}Z)", this.StoppedUtc.Value.TimeOfDay); else - sb.AppendLine(", did not finish"); + sb.Append(", did not finish"); sb.AppendLine(); if (cmds.Length > 0) { var bar = new string('─', width + 2); sb.AppendLine(String.Format(culture, "┌ oper. 
┬{0}┬──── start ──── end ── duration ──┬─ sent recv ┐", bar)); + // look for the timestamps of the first and last commands + var first = TimeSpan.Zero; + var last = duration; + for (int i = 0; i < cmds.Length;i++) + { + if (cmds[i].Op == Operation.Log) continue; + first = cmds[i].StartOffset; + break; + } + for(int i = cmds.Length - 1; i >= 0; i--) + { + if (cmds[i].Op == Operation.Log) continue; + if (cmds[i].EndOffset.HasValue) duration = cmds[i].EndOffset.Value; + break; + } + duration -= first; + int step = -1; bool previousWasOnError = false; int attempts = 1; @@ -279,7 +332,7 @@ public string GetTimingsReport(bool showCommands = false) long ticks = cmd.Duration.Ticks; double r = 1.0d * ticks / duration.Ticks; - string w = GetFancyGraph(width, cmd.StartOffset.Ticks, ticks, duration.Ticks, charsToSkip); + string w = GetFancyGraph(width, (cmd.StartOffset - first).Ticks, ticks, duration.Ticks, charsToSkip); if (ticks > 0) { @@ -293,7 +346,7 @@ public string GetTimingsReport(bool showCommands = false) /* 4 */ (cmd.EndOffset ?? TimeSpan.Zero).TotalMilliseconds, /* 5 */ ticks / 10.0, /* 6 */ cmd.Step == step ? ":" : " ", - /* 7 */ ticks >= 100000 ? "*" : ticks >= 10000 ? "°" : " ", + /* 7 */ ticks >= TimeSpan.TicksPerMillisecond * 10 ? '*' : ticks >= TimeSpan.TicksPerMillisecond ? '°' : ' ', /* 8 */ cmd.ArgumentBytes, /* 9 */ cmd.ResultBytes, /* 10 */ cmd.Error != null ? "!" : " ", @@ -309,7 +362,7 @@ public string GetTimingsReport(bool showCommands = false) /* 1 */ cmd.Step, /* 2 */ cmd.Error != null ? "!" : " ", /* 3 */ cmd.ShortName, - /* 4 */ ticks >= 100000 ? "*" : ticks >= 10000 ? "°" : " ", + /* 4 */ ticks >= TimeSpan.TicksPerMillisecond * 10 ? '*' : ticks >= TimeSpan.TicksPerMillisecond ? '°' : ' ', /* 5 */ w, /* 6 */ cmd.StartOffset.TotalMilliseconds, /* 7 */ showCommands ? 
cmd.ToString() : String.Empty @@ -345,12 +398,12 @@ public string GetTimingsReport(bool showCommands = false) flag = true; } if (!flag) sb.Append("Completed"); - sb.AppendLine(String.Format(culture, " in {0:N3} ms and {1:N0} attempt(s)", duration.TotalMilliseconds, attempts)); + sb.AppendLine(String.Format(culture, " in {0:N3} ms and {1:N0} attempt(s)", this.TotalDuration.TotalMilliseconds, attempts)); } } else { // empty transaction - sb.AppendLine(String.Format(culture, "> Completed after {0:N3} ms without performing any operation", duration.TotalMilliseconds)); + sb.AppendLine(String.Format(culture, "> Completed after {0:N3} ms without performing any operation", this.TotalDuration.TotalMilliseconds)); } return sb.ToString(); } From 1d4ad311eab2ba07537b27eab775408510304664 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Thu, 11 Dec 2014 01:08:26 +0100 Subject: [PATCH 34/63] Fixed merge issue with new tuple api --- FoundationDB.Tests/TransactionFacts.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/FoundationDB.Tests/TransactionFacts.cs b/FoundationDB.Tests/TransactionFacts.cs index 6e2487f0c..1c8b096d5 100644 --- a/FoundationDB.Tests/TransactionFacts.cs +++ b/FoundationDB.Tests/TransactionFacts.cs @@ -709,14 +709,14 @@ public async Task Test_Can_Perform_Atomic_Operations() await PerformAtomicOperationAndCheck(db, key, 0x00FF00FF, FdbMutationType.BitXor, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, 0x0F0F0F0F, FdbMutationType.BitXor, 0x018055AA); - key = location.Pack("max"); + key = location.Tuples.EncodeKey("max"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Max, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Max, 1); await PerformAtomicOperationAndCheck(db, key, 1, FdbMutationType.Max, 0); await PerformAtomicOperationAndCheck(db, key, 2, FdbMutationType.Max, 1); await PerformAtomicOperationAndCheck(db, key, 123456789, FdbMutationType.Max, 987654321); - key = 
location.Pack("min"); + key = location.Tuples.EncodeKey("min"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Min, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Min, 1); await PerformAtomicOperationAndCheck(db, key, 1, FdbMutationType.Min, 0); From 81e09ed40491ae0faee15d14a6fdfe67fa75d1f5 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Thu, 11 Dec 2014 20:38:39 +0100 Subject: [PATCH 35/63] Fixed merge --- FoundationDB.Tests/TransactionFacts.cs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/FoundationDB.Tests/TransactionFacts.cs b/FoundationDB.Tests/TransactionFacts.cs index d6028fcc9..273e75247 100644 --- a/FoundationDB.Tests/TransactionFacts.cs +++ b/FoundationDB.Tests/TransactionFacts.cs @@ -730,10 +730,9 @@ public async Task Test_Can_Perform_Atomic_Operations() // calling with an unsupported mutation type should fail using (var tr = db.BeginTransaction(this.Cancellation)) { - key = location.Pack("invalid"); + key = location.Tuples.EncodeKey("invalid"); Assert.That(() => tr.Atomic(key, Slice.FromFixed32(42), FdbMutationType.Max), Throws.InstanceOf().With.Property("Code").EqualTo(FdbError.InvalidMutationType)); } - } // calling with an invalid mutation type should fail From 2923ecdd25159616e611f97a390b297509c1802a Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Tue, 16 Dec 2014 17:41:38 +0100 Subject: [PATCH 36/63] Reduced duration of test Test_Can_Bulk_Batched_Insert_Items() --- FoundationDB.Tests/DatabaseBulkFacts.cs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/FoundationDB.Tests/DatabaseBulkFacts.cs b/FoundationDB.Tests/DatabaseBulkFacts.cs index 1f1384f9b..69c47c9c4 100644 --- a/FoundationDB.Tests/DatabaseBulkFacts.cs +++ b/FoundationDB.Tests/DatabaseBulkFacts.cs @@ -248,7 +248,7 @@ await Fdb.Bulk.ForEachAsync( [Test] public async Task Test_Can_Bulk_Batched_Insert_Items() { - const int N = 2000 * 1000; + const int N = 200 * 1000; using (var db = await 
OpenTestPartitionAsync()) { @@ -320,6 +320,8 @@ public async Task Test_Can_Bulk_Batched_Insert_Items() return tr.GetRange(location.Tuples.ToRange()).ToArrayAsync(); }, this.Cancellation); + Log("Read {0:N0} keys", stored.Length); + Assert.That(stored.Length, Is.EqualTo(N), "DB contains less or more items than expected"); for (int i = 0; i < stored.Length; i++) { From f32ee913dc0a62f2bae259bd041b7b4ef14f4074 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Fri, 19 Dec 2014 10:55:24 +0100 Subject: [PATCH 37/63] Fix merge --- FdbBurner/Program.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/FdbBurner/Program.cs b/FdbBurner/Program.cs index 53eb91df9..48aba5da8 100644 --- a/FdbBurner/Program.cs +++ b/FdbBurner/Program.cs @@ -88,7 +88,7 @@ private static async Task BurnerThread(IFdbDatabase db, CancellationToken ct) ? rnd.Next() : pos + i; - tr.Set(folder.Pack(x, suffix), value); + tr.Set(folder.Tuples.EncodeKey(x, suffix), value); Interlocked.Increment(ref Keys); } pos += N; From eb050f4910cd5b2372ea719c04a9a2ce5edadc2c Mon Sep 17 00:00:00 2001 From: Alban Lecocq Date: Fri, 16 Jan 2015 13:58:47 +0100 Subject: [PATCH 38/63] Ajount [InstantHandle] sur ExportAsync --- FoundationDB.Client/Fdb.Bulk.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/FoundationDB.Client/Fdb.Bulk.cs b/FoundationDB.Client/Fdb.Bulk.cs index ef85c245f..d2a1ac6b7 100644 --- a/FoundationDB.Client/Fdb.Bulk.cs +++ b/FoundationDB.Client/Fdb.Bulk.cs @@ -1323,7 +1323,7 @@ public static Task ExportAsync([NotNull] IFdbDatabase db, Slice beginInclu /// Token used to cancel the operation /// Number of keys exported /// This method cannot guarantee that all data will be read from the same snapshot of the database, which means that writes committed while the export is running may be seen partially. Only the items inside a single batch are guaranteed to be from the same snapshot of the database. 
- public static Task ExportAsync([NotNull] IFdbDatabase db, FdbKeyRange range, [NotNull] Func[], long, CancellationToken, Task> handler, CancellationToken cancellationToken) + public static Task ExportAsync([NotNull] IFdbDatabase db, FdbKeyRange range, [NotNull, InstantHandle] Func[], long, CancellationToken, Task> handler, CancellationToken cancellationToken) { return ExportAsync(db, FdbKeySelector.FirstGreaterOrEqual(range.Begin), FdbKeySelector.FirstGreaterOrEqual(range.End), handler, cancellationToken); } From 7e33bb8a41e5925acb2692a3c77421a3bbc470f3 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Tue, 27 Jan 2015 17:11:56 +0100 Subject: [PATCH 39/63] Reimpleted the mapping between Future handles and FdbFuture classes [BROKEN] - Future handles are no longer stored in safe handles, to prevent the GC from destroying the futures behind our back - FdbFutureContext is the base class of all "contexts" that can have futures (ie: clusters, transactions, ...) - There is a static dictionary that stores all active contexts (first routing step) - Each context has also its own dictionary that stores all active futures in this context (second rounting step) - future callback parameter is the concatenation of the context ID and the future ID and is used for the two-step routing - Multi-futures (FdbFutureArray) use refcounting and only fire once the last handle has fired --- FoundationDB.Client/FdbWatch.cs | 6 +- .../FoundationDB.Client.csproj | 2 + FoundationDB.Client/Native/FdbFuture.cs | 412 +-------------- FoundationDB.Client/Native/FdbFutureArray.cs | 324 ++++-------- .../Native/FdbFutureContext.cs | 477 ++++++++++++++++++ FoundationDB.Client/Native/FdbFutureSingle.cs | 257 +++------- FoundationDB.Client/Native/FdbNative.cs | 133 ++--- .../Native/FdbNativeCluster.cs | 80 +-- .../Native/FdbNativeDatabase.cs | 48 +- .../Native/FdbNativeTransaction.cs | 232 ++++----- .../Native/Handles/FutureHandle.cs | 4 + FoundationDB.Client/Native/IFdbFuture.cs | 53 ++ 12 files 
changed, 952 insertions(+), 1076 deletions(-) create mode 100644 FoundationDB.Client/Native/FdbFutureContext.cs create mode 100644 FoundationDB.Client/Native/IFdbFuture.cs diff --git a/FoundationDB.Client/FdbWatch.cs b/FoundationDB.Client/FdbWatch.cs index f447b5b36..4938aed65 100644 --- a/FoundationDB.Client/FdbWatch.cs +++ b/FoundationDB.Client/FdbWatch.cs @@ -88,10 +88,13 @@ public TaskAwaiter GetAwaiter() if (m_future != null) { +#if REFACTORING_IN_PROGRESS if (m_future.HasFlag(FdbFuture.Flags.DISPOSED)) { throw new ObjectDisposedException("Cannot await a watch that has already been disposed"); } + +#endif return m_future.Task.GetAwaiter(); } throw new InvalidOperationException("Cannot await an empty watch"); @@ -111,7 +114,8 @@ public void Dispose() { if (m_future != null) { - m_future.Dispose(); + //TODO: what should be do? (=> cancel the future?) + //m_future.Dispose(); } } diff --git a/FoundationDB.Client/FoundationDB.Client.csproj b/FoundationDB.Client/FoundationDB.Client.csproj index 82e12c10f..7fdf19767 100644 --- a/FoundationDB.Client/FoundationDB.Client.csproj +++ b/FoundationDB.Client/FoundationDB.Client.csproj @@ -73,6 +73,8 @@ + + diff --git a/FoundationDB.Client/Native/FdbFuture.cs b/FoundationDB.Client/Native/FdbFuture.cs index ad4106c00..0827e7adc 100644 --- a/FoundationDB.Client/Native/FdbFuture.cs +++ b/FoundationDB.Client/Native/FdbFuture.cs @@ -31,382 +31,39 @@ DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY namespace FoundationDB.Client.Native { - using FoundationDB.Client.Utils; - using JetBrains.Annotations; using System; - using System.Collections.Concurrent; - using System.Collections.Generic; using System.Diagnostics; using System.Runtime.CompilerServices; - using System.Threading; using System.Threading.Tasks; - /// Helper class to create FDBFutures - internal static class FdbFuture - { - - public static class Flags - { - /// The future has completed (either success or failure) - public const int COMPLETED = 1; - - /// A completion/failure/cancellation has been posted on the thread pool - public const int HAS_POSTED_ASYNC_COMPLETION = 2; - - /// The future has been cancelled from an external source (manually, or via then CancellationTokeb) - public const int CANCELLED = 4; - - /// The resources allocated by this future have been released - public const int MEMORY_RELEASED = 8; - - /// The future has been constructed, and is listening for the callbacks - public const int READY = 64; - - /// Dispose has been called - public const int DISPOSED = 128; - } - - /// Create a new from an FDBFuture* pointer - /// Type of the result of the task - /// FDBFuture* pointer - /// Func that will be called to get the result once the future completes (and did not fail) - /// Optional cancellation token that can be used to cancel the future - /// Object that tracks the execution of the FDBFuture handle - [NotNull] - public static FdbFutureSingle FromHandle([NotNull] FutureHandle handle, [NotNull] Func selector, CancellationToken cancellationToken) - { - return new FdbFutureSingle(handle, selector, cancellationToken); - } - - /// Create a new from an array of FDBFuture* pointers - /// Type of the items of the arrayreturn by the task - /// Array of FDBFuture* pointers - /// Func that will be called for each future that complete (and did not fail) - /// Optional cancellation token that can be used to cancel the future - /// Object that tracks the 
execution of all the FDBFuture handles - [NotNull] - public static FdbFutureArray FromHandleArray([NotNull] FutureHandle[] handles, [NotNull] Func selector, CancellationToken cancellationToken) - { - return new FdbFutureArray(handles, selector, cancellationToken); - } - - /// Wrap a FDBFuture* pointer into a - /// Type of the result of the task - /// FDBFuture* pointer - /// Lambda that will be called once the future completes sucessfully, to extract the result from the future handle. - /// Optional cancellation token that can be used to cancel the future - /// Task that will either return the result of the continuation lambda, or an exception - public static Task CreateTaskFromHandle([NotNull] FutureHandle handle, [NotNull] Func continuation, CancellationToken cancellationToken) - { - return new FdbFutureSingle(handle, continuation, cancellationToken).Task; - } - - /// Wrap multiple handles into a single that returns an array of T - /// Type of the result of the task - /// Array of FDBFuture* pointers - /// Lambda that will be called once for each future that completes sucessfully, to extract the result from the future handle. - /// Optional cancellation token that can be used to cancel the future - /// Task that will either return all the results of the continuation lambdas, or an exception - /// If at least one future fails, the whole task will fail. - public static Task CreateTaskFromHandleArray([NotNull] FutureHandle[] handles, [NotNull] Func continuation, CancellationToken cancellationToken) - { - // Special case, because FdbFutureArray does not support empty arrays - //TODO: technically, there is no reason why FdbFutureArray would not accept an empty array. We should simplify this by handling the case in the ctor (we are already allocating something anyway...) 
- if (handles.Length == 0) return Task.FromResult(new T[0]); - - return new FdbFutureArray(handles, continuation, cancellationToken).Task; - } - - } - /// Base class for all FDBFuture wrappers /// Type of the Task's result - [DebuggerDisplay("Flags={m_flags}, State={this.Task.Status}")] - internal abstract class FdbFuture : TaskCompletionSource, IDisposable + [DebuggerDisplay("Label={Label}, Cookie={Cookie}, State={Task.Status}")] + internal abstract class FdbFuture : TaskCompletionSource, IFdbFuture { #region Private Members... - /// Flags of the future (bit field of FLAG_xxx values) - private int m_flags; - - /// Future key in the callback dictionary - protected IntPtr m_key; - - /// Optionnal registration on the parent Cancellation Token - /// Is only valid if FLAG_HAS_CTR is set - protected CancellationTokenRegistration m_ctr; - - #endregion - - #region State Management... - - internal bool HasFlag(int flag) - { - return (Volatile.Read(ref m_flags) & flag) == flag; - } - - internal bool HasAnyFlags(int flags) - { - return (Volatile.Read(ref m_flags) & flags) != 0; - } - - protected void SetFlag(int flag) - { - var flags = m_flags; - Thread.MemoryBarrier(); - m_flags = flags | flag; - } - - protected bool TrySetFlag(int flag) - { - var wait = new SpinWait(); - while (true) - { - var flags = Volatile.Read(ref m_flags); - if ((flags & flag) != 0) - { - return false; - } - if (Interlocked.CompareExchange(ref m_flags, flags | flag, flags) == flags) - { - return true; - } - wait.SpinOnce(); - } - } - - protected bool TryCleanup() - { - // We try to cleanup the future handle as soon as possible, meaning as soon as we have the result, or an error, or a cancellation - - if (TrySetFlag(FdbFuture.Flags.COMPLETED)) - { - DoCleanup(); - return true; - } - return false; - } - - private void DoCleanup() - { - try - { - // unsubscribe from the parent cancellation token if there was one - UnregisterCancellationRegistration(); - - // ensure that the task always complete ! 
- // note: always defer the completion on the threadpool, because we don't want to dead lock here (we can be called by Dispose) - if (!this.Task.IsCompleted && TrySetFlag(FdbFuture.Flags.HAS_POSTED_ASYNC_COMPLETION)) - { - PostCancellationOnThreadPool(this); - } - - // The only surviving value after this would be a Task and an optional WorkItem on the ThreadPool that will signal it... - } - finally - { - CloseHandles(); - } - } - - /// Close all the handles managed by this future - protected abstract void CloseHandles(); - - /// Cancel all the handles managed by this future - protected abstract void CancelHandles(); - - /// Release all memory allocated by this future - protected abstract void ReleaseMemory(); - - /// Set the result of this future - /// Result of the future - /// If true, called from the network thread callback and will defer the operation on the ThreadPool. If false, may run the continuations inline. - protected void SetResult(T result, bool fromCallback) - { - if (!fromCallback) - { - this.TrySetResult(result); - } - else if (TrySetFlag(FdbFuture.Flags.HAS_POSTED_ASYNC_COMPLETION)) - { - PostCompletionOnThreadPool(this, result); - } - } - - /// Fault the future's Task - /// Error that will be the result of the task - /// If true, called from the network thread callback and will defer the operation on the ThreadPool. If false, may run the continuations inline. - protected void SetFaulted(Exception e, bool fromCallback) - { - if (!fromCallback) - { - this.TrySetException(e); - } - else if (TrySetFlag(FdbFuture.Flags.HAS_POSTED_ASYNC_COMPLETION)) - { - PostFailureOnThreadPool(this, e); - } - } - - /// Fault the future's Task - /// Error that will be the result of the task - /// If true, called from the network thread callback and will defer the operation on the ThreadPool. If false, may run the continuations inline. 
- protected void SetFaulted(IEnumerable errors, bool fromCallback) - { - if (!fromCallback) - { - this.TrySetException(errors); - } - else if (TrySetFlag(FdbFuture.Flags.HAS_POSTED_ASYNC_COMPLETION)) - { - PostFailureOnThreadPool(this, errors); - } - } - - /// Cancel the future's Task - /// If true, called from the network thread callback and will defer the operation on the ThreadPool. If false, may run the continuations inline. - protected void SetCanceled(bool fromCallback) - { - if (!fromCallback) - { - this.TrySetCanceled(); - } - else if (TrySetFlag(FdbFuture.Flags.HAS_POSTED_ASYNC_COMPLETION)) - { - PostCancellationOnThreadPool(this); - } - } - - /// Defer setting the result of a TaskCompletionSource on the ThreadPool - private static void PostCompletionOnThreadPool(TaskCompletionSource future, T result) - { - ThreadPool.UnsafeQueueUserWorkItem( - (_state) => - { - var prms = (Tuple, T>)_state; - prms.Item1.TrySetResult(prms.Item2); - }, - Tuple.Create(future, result) - ); - } + ///// Optionnal registration on the parent Cancellation Token + ///// Is only valid if FLAG_HAS_CTR is set + //protected CancellationTokenRegistration m_ctr; - /// Defer failing a TaskCompletionSource on the ThreadPool - private static void PostFailureOnThreadPool(TaskCompletionSource future, Exception error) + protected FdbFuture(IntPtr cookie, string label) { - ThreadPool.UnsafeQueueUserWorkItem( - (_state) => - { - var prms = (Tuple, Exception>)_state; - prms.Item1.TrySetException(prms.Item2); - }, - Tuple.Create(future, error) - ); - } - - /// Defer failing a TaskCompletionSource on the ThreadPool - private static void PostFailureOnThreadPool(TaskCompletionSource future, IEnumerable errors) - { - ThreadPool.UnsafeQueueUserWorkItem( - (_state) => - { - var prms = (Tuple, IEnumerable>)_state; - prms.Item1.TrySetException(prms.Item2); - }, - Tuple.Create(future, errors) - ); + this.Cookie = cookie; + this.Label = label; } - /// Defer cancelling a TaskCompletionSource on the 
ThreadPool - private static void PostCancellationOnThreadPool(TaskCompletionSource future) - { - ThreadPool.UnsafeQueueUserWorkItem( - (_state) => ((TaskCompletionSource)_state).TrySetCanceled(), - future - ); - } - - #endregion - - #region Callbacks... - - /// List of all pending futures that have not yet completed - private static readonly ConcurrentDictionary> s_futures = new ConcurrentDictionary>(); - - /// Internal counter to generated a unique parameter value for each futures - private static long s_futureCounter; - - /// Register a future in the callback context and return the corresponding callback parameter - /// Future instance - /// Parameter that can be passed to FutureSetCallback and that uniquely identify this future. - /// The caller MUST call ClearCallbackHandler to ensure that the future instance is removed from the list - internal static IntPtr RegisterCallback([NotNull] FdbFuture future) - { - Contract.Requires(future != null); - - // generate a new unique id for this future, that will be use to lookup the future instance in the callback handler - long id = Interlocked.Increment(ref s_futureCounter); - var prm = new IntPtr(id); // note: we assume that we can only run in 64-bit mode, so it is safe to cast a long into an IntPtr - // critical region - try { } - finally - { - Volatile.Write(ref future.m_key, prm); -#if DEBUG_FUTURES - Contract.Assert(!s_futures.ContainsKey(prm)); -#endif - s_futures[prm] = future; - Interlocked.Increment(ref DebugCounters.CallbackHandlesTotal); - Interlocked.Increment(ref DebugCounters.CallbackHandles); - } - return prm; - } - - /// Remove a future from the callback handler dictionary - /// Future that has just completed, or is being destroyed - internal static void UnregisterCallback([NotNull] FdbFuture future) - { - Contract.Requires(future != null); - - // critical region - try - { } - finally - { - var key = Interlocked.Exchange(ref future.m_key, IntPtr.Zero); - if (key != IntPtr.Zero) - { - FdbFuture _; - if 
(s_futures.TryRemove(key, out _)) - { - Interlocked.Decrement(ref DebugCounters.CallbackHandles); - } - } - } - } + public IntPtr Cookie { get; private set; } - internal static FdbFuture GetFutureFromCallbackParameter(IntPtr parameter) - { - FdbFuture future; - if (s_futures.TryGetValue(parameter, out future)) - { - if (future != null && Volatile.Read(ref future.m_key) == parameter) - { - return future; - } -#if DEBUG_FUTURES - // If you breakpoint here, that means that a future callback fired but was not able to find a matching registration - // => either the FdbFuture was incorrectly disposed, or there is some problem in the callback dictionary - if (System.Diagnostics.Debugger.IsAttached) System.Diagnostics.Debugger.Break(); -#endif - } - return null; - } + public string Label { get; private set; } #endregion #region Cancellation... +#if REFACTORED + protected void RegisterForCancellation(CancellationToken cancellationToken) { //note: if the token is already cancelled, the callback handler will run inline and any exception would bubble up here @@ -437,8 +94,14 @@ private static void CancellationHandler(object state) } } +#endif + #endregion + public abstract bool Visit(IntPtr handle); + + public abstract void OnFired(); + /// Return true if the future has completed (successfully or not) public bool IsReady { @@ -454,6 +117,8 @@ public TaskAwaiter GetAwaiter() /// Try to abort the task (if it is still running) public void Cancel() { + throw new NotImplementedException("FIXME: Future Cancellation!"); +#if REFACTORED if (HasAnyFlags(FdbFuture.Flags.DISPOSED | FdbFuture.Flags.COMPLETED | FdbFuture.Flags.CANCELLED)) { return; @@ -475,42 +140,7 @@ public void Cancel() TryCleanup(); } } - } - - /// Free memory allocated by this future after it has completed. - /// This method provides no benefit to most application code, and should only be called when attempting to write thread-safe custom layers. 
- public void Clear() - { - if (HasFlag(FdbFuture.Flags.DISPOSED)) - { - return; - } - - if (!this.Task.IsCompleted) - { - throw new InvalidOperationException("Cannot release memory allocated by a future that has not yet completed"); - } - - if (TrySetFlag(FdbFuture.Flags.MEMORY_RELEASED)) - { - ReleaseMemory(); - } - } - - public void Dispose() - { - if (TrySetFlag(FdbFuture.Flags.DISPOSED)) - { - try - { - TryCleanup(); - } - finally - { - if (Volatile.Read(ref m_key) != IntPtr.Zero) UnregisterCallback(this); - } - } - GC.SuppressFinalize(this); +#endif } } diff --git a/FoundationDB.Client/Native/FdbFutureArray.cs b/FoundationDB.Client/Native/FdbFutureArray.cs index 0a7989478..bffcf5c0a 100644 --- a/FoundationDB.Client/Native/FdbFutureArray.cs +++ b/FoundationDB.Client/Native/FdbFutureArray.cs @@ -30,7 +30,6 @@ namespace FoundationDB.Client.Native { using JetBrains.Annotations; using System; - using System.Collections.Generic; using System.Diagnostics; using System.Threading; @@ -38,286 +37,155 @@ namespace FoundationDB.Client.Native /// Type of result internal sealed class FdbFutureArray : FdbFuture { - // Wraps several FDBFuture* handles and return all the results at once #region Private Members... - /// Value of the 'FDBFuture*' - private readonly FutureHandle[] m_handles; + private IntPtr[] m_handles; - /// Counter of callbacks that still need to fire. private int m_pending; - /// Lambda used to extract the result of this FDBFuture - private readonly Func m_resultSelector; + private readonly Func m_resultSelector; - #endregion + private readonly object m_state; - #region Constructors... 
+ #endregion - internal FdbFutureArray([NotNull] FutureHandle[] handles, [NotNull] Func selector, CancellationToken cancellationToken) + internal FdbFutureArray([NotNull] IntPtr[] handles, [NotNull] Func selector, object state, IntPtr cookie, string label) + : base(cookie, label) { - if (handles == null) throw new ArgumentNullException("handles"); - if (handles.Length == 0) throw new ArgumentException("Handle array cannot be empty", "handles"); - if (selector == null) throw new ArgumentNullException("selector"); - m_handles = handles; + m_pending = handles.Length; m_resultSelector = selector; - - bool abortAllHandles = false; - - try - { - if (cancellationToken.IsCancellationRequested) - { // already cancelled, we must abort everything - - SetFlag(FdbFuture.Flags.COMPLETED); - abortAllHandles = true; - m_resultSelector = null; - this.TrySetCanceled(); - return; - } - - // add this instance to the list of pending futures - var prm = RegisterCallback(this); - - foreach (var handle in handles) - { - - if (FdbNative.FutureIsReady(handle)) - { // this handle is already done - continue; - } - - Interlocked.Increment(ref m_pending); - - // register the callback handler - var err = FdbNative.FutureSetCallback(handle, CallbackHandler, prm); - if (Fdb.Failed(err)) - { // uhoh - Debug.WriteLine("Failed to set callback for Future<" + typeof(T).Name + "> 0x" + handle.Handle.ToString("x") + " !!!"); - throw Fdb.MapToException(err); - } - } - - // allow the callbacks to handle completion - TrySetFlag(FdbFuture.Flags.READY); - - if (Volatile.Read(ref m_pending) == 0) - { // all callbacks have already fired (or all handles were already completed) - UnregisterCallback(this); - HandleCompletion(fromCallback: false); - m_resultSelector = null; - abortAllHandles = true; - SetFlag(FdbFuture.Flags.COMPLETED); - } - else if (cancellationToken.CanBeCanceled) - { // register for cancellation (if needed) - RegisterForCancellation(cancellationToken); - } - } - catch - { - // this is bad news, 
since we are in the constructor, we need to clear everything - SetFlag(FdbFuture.Flags.DISPOSED); - - UnregisterCancellationRegistration(); - - UnregisterCallback(this); - - abortAllHandles = true; - - // this is technically not needed, but just to be safe... - this.TrySetCanceled(); - - throw; - } - finally - { - if (abortAllHandles) - { - CloseHandles(handles); - } - } - GC.KeepAlive(this); + m_state = state; } - #endregion - - protected override void CloseHandles() + public override bool Visit(IntPtr handle) { - CloseHandles(m_handles); + return 0 == Interlocked.Decrement(ref m_pending); } - protected override void CancelHandles() - { - CancelHandles(m_handles); - } + private const int CATEGORY_SUCCESS = 0; + private const int CATEGORY_RETRYABLE = 1; + private const int CATEGORY_CANCELLED = 2; + private const int CATEGORY_FAILURE = 3; - protected override void ReleaseMemory() + private static int ClassifyErrorSeverity(FdbError error) { - var handles = m_handles; - if (handles != null) + switch (error) { - foreach (var handle in handles) - { - if (handle != null && !handle.IsClosed && !handle.IsInvalid) - { - //REVIEW: there is a possibility of a race condition with Dispoe() that could potentially call FutureDestroy(handle) at the same time (not verified) - FdbNative.FutureReleaseMemory(handle); - } - } - } - } + case FdbError.Success: + return CATEGORY_SUCCESS; - private static void CloseHandles(FutureHandle[] handles) - { - if (handles != null) - { - foreach (var handle in handles) - { - if (handle != null) - { - //note: Dispose() will be a no-op if already called - handle.Dispose(); - } - } - } - } + case FdbError.PastVersion: + case FdbError.FutureVersion: + case FdbError.TimedOut: + case FdbError.TooManyWatches: + return CATEGORY_RETRYABLE; - private static void CancelHandles(FutureHandle[] handles) - { - if (handles != null) - { - foreach (var handle in handles) - { - if (handle != null && !handle.IsClosed && !handle.IsInvalid) - { - //REVIEW: there is a 
possibility of a race condition with Dispoe() that could potentially call FutureDestroy(handle) at the same time (not verified) - FdbNative.FutureCancel(handle); - } - } + case FdbError.OperationCancelled: + case FdbError.TransactionCancelled: + return CATEGORY_CANCELLED; + + default: + return CATEGORY_FAILURE; } } - /// Cached delegate of the future completion callback handler - private static readonly FdbNative.FdbFutureCallback CallbackHandler = FutureCompletionCallback; - - /// Handler called when a FDBFuture becomes ready - /// Handle on the future that became ready - /// Paramter to the callback (unused) - private static void FutureCompletionCallback(IntPtr futureHandle, IntPtr parameter) + public override void OnFired() { -#if DEBUG_FUTURES - Debug.WriteLine("Future<" + typeof(T).Name + ">.Callback(0x" + futureHandle.ToString("x") + ", " + parameter.ToString("x") + ") has fired on thread #" + Thread.CurrentThread.ManagedThreadId.ToString()); -#endif + var handles = Interlocked.Exchange(ref m_handles, null); + if (handles == null) return; // already disposed? - var future = (FdbFutureArray)GetFutureFromCallbackParameter(parameter); + Debug.WriteLine("Future{0}<{1}[]>.OnFired({2})", this.Label, typeof (T).Name, handles.Length); - if (future != null && Interlocked.Decrement(ref future.m_pending) == 0) - { // the last future handle has fired, we can proceed to read all the results + //README: + // - This callback will fire either from the ThreadPool (async ops) or inline form the ctor of the future (non-async ops, or ops that where served from some cache). + // - The method *MUST* dispose the future handle before returning, and *SHOULD* do so before signaling the task. + // => This is because continuations may run inline, and start new futures from there, while we still have our original future handle opened. 
- if (future.HasFlag(FdbFuture.Flags.READY)) + try + { + T[] results = new T[handles.Length]; + FdbError code = FdbError.Success; + int severity = 0; + Exception error = null; + try { - UnregisterCallback(future); - try - { - future.HandleCompletion(fromCallback: true); + if (this.Task.IsCompleted) + { // task has already been handled by someone else + return; } - catch(Exception) - { - //TODO ? - } - } - // else, the ctor will handle that - } - } - - /// Update the Task with the state of a ready Future - /// If true, the method is called from the network thread and must defer the continuations from the Thread Pool - /// True if we got a result, or false in case of error (or invalid state) - private void HandleCompletion(bool fromCallback) - { - if (HasAnyFlags(FdbFuture.Flags.DISPOSED | FdbFuture.Flags.COMPLETED)) - { - return; - } -#if DEBUG_FUTURES - Debug.WriteLine("FutureArray<" + typeof(T).Name + ">.Callback(...) handling completion on thread #" + Thread.CurrentThread.ManagedThreadId.ToString()); -#endif - - try - { - UnregisterCancellationRegistration(); + for (int i = 0; i < results.Length; i++) + { + var handle = handles[i]; + var err = FdbNative.FutureGetError(handle); + if (err == FdbError.Success) + { + if (code != FdbError.Success) + { // there's been at least one error before, so there is no point in computing the result, it would be discarded anyway + continue; + } - List errors = null; - bool cancellation = false; - var selector = m_resultSelector; + try + { + results[i] = m_resultSelector(handle, m_state); + } + catch (AccessViolationException e) + { // trouble in paradise! - var results = selector != null ? new T[m_handles.Length] : null; + Debug.WriteLine("EPIC FAIL: " + e.ToString()); - for (int i = 0; i < m_handles.Length; i++) - { - var handle = m_handles[i]; + // => THIS IS VERY BAD! 
We have no choice but to terminate the process immediately, because any new call to any method to the binding may end up freezing the whole process (best case) or sending corrupted data to the cluster (worst case) + if (Debugger.IsAttached) Debugger.Break(); - if (handle != null && !handle.IsClosed && !handle.IsInvalid) - { - FdbError err = FdbNative.FutureGetError(handle); - if (Fdb.Failed(err)) - { // it failed... - if (err != FdbError.OperationCancelled) - { // get the exception from the error code - var ex = Fdb.MapToException(err); - (errors ?? (errors = new List())).Add(ex); + Environment.FailFast("FIXME: FDB done goofed!", e); } - else + catch (Exception e) { - cancellation = true; + Debug.WriteLine("FAIL: " + e.ToString()); + code = FdbError.InternalError; + error = e; break; } } - else - { // it succeeded... - // try to get the result... - if (selector != null) - { - //note: result selector will execute from network thread, but this should be our own code that only calls into some fdb_future_get_XXXX(), which should be safe... - results[i] = selector(handle); + else if (code != err) + { + int cur = ClassifyErrorSeverity(err); + if (cur > severity) + { // error is more serious than before + severity = cur; + code = err; } } } } + finally + { + foreach (var handle in handles) + { + if (handle != IntPtr.Zero) FdbNative.FutureDestroy(handle); + } + } - if (cancellation) - { // the transaction has been cancelled - SetCanceled(fromCallback); + if (code == FdbError.Success) + { + TrySetResult(results); } - else if (errors != null) - { // there was at least one error - SetFaulted(errors, fromCallback); + else if (code == FdbError.OperationCancelled || code == FdbError.TransactionCancelled) + { + TrySetCanceled(); } else - { // success - SetResult(results, fromCallback); - } - - } - catch (Exception e) - { // something went wrong - if (e is ThreadAbortException) { - SetCanceled(fromCallback); - throw; + TrySetException(error ?? 
Fdb.MapToException(code)); } - SetFaulted(e, fromCallback); } - finally - { - TryCleanup(); + catch (Exception e) + { // we must not blow up the TP or the parent, so make sure to propagate all exceptions to the task + TrySetException(e); } } - } -} +} \ No newline at end of file diff --git a/FoundationDB.Client/Native/FdbFutureContext.cs b/FoundationDB.Client/Native/FdbFutureContext.cs new file mode 100644 index 000000000..2dba49c7a --- /dev/null +++ b/FoundationDB.Client/Native/FdbFutureContext.cs @@ -0,0 +1,477 @@ +#region BSD Licence +/* Copyright (c) 2013-2014, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +// enable this to capture the stacktrace of the ctor, when troubleshooting leaked transaction handles +#undef CAPTURE_STACKTRACES + +using System.IO.IsolatedStorage; + +namespace FoundationDB.Client.Native +{ + using FoundationDB.Client.Core; + using FoundationDB.Client.Utils; + using FoundationDB.Layers.Tuples; + using JetBrains.Annotations; + using System; + using System.Collections.Generic; + using System.Diagnostics; + using System.Runtime.CompilerServices; + using System.Threading; + using System.Threading.Tasks; + + internal class FdbFutureContext : IDisposable + { + + #region Private Constants... + + private const int FUTURE_COOKIE_SIZE = 32; + + private const int FUTURE_COOKIE_SHIFT = 0; + + private const ulong FUTURE_COOKIE_MASK = (1UL << FUTURE_COOKIE_SIZE) - 1; + + private const int CONTEXT_COOKIE_SIZE = 32; + + private const ulong CONTEXT_COOKIE_MASK = (1UL << CONTEXT_COOKIE_SIZE) - 1; + + private const int CONTEXT_COOKIE_SHIFT = FUTURE_COOKIE_SIZE; + + #endregion + + #region Static Stuff.... 
+ + /// Counter used to generate the cookie values for each unique context + private static int s_globalCookieCounter; + + private static readonly Dictionary s_contexts = new Dictionary(); + + private static IntPtr MakeCallbackCookie(uint contextId, uint futureId) + { + ulong cookie = (contextId & CONTEXT_COOKIE_MASK) << CONTEXT_COOKIE_SHIFT; + cookie |= (futureId & FUTURE_COOKIE_MASK) << FUTURE_COOKIE_SHIFT; + return new IntPtr((long)cookie); + } + + private static readonly FdbNative.FdbFutureCallback GlobalCallback = FutureCallbackHandler; + + private static void FutureCallbackHandler(IntPtr handle, IntPtr cookie) + { + // cookie is the value that will help us find the corresponding context (upper 32 bits) and future within this context (lower 32 bits) that matches with this future handle. + + //note: this callback can be called either: + // - from the thread that is constructing the Future, if the future was already completed (called inline) + // - from the network thread, when the future completed asynchronously + + Debug.WriteLine("FutureCallbackHandler(0x{0}, {1:X8} | {2:X8}) called", handle.ToString("X"), cookie.ToInt64() >> 32, cookie.ToInt64() & uint.MaxValue); + + bool deferred = false; + try + { + + uint contextId = (uint) (((ulong) cookie.ToInt64() >> CONTEXT_COOKIE_SHIFT) & CONTEXT_COOKIE_MASK); + + FdbFutureContext context; + lock (s_contexts) // there will only be contentions on this lock if other a lot of threads are creating new contexts (ie: new transactions) + { + s_contexts.TryGetValue(contextId, out context); + } + + if (context != null) + { + Contract.Assert(context.m_contextId == contextId); + deferred = context.OnFired(handle, cookie); + } + } + finally + { + if (!deferred) FdbNative.FutureDestroy(handle); + } + } + + #endregion + + + /// Cookie for this context + /// Makes the 32-bits upper bits of the future callback parameter + private readonly uint m_contextId = (uint) Interlocked.Increment(ref s_globalCookieCounter); + + /// Counter 
used to generated the cookie for all futures created from this context + private int m_localCookieCounter; + + /// Dictionary used to store all the pending Futures for this context + /// All methods should take a lock on this instance before manipulating the state + private readonly Dictionary m_futures = new Dictionary(); + +#if CAPTURE_STACKTRACES + private StackTrace m_stackTrace; +#endif + + #region Constructors... + + protected FdbFutureContext() + { + lock (s_contexts) + { + s_contexts[m_contextId] = this; + } +#if CAPTURE_STACKTRACES + m_stackTrace = new StackTrace(); +#endif + } + + //REVIEW: do we really need a destructor ? The handle is a SafeHandle, and will take care of itself... + ~FdbFutureContext() + { +#if CAPTURE_STACKTRACES + Trace.WriteLine("A transaction handle (" + m_handle + ", " + m_payloadBytes + " bytes written) was leaked by " + m_stackTrace); +#endif +#if DEBUG + // If you break here, that means that a native transaction handler was leaked by a FdbTransaction instance (or that the transaction instance was leaked) + if (Debugger.IsAttached) Debugger.Break(); +#endif + Dispose(false); + } + + #endregion + + #region IDisposable... + + public void Dispose() + { + Dispose(true); + GC.SuppressFinalize(this); + } + + protected virtual void Dispose(bool disposing) + { + //Debug.WriteLine("Disposified from " + new StackTrace()); + if (disposing) + { + lock (s_contexts) + { + Debug.WriteLine("Disposed context {0}#{1} with {2} pending future(s) ({3} total)", this.GetType().Name, m_contextId, m_futures.Count, m_localCookieCounter); + s_contexts.Remove(m_contextId); + foreach (var ctx in s_contexts) + { + Debug.WriteLine("- {0}#{1} : {2} ({3})", ctx.Value.GetType().Name, ctx.Key, ctx.Value.m_futures.Count, ctx.Value.m_localCookieCounter); + } + } + } + } + + #endregion + + /// A callback has fire for a future handled by this context + /// + /// + /// If this flag is set to true, then the caller will NOT destroy the future. 
+ private bool OnFired(IntPtr handle, IntPtr cookie) + { + IFdbFuture future; + lock (m_futures) + { + m_futures.TryGetValue(cookie, out future); + } + + if (future != null && future.Cookie == cookie) + { + if (future.Visit(handle)) + { // future is ready to process all the results + ThreadPool.UnsafeQueueUserWorkItem( + (state) => + { + ((IFdbFuture)state).OnFired(); + //TODO: if it fails, maybe we should remove it from m_futures? + }, + future + ); + } + // else: expecting more handles + + // handles will be destroyed when the future completes + return true; + } + + return false; + } + + /// Add a new future handle to this context + /// + /// Handle of the newly created future + /// Flag set to true if the future must be disposed by the caller (in case of error), or false if the future will be disposed by some other thread. + /// Method called when the future completes successfully + /// TODO: remove this? + /// Type of future (name of the caller) + /// + protected Task RegisterFuture( + IntPtr handle, + ref bool mustDispose, + Func selector, + object state, + CancellationToken ct, + string label + ) + { + FdbFutureSingle future = null; + IntPtr cookie = IntPtr.Zero; + try + { + uint futureId = (uint)Interlocked.Increment(ref m_localCookieCounter); + cookie = MakeCallbackCookie(m_contextId, futureId); + + future = new FdbFutureSingle(handle, selector, state, cookie, label); + + if (FdbNative.FutureIsReady(handle)) + { // the result is already computed + Debug.WriteLine("Future.{0} 0x{1} already completed!", label, handle.ToString("X")); + mustDispose = false; + future.OnFired(); + return future.Task; + } + + lock (m_futures) + { + //TODO: marke the future as "registered" (must unreg when it fires?) 
+ m_futures[cookie] = future; + } + + var err = FdbNative.FutureSetCallback(handle, GlobalCallback, cookie); + if (!Fdb.Success(err)) + { + throw Fdb.MapToException(err); + } + mustDispose = false; + return future.Task; + } + catch (Exception e) + { + if (future != null) + { + future.TrySetException(e); + if (cookie != IntPtr.Zero) + { + lock (m_futures) + { + m_futures.Remove(cookie); + } + } + return future.Task; + } + throw; + } + } + + /// Add a new future handle to this context + /// + /// Handles of the newly created future + /// Flag set to true if the future must be disposed by the caller (in case of error), or false if the future will be disposed by some other thread. + /// Method called when the future completes successfully + /// TODO: remove this? + /// Type of future (name of the caller) + /// + protected Task RegisterFutures( + IntPtr[] handles, + ref bool mustDispose, + Func selector, + object state, + CancellationToken ct, + string label + ) + { + FdbFutureArray future = null; + IntPtr cookie = IntPtr.Zero; + try + { + uint futureId = (uint)Interlocked.Increment(ref m_localCookieCounter); + cookie = MakeCallbackCookie(m_contextId, futureId); + + // make a copy because we may diverge from the caller if we partially fail to register the callbacks below + var tmp = new IntPtr[handles.Length]; + handles.CopyTo(tmp, 0); + future = new FdbFutureArray(tmp, selector, state, cookie, label); + + //TODO: we could check if all handles are already completed/failed? + + lock (m_futures) + { + //TODO: mark the future as "registered" (must unreg when it fires?) 
+ m_futures[cookie] = future; + } + + for (int i = 0; i < handles.Length; i++) + { + var err = FdbNative.FutureSetCallback(handles[i], GlobalCallback, cookie); + if (Fdb.Success(err)) + { + handles[i] = IntPtr.Zero; + } + else + { + // mute this future + lock (m_futures) + { + m_futures.Remove(cookie); + //TODO: mark the future as "unregistered" + for (int j = i + 1; j < handles.Length; j++) + { + tmp[j] = IntPtr.Zero; + } + } + + throw Fdb.MapToException(err); + } + } + mustDispose = false; + return future.Task; + } + catch (Exception e) + { + if (future != null) + { + future.TrySetException(e); + if (cookie != IntPtr.Zero) + { + lock (m_futures) + { + m_futures.Remove(cookie); + } + } + return future.Task; + } + throw; + } + } + + } + + internal class FdbFutureContext : FdbFutureContext + where THandle : FdbSafeHandle + { + + protected readonly THandle m_handle; + + protected FdbFutureContext([NotNull] THandle handle) + { + if (handle == null) throw new ArgumentNullException("handle"); + m_handle = handle; + } + + public THandle Handle { [NotNull] get { return m_handle; } } + + protected override void Dispose(bool disposing) + { + try + { + base.Dispose(disposing); + } + finally + { + if (disposing) + { + lock (this.Handle) + { + if (!this.Handle.IsClosed) this.Handle.Dispose(); + } + } + } + } + + /// Start a new async operation + /// Result of the operation + /// Argument passed to the generator + /// Lambda called to produce the future handle + /// Argument passed to . It will not be used after the handle has been constructed + /// Lambda called once the future completes (successfully) + /// State object passed to . It will be stored in the future has long as it is active. + /// Optional cancellation token used to cancel the task from an external source. 
+ /// Optional label, used for logging and troubleshooting purpose (by default the name of the caller) + /// + protected Task StartNewFuture( + Func generator, + TArg argument, + Func selector, + object state, + CancellationToken ct, + [CallerMemberName] string label = null + ) + { + + bool mustDispose = true; + IntPtr h = IntPtr.Zero; + try + { + lock (this.Handle) + { + if (this.Handle.IsClosed) throw new ObjectDisposedException(this.GetType().Name); + h = generator(m_handle, argument); + } + return RegisterFuture(h, ref mustDispose, selector, state, ct, label); + } + finally + { + if (mustDispose && h != IntPtr.Zero) + { + FdbNative.FutureDestroy(h); + } + } + } + + protected Task StartNewFutures( + int count, + Action generator, + TArg arg, + Func selector, + object state, + CancellationToken ct, + [CallerMemberName] string label = null + + ) + { + bool mustDispose = true; + var handles = new IntPtr[count]; + try + { + lock (this.Handle) + { + if (this.Handle.IsClosed) throw new ObjectDisposedException(this.GetType().Name); + generator(m_handle, arg, handles); + } + return RegisterFutures(handles, ref mustDispose, selector, state, ct, label); + } + catch + { + foreach (var future in handles) + { + if (future != IntPtr.Zero) FdbNative.FutureDestroy(future); + } + throw; + } + } + } + +} diff --git a/FoundationDB.Client/Native/FdbFutureSingle.cs b/FoundationDB.Client/Native/FdbFutureSingle.cs index 48877f1ee..0bb0ff4ee 100644 --- a/FoundationDB.Client/Native/FdbFutureSingle.cs +++ b/FoundationDB.Client/Native/FdbFutureSingle.cs @@ -28,11 +28,14 @@ DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY #undef DEBUG_FUTURES +using System.Diagnostics; + namespace FoundationDB.Client.Native { + using FoundationDB.Client.Utils; using JetBrains.Annotations; using System; - using System.Diagnostics; + using System.Runtime.ExceptionServices; using System.Threading; /// FDBFuture wrapper @@ -42,226 +45,106 @@ internal sealed class FdbFutureSingle : FdbFuture #region Private Members... /// Value of the 'FDBFuture*' - private readonly FutureHandle m_handle; + private IntPtr m_handle; /// Lambda used to extract the result of this FDBFuture - private readonly Func m_resultSelector; + private readonly Func m_resultSelector; - #endregion + private readonly object m_state; - #region Constructors... + #endregion - internal FdbFutureSingle([NotNull] FutureHandle handle, [NotNull] Func selector, CancellationToken cancellationToken) + internal FdbFutureSingle(IntPtr handle, [NotNull] Func selector, object state, IntPtr cookie, string label) + : base(cookie, label) { - if (handle == null) throw new ArgumentNullException("handle"); + if (handle == IntPtr.Zero) throw new ArgumentException("Invalid future handle", "handle"); if (selector == null) throw new ArgumentNullException("selector"); m_handle = handle; m_resultSelector = selector; - - try - { - - if (handle.IsInvalid) - { // it's dead, Jim ! 
- SetFlag(FdbFuture.Flags.COMPLETED); - m_resultSelector = null; - return; - } - - if (FdbNative.FutureIsReady(handle)) - { // either got a value or an error -#if DEBUG_FUTURES - Debug.WriteLine("Future<" + typeof(T).Name + "> 0x" + handle.Handle.ToString("x") + " was already ready"); -#endif - HandleCompletion(fromCallback: false); -#if DEBUG_FUTURES - Debug.WriteLine("Future<" + typeof(T).Name + "> 0x" + handle.Handle.ToString("x") + " completed inline"); -#endif - return; - } - - // register for cancellation (if needed) - if (cancellationToken.CanBeCanceled) - { - if (cancellationToken.IsCancellationRequested) - { // we have already been cancelled - -#if DEBUG_FUTURES - Debug.WriteLine("Future<" + typeof(T).Name + "> 0x" + handle.Handle.ToString("x") + " will complete later"); -#endif - - // Abort the future and simulate a Canceled task - SetFlag(FdbFuture.Flags.COMPLETED); - // note: we don't need to call fdb_future_cancel because fdb_future_destroy will take care of everything - handle.Dispose(); - // also, don't keep a reference on the callback because it won't be needed - m_resultSelector = null; - this.TrySetCanceled(); - return; - } - - // token still active - RegisterForCancellation(cancellationToken); - } - -#if DEBUG_FUTURES - Debug.WriteLine("Future<" + typeof(T).Name + "> 0x" + handle.Handle.ToString("x") + " will complete later"); -#endif - - TrySetFlag(FdbFuture.Flags.READY); - - // add this instance to the list of pending futures - var prm = RegisterCallback(this); - - // register the callback handler - var err = FdbNative.FutureSetCallback(handle, CallbackHandler, prm); - if (Fdb.Failed(err)) - { // uhoh -#if DEBUG_FUTURES - Debug.WriteLine("Failed to set callback for Future<" + typeof(T).Name + "> 0x" + handle.Handle.ToString("x") + " !!!"); -#endif - throw Fdb.MapToException(err); - } - } - catch - { - // this is bad news, since we are in the constructor, we need to clear everything - SetFlag(FdbFuture.Flags.DISPOSED); - 
UnregisterCancellationRegistration(); - UnregisterCallback(this); - - // kill the future handle - m_handle.Dispose(); - - // this is technically not needed, but just to be safe... - this.TrySetCanceled(); - - throw; - } - GC.KeepAlive(this); + m_state = state; } - #endregion - - /// Cached delegate of the future completion callback handler - private static readonly FdbNative.FdbFutureCallback CallbackHandler = FutureCompletionCallback; - - /// Handler called when a FDBFuture becomes ready - /// Handle on the future that became ready - /// Paramter to the callback (unused) - private static void FutureCompletionCallback(IntPtr futureHandle, IntPtr parameter) + public override bool Visit(IntPtr handle) { -#if DEBUG_FUTURES - Debug.WriteLine("Future<" + typeof(T).Name + ">.Callback(0x" + futureHandle.ToString("x") + ", " + parameter.ToString("x") + ") has fired on thread #" + Thread.CurrentThread.ManagedThreadId.ToString()); -#endif - - var future = (FdbFutureSingle)GetFutureFromCallbackParameter(parameter); - if (future != null) - { - UnregisterCallback(future); - future.HandleCompletion(fromCallback: true); - } + Contract.Requires(handle == m_handle); + return true; } - /// Update the Task with the state of a ready Future - /// If true, we are called from the network thread - /// True if we got a result, or false in case of error (or invalid state) - private void HandleCompletion(bool fromCallback) + [HandleProcessCorruptedStateExceptions] // to be able to handle Access Violations and terminate the process + public override void OnFired() { - // note: if fromCallback is true, we are running on the network thread - // this means that we have to signal the TCS from the threadpool, if not continuations on the task may run inline. 
- // this is very frequent when we are called with await, or ContinueWith(..., TaskContinuationOptions.ExecuteSynchronously) + Debug.WriteLine("Future{0}<{1}>.OnFired(0x{2})", this.Label, typeof(T).Name, m_handle.ToString("X8")); - if (HasAnyFlags(FdbFuture.Flags.DISPOSED | FdbFuture.Flags.COMPLETED)) - { - return; - } + var handle = Interlocked.Exchange(ref m_handle, IntPtr.Zero); + if (handle == IntPtr.Zero) return; // already disposed? + + //README: + // - This callback will fire either from the ThreadPool (async ops) or inline form the ctor of the future (non-async ops, or ops that where served from some cache). + // - The method *MUST* dispose the future handle before returning, and *SHOULD* do so before signaling the task. + // => This is because continuations may run inline, and start new futures from there, while we still have our original future handle opened. -#if DEBUG_FUTURES - var sw = Stopwatch.StartNew(); -#endif try { - var handle = m_handle; - if (handle != null && !handle.IsClosed && !handle.IsInvalid) + T result = default(T); + FdbError code; + Exception error = null; + try { - UnregisterCancellationRegistration(); + if (this.Task.IsCompleted) + { // task has already been handled by someone else + return; + } + + code = FdbNative.FutureGetError(handle); + if (code == FdbError.Success) + { + try + { + result = m_resultSelector(handle, m_state); + } + catch (AccessViolationException e) + { // trouble in paradise! + + Debug.WriteLine("EPIC FAIL: " + e.ToString()); + + // => THIS IS VERY BAD! We have no choice but to terminate the process immediately, because any new call to any method to the binding may end up freezing the whole process (best case) or sending corrupted data to the cluster (worst case) + if (Debugger.IsAttached) Debugger.Break(); - FdbError err = FdbNative.FutureGetError(handle); - if (Fdb.Failed(err)) - { // it failed... 
-#if DEBUG_FUTURES - Debug.WriteLine("Future<" + typeof(T).Name + "> has FAILED: " + err); -#endif - if (err != FdbError.OperationCancelled) - { // get the exception from the error code - var ex = Fdb.MapToException(err); - SetFaulted(ex, fromCallback); - return; + Environment.FailFast("FIXME: FDB done goofed!", e); } - //else: will be handle below - } - else - { // it succeeded... - // try to get the result... -#if DEBUG_FUTURES - Debug.WriteLine("Future<" + typeof(T).Name + "> has completed successfully"); -#endif - var selector = m_resultSelector; - if (selector != null) + catch (Exception e) { - //note: result selector will execute from network thread, but this should be our own code that only calls into some fdb_future_get_XXXX(), which should be safe... - var result = selector(handle); - SetResult(result, fromCallback); - return; + Debug.WriteLine("FAIL: " + e.ToString()); + code = FdbError.InternalError; + error = e; } - //else: it will be handled below } } + finally + { + FdbNative.FutureDestroy(handle); + } - // most probably the future was cancelled or we are shutting down... - SetCanceled(fromCallback); - } - catch (Exception e) - { // something went wrong - if (e is ThreadAbortException) + if (code == FdbError.Success) + { + TrySetResult(result); + } + else if (code == FdbError.OperationCancelled || code == FdbError.TransactionCancelled) + { + TrySetCanceled(); + } + else { - SetCanceled(fromCallback); - throw; + TrySetException(error ?? 
Fdb.MapToException(code)); } - SetFaulted(e, fromCallback); } - finally - { -#if DEBUG_FUTURES - sw.Stop(); - Debug.WriteLine("Future<" + typeof(T).Name + "> callback completed in " + sw.Elapsed.TotalMilliseconds.ToString() + " ms"); -#endif - TryCleanup(); + catch (Exception e) + { // we must not blow up the TP or the parent, so make sure to propagate all exceptions to the task + TrySetException(e); } } - protected override void CloseHandles() - { - var handle = m_handle; - if (handle != null) handle.Dispose(); - } - - protected override void CancelHandles() - { - var handle = m_handle; - //REVIEW: there is a possibility of a race condition with Dispose() that could potentially call FutureDestroy(handle) at the same time (not verified) - if (handle != null && !handle.IsClosed && !handle.IsInvalid) FdbNative.FutureCancel(handle); - } - - protected override void ReleaseMemory() - { - var handle = m_handle; - //REVIEW: there is a possibility of a race condition with Dispose() that could potentially call FutureDestroy(handle) at the same time (not verified) - if (handle != null && !handle.IsClosed && !handle.IsInvalid) FdbNative.FutureReleaseMemory(handle); - } - } } diff --git a/FoundationDB.Client/Native/FdbNative.cs b/FoundationDB.Client/Native/FdbNative.cs index c066cb4ec..a3fba2dd4 100644 --- a/FoundationDB.Client/Native/FdbNative.cs +++ b/FoundationDB.Client/Native/FdbNative.cs @@ -97,7 +97,7 @@ internal static class NativeMethods // Cluster [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, BestFitMapping = false, ThrowOnUnmappableChar = true)] - public static extern FutureHandle fdb_create_cluster([MarshalAs(UnmanagedType.LPStr)] string clusterFilePath); + public static extern IntPtr fdb_create_cluster([MarshalAs(UnmanagedType.LPStr)] string clusterFilePath); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] public static extern void fdb_cluster_destroy(IntPtr cluster); @@ -106,7 +106,7 @@ internal 
static class NativeMethods public static extern FdbError fdb_cluster_set_option(ClusterHandle cluster, FdbClusterOption option, byte* value, int valueLength); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, BestFitMapping = false, ThrowOnUnmappableChar = true)] - public static extern FutureHandle fdb_cluster_create_database(ClusterHandle cluster, [MarshalAs(UnmanagedType.LPStr)] string dbName, int dbNameLength); + public static extern IntPtr fdb_cluster_create_database(ClusterHandle cluster, [MarshalAs(UnmanagedType.LPStr)] string dbName, int dbNameLength); // Database @@ -131,19 +131,19 @@ internal static class NativeMethods public static extern void fdb_transaction_set_read_version(TransactionHandle handle, long version); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FutureHandle fdb_transaction_get_read_version(TransactionHandle transaction); + public static extern IntPtr fdb_transaction_get_read_version(TransactionHandle transaction); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FutureHandle fdb_transaction_get(TransactionHandle transaction, byte* keyName, int keyNameLength, bool snapshot); + public static extern IntPtr fdb_transaction_get(TransactionHandle transaction, byte* keyName, int keyNameLength, bool snapshot); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FutureHandle fdb_transaction_get_addresses_for_key(TransactionHandle transaction, byte* keyName, int keyNameLength); + public static extern IntPtr fdb_transaction_get_addresses_for_key(TransactionHandle transaction, byte* keyName, int keyNameLength); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FutureHandle fdb_transaction_get_key(TransactionHandle transaction, byte* keyName, int keyNameLength, bool orEqual, int offset, bool snapshot); + public static extern IntPtr 
fdb_transaction_get_key(TransactionHandle transaction, byte* keyName, int keyNameLength, bool orEqual, int offset, bool snapshot); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FutureHandle fdb_transaction_get_range( + public static extern IntPtr fdb_transaction_get_range( TransactionHandle transaction, byte* beginKeyName, int beginKeyNameLength, bool beginOrEqual, int beginOffset, byte* endKeyName, int endKeyNameLength, bool endOrEqual, int endOffset, @@ -167,16 +167,16 @@ public static extern void fdb_transaction_clear_range( public static extern void fdb_transaction_atomic_op(TransactionHandle transaction, byte* keyName, int keyNameLength, byte* param, int paramLength, FdbMutationType operationType); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FutureHandle fdb_transaction_commit(TransactionHandle transaction); + public static extern IntPtr fdb_transaction_commit(TransactionHandle transaction); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] public static extern FdbError fdb_transaction_get_committed_version(TransactionHandle transaction, out long version); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FutureHandle fdb_transaction_watch(TransactionHandle transaction, byte* keyName, int keyNameLength); + public static extern IntPtr fdb_transaction_watch(TransactionHandle transaction, byte* keyName, int keyNameLength); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FutureHandle fdb_transaction_on_error(TransactionHandle transaction, FdbError error); + public static extern IntPtr fdb_transaction_on_error(TransactionHandle transaction, FdbError error); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] public static extern void fdb_transaction_reset(TransactionHandle transaction); @@ -193,43 +193,43 @@ public static extern void 
fdb_transaction_clear_range( public static extern void fdb_future_destroy(IntPtr future); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern void fdb_future_cancel(FutureHandle future); + public static extern void fdb_future_cancel(IntPtr future); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern void fdb_future_release_memory(FutureHandle future); + public static extern void fdb_future_release_memory(IntPtr future); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FdbError fdb_future_block_until_ready(FutureHandle futureHandle); + public static extern FdbError fdb_future_block_until_ready(IntPtr futureHandle); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern bool fdb_future_is_ready(FutureHandle futureHandle); + public static extern bool fdb_future_is_ready(IntPtr futureHandle); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FdbError fdb_future_get_error(FutureHandle futureHandle); + public static extern FdbError fdb_future_get_error(IntPtr futureHandle); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FdbError fdb_future_set_callback(FutureHandle future, FdbFutureCallback callback, IntPtr callbackParameter); + public static extern FdbError fdb_future_set_callback(IntPtr future, FdbFutureCallback callback, IntPtr callbackParameter); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FdbError fdb_future_get_version(FutureHandle future, out long version); + public static extern FdbError fdb_future_get_version(IntPtr future, out long version); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FdbError fdb_future_get_key(FutureHandle future, out byte* key, out int keyLength); + public static extern FdbError fdb_future_get_key(IntPtr future, 
out byte* key, out int keyLength); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FdbError fdb_future_get_cluster(FutureHandle future, out ClusterHandle cluster); + public static extern FdbError fdb_future_get_cluster(IntPtr future, out ClusterHandle cluster); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FdbError fdb_future_get_database(FutureHandle future, out DatabaseHandle database); + public static extern FdbError fdb_future_get_database(IntPtr future, out DatabaseHandle database); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FdbError fdb_future_get_value(FutureHandle future, out bool present, out byte* value, out int valueLength); + public static extern FdbError fdb_future_get_value(IntPtr future, out bool present, out byte* value, out int valueLength); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FdbError fdb_future_get_string_array(FutureHandle future, out byte** strings, out int count); + public static extern FdbError fdb_future_get_string_array(IntPtr future, out byte** strings, out int count); [DllImport(FDB_C_DLL, CallingConvention = CallingConvention.Cdecl)] - public static extern FdbError fdb_future_get_keyvalue_array(FutureHandle future, out FdbKeyValue* kv, out int count, out bool more); + public static extern FdbError fdb_future_get_keyvalue_array(IntPtr future, out FdbKeyValue* kv, out int count, out bool more); } @@ -346,51 +346,52 @@ public static int GetMaxApiVersion() #region Futures... 
- public static bool FutureIsReady(FutureHandle futureHandle) + public static bool FutureIsReady(IntPtr futureHandle) { return NativeMethods.fdb_future_is_ready(futureHandle); } - public static void FutureDestroy(IntPtr futureHandle) + public static void FutureDestroy(IntPtr futureHandle, [CallerMemberName] string caller = null) { + Debug.WriteLine("Native.FutureDestroy(0x{0}) from {1}", (object)futureHandle.ToString("X"), caller); if (futureHandle != IntPtr.Zero) { NativeMethods.fdb_future_destroy(futureHandle); } } - public static void FutureCancel(FutureHandle futureHandle) + public static void FutureCancel(IntPtr futureHandle) { NativeMethods.fdb_future_cancel(futureHandle); } - public static void FutureReleaseMemory(FutureHandle futureHandle) + public static void FutureReleaseMemory(IntPtr futureHandle) { NativeMethods.fdb_future_release_memory(futureHandle); } - public static FdbError FutureGetError(FutureHandle future) + public static FdbError FutureGetError(IntPtr future) { return NativeMethods.fdb_future_get_error(future); } - public static FdbError FutureBlockUntilReady(FutureHandle future) + public static FdbError FutureBlockUntilReady(IntPtr future) { #if DEBUG_NATIVE_CALLS - Debug.WriteLine("calling fdb_future_block_until_ready(0x" + future.Handle.ToString("x") + ")..."); + Debug.WriteLine("calling fdb_future_block_until_ready(0x" + future.ToString("x") + ")..."); #endif var err = NativeMethods.fdb_future_block_until_ready(future); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_future_block_until_ready(0x" + future.Handle.ToString("x") + ") => err=" + err); + Debug.WriteLine("fdb_future_block_until_ready(0x" + future.ToString("x") + ") => err=" + err); #endif return err; } - public static FdbError FutureSetCallback(FutureHandle future, FdbFutureCallback callback, IntPtr callbackParameter) + public static FdbError FutureSetCallback(IntPtr future, FdbFutureCallback callback, IntPtr callbackParameter) { var err = 
NativeMethods.fdb_future_set_callback(future, callback, callbackParameter); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_future_set_callback(0x" + future.Handle.ToString("x") + ", 0x" + ptrCallback.ToString("x") + ") => err=" + err); + Debug.WriteLine("fdb_future_set_callback(0x" + future.ToString("x") + ", 0x" + callbackParameter.ToString("x") + ") => err=" + err); #endif return err; } @@ -427,12 +428,12 @@ public static FdbError StopNetwork() #region Clusters... - public static FutureHandle CreateCluster(string path) + public static IntPtr CreateCluster(string path) { var future = NativeMethods.fdb_create_cluster(path); Contract.Assert(future != null); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_create_cluster(" + path + ") => 0x" + future.Handle.ToString("x")); + Debug.WriteLine("fdb_create_cluster(" + path + ") => 0x" + future.ToString("x")); #endif return future; @@ -451,11 +452,11 @@ public static FdbError ClusterSetOption(ClusterHandle cluster, FdbClusterOption return NativeMethods.fdb_cluster_set_option(cluster, option, value, valueLength); } - public static FdbError FutureGetCluster(FutureHandle future, out ClusterHandle cluster) + public static FdbError FutureGetCluster(IntPtr future, out ClusterHandle cluster) { var err = NativeMethods.fdb_future_get_cluster(future, out cluster); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_future_get_cluster(0x" + future.Handle.ToString("x") + ") => err=" + err + ", handle=0x" + cluster.Handle.ToString("x")); + Debug.WriteLine("fdb_future_get_cluster(0x" + future.ToString("x") + ") => err=" + err + ", handle=0x" + cluster.Handle.ToString("x")); #endif //TODO: check if err == Success ? return err; @@ -465,11 +466,11 @@ public static FdbError FutureGetCluster(FutureHandle future, out ClusterHandle c #region Databases... 
- public static FdbError FutureGetDatabase(FutureHandle future, out DatabaseHandle database) + public static FdbError FutureGetDatabase(IntPtr future, out DatabaseHandle database) { var err = NativeMethods.fdb_future_get_database(future, out database); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_future_get_database(0x" + future.Handle.ToString("x") + ") => err=" + err + ", handle=0x" + database.Handle.ToString("x")); + Debug.WriteLine("fdb_future_get_database(0x" + future.ToString("x") + ") => err=" + err + ", handle=0x" + database.Handle.ToString("x")); #endif //TODO: check if err == Success ? return err; @@ -488,7 +489,7 @@ public static void DatabaseDestroy(IntPtr handle) } } - public static FutureHandle ClusterCreateDatabase(ClusterHandle cluster, string name) + public static IntPtr ClusterCreateDatabase(ClusterHandle cluster, string name) { var future = NativeMethods.fdb_cluster_create_database(cluster, name, name == null ? 0 : name.Length); Contract.Assert(future != null); @@ -524,17 +525,17 @@ public static FdbError DatabaseCreateTransaction(DatabaseHandle database, out Tr return err; } - public static FutureHandle TransactionCommit(TransactionHandle transaction) + public static IntPtr TransactionCommit(TransactionHandle transaction) { var future = NativeMethods.fdb_transaction_commit(transaction); Contract.Assert(future != null); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_transaction_commit(0x" + transaction.Handle.ToString("x") + ") => 0x" + future.Handle.ToString("x")); + Debug.WriteLine("fdb_transaction_commit(0x" + transaction.Handle.ToString("x") + ") => 0x" + future.ToString("x")); #endif return future; } - public static FutureHandle TransactionWatch(TransactionHandle transaction, Slice key) + public static IntPtr TransactionWatch(TransactionHandle transaction, Slice key) { if (key.IsNullOrEmpty) throw new ArgumentException("Key cannot be null or empty", "key"); @@ -543,18 +544,18 @@ public static FutureHandle TransactionWatch(TransactionHandle 
transaction, Slice var future = NativeMethods.fdb_transaction_watch(transaction, ptrKey + key.Offset, key.Count); Contract.Assert(future != null); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_transaction_watch(0x" + transaction.Handle.ToString("x") + ", key: '" + FdbKey.Dump(key) + "') => 0x" + future.Handle.ToString("x")); + Debug.WriteLine("fdb_transaction_watch(0x" + transaction.Handle.ToString("x") + ", key: '" + FdbKey.Dump(key) + "') => 0x" + future.ToString("x")); #endif return future; } } - public static FutureHandle TransactionOnError(TransactionHandle transaction, FdbError errorCode) + public static IntPtr TransactionOnError(TransactionHandle transaction, FdbError errorCode) { var future = NativeMethods.fdb_transaction_on_error(transaction, errorCode); Contract.Assert(future != null); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_transaction_on_error(0x" + transaction.Handle.ToString("x") + ", " + errorCode + ") => 0x" + future.Handle.ToString("x")); + Debug.WriteLine("fdb_transaction_on_error(0x" + transaction.Handle.ToString("x") + ", " + errorCode + ") => 0x" + future.ToString("x")); #endif return future; } @@ -583,12 +584,12 @@ public static void TransactionSetReadVersion(TransactionHandle transaction, long NativeMethods.fdb_transaction_set_read_version(transaction, version); } - public static FutureHandle TransactionGetReadVersion(TransactionHandle transaction) + public static IntPtr TransactionGetReadVersion(TransactionHandle transaction) { var future = NativeMethods.fdb_transaction_get_read_version(transaction); Contract.Assert(future != null); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_transaction_get_read_version(0x" + transaction.Handle.ToString("x") + ") => 0x" + future.Handle.ToString("x")); + Debug.WriteLine("fdb_transaction_get_read_version(0x" + transaction.Handle.ToString("x") + ") => 0x" + future.ToString("x")); #endif return future; } @@ -601,15 +602,15 @@ public static FdbError TransactionGetCommittedVersion(TransactionHandle 
transact return NativeMethods.fdb_transaction_get_committed_version(transaction, out version); } - public static FdbError FutureGetVersion(FutureHandle future, out long version) + public static FdbError FutureGetVersion(IntPtr future, out long version) { #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_future_get_version(0x" + future.Handle.ToString("x") + ")"); + Debug.WriteLine("fdb_future_get_version(0x" + future.ToString("x") + ")"); #endif return NativeMethods.fdb_future_get_version(future, out version); } - public static FutureHandle TransactionGet(TransactionHandle transaction, Slice key, bool snapshot) + public static IntPtr TransactionGet(TransactionHandle transaction, Slice key, bool snapshot) { if (key.IsNull) throw new ArgumentException("Key cannot be null", "key"); @@ -621,13 +622,13 @@ public static FutureHandle TransactionGet(TransactionHandle transaction, Slice k var future = NativeMethods.fdb_transaction_get(transaction, ptrKey + key.Offset, key.Count, snapshot); Contract.Assert(future != null); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_transaction_get(0x" + transaction.Handle.ToString("x") + ", key: '" + FdbKey.Dump(key) + "', snapshot: " + snapshot + ") => 0x" + future.Handle.ToString("x")); + Debug.WriteLine("fdb_transaction_get(0x" + transaction.Handle.ToString("x") + ", key: '" + FdbKey.Dump(key) + "', snapshot: " + snapshot + ") => 0x" + future.ToString("x")); #endif return future; } } - public static FutureHandle TransactionGetRange(TransactionHandle transaction, FdbKeySelector begin, FdbKeySelector end, int limit, int targetBytes, FdbStreamingMode mode, int iteration, bool snapshot, bool reverse) + public static IntPtr TransactionGetRange(TransactionHandle transaction, FdbKeySelector begin, FdbKeySelector end, int limit, int targetBytes, FdbStreamingMode mode, int iteration, bool snapshot, bool reverse) { fixed (byte* ptrBegin = begin.Key.Array) fixed (byte* ptrEnd = end.Key.Array) @@ -639,13 +640,13 @@ public static FutureHandle 
TransactionGetRange(TransactionHandle transaction, Fd limit, targetBytes, mode, iteration, snapshot, reverse); Contract.Assert(future != null); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_transaction_get_range(0x" + transaction.Handle.ToString("x") + ", begin: " + begin.PrettyPrint(FdbKey.PrettyPrintMode.Begin) + ", end: " + end.PrettyPrint(FdbKey.PrettyPrintMode.End) + ", " + snapshot + ") => 0x" + future.Handle.ToString("x")); + Debug.WriteLine("fdb_transaction_get_range(0x" + transaction.Handle.ToString("x") + ", begin: " + begin.PrettyPrint(FdbKey.PrettyPrintMode.Begin) + ", end: " + end.PrettyPrint(FdbKey.PrettyPrintMode.End) + ", " + snapshot + ") => 0x" + future.ToString("x")); #endif return future; } } - public static FutureHandle TransactionGetKey(TransactionHandle transaction, FdbKeySelector selector, bool snapshot) + public static IntPtr TransactionGetKey(TransactionHandle transaction, FdbKeySelector selector, bool snapshot) { if (selector.Key.IsNull) throw new ArgumentException("Key cannot be null", "selector"); @@ -654,13 +655,13 @@ public static FutureHandle TransactionGetKey(TransactionHandle transaction, FdbK var future = NativeMethods.fdb_transaction_get_key(transaction, ptrKey + selector.Key.Offset, selector.Key.Count, selector.OrEqual, selector.Offset, snapshot); Contract.Assert(future != null); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_transaction_get_key(0x" + transaction.Handle.ToString("x") + ", " + selector.ToString() + ", " + snapshot + ") => 0x" + future.Handle.ToString("x")); + Debug.WriteLine("fdb_transaction_get_key(0x" + transaction.Handle.ToString("x") + ", " + selector.ToString() + ", " + snapshot + ") => 0x" + future.ToString("x")); #endif return future; } } - public static FutureHandle TransactionGetAddressesForKey(TransactionHandle transaction, Slice key) + public static IntPtr TransactionGetAddressesForKey(TransactionHandle transaction, Slice key) { if (key.IsNullOrEmpty) throw new ArgumentException("Key cannot be null or 
empty", "key"); @@ -669,19 +670,19 @@ public static FutureHandle TransactionGetAddressesForKey(TransactionHandle trans var future = NativeMethods.fdb_transaction_get_addresses_for_key(transaction, ptrKey + key.Offset, key.Count); Contract.Assert(future != null); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_transaction_get_addresses_for_key(0x" + transaction.Handle.ToString("x") + ", key: '" + FdbKey.Dump(key) + "') => 0x" + future.Handle.ToString("x")); + Debug.WriteLine("fdb_transaction_get_addresses_for_key(0x" + transaction.Handle.ToString("x") + ", key: '" + FdbKey.Dump(key) + "') => 0x" + future.ToString("x")); #endif return future; } } - public static FdbError FutureGetValue(FutureHandle future, out bool valuePresent, out Slice value) + public static FdbError FutureGetValue(IntPtr future, out bool valuePresent, out Slice value) { byte* ptr; int valueLength; var err = NativeMethods.fdb_future_get_value(future, out valuePresent, out ptr, out valueLength); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_future_get_value(0x" + future.Handle.ToString("x") + ") => err=" + err + ", present=" + valuePresent + ", valueLength=" + valueLength); + Debug.WriteLine("fdb_future_get_value(0x" + future.ToString("x") + ") => err=" + err + ", present=" + valuePresent + ", valueLength=" + valueLength); #endif if (ptr != null && valueLength >= 0) { @@ -696,13 +697,13 @@ public static FdbError FutureGetValue(FutureHandle future, out bool valuePresent return err; } - public static FdbError FutureGetKey(FutureHandle future, out Slice key) + public static FdbError FutureGetKey(IntPtr future, out Slice key) { byte* ptr; int keyLength; var err = NativeMethods.fdb_future_get_key(future, out ptr, out keyLength); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_future_get_key(0x" + future.Handle.ToString("x") + ") => err=" + err + ", keyLength=" + keyLength); + Debug.WriteLine("fdb_future_get_key(0x" + future.ToString("x") + ") => err=" + err + ", keyLength=" + keyLength); #endif // note: 
fdb_future_get_key is allowed to return NULL for the empty key (not to be confused with a key that has an empty value) @@ -719,7 +720,7 @@ public static FdbError FutureGetKey(FutureHandle future, out Slice key) return err; } - public static FdbError FutureGetKeyValueArray(FutureHandle future, out KeyValuePair[] result, out bool more) + public static FdbError FutureGetKeyValueArray(IntPtr future, out KeyValuePair[] result, out bool more) { result = null; @@ -728,7 +729,7 @@ public static FdbError FutureGetKeyValueArray(FutureHandle future, out KeyValueP var err = NativeMethods.fdb_future_get_keyvalue_array(future, out kvp, out count, out more); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_future_get_keyvalue_array(0x" + future.Handle.ToString("x") + ") => err=" + err + ", count=" + count + ", more=" + more); + Debug.WriteLine("fdb_future_get_keyvalue_array(0x" + future.ToString("x") + ") => err=" + err + ", count=" + count + ", more=" + more); #endif if (Fdb.Success(err)) @@ -789,7 +790,7 @@ public static FdbError FutureGetKeyValueArray(FutureHandle future, out KeyValueP return err; } - public static FdbError FutureGetStringArray(FutureHandle future, out string[] result) + public static FdbError FutureGetStringArray(IntPtr future, out string[] result) { result = null; @@ -798,7 +799,7 @@ public static FdbError FutureGetStringArray(FutureHandle future, out string[] re var err = NativeMethods.fdb_future_get_string_array(future, out strings, out count); #if DEBUG_NATIVE_CALLS - Debug.WriteLine("fdb_future_get_string_array(0x" + future.Handle.ToString("x") + ") => err=" + err + ", count=" + count); + Debug.WriteLine("fdb_future_get_string_array(0x" + future.ToString("x") + ") => err=" + err + ", count=" + count); #endif if (Fdb.Success(err)) diff --git a/FoundationDB.Client/Native/FdbNativeCluster.cs b/FoundationDB.Client/Native/FdbNativeCluster.cs index 1a8831418..7c5d8e42a 100644 --- a/FoundationDB.Client/Native/FdbNativeCluster.cs +++ 
b/FoundationDB.Client/Native/FdbNativeCluster.cs @@ -37,37 +37,59 @@ namespace FoundationDB.Client.Native using System.Threading.Tasks; /// Wraps a native FDBCluster* handle - internal sealed class FdbNativeCluster : IFdbClusterHandler + internal sealed class FdbNativeCluster : FdbFutureContext, IFdbClusterHandler { - private readonly ClusterHandle m_handle; + //private readonly ClusterHandle m_handle; public FdbNativeCluster(ClusterHandle handle) + : base(handle) { - Contract.Requires(handle != null); - m_handle = handle; } - public static Task CreateClusterAsync(string clusterFile, CancellationToken cancellationToken) + private static readonly GlobalNativeContext GlobalContext = new GlobalNativeContext(); + + private sealed class GlobalNativeContext : FdbFutureContext { - var future = FdbNative.CreateCluster(clusterFile); - return FdbFuture.CreateTaskFromHandle(future, - (h) => + + public Task CreateClusterAsync(string clusterFile, CancellationToken ct) + { + IntPtr handle = IntPtr.Zero; + bool dead = true; + try { - ClusterHandle cluster; - var err = FdbNative.FutureGetCluster(h, out cluster); - if (err != FdbError.Success) - { - cluster.Dispose(); - throw Fdb.MapToException(err); - } - var handler = new FdbNativeCluster(cluster); - return (IFdbClusterHandler) handler; - }, - cancellationToken - ); + return RegisterFuture( + FdbNative.CreateCluster(clusterFile), + ref dead, + (h, state) => + { + ClusterHandle cluster; + var err = FdbNative.FutureGetCluster(h, out cluster); + if (err != FdbError.Success) + { + cluster.Dispose(); + throw Fdb.MapToException(err); + } + var handler = new FdbNativeCluster(cluster); + return (IFdbClusterHandler) handler; + }, + null, + ct, + "CreateClusterAsync" + ); + } + finally + { + if (handle != IntPtr.Zero && dead) FdbNative.FutureDestroy(handle); + } + } + } - internal ClusterHandle Handle { get { return m_handle; } } + + public static Task CreateClusterAsync(string clusterFile, CancellationToken cancellationToken) + { + return 
GlobalContext.CreateClusterAsync(clusterFile, cancellationToken); + } public bool IsInvalid { get { return m_handle.IsInvalid; } } @@ -97,10 +119,10 @@ public Task OpenDatabaseAsync(string databaseName, Cancella { if (cancellationToken.IsCancellationRequested) return TaskHelpers.FromCancellation(cancellationToken); - var future = FdbNative.ClusterCreateDatabase(m_handle, databaseName); - return FdbFuture.CreateTaskFromHandle( - future, - (h) => + return StartNewFuture( + (handle, state) => FdbNative.ClusterCreateDatabase(handle, state), + databaseName, + (h, state) => { DatabaseHandle database; var err = FdbNative.FutureGetDatabase(h, out database); @@ -109,18 +131,14 @@ public Task OpenDatabaseAsync(string databaseName, Cancella database.Dispose(); throw Fdb.MapToException(err); } - var handler = new FdbNativeDatabase(database); + var handler = new FdbNativeDatabase(database, (string)state); return (IFdbDatabaseHandler) handler; }, + databaseName, cancellationToken ); } - public void Dispose() - { - if (m_handle != null) m_handle.Dispose(); - } - } diff --git a/FoundationDB.Client/Native/FdbNativeDatabase.cs b/FoundationDB.Client/Native/FdbNativeDatabase.cs index 2ba7e47c5..1cc8baf52 100644 --- a/FoundationDB.Client/Native/FdbNativeDatabase.cs +++ b/FoundationDB.Client/Native/FdbNativeDatabase.cs @@ -37,43 +37,20 @@ namespace FoundationDB.Client.Native /// Wraps a native FDBDatabase* handle [DebuggerDisplay("Handle={m_handle}, Closed={m_handle.IsClosed}")] - internal sealed class FdbNativeDatabase : IFdbDatabaseHandler + internal sealed class FdbNativeDatabase : FdbFutureContext, IFdbDatabaseHandler { - /// Handle that wraps the native FDB_DATABASE* - private readonly DatabaseHandle m_handle; - -#if CAPTURE_STACKTRACES - private readonly StackTrace m_stackTrace; -#endif - - public FdbNativeDatabase(DatabaseHandle handle) - { - if (handle == null) throw new ArgumentNullException("handle"); - - m_handle = handle; -#if CAPTURE_STACKTRACES - m_stackTrace = new 
StackTrace(); -#endif - } - - //REVIEW: do we really need a destructor ? The handle is a SafeHandle, and will take care of itself... - ~FdbNativeDatabase() + public FdbNativeDatabase(DatabaseHandle handle, string name) + : base(handle) { -#if CAPTURE_STACKTRACES - Trace.WriteLine("A database handle (" + m_handle + ") was leaked by " + m_stackTrace); -#endif -#if DEBUG - // If you break here, that means that a native database handler was leaked by a FdbDatabase instance (or that the database instance was leaked) - if (Debugger.IsAttached) Debugger.Break(); -#endif - Dispose(false); + this.Name = name; } - public bool IsInvalid { get { return m_handle.IsInvalid; } } public bool IsClosed { get { return m_handle.IsClosed; } } + public string Name { get; private set; } + public void SetOption(FdbDatabaseOption option, Slice data) { Fdb.EnsureNotOnNetworkThread(); @@ -113,19 +90,6 @@ public IFdbTransactionHandler CreateTransaction(FdbOperationContext context) } } - public void Dispose() - { - Dispose(true); - GC.SuppressFinalize(this); - } - - private void Dispose(bool disposing) - { - if (disposing) - { - if (m_handle != null) m_handle.Dispose(); - } - } } } diff --git a/FoundationDB.Client/Native/FdbNativeTransaction.cs b/FoundationDB.Client/Native/FdbNativeTransaction.cs index 63317a1b1..43f5ad3a7 100644 --- a/FoundationDB.Client/Native/FdbNativeTransaction.cs +++ b/FoundationDB.Client/Native/FdbNativeTransaction.cs @@ -35,6 +35,7 @@ namespace FoundationDB.Client.Native { using FoundationDB.Client.Core; using FoundationDB.Client.Utils; + using FoundationDB.Layers.Tuples; using JetBrains.Annotations; using System; using System.Collections.Generic; @@ -44,50 +45,25 @@ namespace FoundationDB.Client.Native /// Wraps a native FDB_TRANSACTION handle [DebuggerDisplay("Handle={m_handle}, Size={m_payloadBytes}, Closed={m_handle.IsClosed}")] - internal class FdbNativeTransaction : IFdbTransactionHandler + internal class FdbNativeTransaction : FdbFutureContext, 
IFdbTransactionHandler { + private readonly FdbNativeDatabase m_database; - /// FDB_TRANSACTION* handle - private readonly TransactionHandle m_handle; /// Estimated current size of the transaction private int m_payloadBytes; -#if CAPTURE_STACKTRACES - private StackTrace m_stackTrace; -#endif - - public FdbNativeTransaction(FdbNativeDatabase db, TransactionHandle handle) + public FdbNativeTransaction([NotNull] FdbNativeDatabase db, [NotNull] TransactionHandle handle) + : base(handle) { if (db == null) throw new ArgumentNullException("db"); - if (handle == null) throw new ArgumentNullException("handle"); m_database = db; - m_handle = handle; -#if CAPTURE_STACKTRACES - m_stackTrace = new StackTrace(); -#endif - } - - //REVIEW: do we really need a destructor ? The handle is a SafeHandle, and will take care of itself... - ~FdbNativeTransaction() - { -#if CAPTURE_STACKTRACES - Trace.WriteLine("A transaction handle (" + m_handle + ", " + m_payloadBytes + " bytes written) was leaked by " + m_stackTrace); -#endif -#if DEBUG - // If you break here, that means that a native transaction handler was leaked by a FdbTransaction instance (or that the transaction instance was leaked) - if (Debugger.IsAttached) Debugger.Break(); -#endif - Dispose(false); } #region Properties... 
public bool IsClosed { get { return m_handle.IsClosed; } } - /// Native FDB_TRANSACTION* handle - public TransactionHandle Handle { get { return m_handle; } } - /// Database handler that owns this transaction public FdbNativeDatabase Database { get { return m_database; } } @@ -133,18 +109,20 @@ public void SetOption(FdbTransactionOption option, Slice data) public Task GetReadVersionAsync(CancellationToken cancellationToken) { - var future = FdbNative.TransactionGetReadVersion(m_handle); - return FdbFuture.CreateTaskFromHandle(future, - (h) => + return StartNewFuture( + (handle, state) => FdbNative.TransactionGetReadVersion(handle), + default(object), + (future, state) => { long version; - var err = FdbNative.FutureGetVersion(h, out version); + var err = FdbNative.FutureGetVersion(future, out version); #if DEBUG_TRANSACTIONS Debug.WriteLine("FdbTransaction[" + m_id + "].GetReadVersion() => err=" + err + ", version=" + version); #endif Fdb.DieOnError(err); return version; }, + default(object), cancellationToken ); } @@ -154,10 +132,8 @@ public void SetReadVersion(long version) FdbNative.TransactionSetReadVersion(m_handle, version); } - private static bool TryGetValueResult(FutureHandle h, out Slice result) + private static bool TryGetValueResult(IntPtr h, out Slice result) { - Contract.Requires(h != null); - bool present; var err = FdbNative.FutureGetValue(h, out present, out result); #if DEBUG_TRANSACTIONS @@ -167,22 +143,21 @@ private static bool TryGetValueResult(FutureHandle h, out Slice result) return present; } - private static Slice GetValueResultBytes(FutureHandle h) + private static Slice GetValueResultBytes(IntPtr h) { - Contract.Requires(h != null); - Slice result; - if (!TryGetValueResult(h, out result)) - { - return Slice.Nil; - } - return result; + return !TryGetValueResult(h, out result) ? 
Slice.Nil : result; } public Task GetAsync(Slice key, bool snapshot, CancellationToken cancellationToken) { - var future = FdbNative.TransactionGet(m_handle, key, snapshot); - return FdbFuture.CreateTaskFromHandle(future, (h) => GetValueResultBytes(h), cancellationToken); + return StartNewFuture( + (handle, state) => FdbNative.TransactionGet(handle, state.Item1, state.Item2), + FdbTuple.Create(key, snapshot), + (future, state) => GetValueResultBytes(future), + null, + cancellationToken + ); } public Task GetValuesAsync(Slice[] keys, bool snapshot, CancellationToken cancellationToken) @@ -191,24 +166,24 @@ public Task GetValuesAsync(Slice[] keys, bool snapshot, CancellationTok if (keys.Length == 0) return Task.FromResult(Slice.EmptySliceArray); - var futures = new FutureHandle[keys.Length]; - try - { - for (int i = 0; i < keys.Length; i++) - { - futures[i] = FdbNative.TransactionGet(m_handle, keys[i], snapshot); - } - } - catch - { - for (int i = 0; i < keys.Length; i++) + return StartNewFutures( + keys.Length, + (handle, state, futures) => { - if (futures[i] == null) break; - futures[i].Dispose(); - } - throw; - } - return FdbFuture.CreateTaskFromHandleArray(futures, (h) => GetValueResultBytes(h), cancellationToken); + var _keys = state.Item1; + var _snapshot = state.Item2; + for (int i = 0; i < _keys.Length; i++) + { + var h = FdbNative.TransactionGet(handle, _keys[i], _snapshot); + if (h == IntPtr.Zero) throw new FdbException(FdbError.OperationFailed); + futures[i] = h; + } + }, + FdbTuple.Create(keys, snapshot), + (future, state) => GetValueResultBytes(future), + default(object), //TODO: buffer for the slices + cancellationToken + ); } /// Extract a chunk of result from a completed Future @@ -216,7 +191,7 @@ public Task GetValuesAsync(Slice[] keys, bool snapshot, CancellationTok /// Receives true if there are more result, or false if all results have been transmited /// Array of key/value pairs, or an exception [NotNull] - private static KeyValuePair[] 
GetKeyValueArrayResult(FutureHandle h, out bool more) + private static KeyValuePair[] GetKeyValueArrayResult(IntPtr h, out bool more) { KeyValuePair[] result; var err = FdbNative.FutureGetKeyValueArray(h, out result, out more); @@ -233,26 +208,26 @@ public Task GetRangeAsync(FdbKeySelector begin, FdbKeySelector en Contract.Requires(options != null); bool reversed = options.Reverse ?? false; - var future = FdbNative.TransactionGetRange(m_handle, begin, end, options.Limit ?? 0, options.TargetBytes ?? 0, options.Mode ?? FdbStreamingMode.Iterator, iteration, snapshot, reversed); - return FdbFuture.CreateTaskFromHandle( - future, - (h) => + + return StartNewFuture( + (handle, _) => FdbNative.TransactionGetRange(handle, begin, end, options.Limit ?? 0, options.TargetBytes ?? 0, options.Mode ?? FdbStreamingMode.Iterator, iteration, snapshot, reversed), + default(object), //TODO: pass options & co? + (future, state) => { // TODO: quietly return if disposed bool hasMore; - var chunk = GetKeyValueArrayResult(h, out hasMore); + var chunk = GetKeyValueArrayResult(future, out hasMore); return new FdbRangeChunk(hasMore, chunk, iteration, reversed); }, + default(object), //TODO: pass options & co? 
cancellationToken ); } - private static Slice GetKeyResult(FutureHandle h) + private static Slice GetKeyResult(IntPtr h) { - Contract.Requires(h != null); - Slice result; var err = FdbNative.FutureGetKey(h, out result); #if DEBUG_TRANSACTIONS @@ -264,10 +239,11 @@ private static Slice GetKeyResult(FutureHandle h) public Task GetKeyAsync(FdbKeySelector selector, bool snapshot, CancellationToken cancellationToken) { - var future = FdbNative.TransactionGetKey(m_handle, selector, snapshot); - return FdbFuture.CreateTaskFromHandle( - future, - (h) => GetKeyResult(h), + return StartNewFuture( + (handle, state) => FdbNative.TransactionGetKey(handle, state.Item1, state.Item2), + FdbTuple.Create(selector, snapshot), + (future, state) => GetKeyResult(future), + default(object), cancellationToken ); } @@ -276,25 +252,26 @@ public Task GetKeysAsync(FdbKeySelector[] selectors, bool snapshot, Can { Contract.Requires(selectors != null); - var futures = new FutureHandle[selectors.Length]; - try - { - for (int i = 0; i < selectors.Length; i++) - { - futures[i] = FdbNative.TransactionGetKey(m_handle, selectors[i], snapshot); - } - } - catch - { - for (int i = 0; i < selectors.Length; i++) - { - if (futures[i] == null) break; - futures[i].Dispose(); - } - throw; - } - return FdbFuture.CreateTaskFromHandleArray(futures, (h) => GetKeyResult(h), cancellationToken); + if (selectors.Length == 0) return Task.FromResult(Slice.EmptySliceArray); + return StartNewFutures( + selectors.Length, + (handle, state, futures) => + { + var _selectors = state.Item1; + var _snapshot = state.Item2; + for (int i = 0; i < _selectors.Length; i++) + { + var h = FdbNative.TransactionGetKey(handle, _selectors[i], _snapshot); + if (h == IntPtr.Zero) throw new FdbException(FdbError.OperationFailed); + futures[i] = h; + } + }, + FdbTuple.Create(selectors, snapshot), + (future, state) => GetKeyResult(future), + default(object), //TODO: buffer for the slices + cancellationToken + ); } #endregion @@ -340,10 +317,8 @@ 
public void AddConflictRange(Slice beginKeyInclusive, Slice endKeyExclusive, Fdb } [NotNull] - private static string[] GetStringArrayResult(FutureHandle h) + private static string[] GetStringArrayResult(IntPtr h) { - Contract.Requires(h != null); - string[] result; var err = FdbNative.FutureGetStringArray(h, out result); #if DEBUG_TRANSACTIONS @@ -356,10 +331,11 @@ private static string[] GetStringArrayResult(FutureHandle h) public Task GetAddressesForKeyAsync(Slice key, CancellationToken cancellationToken) { - var future = FdbNative.TransactionGetAddressesForKey(m_handle, key); - return FdbFuture.CreateTaskFromHandle( - future, - (h) => GetStringArrayResult(h), + return StartNewFuture( + (handle, state) => FdbNative.TransactionGetAddressesForKey(handle, state), + key, + (future, state) => GetStringArrayResult(future), + default(object), cancellationToken ); } @@ -370,12 +346,13 @@ public Task GetAddressesForKeyAsync(Slice key, CancellationToken cance public FdbWatch Watch(Slice key, CancellationToken cancellationToken) { - var future = FdbNative.TransactionWatch(m_handle, key); - return new FdbWatch( - FdbFuture.FromHandle(future, (h) => key, cancellationToken), - key, - Slice.Nil - ); + throw new NotImplementedException("FIXME: Future refactoring in progress! I owe you a beer (*) if I ever forget to remove this before committing! (*: if you come get it in person!)"); + //var future = FdbNative.TransactionWatch(m_handle, key); + //return new FdbWatch( + // FdbFuture.FromHandle(future, (h) => key, cancellationToken), + // key, + // Slice.Nil + //); } #endregion @@ -402,14 +379,28 @@ public long GetCommittedVersion() /// As with other client/server databases, in some failure scenarios a client may be unable to determine whether a transaction succeeded. In these cases, CommitAsync() will throw CommitUnknownResult error. 
The OnErrorAsync() function treats this error as retryable, so retry loops that don’t check for CommitUnknownResult could execute the transaction twice. In these cases, you must consider the idempotence of the transaction. public Task CommitAsync(CancellationToken cancellationToken) { - var future = FdbNative.TransactionCommit(m_handle); - return FdbFuture.CreateTaskFromHandle(future, (h) => null, cancellationToken); + return StartNewFuture( + (handle, state) => FdbNative.TransactionCommit(handle), + default(object), + (future, state) => state, + default(object), //TODO:? + cancellationToken + ); } public Task OnErrorAsync(FdbError code, CancellationToken cancellationToken) { - var future = FdbNative.TransactionOnError(m_handle, code); - return FdbFuture.CreateTaskFromHandle(future, (h) => { ResetInternal(); return null; }, cancellationToken); + return StartNewFuture( + (handle, state) => FdbNative.TransactionOnError(handle, state), + code, + (h, state) => + { + ((FdbNativeTransaction)state).ResetInternal(); + return default(object); + }, + this, + cancellationToken + ); } public void Reset() @@ -430,25 +421,6 @@ private void ResetInternal() #endregion - #region IDisposable... - - public void Dispose() - { - Dispose(true); - GC.SuppressFinalize(this); - } - - private void Dispose(bool disposing) - { - if (disposing) - { - // Dispose of the handle - if (!m_handle.IsClosed) m_handle.Dispose(); - } - } - - #endregion - } } diff --git a/FoundationDB.Client/Native/Handles/FutureHandle.cs b/FoundationDB.Client/Native/Handles/FutureHandle.cs index bc90a5a06..22b2c75aa 100644 --- a/FoundationDB.Client/Native/Handles/FutureHandle.cs +++ b/FoundationDB.Client/Native/Handles/FutureHandle.cs @@ -26,6 +26,8 @@ DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY */ #endregion +#if REFACTORED + namespace FoundationDB.Client.Native { using FoundationDB.Client.Utils; @@ -62,3 +64,5 @@ public override string ToString() } } + +#endif \ No newline at end of file diff --git a/FoundationDB.Client/Native/IFdbFuture.cs b/FoundationDB.Client/Native/IFdbFuture.cs new file mode 100644 index 000000000..317037938 --- /dev/null +++ b/FoundationDB.Client/Native/IFdbFuture.cs @@ -0,0 +1,53 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +// enable this to help debug Futures +#undef DEBUG_FUTURES + +namespace FoundationDB.Client.Native +{ + using System; + + internal interface IFdbFuture + { + /// Unique identifier of this future + IntPtr Cookie { get; } + + /// Label of the future (usually the name of the operation) + string Label { get; } + + /// Test if this was the last pending handle for this future, or not + /// Handle that completed + /// True if this was the last handle and OnFired() can be called, or False if more handles need to fire first. + bool Visit(IntPtr handle); + + /// Called when all handles tracked by this future have fired + void OnFired(); + } + +} From 6c3c00afec31c01891df0d35e970f410e6aed52c Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Wed, 28 Jan 2015 14:02:08 +0100 Subject: [PATCH 40/63] A bit of refactoring and cleanup --- FoundationDB.Client/Native/FdbFuture.cs | 91 +++------ FoundationDB.Client/Native/FdbFutureArray.cs | 180 +++++++++-------- .../Native/FdbFutureContext.cs | 186 +++++++++++++++--- FoundationDB.Client/Native/FdbFutureSingle.cs | 100 +++++----- FoundationDB.Client/Native/IFdbFuture.cs | 7 +- 5 files changed, 334 insertions(+), 230 deletions(-) diff --git a/FoundationDB.Client/Native/FdbFuture.cs b/FoundationDB.Client/Native/FdbFuture.cs index 0827e7adc..4a0387d7e 100644 --- a/FoundationDB.Client/Native/FdbFuture.cs +++ b/FoundationDB.Client/Native/FdbFuture.cs @@ -29,11 +29,14 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY // enable this to help debug Futures #undef DEBUG_FUTURES +using System.Diagnostics.Contracts; + namespace FoundationDB.Client.Native { using System; using System.Diagnostics; using System.Runtime.CompilerServices; + using System.Threading; using System.Threading.Tasks; /// Base class for all FDBFuture wrappers @@ -44,11 +47,12 @@ internal abstract class FdbFuture : TaskCompletionSource, IFdbFuture #region Private Members... 
- ///// Optionnal registration on the parent Cancellation Token - ///// Is only valid if FLAG_HAS_CTR is set - //protected CancellationTokenRegistration m_ctr; + /// Optionnal registration on the parent Cancellation Token + /// Is only valid if FLAG_HAS_CTR is set + internal CancellationTokenRegistration m_ctr; - protected FdbFuture(IntPtr cookie, string label) + protected FdbFuture(IntPtr cookie, string label, object state) + : base(state) { this.Cookie = cookie; this.Label = label; @@ -62,45 +66,11 @@ protected FdbFuture(IntPtr cookie, string label) #region Cancellation... -#if REFACTORED - - protected void RegisterForCancellation(CancellationToken cancellationToken) - { - //note: if the token is already cancelled, the callback handler will run inline and any exception would bubble up here - //=> this is not a problem because the ctor already has a try/catch that will clean up everything - m_ctr = cancellationToken.Register( - (_state) => { CancellationHandler(_state); }, - this, - false - ); - } - - protected void UnregisterCancellationRegistration() - { - // unsubscribe from the parent cancellation token if there was one - m_ctr.Dispose(); - m_ctr = default(CancellationTokenRegistration); - } - - private static void CancellationHandler(object state) - { - var future = state as FdbFuture; - if (future != null) - { -#if DEBUG_FUTURES - Debug.WriteLine("Future<" + typeof(T).Name + ">.Cancel(0x" + future.m_handle.Handle.ToString("x") + ") was called on thread #" + Thread.CurrentThread.ManagedThreadId.ToString()); -#endif - future.Cancel(); - } - } - -#endif - #endregion public abstract bool Visit(IntPtr handle); - public abstract void OnFired(); + public abstract void OnReady(); /// Return true if the future has completed (successfully or not) public bool IsReady @@ -117,30 +87,33 @@ public TaskAwaiter GetAwaiter() /// Try to abort the task (if it is still running) public void Cancel() { - throw new NotImplementedException("FIXME: Future Cancellation!"); -#if 
REFACTORED - if (HasAnyFlags(FdbFuture.Flags.DISPOSED | FdbFuture.Flags.COMPLETED | FdbFuture.Flags.CANCELLED)) + if (this.Task.IsCanceled) return; + + OnCancel(); + } + + protected abstract void OnCancel(); + + protected void PublishResult(T result) + { + TrySetResult(result); + } + + protected void PublishError(Exception error, FdbError code) + { + if (error != null) { - return; + TrySetException(error); } - - if (TrySetFlag(FdbFuture.Flags.CANCELLED)) + else if (FdbFutureContext.ClassifyErrorSeverity(code) == FdbFutureContext.CATEGORY_CANCELLED) + { + TrySetCanceled(); + } + else { - bool fromCallback = Fdb.IsNetworkThread; - try - { - if (!this.Task.IsCompleted) - { - CancelHandles(); - SetCanceled(fromCallback); - } - } - finally - { - TryCleanup(); - } + Contract.Assert(code != FdbError.Success); + TrySetException(Fdb.MapToException(code)); } -#endif } } diff --git a/FoundationDB.Client/Native/FdbFutureArray.cs b/FoundationDB.Client/Native/FdbFutureArray.cs index bffcf5c0a..72b0a0c45 100644 --- a/FoundationDB.Client/Native/FdbFutureArray.cs +++ b/FoundationDB.Client/Native/FdbFutureArray.cs @@ -37,26 +37,36 @@ namespace FoundationDB.Client.Native /// Type of result internal sealed class FdbFutureArray : FdbFuture { + // This future encapsulate multiple FDBFuture* handles and use ref-counting to detect when all the handles have fired + // The ref-counting is handled by the network thread, and invokation of future.OnReady() is deferred to the ThreadPool once the counter reaches zero + // The result array is computed once all FDBFuture are ready, from the ThreadPool. + // If at least one of the FDBFuture fails, the Task fails, using the most "serious" error found (ie: Non-Retryable > Cancelled > Retryable) #region Private Members... + /// Encapsulated handles + // May contains IntPtr.Zero handles if there was a problem when setting up the callbacks. 
+ // Atomically set to null by the first thread that needs to destroy all the handles + [CanBeNull] private IntPtr[] m_handles; + /// Number of handles that haven't fired yet private int m_pending; + /// Lambda used to extract the result of one handle + // the first argument is the FDBFuture handle that must be ready and not failed + // the second argument is a state that is passed by the caller. + [NotNull] private readonly Func m_resultSelector; - private readonly object m_state; - #endregion internal FdbFutureArray([NotNull] IntPtr[] handles, [NotNull] Func selector, object state, IntPtr cookie, string label) - : base(cookie, label) + : base(cookie, label, state) { m_handles = handles; m_pending = handles.Length; m_resultSelector = selector; - m_state = state; } public override bool Visit(IntPtr handle) @@ -64,128 +74,116 @@ public override bool Visit(IntPtr handle) return 0 == Interlocked.Decrement(ref m_pending); } - private const int CATEGORY_SUCCESS = 0; - private const int CATEGORY_RETRYABLE = 1; - private const int CATEGORY_CANCELLED = 2; - private const int CATEGORY_FAILURE = 3; - - private static int ClassifyErrorSeverity(FdbError error) + public override void OnReady() { - switch (error) - { - case FdbError.Success: - return CATEGORY_SUCCESS; - - case FdbError.PastVersion: - case FdbError.FutureVersion: - case FdbError.TimedOut: - case FdbError.TooManyWatches: - return CATEGORY_RETRYABLE; - - case FdbError.OperationCancelled: - case FdbError.TransactionCancelled: - return CATEGORY_CANCELLED; - - default: - return CATEGORY_FAILURE; - } - } - - public override void OnFired() - { - var handles = Interlocked.Exchange(ref m_handles, null); - if (handles == null) return; // already disposed? 
- - Debug.WriteLine("Future{0}<{1}[]>.OnFired({2})", this.Label, typeof (T).Name, handles.Length); - //README: // - This callback will fire either from the ThreadPool (async ops) or inline form the ctor of the future (non-async ops, or ops that where served from some cache). // - The method *MUST* dispose the future handle before returning, and *SHOULD* do so before signaling the task. // => This is because continuations may run inline, and start new futures from there, while we still have our original future handle opened. + IntPtr[] handles = null; try { + // make sure that nobody can destroy our handles while we are using them. + handles = Interlocked.Exchange(ref m_handles, null); + if (handles == null) return; // already disposed? + + Debug.WriteLine("FutureArray.{0}<{1}[]>.OnReady([{2}])", this.Label, typeof(T).Name, handles.Length); + T[] results = new T[handles.Length]; FdbError code = FdbError.Success; int severity = 0; Exception error = null; - try - { - if (this.Task.IsCompleted) - { // task has already been handled by someone else - return; - } - for (int i = 0; i < results.Length; i++) + if (this.Task.IsCompleted) + { // task has already been handled by someone else + return; + } + + var state = this.Task.AsyncState; + for (int i = 0; i < results.Length; i++) + { + var handle = handles[i]; + var err = FdbNative.FutureGetError(handle); + if (err == FdbError.Success) { - var handle = handles[i]; - var err = FdbNative.FutureGetError(handle); - if (err == FdbError.Success) + if (code != FdbError.Success) + { // there's been at least one error before, so there is no point in computing the result, it would be discarded anyway + continue; + } + + try { - if (code != FdbError.Success) - { // there's been at least one error before, so there is no point in computing the result, it would be discarded anyway - continue; - } - - try - { - results[i] = m_resultSelector(handle, m_state); - } - catch (AccessViolationException e) - { // trouble in paradise! 
- - Debug.WriteLine("EPIC FAIL: " + e.ToString()); - - // => THIS IS VERY BAD! We have no choice but to terminate the process immediately, because any new call to any method to the binding may end up freezing the whole process (best case) or sending corrupted data to the cluster (worst case) - if (Debugger.IsAttached) Debugger.Break(); - - Environment.FailFast("FIXME: FDB done goofed!", e); - } - catch (Exception e) - { - Debug.WriteLine("FAIL: " + e.ToString()); - code = FdbError.InternalError; - error = e; - break; - } + results[i] = m_resultSelector(handle, state); } - else if (code != err) + catch (AccessViolationException e) + { // trouble in paradise! + + Debug.WriteLine("EPIC FAIL: " + e.ToString()); + + // => THIS IS VERY BAD! We have no choice but to terminate the process immediately, because any new call to any method to the binding may end up freezing the whole process (best case) or sending corrupted data to the cluster (worst case) + if (Debugger.IsAttached) Debugger.Break(); + + Environment.FailFast("FIXME: FDB done goofed!", e); + } + catch (Exception e) { - int cur = ClassifyErrorSeverity(err); - if (cur > severity) - { // error is more serious than before - severity = cur; - code = err; - } + Debug.WriteLine("FAIL: " + e.ToString()); + code = FdbError.InternalError; + error = e; + break; } } - } - finally - { - foreach (var handle in handles) + else if (code != err) { - if (handle != IntPtr.Zero) FdbNative.FutureDestroy(handle); + int cur = FdbFutureContext.ClassifyErrorSeverity(err); + if (cur > severity) + { // error is more serious than before + severity = cur; + code = err; + } } } + // since continuations may fire inline, make sure to release all the memory used by this handle first + FdbFutureContext.DestroyHandles(ref handles); + if (code == FdbError.Success) { - TrySetResult(results); - } - else if (code == FdbError.OperationCancelled || code == FdbError.TransactionCancelled) - { - TrySetCanceled(); + PublishResult(results); } else { - 
TrySetException(error ?? Fdb.MapToException(code)); + PublishError(error, code); } } catch (Exception e) { // we must not blow up the TP or the parent, so make sure to propagate all exceptions to the task TrySetException(e); } + finally + { + if (handles != null) FdbFutureContext.DestroyHandles(ref handles); + GC.KeepAlive(this); + } } + + protected override void OnCancel() + { + var handles = Volatile.Read(ref m_handles); + //TODO: we probably need locking to prevent concurrent destroy and cancel calls + if (handles != null) + { + foreach (var handle in handles) + { + if (handle != IntPtr.Zero) + { + FdbNative.FutureCancel(handle); + } + } + } + } + } } \ No newline at end of file diff --git a/FoundationDB.Client/Native/FdbFutureContext.cs b/FoundationDB.Client/Native/FdbFutureContext.cs index 2dba49c7a..ea7157c28 100644 --- a/FoundationDB.Client/Native/FdbFutureContext.cs +++ b/FoundationDB.Client/Native/FdbFutureContext.cs @@ -29,13 +29,11 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY // enable this to capture the stacktrace of the ctor, when troubleshooting leaked transaction handles #undef CAPTURE_STACKTRACES -using System.IO.IsolatedStorage; +using FoundationDB.Async; namespace FoundationDB.Client.Native { - using FoundationDB.Client.Core; using FoundationDB.Client.Utils; - using FoundationDB.Layers.Tuples; using JetBrains.Annotations; using System; using System.Collections.Generic; @@ -89,7 +87,7 @@ private static void FutureCallbackHandler(IntPtr handle, IntPtr cookie) Debug.WriteLine("FutureCallbackHandler(0x{0}, {1:X8} | {2:X8}) called", handle.ToString("X"), cookie.ToInt64() >> 32, cookie.ToInt64() & uint.MaxValue); - bool deferred = false; + bool keepAlive = false; try { @@ -104,12 +102,12 @@ private static void FutureCallbackHandler(IntPtr handle, IntPtr cookie) if (context != null) { Contract.Assert(context.m_contextId == contextId); - deferred = context.OnFired(handle, cookie); + keepAlive = context.OnFutureReady(handle, cookie); } } finally { - 
if (!deferred) FdbNative.FutureDestroy(handle); + if (!keepAlive) DestroyHandle(ref handle); } } @@ -189,8 +187,7 @@ protected virtual void Dispose(bool disposing) /// A callback has fire for a future handled by this context /// /// - /// If this flag is set to true, then the caller will NOT destroy the future. - private bool OnFired(IntPtr handle, IntPtr cookie) + private bool OnFutureReady(IntPtr handle, IntPtr cookie) { IFdbFuture future; lock (m_futures) @@ -205,7 +202,7 @@ private bool OnFired(IntPtr handle, IntPtr cookie) ThreadPool.UnsafeQueueUserWorkItem( (state) => { - ((IFdbFuture)state).OnFired(); + ((IFdbFuture)state).OnReady(); //TODO: if it fails, maybe we should remove it from m_futures? }, future @@ -237,23 +234,40 @@ protected Task RegisterFuture( string label ) { + if (ct.IsCancellationRequested) return TaskHelpers.FromCancellation(ct); + FdbFutureSingle future = null; IntPtr cookie = IntPtr.Zero; try { - uint futureId = (uint)Interlocked.Increment(ref m_localCookieCounter); + uint futureId = (uint) Interlocked.Increment(ref m_localCookieCounter); cookie = MakeCallbackCookie(m_contextId, futureId); future = new FdbFutureSingle(handle, selector, state, cookie, label); if (FdbNative.FutureIsReady(handle)) { // the result is already computed - Debug.WriteLine("Future.{0} 0x{1} already completed!", label, handle.ToString("X")); + Debug.WriteLine("FutureSingle.{0} 0x{1} already completed!", label, handle.ToString("X")); + cookie = IntPtr.Zero; mustDispose = false; - future.OnFired(); + future.OnReady(); return future.Task; } + if (ct.CanBeCanceled) + { + if (ct.IsCancellationRequested) + { + future.TrySetCanceled(); + cookie = IntPtr.Zero; + return future.Task; + } + + // note that the cancellation handler can fire inline, but it will only mark the future as cancelled + // this means that we will still wait for the future callback to fire and set the task state in there. 
+ future.m_ctr = RegisterForCancellation(future, ct); + } + lock (m_futures) { //TODO: marke the future as "registered" (must unreg when it fires?) @@ -262,8 +276,9 @@ string label var err = FdbNative.FutureSetCallback(handle, GlobalCallback, cookie); if (!Fdb.Success(err)) - { - throw Fdb.MapToException(err); + { // the callback will not fire, so we have to abort the future immediately + future.TrySetException(Fdb.MapToException(err)); + return future.Task; } mustDispose = false; return future.Task; @@ -273,17 +288,20 @@ string label if (future != null) { future.TrySetException(e); - if (cookie != IntPtr.Zero) - { - lock (m_futures) - { - m_futures.Remove(cookie); - } - } return future.Task; } throw; } + finally + { + if (mustDispose && cookie != IntPtr.Zero) + { // make sure that we never leak a failed future ! + lock (m_futures) + { + m_futures.Remove(cookie); + } + } + } } /// Add a new future handle to this context @@ -303,11 +321,13 @@ protected Task RegisterFutures( string label ) { + if (ct.IsCancellationRequested) return TaskHelpers.FromCancellation(ct); + FdbFutureArray future = null; IntPtr cookie = IntPtr.Zero; try { - uint futureId = (uint)Interlocked.Increment(ref m_localCookieCounter); + uint futureId = (uint) Interlocked.Increment(ref m_localCookieCounter); cookie = MakeCallbackCookie(m_contextId, futureId); // make a copy because we may diverge from the caller if we partially fail to register the callbacks below @@ -315,7 +335,24 @@ string label handles.CopyTo(tmp, 0); future = new FdbFutureArray(tmp, selector, state, cookie, label); - //TODO: we could check if all handles are already completed/failed? + // check the case where all futures are already ready (served from cache?) 
+ bool ready = true; + foreach (var handle in tmp) + { + if (!FdbNative.FutureIsReady(handle)) + { + ready = false; + break; + } + } + if (ready) + { + Debug.WriteLine("FutureArray.{0} [{1}] already completed!", label, tmp.Length); + cookie = IntPtr.Zero; + mustDispose = false; + future.OnReady(); + return future.Task; + } lock (m_futures) { @@ -323,6 +360,16 @@ string label m_futures[cookie] = future; } + if (ct.CanBeCanceled) + { + future.m_ctr = RegisterForCancellation(future, ct); + if (future.Task.IsCompleted) + { // cancellation ran inline + future.TrySetCanceled(); + return future.Task; + } + } + for (int i = 0; i < handles.Length; i++) { var err = FdbNative.FutureSetCallback(handles[i], GlobalCallback, cookie); @@ -354,19 +401,97 @@ string label if (future != null) { future.TrySetException(e); - if (cookie != IntPtr.Zero) - { - lock (m_futures) - { - m_futures.Remove(cookie); - } - } return future.Task; } throw; } + finally + { + if (mustDispose && cookie != IntPtr.Zero) + { // make sure that we never leak a failed future ! 
+ lock (m_futures) + { + m_futures.Remove(cookie); + } + } + + } } + internal static CancellationTokenRegistration RegisterForCancellation(IFdbFuture future, CancellationToken cancellationToken) + { + //note: if the token is already cancelled, the callback handler will run inline and any exception would bubble up here + //=> this is not a problem because the ctor already has a try/catch that will clean up everything + return cancellationToken.Register( + (_state) => { CancellationHandler(_state); }, + future, + false + ); + } + + private static void CancellationHandler(object state) + { + var future = (IFdbFuture)state; + Contract.Assert(state != null); +#if DEBUG_FUTURES + Debug.WriteLine("Future<" + typeof(T).Name + ">.Cancel(0x" + future.m_handle.Handle.ToString("x") + ") was called on thread #" + Thread.CurrentThread.ManagedThreadId.ToString()); +#endif + future.Cancel(); + } + + internal static void DestroyHandle(ref IntPtr handle) + { + if (handle != IntPtr.Zero) + { + FdbNative.FutureDestroy(handle); + handle = IntPtr.Zero; + } + } + + internal static void DestroyHandles(ref IntPtr[] handles) + { + if (handles != null) + { + foreach (var handle in handles) + { + if (handle != IntPtr.Zero) FdbNative.FutureDestroy(handle); + } + handles = null; + } + } + + internal const int CATEGORY_SUCCESS = 0; + internal const int CATEGORY_RETRYABLE = 1; + internal const int CATEGORY_CANCELLED = 2; + internal const int CATEGORY_FAILURE = 3; + + internal static int ClassifyErrorSeverity(FdbError error) + { + switch (error) + { + case FdbError.Success: + { + return CATEGORY_SUCCESS; + } + case FdbError.PastVersion: + case FdbError.FutureVersion: + case FdbError.TimedOut: + case FdbError.TooManyWatches: + { + return CATEGORY_RETRYABLE; + } + + case FdbError.OperationCancelled: + { + return CATEGORY_CANCELLED; + } + + default: + { + return CATEGORY_FAILURE; + } + } + } } internal class FdbFutureContext : FdbFutureContext @@ -472,6 +597,7 @@ protected Task StartNewFutures( 
throw; } } + } } diff --git a/FoundationDB.Client/Native/FdbFutureSingle.cs b/FoundationDB.Client/Native/FdbFutureSingle.cs index 0bb0ff4ee..c6b918701 100644 --- a/FoundationDB.Client/Native/FdbFutureSingle.cs +++ b/FoundationDB.Client/Native/FdbFutureSingle.cs @@ -29,6 +29,7 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY #undef DEBUG_FUTURES using System.Diagnostics; +using System.Security.Policy; namespace FoundationDB.Client.Native { @@ -50,19 +51,16 @@ internal sealed class FdbFutureSingle : FdbFuture /// Lambda used to extract the result of this FDBFuture private readonly Func m_resultSelector; - private readonly object m_state; - #endregion internal FdbFutureSingle(IntPtr handle, [NotNull] Func selector, object state, IntPtr cookie, string label) - : base(cookie, label) + : base(cookie, label, state) { if (handle == IntPtr.Zero) throw new ArgumentException("Invalid future handle", "handle"); if (selector == null) throw new ArgumentNullException("selector"); m_handle = handle; m_resultSelector = selector; - m_state = state; } public override bool Visit(IntPtr handle) @@ -72,12 +70,9 @@ public override bool Visit(IntPtr handle) } [HandleProcessCorruptedStateExceptions] // to be able to handle Access Violations and terminate the process - public override void OnFired() + public override void OnReady() { - Debug.WriteLine("Future{0}<{1}>.OnFired(0x{2})", this.Label, typeof(T).Name, m_handle.ToString("X8")); - - var handle = Interlocked.Exchange(ref m_handle, IntPtr.Zero); - if (handle == IntPtr.Zero) return; // already disposed? + IntPtr handle = IntPtr.Zero; //README: // - This callback will fire either from the ThreadPool (async ops) or inline form the ctor of the future (non-async ops, or ops that where served from some cache). 
@@ -86,63 +81,72 @@ public override void OnFired() try { - T result = default(T); - FdbError code; - Exception error = null; - try + handle = Interlocked.Exchange(ref m_handle, IntPtr.Zero); + if (handle == IntPtr.Zero) return; // already disposed? + + Debug.WriteLine("FutureSingle.{0}<{1}>.OnReady(0x{2})", this.Label, typeof(T).Name, handle.ToString("X8")); + + if (this.Task.IsCompleted) + { // task has already been handled by someone else + return; + } + + var result = default(T); + var error = default(Exception); + + var code = FdbNative.FutureGetError(handle); + if (code == FdbError.Success) { - if (this.Task.IsCompleted) - { // task has already been handled by someone else - return; + try + { + result = m_resultSelector(handle, this.Task.AsyncState); } + catch (AccessViolationException e) + { // trouble in paradise! + + Debug.WriteLine("EPIC FAIL: " + e.ToString()); + + // => THIS IS VERY BAD! We have no choice but to terminate the process immediately, because any new call to any method to the binding may end up freezing the whole process (best case) or sending corrupted data to the cluster (worst case) + if (Debugger.IsAttached) Debugger.Break(); - code = FdbNative.FutureGetError(handle); - if (code == FdbError.Success) + Environment.FailFast("FIXME: FDB done goofed!", e); + } + catch (Exception e) { - try - { - result = m_resultSelector(handle, m_state); - } - catch (AccessViolationException e) - { // trouble in paradise! - - Debug.WriteLine("EPIC FAIL: " + e.ToString()); - - // => THIS IS VERY BAD! 
We have no choice but to terminate the process immediately, because any new call to any method to the binding may end up freezing the whole process (best case) or sending corrupted data to the cluster (worst case) - if (Debugger.IsAttached) Debugger.Break(); - - Environment.FailFast("FIXME: FDB done goofed!", e); - } - catch (Exception e) - { - Debug.WriteLine("FAIL: " + e.ToString()); - code = FdbError.InternalError; - error = e; - } + Debug.WriteLine("FAIL: " + e.ToString()); + code = FdbError.InternalError; + error = e; } } - finally - { - FdbNative.FutureDestroy(handle); - } + + // since continuations may fire inline, make sure to release all the memory used by this handle first + FdbFutureContext.DestroyHandle(ref handle); if (code == FdbError.Success) { - TrySetResult(result); - } - else if (code == FdbError.OperationCancelled || code == FdbError.TransactionCancelled) - { - TrySetCanceled(); + PublishResult(result); } else { - TrySetException(error ?? Fdb.MapToException(code)); + PublishError(error, code); } } catch (Exception e) { // we must not blow up the TP or the parent, so make sure to propagate all exceptions to the task TrySetException(e); } + finally + { + if (handle != IntPtr.Zero) FdbFutureContext.DestroyHandle(ref handle); + GC.KeepAlive(this); + } + } + + protected override void OnCancel() + { + IntPtr handle = Volatile.Read(ref m_handle); + //TODO: we probably need locking to prevent concurrent destroy and cancel calls + if (handle != IntPtr.Zero) FdbNative.FutureCancel(handle); } } diff --git a/FoundationDB.Client/Native/IFdbFuture.cs b/FoundationDB.Client/Native/IFdbFuture.cs index 317037938..7ec3fc921 100644 --- a/FoundationDB.Client/Native/IFdbFuture.cs +++ b/FoundationDB.Client/Native/IFdbFuture.cs @@ -41,13 +41,16 @@ internal interface IFdbFuture /// Label of the future (usually the name of the operation) string Label { get; } + /// Cancel the future, if it hasen't completed yet + void Cancel(); + /// Test if this was the last pending 
handle for this future, or not /// Handle that completed - /// True if this was the last handle and OnFired() can be called, or False if more handles need to fire first. + /// True if this was the last handle and can be called, or False if more handles need to fire first. bool Visit(IntPtr handle); /// Called when all handles tracked by this future have fired - void OnFired(); + void OnReady(); } } From 302d8e95c77f96242da13d6976421c33bbef4085 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Fri, 30 Jan 2015 17:05:30 +0100 Subject: [PATCH 41/63] Fixed merge (new api) --- FoundationDB.Tests/TransactionFacts.cs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/FoundationDB.Tests/TransactionFacts.cs b/FoundationDB.Tests/TransactionFacts.cs index 1fd7289c0..d88acc78d 100644 --- a/FoundationDB.Tests/TransactionFacts.cs +++ b/FoundationDB.Tests/TransactionFacts.cs @@ -1931,8 +1931,7 @@ public async Task Test_BadPractice_Future_Fuzzer() using (var db = await OpenTestDatabaseAsync()) { - var location = db.Partition("Fuzzer"); - + var location = db.Partition.ByKey("Fuzzer"); var rnd = new Random(); int seed = rnd.Next(); @@ -1943,7 +1942,7 @@ await db.WriteAsync((tr) => { for (int i = 0; i < R; i++) { - tr.Set(location.Pack(i), Slice.FromInt32(i)); + tr.Set(location.Tuples.EncodeKey(i), Slice.FromInt32(i)); } }, this.Cancellation); @@ -2006,7 +2005,7 @@ await db.WriteAsync((tr) => int x = rnd.Next(R); try { - var res = await tr.GetAsync(location.Pack(x)); + var res = await tr.GetAsync(location.Tuples.EncodeKey(x)); } catch (FdbException) { @@ -2024,7 +2023,7 @@ await db.WriteAsync((tr) => var tr = m_alive[p]; int x = rnd.Next(R); - var t = tr.GetAsync(location.Pack(x)).ContinueWith((_) => Console.Write('!'), TaskContinuationOptions.NotOnRanToCompletion); + var t = tr.GetAsync(location.Tuples.EncodeKey(x)).ContinueWith((_) => Console.Write('!'), TaskContinuationOptions.NotOnRanToCompletion); // => t is not stored break; } From 
481a3ecd2fc9298aaca22bffeef8af92946d2761 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Mon, 2 Feb 2015 17:35:41 +0100 Subject: [PATCH 42/63] Tuples: preparing for switch bewteen FdbTuple.Unpack() and FdbTuple.UnpackOrDefault() - FdbTuple.UnpackOrDefault() has the current behavior (return null if Slice.Nil) - FdbTuple.Unpack() is not changed yet (coming soon) --- FoundationDB.Client/Layers/Tuples/FdbTuple.cs | 16 +++++- .../Layers/Tuples/FdbTupleExtensions.cs | 21 ++++++- .../Layers/Tuples/FdbTuplePackers.cs | 13 ++++- .../Subspaces/FdbSubspaceTuples.cs | 55 +++++++++++++++++-- .../Messaging/WorkerPoolTest.cs | 2 +- 5 files changed, 96 insertions(+), 11 deletions(-) diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs index 5a8b2422b..17370f79a 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs @@ -765,8 +765,9 @@ public static Slice[] EncodePrefixedKeys([NotNull] IFdbTuple prefix, [NotNull /// Unpack a tuple from a serialied key blob /// Binary key containing a previously packed tuple - /// Unpacked tuple, or null if the key is Slice.Nil - [CanBeNull] + /// Unpacked tuple, or the empty tuple if the key is + /// If is equal to + [CanBeNull] //REVIEW: => NotNull! public static IFdbTuple Unpack(Slice packedKey) { //REVIEW: the fact that Unpack(..) can return null (for Slice.Empty) creates a lot of "possible nullref" noise on FdbTuple.Unpack(someKey) when the key cannot possibly Slice.Nil (ex: GetKey, GetRange, ...) 
@@ -777,6 +778,17 @@ public static IFdbTuple Unpack(Slice packedKey) return FdbTuplePackers.Unpack(packedKey, false); } + /// Unpack a tuple from a binary representation + /// Binary key containing a previously packed tuple, or Slice.Nil + /// Unpacked tuple, the empty tuple if is equal to , or null if the key is + [CanBeNull] + public static IFdbTuple UnpackOrDefault(Slice packedKey) + { + if (packedKey.IsNull) return null; + if (packedKey.Count == 0) return FdbTuple.Empty; + return FdbTuplePackers.Unpack(packedKey, false); + } + /// Unpack a tuple from a serialized key, after removing a required prefix /// Packed key /// Expected prefix of the key (that is not part of the tuple) diff --git a/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs index 224079826..882b196fe 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs @@ -177,9 +177,28 @@ public static FdbMemoizedTuple Memoize(this IFdbTuple tuple) /// Unpack a tuple from this slice /// /// Unpacked tuple if the slice contains data, FdbTuple.Empty if the slice is empty, or null if the slice is Slice.Nil - [CanBeNull] + [CanBeNull] //REVIEW: => NotNull! public static IFdbTuple ToTuple(this Slice slice) { + //note: this method is here to allow a fluent API with method chaining, like "something.ToFoundationDbKey().ToTuple().With((int x, int y) => .....)" + + //REVIEW: same as FdbTuple.Unpack(): we need to throw if slice is Nil (caller should use ToTupleOrDefault() in this case + if (slice.IsNullOrEmpty) + { + return slice.HasValue ? 
FdbTuple.Empty : null; + } + + return FdbTuple.Unpack(slice); + } + + /// Unpack a tuple from this slice + /// + /// Unpacked tuple if the slice contains data, FdbTuple.Empty if the slice is empty, or null if the slice is Slice.Nil + [CanBeNull] + public static IFdbTuple ToTupleOrDefault(this Slice slice) + { + //note: this method is here to allow a fluent API with method chaining, like "something.ToFoundationDbKey().ToTuple().With((int x, int y) => .....)" + if (slice.IsNullOrEmpty) { return slice.HasValue ? FdbTuple.Empty : null; diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuplePackers.cs b/FoundationDB.Client/Layers/Tuples/FdbTuplePackers.cs index ae2d8f2cd..ab944cbf0 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuplePackers.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuplePackers.cs @@ -522,6 +522,7 @@ public static void SerializeFdbKeyTo(ref TupleWriter writer, IFdbKey key) private static readonly Dictionary s_sliceUnpackers = InitializeDefaultUnpackers(); + [NotNull] private static Dictionary InitializeDefaultUnpackers() { var map = new Dictionary(); @@ -554,6 +555,7 @@ private static Dictionary InitializeDefaultUnpackers() /// Returns a lambda that will be able to serialize values of type /// Type of values to serialize /// Reusable action that knows how to serialize values of type into binary buffers, or an exception if the type is not supported + [NotNull] internal static Func GetDeserializer(bool required) { Type type = typeof(T); @@ -588,7 +590,7 @@ internal static bool IsNilSegment(Slice slice) return slice.IsNullOrEmpty || slice[0] == FdbTupleTypes.Nil; } - private static Delegate MakeNullableDeserializer(Type nullableType, Type type, Delegate decoder) + private static Delegate MakeNullableDeserializer([NotNull] Type nullableType, [NotNull] Type type, [NotNull] Delegate decoder) { Contract.Requires(nullableType != null && type != null && decoder != null); // We have a Decoder of T, but we have to transform it into a Decoder for Nullable, 
which returns null if the slice is "nil", or falls back to the underlying decoder if the slice contains something @@ -610,6 +612,7 @@ private static Delegate MakeNullableDeserializer(Type nullableType, Type type, D /// Slice that contains a single packed element /// Decoded element, in the type that is the best fit. /// You should avoid working with untyped values as much as possible! Blindly casting the returned object may be problematic because this method may need to return very large intergers as Int64 or even UInt64. + [CanBeNull] public static object DeserializeBoxed(Slice slice) { if (slice.IsNullOrEmpty) return null; @@ -667,7 +670,7 @@ public static T DeserializeFormattable(Slice slice) /// Slice that contains a single packed element /// Lambda that will be called to construct a new instance of values of type /// Decoded value of type - public static T DeserializeFormattable(Slice slice, Func factory) + public static T DeserializeFormattable(Slice slice, [NotNull] Func factory) where T : ITupleFormattable { var tuple = FdbTupleParser.ParseTuple(slice); @@ -708,12 +711,14 @@ public static Slice DeserializeSlice(Slice slice) } /// Deserialize a tuple segment into a byte array + [CanBeNull] //REVIEW: because of Slice.GetBytes() public static byte[] DeserializeBytes(Slice slice) { return DeserializeSlice(slice).GetBytes(); } /// Deserialize a tuple segment into a tuple + [CanBeNull] public static IFdbTuple DeserializeTuple(Slice slice) { if (slice.IsNullOrEmpty) return null; @@ -960,7 +965,7 @@ public static DateTime DeserializeDateTime(Slice slice) { // Number of days since Epoch const long UNIX_EPOCH_TICKS = 621355968000000000L; //note: we can't user TimeSpan.FromDays(...) because it rounds to the nearest millisecond! 
- long ticks = UNIX_EPOCH_TICKS + (long)(FdbTupleParser.ParseDouble(slice) * TimeSpan.TicksPerDay); + long ticks = UNIX_EPOCH_TICKS + (long)(FdbTupleParser.ParseDouble(slice) * TimeSpan.TicksPerDay); return new DateTime(ticks, DateTimeKind.Utc); } } @@ -1012,6 +1017,7 @@ public static TimeSpan DeserializeTimeSpan(Slice slice) /// Deserialize a tuple segment into a Unicode string /// Slice that contains a single packed element + [CanBeNull] public static string DeserializeString(Slice slice) { if (slice.IsNullOrEmpty) return null; @@ -1148,6 +1154,7 @@ public static Uuid64 DeserializeUuid64(Slice slice) /// Deserialize a tuple segment into Guid /// Slice that contains a single packed element + [CanBeNull] public static System.Net.IPAddress DeserializeIPAddress(Slice slice) { if (slice.IsNullOrEmpty) return null; diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs index 48d7093af..b09308438 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs @@ -157,8 +157,24 @@ public Slice[] Pack([NotNull] params ITupleFormattable[] items) /// new Subspace([FE]).Unpack([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => ("hello", 1,) /// If is equal to the subspace prefix, then an empty tuple is returned. /// If the unpacked tuple is not contained in this subspace - [CanBeNull] + [CanBeNull] //REVIEW: [NotNull] ! public IFdbTuple Unpack(Slice key) + { + // We special case 'Slice.Nil' because it is returned by GetAsync(..) when the key does not exist + // This is to simplifiy decoding logic where the caller could do "var foo = FdbTuple.Unpack(await tr.GetAsync(...))" and then only have to test "if (foo != null)" + if (key.IsNull) return null; //REVIEW: throw! 
(see FdbTuple.Unpack) + + return FdbTuple.Unpack(m_subspace.ExtractKey(key, boundCheck: true)); + } + + /// Unpack a key into a tuple, with the subspace prefix removed + /// Packed version of a key that should fit inside this subspace. + /// Unpacked tuple that is relative to the current subspace, or null if the key is equal to Slice.Nil + /// new Subspace([FE]).Unpack([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => ("hello", 1,) + /// If is equal to the subspace prefix, then an empty tuple is returned. + /// If the unpacked tuple is not contained in this subspace + [CanBeNull] //REVIEW: [NotNull] ! + public IFdbTuple UnpackOrDefault(Slice key) { // We special case 'Slice.Nil' because it is returned by GetAsync(..) when the key does not exist // This is to simplifiy decoding logic where the caller could do "var foo = FdbTuple.Unpack(await tr.GetAsync(...))" and then only have to test "if (foo != null)" @@ -170,7 +186,7 @@ public IFdbTuple Unpack(Slice key) /// Unpack an sequence of keys into tuples, with the subspace prefix removed /// Packed version of keys inside this subspace /// Unpacked tuples that are relative to the current subspace - [NotNull] + [NotNull, ItemCanBeNull] //REVIEW: ItemNotNull! public IFdbTuple[] Unpack([NotNull] IEnumerable keys) { // return an array with the keys minus the subspace's prefix @@ -181,20 +197,51 @@ public IFdbTuple[] Unpack([NotNull] IEnumerable keys) var tuples = new IFdbTuple[extracted.Length]; for(int i = 0; i < extracted.Length; i++) { + //REVIEW: throw is null! 
if (extracted[i].HasValue) tuples[i] = new FdbPrefixedTuple(prefix, FdbTuple.Unpack(extracted[i])); } return tuples; } - /// Unpack an array of keys into tuples, with the subspace prefix removed + /// Unpack an sequence of keys into tuples, with the subspace prefix removed /// Packed version of keys inside this subspace /// Unpacked tuples that are relative to the current subspace - [NotNull] + [NotNull, ItemCanBeNull] + public IFdbTuple[] UnpackOrDefault([NotNull] IEnumerable keys) + { + // return an array with the keys minus the subspace's prefix + var extracted = m_subspace.ExtractKeys(keys, boundCheck: true); + + // unpack everything + var prefix = m_subspace.Key; + var tuples = new IFdbTuple[extracted.Length]; + for (int i = 0; i < extracted.Length; i++) + { + if (extracted[i].HasValue) tuples[i] = new FdbPrefixedTuple(prefix, FdbTuple.UnpackOrDefault(extracted[i])); + } + return tuples; + } + + /// Unpack an array of keys into tuples, with the subspace prefix removed + /// Packed version of keys inside this subspace + /// Unpacked tuples that are relative to the current subspace. + [NotNull, ItemCanBeNull] //REVIEW: ItemNotNull public IFdbTuple[] Unpack([NotNull] params Slice[] keys) { + //note: this overload allows writing ".Unpack(foo, bar, baz)" instead of ".Unpack(new [] { foo, bar, baz })" return Unpack((IEnumerable)keys); } + /// Unpack an array of keys into tuples, with the subspace prefix removed + /// Packed version of keys inside this subspace + /// Unpacked tuples that are relative to the current subspace. 
If a key is equal to then the corresponding tuple will be null + [NotNull, ItemCanBeNull] + public IFdbTuple[] UnpackOrDefault([NotNull] params Slice[] keys) + { + //note: this overload allows writing ".Unpack(foo, bar, baz)" instead of ".UnpackOrDefault(new [] { foo, bar, baz })" + return UnpackOrDefault((IEnumerable)keys); + } + #endregion #region ToRange: Tuple => Range diff --git a/FoundationDB.Layers.Experimental/Messaging/WorkerPoolTest.cs b/FoundationDB.Layers.Experimental/Messaging/WorkerPoolTest.cs index 877135106..f7d5bdfc0 100644 --- a/FoundationDB.Layers.Experimental/Messaging/WorkerPoolTest.cs +++ b/FoundationDB.Layers.Experimental/Messaging/WorkerPoolTest.cs @@ -142,7 +142,7 @@ await tr.Snapshot .GetRange(FdbKeyRange.StartsWith(location.Key)) .ForEachAsync((kvp) => { - Console.WriteLine(" - " + FdbTuple.Unpack(location.Keys.Extract(kvp.Key)) + " = " + kvp.Value.ToAsciiOrHexaString()); + Console.WriteLine(" - " + location.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }).ConfigureAwait(false); } Console.WriteLine(""); From 7d5597da290aeb239d84c98e180f276e7ef0e667 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Mon, 2 Feb 2015 17:58:54 +0100 Subject: [PATCH 43/63] Tuples: chanded FdbTuple.Unpack() to throw on Slice.Nil (breaking!) 
- Unpack() is not marked [NotNull], which should help remove a lot of false-positive with code analysis - UnpackOrDefault() has the old behavior and is marked [CanBeNull] (since you would test for null in this case anyway) - updated all alias methods with a similar change --- FoundationDB.Client/Layers/Tuples/FdbTuple.cs | 10 +-- .../Layers/Tuples/FdbTupleExtensions.cs | 9 +-- .../Subspaces/FdbSubspaceTuples.cs | 77 ++++++++++++------- 3 files changed, 55 insertions(+), 41 deletions(-) diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs index 17370f79a..2d62ac72e 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs @@ -766,14 +766,12 @@ public static Slice[] EncodePrefixedKeys([NotNull] IFdbTuple prefix, [NotNull /// Unpack a tuple from a serialied key blob /// Binary key containing a previously packed tuple /// Unpacked tuple, or the empty tuple if the key is - /// If is equal to - [CanBeNull] //REVIEW: => NotNull! + /// If is equal to + [NotNull] public static IFdbTuple Unpack(Slice packedKey) { - //REVIEW: the fact that Unpack(..) can return null (for Slice.Empty) creates a lot of "possible nullref" noise on FdbTuple.Unpack(someKey) when the key cannot possibly Slice.Nil (ex: GetKey, GetRange, ...) - // => either change it so that we return FdbTuple.Empty in both cases (Empty/Nil), OR throw and exception, OR have a different method UnpackOrDefault(...) if people really want to get null in some cases? - - if (packedKey.IsNullOrEmpty) return packedKey.HasValue ? 
FdbTuple.Empty : null; + if (packedKey.IsNull) throw new ArgumentNullException("packedKey"); + if (packedKey.Count == 0) return FdbTuple.Empty; return FdbTuplePackers.Unpack(packedKey, false); } diff --git a/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs index 882b196fe..672522ffc 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs @@ -177,17 +177,10 @@ public static FdbMemoizedTuple Memoize(this IFdbTuple tuple) /// Unpack a tuple from this slice /// /// Unpacked tuple if the slice contains data, FdbTuple.Empty if the slice is empty, or null if the slice is Slice.Nil - [CanBeNull] //REVIEW: => NotNull! + [NotNull] public static IFdbTuple ToTuple(this Slice slice) { //note: this method is here to allow a fluent API with method chaining, like "something.ToFoundationDbKey().ToTuple().With((int x, int y) => .....)" - - //REVIEW: same as FdbTuple.Unpack(): we need to throw if slice is Nil (caller should use ToTupleOrDefault() in this case - if (slice.IsNullOrEmpty) - { - return slice.HasValue ? FdbTuple.Empty : null; - } - return FdbTuple.Unpack(slice); } diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs index b09308438..49b11a31a 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs @@ -153,40 +153,38 @@ public Slice[] Pack([NotNull] params ITupleFormattable[] items) /// Unpack a key into a tuple, with the subspace prefix removed /// Packed version of a key that should fit inside this subspace. - /// Unpacked tuple that is relative to the current subspace, or null if the key is equal to Slice.Nil + /// Unpacked tuple that is relative to the current subspace, or the empty tuple if is equal to the prefix of this tuple. 
/// new Subspace([FE]).Unpack([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => ("hello", 1,) /// If is equal to the subspace prefix, then an empty tuple is returned. + /// If is /// If the unpacked tuple is not contained in this subspace - [CanBeNull] //REVIEW: [NotNull] ! + [NotNull] public IFdbTuple Unpack(Slice key) { - // We special case 'Slice.Nil' because it is returned by GetAsync(..) when the key does not exist - // This is to simplifiy decoding logic where the caller could do "var foo = FdbTuple.Unpack(await tr.GetAsync(...))" and then only have to test "if (foo != null)" - if (key.IsNull) return null; //REVIEW: throw! (see FdbTuple.Unpack) - + if (key.IsNull) throw new ArgumentNullException("key"); return FdbTuple.Unpack(m_subspace.ExtractKey(key, boundCheck: true)); } /// Unpack a key into a tuple, with the subspace prefix removed /// Packed version of a key that should fit inside this subspace. - /// Unpacked tuple that is relative to the current subspace, or null if the key is equal to Slice.Nil - /// new Subspace([FE]).Unpack([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => ("hello", 1,) + /// Unpacked tuple that is relative to the current subspace, the empty tuple if is equal to , or null if is equal to + /// new Subspace([FE]).UnpackOrDefault([FE 02 'H' 'e' 'l' 'l' 'o' 00 15 1]) => ("hello", 1,) /// If is equal to the subspace prefix, then an empty tuple is returned. /// If the unpacked tuple is not contained in this subspace - [CanBeNull] //REVIEW: [NotNull] ! + [CanBeNull] public IFdbTuple UnpackOrDefault(Slice key) { - // We special case 'Slice.Nil' because it is returned by GetAsync(..) when the key does not exist - // This is to simplifiy decoding logic where the caller could do "var foo = FdbTuple.Unpack(await tr.GetAsync(...))" and then only have to test "if (foo != null)" + // We special case 'Slice.Nil' because it is returned by GetAsync(..) when the key does not exist. 
+ // This simplifies the decoding logic where the caller could do "var foo = FdbTuple.UnpackOrDefault(await tr.GetAsync(...))" and then only have to test "if (foo != null)" if (key.IsNull) return null; - return FdbTuple.Unpack(m_subspace.ExtractKey(key, boundCheck: true)); + return FdbTuple.UnpackOrDefault(m_subspace.ExtractKey(key, boundCheck: true)); } /// Unpack an sequence of keys into tuples, with the subspace prefix removed /// Packed version of keys inside this subspace /// Unpacked tuples that are relative to the current subspace - [NotNull, ItemCanBeNull] //REVIEW: ItemNotNull! + [NotNull, ItemNotNull] public IFdbTuple[] Unpack([NotNull] IEnumerable keys) { // return an array with the keys minus the subspace's prefix @@ -197,8 +195,8 @@ public IFdbTuple[] Unpack([NotNull] IEnumerable keys) var tuples = new IFdbTuple[extracted.Length]; for(int i = 0; i < extracted.Length; i++) { - //REVIEW: throw is null! - if (extracted[i].HasValue) tuples[i] = new FdbPrefixedTuple(prefix, FdbTuple.Unpack(extracted[i])); + if (extracted[i].IsNull) throw new InvalidOperationException("The list of keys contains at least one element which is null."); + tuples[i] = new FdbPrefixedTuple(prefix, FdbTuple.Unpack(extracted[i])); } return tuples; } @@ -225,7 +223,7 @@ public IFdbTuple[] UnpackOrDefault([NotNull] IEnumerable keys) /// Unpack an array of keys into tuples, with the subspace prefix removed /// Packed version of keys inside this subspace /// Unpacked tuples that are relative to the current subspace. 
- [NotNull, ItemCanBeNull] //REVIEW: ItemNotNull + [NotNull, ItemNotNull] public IFdbTuple[] Unpack([NotNull] params Slice[] keys) { //note: this overload allows writing ".Unpack(foo, bar, baz)" instead of ".Unpack(new [] { foo, bar, baz })" @@ -238,7 +236,7 @@ public IFdbTuple[] Unpack([NotNull] params Slice[] keys) [NotNull, ItemCanBeNull] public IFdbTuple[] UnpackOrDefault([NotNull] params Slice[] keys) { - //note: this overload allows writing ".Unpack(foo, bar, baz)" instead of ".UnpackOrDefault(new [] { foo, bar, baz })" + //note: this overload allows writing ".UnpackOrDefault(foo, bar, baz)" instead of ".UnpackOrDefault(new [] { foo, bar, baz })" return UnpackOrDefault((IEnumerable)keys); } @@ -447,19 +445,23 @@ public Slice[] EncodeKeys([NotNull] TElement[] elements, [NotNul /// Unpack a key into a singleton tuple, and return the single element /// Expected type of the only element /// Packed version of a key that should fit inside this subspace - /// Converted value of the only element in the tuple. Throws an exception if the tuple is empty or contains more than one element + /// Tuple of size 1, with the converted value of the only element in the tuple. Throws an exception if the tuple is empty or contains more than one element /// new Subspace([FE]).UnpackSingle<int>([FE 02 'H' 'e' 'l' 'l' 'o' 00]) => (string) "Hello" public T DecodeKey(Slice key) { return FdbTuple.DecodeKey(m_subspace.ExtractKey(key, boundCheck: true)); } - + /// Unpack a key into a pair of elements + /// Type of the first element + /// Type of the second element + /// Packed version of a key that should fit inside this subspace, and is composed of two elements. + /// Tuple of size 2, with the converted values of the tuple. 
Throws an exception if the tuple is empty or contains more than two elements public FdbTuple DecodeKey(Slice key) { + if (key.IsNullOrEmpty) throw new FormatException("The specified key is empty"); var tuple = Unpack(key); - if (tuple == null) throw new FormatException("The specified key does not contain any items"); - if (tuple.Count != 2) throw new FormatException("The specified key is not a tuple with 2 items"); + if (tuple.Count != 2) throw new FormatException("The specified key is not a tuple with two items"); return FdbTuple.Create( tuple.Get(0), @@ -467,11 +469,17 @@ public FdbTuple DecodeKey(Slice key) ); } + /// Unpack a key into a triplet of elements + /// Type of the first element + /// Type of the second element + /// Type of the third element + /// Packed version of a key that should fit inside this subspace, and is composed of three elements. + /// Tuple of size 3, with the converted values of the tuple. Throws an exception if the tuple is empty or contains more than three elements public FdbTuple DecodeKey(Slice key) { + if (key.IsNullOrEmpty) throw new FormatException("The specified key is empty"); var tuple = Unpack(key); - if (tuple == null) throw new FormatException("The specified key does not contain any items"); - if (tuple.Count != 3) throw new FormatException("The specified key is not a tuple with 3 items"); + if (tuple.Count != 3) throw new FormatException("The specified key is not a tuple with three items"); return FdbTuple.Create( tuple.Get(0), @@ -480,11 +488,18 @@ public FdbTuple DecodeKey(Slice key) ); } + /// Unpack a key into a quartet of elements + /// Type of the first element + /// Type of the second element + /// Type of the third element + /// Type of the fourth element + /// Packed version of a key that should fit inside this subspace, and is composed of four elements. + /// Tuple of size 3, with the converted values of the tuple. 
Throws an exception if the tuple is empty or contains more than four elements public FdbTuple DecodeKey(Slice key) { + if (key.IsNullOrEmpty) throw new FormatException("The specified key is empty"); var tuple = Unpack(key); - if (tuple == null) throw new FormatException("The specified key does not contain any items"); - if (tuple.Count != 4) throw new FormatException("The specified key is not a tuple with 4 items"); + if (tuple.Count != 4) throw new FormatException("The specified key is not a tuple with four items"); return FdbTuple.Create( tuple.Get(0), @@ -494,11 +509,19 @@ public FdbTuple DecodeKey(Slice key) ); } + /// Unpack a key into a quintet of elements + /// Type of the first element + /// Type of the second element + /// Type of the third element + /// Type of the fourth element + /// Type of the fifth element + /// Packed version of a key that should fit inside this subspace, and is composed of five elements. + /// Tuple of size 3, with the converted values of the tuple. Throws an exception if the tuple is empty or contains more than five elements public FdbTuple DecodeKey(Slice key) { + if (key.IsNullOrEmpty) throw new FormatException("The specified key is empty"); var tuple = Unpack(key); - if (tuple == null) throw new FormatException("The specified key does not contain any items"); - if (tuple.Count != 5) throw new FormatException("The specified key is not a tuple with 5 items"); + if (tuple.Count != 5) throw new FormatException("The specified key is not a tuple with five items"); return FdbTuple.Create( tuple.Get(0), From d33ae28b3b1f073c35e61711f072f6dadda28785 Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Mon, 2 Feb 2015 18:19:47 +0100 Subject: [PATCH 44/63] Tuples: cleaned up unused methods and fixed xml comments --- FoundationDB.Client/Layers/Tuples/FdbTuple.cs | 138 +----------------- .../Layers/Tuples/FdbTupleExtensions.cs | 60 ++++++-- .../Subspaces/FdbSubspaceTuples.cs | 105 +++++++------ 3 files changed, 112 insertions(+), 191 
deletions(-) diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs index 2d62ac72e..6879a25c9 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs @@ -198,7 +198,7 @@ public static FdbTuple Create(T1 item1, /// Create a new N-tuple, from N items /// Items to wrap in a tuple - /// If you already have an array of items, you should call instead. Mutating the array, would also mutate the tuple! + /// If you already have an array of items, you should call instead. Mutating the array, would also mutate the tuple! [NotNull] public static IFdbTuple Create([NotNull] params object[] items) { @@ -239,47 +239,6 @@ public static IFdbTuple Wrap([NotNull] object[] items, int offset, int count) return new FdbListTuple(items, offset, count); } - ///// Create a new N-tuple, from an array of untyped items - //[NotNull] - //public static IFdbTuple CreateRange(object[] items) - //{ - // //REVIEW: this is idential to Create(object[]) and Wrap(object[]) !! - - // if (items == null) throw new ArgumentNullException("items"); - - // return CreateRange(items, 0, items.Length); - //} - - ///// Create a new N-tuple, from a section of an array of untyped items - //[NotNull] - //public static IFdbTuple CreateRange(object[] items, int offset, int count) - //{ - // //REVIEW: this is idential to Wrap(object[]) !! 
- - // if (items == null) throw new ArgumentNullException("items"); - // if (offset < 0) throw new ArgumentOutOfRangeException("offset", "Offset cannot be less than zero"); - // if (count < 0) throw new ArgumentOutOfRangeException("count", "Count cannot be less than zero"); - // if (offset + count > items.Length) throw new ArgumentOutOfRangeException("count", "Source array is too small"); - - // if (count == 0) return FdbTuple.Empty; - - // // copy the items - // var tmp = new object[count]; - // Array.Copy(items, offset, tmp, 0, count); - // return new FdbListTuple(tmp, 0, count); - //} - - ///// Create a new N-tuple from a sequence of items - //[NotNull] - //public static IFdbTuple CreateRange(IEnumerable items) - //{ - // if (items == null) throw new ArgumentNullException("items"); - - // // may already be a tuple (because it implements IE) - // var tuple = items as IFdbTuple ?? new FdbListTuple(items); - // return tuple; - //} - /// Create a new tuple, from an array of typed items /// Array of items /// Tuple with the same size as and where all the items are of type @@ -465,7 +424,7 @@ public static Slice[] Pack(Slice prefix, [NotNull] IEnumerable tuples public static Slice[] Pack(Slice prefix, [NotNull] TElement[] elements, Func transform) { if (elements == null) throw new ArgumentNullException("elements"); - if (transform == null) throw new ArgumentNullException("convert"); + if (transform == null) throw new ArgumentNullException("transform"); var next = new List(elements.Length); var writer = new TupleWriter(); @@ -494,7 +453,7 @@ public static Slice[] Pack(Slice prefix, [NotNull] TElement[] elements public static Slice[] Pack(Slice prefix, [NotNull] IEnumerable elements, Func transform) { if (elements == null) throw new ArgumentNullException("elements"); - if (transform == null) throw new ArgumentNullException("convert"); + if (transform == null) throw new ArgumentNullException("transform"); // use optimized version for arrays var array = elements as 
TElement[]; @@ -787,27 +746,6 @@ public static IFdbTuple UnpackOrDefault(Slice packedKey) return FdbTuplePackers.Unpack(packedKey, false); } - /// Unpack a tuple from a serialized key, after removing a required prefix - /// Packed key - /// Expected prefix of the key (that is not part of the tuple) - /// Unpacked tuple (minus the prefix) or an exception if the key is outside the prefix - /// If prefix is null - /// If the unpacked key is outside the specified prefix - [NotNull] - public static IFdbTuple Unpack(Slice packedKey, Slice prefix) - { - // ensure that the key starts with the prefix - if (!packedKey.StartsWith(prefix)) -#if DEBUG - throw new ArgumentOutOfRangeException("packedKey", String.Format("The specifed packed tuple does not start with the expected prefix '{0}'", prefix.ToString())); -#else - throw new ArgumentOutOfRangeException("packedKey", "The specifed packed tuple does not start with the expected prefix"); -#endif - - // unpack the key, minus the prefix - return FdbTuplePackers.Unpack(packedKey.Substring(prefix.Count), false); - } - /// Unpack a tuple and only return its first element /// Type of the first value in the decoded tuple /// Slice that should be entirely parsable as a tuple @@ -822,28 +760,6 @@ public static T DecodeFirst(Slice packedKey) return FdbTuplePacker.Deserialize(slice); } - /// Unpack a tuple and only return its first element, after removing from the start of the buffer - /// Type of the first value in the decoded tuple - /// Slice composed of followed by a packed tuple - /// Expected prefix of the key (that is not part of the tuple) - /// Decoded value of the first item in the tuple - public static T DecodePrefixedFirst(Slice packedKey, Slice prefix) - { - // ensure that the key starts with the prefix - if (!packedKey.StartsWith(prefix)) - { -#if DEBUG - //REVIEW: for now only in debug mode, because leaking keys in exceptions mesasges may not be a good idea? 
- throw new ArgumentOutOfRangeException("packedKey", String.Format("The specifed packed tuple ({0}) does not start with the expected prefix ({1})", FdbKey.Dump(packedKey), FdbKey.Dump(prefix))); -#else - throw new ArgumentOutOfRangeException("packedKey", "The specifed packed tuple does not start with the expected prefix"); -#endif - } - - // unpack the key, minus the prefix - return DecodeFirst(packedKey.Substring(prefix.Count)); - } - /// Unpack a tuple and only return its last element /// Type of the last value in the decoded tuple /// Slice that should be entirely parsable as a tuple @@ -858,28 +774,6 @@ public static T DecodeLast(Slice packedKey) return FdbTuplePacker.Deserialize(slice); } - /// Unpack a tuple and only return its last element, after removing from the start of the buffer - /// Type of the last value in the decoded tuple - /// Slice composed of followed by a packed tuple - /// Expected prefix of the key (that is not part of the tuple) - /// Decoded value of the last item in the tuple - public static T DecodePrefixedLast(Slice packedKey, Slice prefix) - { - // ensure that the key starts with the prefix - if (!packedKey.StartsWith(prefix)) - { -#if DEBUG - //REVIEW: for now only in debug mode, because leaking keys in exceptions mesasges may not be a good idea? 
- throw new ArgumentOutOfRangeException("packedKey", String.Format("The specifed packed tuple ({0}) does not start with the expected prefix ({1})", FdbKey.Dump(packedKey), FdbKey.Dump(prefix))); -#else - throw new ArgumentOutOfRangeException("packedKey", "The specifed packed tuple does not start with the expected prefix"); -#endif - } - - // unpack the key, minus the prefix - return DecodeLast(packedKey.Substring(prefix.Count)); - } - /// Unpack the value of a singletion tuple /// Type of the single value in the decoded tuple /// Slice that should contain the packed representation of a tuple with a single element @@ -894,20 +788,6 @@ public static T DecodeKey(Slice packedKey) return FdbTuplePacker.Deserialize(slice); } - /// Unpack the value of a singleton tuple, after removing from the start of the buffer - /// Type of the single value in the decoded tuple - /// Slice composed of followed by a packed singleton tuple - /// Expected prefix of the key (that is not part of the tuple) - /// Decoded value of the only item in the tuple. Throws an exception if the tuple is empty of has more than one element. 
- public static T DecodePrefixedKey(Slice packedKey, Slice prefix) - { - // ensure that the key starts with the prefix - if (!packedKey.StartsWith(prefix)) throw new ArgumentOutOfRangeException("packedKey", "The specifed packed tuple does not start with the expected prefix"); - - // unpack the key, minus the prefix - return DecodeKey(packedKey.Substring(prefix.Count)); - } - /// Unpack the next item in the tuple, and advance the cursor /// Type of the next value in the tuple /// Reader positionned at the start of the next item to read @@ -1156,7 +1036,7 @@ internal static string ToString(object[] items, int offset, int count) if (items == null) return String.Empty; Contract.Requires(offset >= 0 && count >= 0); - if (count == 0) + if (count <= 0) { // empty tuple: "()" return TokenTupleEmpty; } @@ -1168,14 +1048,12 @@ internal static string ToString(object[] items, int offset, int count) { // singleton tuple : "(X,)" return sb.Append(TokenTupleSingleClose).ToString(); } - else + + while (--count > 0) { - while (--count > 0) - { - sb.Append(TokenTupleSep /* ", " */).Append(Stringify(items[offset++])); - } - return sb.Append(TokenTupleClose /* ",)" */).ToString(); + sb.Append(TokenTupleSep /* ", " */).Append(Stringify(items[offset++])); } + return sb.Append(TokenTupleClose /* ",)" */).ToString(); } /// Converts a sequence of object into a displaying string, for loggin/debugging purpose diff --git a/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs index 672522ffc..0ff783d9c 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleExtensions.cs @@ -345,7 +345,9 @@ internal static void ThrowInvalidTupleSize(IFdbTuple tuple, int expected, int te } /// Execute a lambda Action with the content of this tuple + /// Tuple of size 1 /// Action that will be passed the content of this tuple as parameters + /// If has not the expected size public static void 
With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) { OfSize(tuple, 1); @@ -353,7 +355,9 @@ public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action } /// Execute a lambda Action with the content of this tuple + /// Tuple of size 2 /// Action that will be passed the content of this tuple as parameters + /// If has not the expected size public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) { OfSize(tuple, 2); @@ -361,7 +365,9 @@ public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action } /// Execute a lambda Action with the content of this tuple + /// Tuple of size 3 /// Action that will be passed the content of this tuple as parameters + /// If has not the expected size public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) { OfSize(tuple, 3); @@ -369,7 +375,9 @@ public static void With([NotNull] this IFdbTuple tuple, [NotNull] Ac } /// Execute a lambda Action with the content of this tuple + /// Tuple of size 4 /// Action that will be passed the content of this tuple as parameters + /// If has not the expected size public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) { OfSize(tuple, 4); @@ -377,7 +385,9 @@ public static void With([NotNull] this IFdbTuple tuple, [NotNull } /// Execute a lambda Action with the content of this tuple + /// Tuple of size 5 /// Action that will be passed the content of this tuple as parameters + /// If has not the expected size public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) { OfSize(tuple, 5); @@ -385,7 +395,9 @@ public static void With([NotNull] this IFdbTuple tuple, [Not } /// Execute a lambda Action with the content of this tuple + /// Tuple of size 6 /// Action that will be passed the content of this tuple as parameters + /// If has not the expected size public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) { OfSize(tuple, 6); @@ -393,7 +405,9 @@ public 
static void With([NotNull] this IFdbTuple tuple, } /// Execute a lambda Action with the content of this tuple + /// Tuple of size 7 /// Action that will be passed the content of this tuple as parameters + /// If has not the expected size public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) { OfSize(tuple, 7); @@ -401,7 +415,9 @@ public static void With([NotNull] this IFdbTuple tup } /// Execute a lambda Action with the content of this tuple + /// Tuple of size 8 /// Action that will be passed the content of this tuple as parameters + /// If has not the expected size public static void With([NotNull] this IFdbTuple tuple, [NotNull] Action lambda) { OfSize(tuple, 8); @@ -409,72 +425,88 @@ public static void With([NotNull] this IFdbTuple } /// Execute a lambda Function with the content of this tuple + /// Tuple of size 1 /// Action that will be passed the content of this tuple as parameters /// Result of calling with the items of this tuple - public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + /// If has not the expected size + public static TResult With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) { OfSize(tuple, 1); return lambda(tuple.Get(0)); } /// Execute a lambda Function with the content of this tuple - /// Action that will be passed the content of this tuple as parameters + /// Tuple of size 2 + /// Function that will be passed the content of this tuple as parameters /// Result of calling with the items of this tuple - public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + /// If has not the expected size + public static TResult With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) { OfSize(tuple, 2); return lambda(tuple.Get(0), tuple.Get(1)); } /// Execute a lambda Function with the content of this tuple + /// Tuple of size 3 /// Action that will be passed the content of this tuple as parameters /// Result of calling with the items of this tuple - public static R 
With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + /// If has not the expected size + public static TResult With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) { OfSize(tuple, 3); return lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2)); } /// Execute a lambda Function with the content of this tuple - /// Action that will be passed the content of this tuple as parameters + /// Tuple of size 4 + /// Function that will be passed the content of this tuple as parameters /// Result of calling with the items of this tuple - public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + /// If has not the expected size + public static TResult With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) { OfSize(tuple, 4); return lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3)); } /// Execute a lambda Function with the content of this tuple - /// Action that will be passed the content of this tuple as parameters + /// Tuple of size 5 + /// Function that will be passed the content of this tuple as parameters /// Result of calling with the items of this tuple - public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + /// If has not the expected size + public static TResult With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) { OfSize(tuple, 5); return lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3), tuple.Get(4)); } /// Execute a lambda Function with the content of this tuple - /// Action that will be passed the content of this tuple as parameters + /// Tuple of size 6 + /// Function that will be passed the content of this tuple as parameters /// Result of calling with the items of this tuple - public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + /// If has not the expected size + public static TResult With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) { OfSize(tuple, 6); return lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3), tuple.Get(4), 
tuple.Get(5)); } /// Execute a lambda Function with the content of this tuple - /// Action that will be passed the content of this tuple as parameters + /// Tuple of size 7 + /// Function that will be passed the content of this tuple as parameters /// Result of calling with the items of this tuple - public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + /// If has not the expected size + public static TResult With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) { OfSize(tuple, 7); return lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3), tuple.Get(4), tuple.Get(5), tuple.Get(6)); } /// Execute a lambda Function with the content of this tuple - /// Action that will be passed the content of this tuple as parameters + /// Tuple of size 8 + /// Function that will be passed the content of this tuple as parameters /// Result of calling with the items of this tuple - public static R With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) + /// If has not the expected size + public static TResult With([NotNull] this IFdbTuple tuple, [NotNull] Func lambda) { OfSize(tuple, 8); return lambda(tuple.Get(0), tuple.Get(1), tuple.Get(2), tuple.Get(3), tuple.Get(4), tuple.Get(5), tuple.Get(6), tuple.Get(7)); diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs index 49b11a31a..0f600c01d 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -31,7 +31,6 @@ namespace FoundationDB.Client using FoundationDB.Layers.Tuples; using JetBrains.Annotations; using System; - using System.Linq; using System.Collections.Generic; using FoundationDB.Client.Utils; using System.Diagnostics; @@ -61,7 +60,7 @@ public IFdbSubspace Subspace /// Return a key that is composed of the subspace prefix, and the packed representation of a tuple. /// Tuple to pack (can be null or empty) - /// Key which starts with the subspace prefix, followed by the packed representation of . This key can be parsed back to an equivalent tuple by calling . + /// Key which starts with the subspace prefix, followed by the packed representation of . This key can be parsed back to an equivalent tuple by calling . /// If is null or empty, then the prefix of the subspace is returned. public Slice this[[NotNull] IFdbTuple tuple] { @@ -79,7 +78,7 @@ public Slice this[[NotNull] ITupleFormattable item] /// Return a key that is composed of the subspace prefix, and the packed representation of a tuple. /// Tuple to pack (can be null or empty) - /// Key which starts with the subspace prefix, followed by the packed representation of . This key can be parsed back to an equivalent tuple by calling . + /// Key which starts with the subspace prefix, followed by the packed representation of . This key can be parsed back to an equivalent tuple by calling . /// If is null or empty, then the prefix of the subspace is returned. [DebuggerStepThrough] public Slice Pack([NotNull] IFdbTuple tuple) @@ -114,8 +113,8 @@ public Slice[] Pack([NotNull] params IFdbTuple[] tuples) /// Return a key that is composed of the subspace prefix, and the packed representation of a tuple. /// Tuple to pack (can be null or empty) - /// Key which starts with the subspace prefix, followed by the packed representation of . This key can be parsed back to an equivalent tuple by calling . 
- /// If is null or empty, then the prefix of the subspace is returned. + /// Key which starts with the subspace prefix, followed by the packed representation of . This key can be parsed back to an equivalent tuple by calling . + /// If is null or empty, then the prefix of the subspace is returned. [DebuggerStepThrough] public Slice Pack([NotNull] ITupleFormattable item) { @@ -278,7 +277,7 @@ public FdbKeyRange ToRange([NotNull] ITupleFormattable item) /// Item that will be appended at the end of the key /// Key that is equivalent to adding the packed singleton to the subspace's prefix /// tuple.Pack(x) is equivalent to tuple.Append(x).ToSlice() - /// The key produced can be decoded back into the original value by calling , or a tuple by calling + /// The key produced can be decoded back into the original value by calling , or a tuple by calling public Slice EncodeKey(T item) { return FdbTuple.EncodePrefixedKey(m_subspace.Key, item); @@ -287,11 +286,11 @@ public Slice EncodeKey(T item) /// Create a new key by adding two items to the current subspace /// Type of the first item /// Type of the second item - /// Item that will be in the next to last position - /// Item that will be in the last position + /// Item in the first position + /// Item in the second position /// Key that is equivalent to adding the packed pair (, ) to the subspace's prefix /// {subspace}.EncodeKey(x, y) is much faster way to do {subspace}.Key + FdbTuple.Create(x, y).ToSlice() - /// The key produced can be decoded back into a pair by calling either or + /// The key produced can be decoded back into a pair by calling either or public Slice EncodeKey(T1 item1, T2 item2) { return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2); @@ -301,12 +300,12 @@ public Slice EncodeKey(T1 item1, T2 item2) /// Type of the first item /// Type of the second item /// Type of the third item - /// Item that will be appended first - /// Item that will be appended second - /// Item that will be appended 
third + /// Item in the first position + /// Item in the second position + /// Item in the third position /// Key that is equivalent to adding the packed triplet (, , ) to the subspace's prefix /// {subspace}.EncodeKey(x, y, z) is much faster way to do {subspace}.Key + FdbTuple.Create(x, y, z).ToSlice() - /// The key produced can be decoded back into a triplet by calling either or + /// The key produced can be decoded back into a triplet by calling either or public Slice EncodeKey(T1 item1, T2 item2, T3 item3) { return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3); @@ -317,13 +316,13 @@ public Slice EncodeKey(T1 item1, T2 item2, T3 item3) /// Type of the second item /// Type of the third item /// Type of the fourth item - /// Item that will be appended first - /// Item that will be appended second - /// Item that will be appended third - /// Item that will be appended fourth + /// Item in the first position + /// Item in the second position + /// Item in the third position + /// Item in the fourth position /// Key that is equivalent to adding the packed tuple quad (, , , ) to the subspace's prefix /// {subspace}.EncodeKey(w, x, y, z) is much faster way to do {subspace}.Key + FdbTuple.Create(w, x, y, z).ToSlice() - /// The key produced can be decoded back into a quad by calling either or + /// The key produced can be decoded back into a quad by calling either or public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4) { return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3, item4); @@ -335,14 +334,14 @@ public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4) /// Type of the third item /// Type of the fourth item /// Type of the fifth item - /// Item that will be appended first - /// Item that will be appended second - /// Item that will be appended third - /// Item that will be appended fourth - /// Item that will be appended fifth - /// Key that is equivalent to adding the packed tuple quad (, , , ) to the subspace's 
prefix + /// Item in the first position + /// Item in the second position + /// Item in the third position + /// Item in the fourth position + /// Item in the fifth position + /// Key that is equivalent to adding the packed tuple (, , , , ) to the subspace's prefix /// {subspace}.EncodeKey(w, x, y, z) is much faster way to do {subspace}.Key + FdbTuple.Create(w, x, y, z).ToSlice() - /// The key produced can be decoded back into a tuple by calling either or + /// The key produced can be decoded back into a tuple by calling either or public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5) { return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3, item4, item5); @@ -354,14 +353,16 @@ public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item /// Type of the third item /// Type of the fourth item /// Type of the fifth item - /// Item that will be appended first - /// Item that will be appended second - /// Item that will be appended third - /// Item that will be appended fourth - /// Item that will be appended fifth - /// Key that is equivalent to adding the packed tuple quad (, , , ) to the subspace's prefix + /// Type of the sixth item + /// Item in the first position + /// Item in the second position + /// Item in the third position + /// Item in the fourth position + /// Item in the fifth position + /// Item in the sixth position + /// Key that is equivalent to adding the packed tuple (, , , , , ) to the subspace's prefix /// {subspace}.EncodeKey(w, x, y, z) is much faster way to do {subspace}.Key + FdbTuple.Create(w, x, y, z).ToSlice() - /// The key produced can be decoded back into a tuple by calling + /// The key produced can be decoded back into a tuple by calling public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6) { return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3, item4, item5, item6); @@ -373,14 +374,18 @@ public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 /// Type of 
the third item /// Type of the fourth item /// Type of the fifth item - /// Item that will be appended first - /// Item that will be appended second - /// Item that will be appended third - /// Item that will be appended fourth - /// Item that will be appended fifth - /// Key that is equivalent to adding the packed tuple quad (, , , ) to the subspace's prefix + /// Type of the sixth item + /// Type of the seventh item + /// Item in the first position + /// Item in the second position + /// Item in the third position + /// Item in the fourth position + /// Item in the fifth position + /// Item in the sixth position + /// Item in the seventh position + /// Key that is equivalent to adding the packed tuple (, , , , , , ) to the subspace's prefix /// {subspace}.EncodeKey(w, x, y, z) is much faster way to do {subspace}.Key + FdbTuple.Create(w, x, y, z).ToSlice() - /// The key produced can be decoded back into a tuple by calling + /// The key produced can be decoded back into a tuple by calling public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7) { return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3, item4, item5, item6, item7); @@ -393,14 +398,20 @@ public Slice EncodeKey(T1 item1, T2 item2, T3 item3, /// Type of the third item /// Type of the fourth item /// Type of the fifth item - /// Item that will be appended first - /// Item that will be appended second - /// Item that will be appended third - /// Item that will be appended fourth - /// Item that will be appended fifth - /// Key that is equivalent to adding the packed tuple quad (, , , ) to the subspace's prefix + /// Type of the sixth item + /// Type of the seventh item + /// Type of the eight item + /// Item in the first position + /// Item in the second position + /// Item in the third position + /// Item in the fourth position + /// Item in the fifth position + /// Item in the sixth position + /// Item in the seventh position + /// Item in the eigth 
position + /// Key that is equivalent to adding the packed tuple (, , , , , , , ) to the subspace's prefix /// {subspace}.EncodeKey(w, x, y, z) is much faster way to do {subspace}.Key + FdbTuple.Create(w, x, y, z).ToSlice() - /// The key produced can be decoded back into a tuple by calling + /// The key produced can be decoded back into a tuple by calling public Slice EncodeKey(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7, T8 item8) { return FdbTuple.EncodePrefixedKey(m_subspace.Key, item1, item2, item3, item4, item5, item6, item7, item8); From da91433f931ecb03a74ad83344971c30535b512f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gr=C3=A9goire=20Castre?= Date: Tue, 3 Feb 2015 12:29:50 +0100 Subject: [PATCH 45/63] Fix comment --- FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs | 2 +- FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs | 4 ++-- FoundationDB.Client/Utils/Slice.cs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs index 2078962ab..bd2fcc6ce 100644 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs +++ b/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs @@ -36,7 +36,7 @@ namespace FoundationDB.Client /// Subspace that knows how to encode and decode its key /// Type of the first item of the keys handled by this subspace /// Type of the second item of the keys handled by this subspace - /// Type of the thrid item of the keys handled by this subspace + /// Type of the thrid item of the keys handled by this subspace public class FdbEncoderSubspace : FdbSubspace, ICompositeKeyEncoder { /// Reference to the wrapped subspace diff --git a/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs b/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs index 2325b0a1f..da660621b 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTupleParser.cs @@ -1035,9 
+1035,9 @@ public static bool Skip(ref TupleReader reader, int count) } /// Visit the different tokens of a packed tuple - /// Reader positionned at the start of a packed tuple + /// Reader positionned at the start of a packed tuple /// Lambda called for each segment of a tuple. Returns true to continue parsing, or false to stop - /// Number of tokens that have been visited until either returned false, or reached the end. + /// Number of tokens that have been visited until either returned false, or reached the end. public static T VisitNext(ref TupleReader reader, Func visitor) { if (!reader.Input.HasMore) throw new InvalidOperationException("The reader has already reached the end"); diff --git a/FoundationDB.Client/Utils/Slice.cs b/FoundationDB.Client/Utils/Slice.cs index 5d9483d2b..8551a28ba 100644 --- a/FoundationDB.Client/Utils/Slice.cs +++ b/FoundationDB.Client/Utils/Slice.cs @@ -1960,7 +1960,7 @@ public static Slice Concat(params Slice[] args) /// This method is optmized to reduce the amount of memory allocated public static Slice[] ConcatRange(Slice prefix, IEnumerable slices) { - if (slices == null) throw new ArgumentNullException("suffixes"); + if (slices == null) throw new ArgumentNullException("slices"); if (prefix.IsNullOrEmpty) { // nothing to do, but we still need to copy the array From 2609a0c67d515e2350bf0cc4d5d0728efd7b2d5c Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Thu, 5 Feb 2015 14:18:15 +0100 Subject: [PATCH 46/63] The Great Subspace API Refactoring of 2015 - split subspaces into 3 categories: regular "untyped" IFdbSubspace, dynamic "tuple-based" IFdbDynamicSubspace, and encoder-based IFdbEncoderSubspace<....> - each subspace category comes with its own set of Keys / Partition fields that have different methods, for each use case - a regular subspace can be "enhanced" via subspace.Using(TypeSystem.Tuples), or subspace.UsingEncoder(myCustomEncoder) - TypeSystem.Tuples contains the basic of a "Type System" which uses the Tuple 
Encoding format to generate Keys --- FdbBurner/Program.cs | 2 +- FdbShell/Commands/BasicCommands.cs | 10 +- .../Encoders/FdbEncoderSubspace`1.cs | 181 ------- .../Encoders/FdbEncoderSubspace`2.cs | 159 ------ .../Encoders/FdbEncoderSubspace`3.cs | 155 ------ .../Encoders/IKeyValueEncoder.cs | 2 +- .../Encoders/ISliceSerializable.cs | 2 +- .../Encoders/KeyValueEncoders.cs | 2 +- FoundationDB.Client/FdbCluster.cs | 2 +- FoundationDB.Client/FdbDatabase.cs | 71 ++- .../Filters/FdbDatabaseFilter.cs | 60 ++- .../FoundationDB.Client.csproj | 26 +- FoundationDB.Client/IFdbCluster.cs | 2 +- FoundationDB.Client/IFdbDatabase.cs | 4 +- .../Layers/Directories/FdbDirectoryLayer.cs | 73 +-- .../Directories/FdbDirectoryPartition.cs | 2 +- .../Directories/FdbDirectorySubspace.cs | 8 +- .../Directories/FdbHighContentionAllocator.cs | 20 +- FoundationDB.Client/Layers/Tuples/FdbTuple.cs | 104 ++++ .../Subspaces/Fdb.Directory.cs | 2 +- .../Subspaces/FdbDynamicSubspace.cs | 80 +++ .../Subspaces/FdbDynamicSubspaceKeys.cs | 463 ++++++++++++++++++ ...tion.cs => FdbDynamicSubspacePartition.cs} | 57 ++- .../Subspaces/FdbEncoderSubspaceKeys`1.cs | 81 +++ .../Subspaces/FdbEncoderSubspaceKeys`2.cs | 81 +++ .../Subspaces/FdbEncoderSubspaceKeys`3.cs | 81 +++ .../FdbEncoderSubspacePartition`1.cs | 61 +++ .../FdbEncoderSubspacePartition`2.cs | 66 +++ .../FdbEncoderSubspacePartition`3.cs | 66 +++ .../Subspaces/FdbEncoderSubspace`1.cs | 73 +++ .../Subspaces/FdbEncoderSubspace`2.cs | 81 +++ .../Subspaces/FdbEncoderSubspace`3.cs | 87 ++++ FoundationDB.Client/Subspaces/FdbSubspace.cs | 200 ++++++-- .../Subspaces/FdbSubspaceExtensions.cs | 32 +- ...SubspaceKeys.cs => FdbSubspaceKeys_OLD.cs} | 5 +- ...paceTuples.cs => FdbSubspaceTuples_OLD.cs} | 5 +- .../Subspaces/IFdbDynamicSubspace.cs | 49 ++ .../Subspaces/IFdbEncoderSubspace.cs | 79 +++ FoundationDB.Client/Subspaces/IFdbSubspace.cs | 51 +- .../TypeSystem/FdbTypeCodec`1.cs | 3 + .../TypeSystem/IFdbTypeSystem.cs | 175 +++++++ 
.../TypeSystem/Tuples/Tuples.cs | 147 ++++++ .../TypeSystem/Tuples/Tupspace.cs | 31 ++ FoundationDB.Client/TypeSystem/TypeSystem.cs | 34 ++ FoundationDB.Layers.Common/Blobs/FdbBlob.cs | 41 +- .../Collections/FdbMap`2.cs | 38 +- .../Collections/FdbMultimap`2.cs | 28 +- .../Collections/FdbQueue`1.cs | 48 +- .../Collections/FdbRankedSet.cs | 45 +- .../Collections/FdbVector`1.cs | 26 +- .../Counters/FdbCounterMap.cs | 18 +- .../Counters/FdbHighContentionCounter.cs | 35 +- .../Indexes/FdbIndex`2.cs | 30 +- .../Interning/FdbStringIntern.cs | 8 +- .../Documents/FdbDocumentCollection.cs | 14 +- .../Documents/FdbHashSetCollection.cs | 10 +- .../Indexes/FdbCompressedBitmapIndex.cs | 20 +- .../Messaging/FdbWorkerPool.cs | 58 +-- .../Messaging/WorkerPoolTest.cs | 5 +- .../Benchmarks/BenchRunner.cs | 4 +- FoundationDB.Samples/Benchmarks/LeakTest.cs | 8 +- .../MessageQueue/MessageQueueRunner.cs | 18 +- .../Tutorials/ClassScheduling.cs | 12 +- .../Transactions/Benchmarks.cs | 2 +- .../Transactions/Comparisons.cs | 16 +- .../Transactions/MemoryTransactionFacts.cs | 204 ++++---- .../API/MemoryDatabase.cs | 4 +- FoundationDB.Tests.Sandbox/Program.cs | 32 +- FoundationDB.Tests/DatabaseBulkFacts.cs | 40 +- .../Filters/LoggingFilterFacts.cs | 62 +-- FoundationDB.Tests/FoundationDB.Tests.csproj | 2 +- FoundationDB.Tests/Layers/DirectoryFacts.cs | 95 ++-- FoundationDB.Tests/Layers/MapFacts.cs | 6 +- FoundationDB.Tests/Layers/MultiMapFacts.cs | 6 +- FoundationDB.Tests/Layers/RankedSetFacts.cs | 6 +- .../Layers/StringInternFacts.cs | 22 +- FoundationDB.Tests/Layers/VectorFacts.cs | 6 +- .../Linq/FdbAsyncQueryableFacts.cs | 6 +- FoundationDB.Tests/RangeQueryFacts.cs | 122 ++--- .../{Layers => }/SubspaceFacts.cs | 48 +- FoundationDB.Tests/TransactionFacts.cs | 264 +++++----- FoundationDB.Tests/TransactionalFacts.cs | 22 +- 82 files changed, 3006 insertions(+), 1400 deletions(-) delete mode 100644 FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs delete mode 100644 
FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs delete mode 100644 FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs create mode 100644 FoundationDB.Client/Subspaces/FdbDynamicSubspace.cs create mode 100644 FoundationDB.Client/Subspaces/FdbDynamicSubspaceKeys.cs rename FoundationDB.Client/Subspaces/{FdbSubspacePartition.cs => FdbDynamicSubspacePartition.cs} (73%) create mode 100644 FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`1.cs create mode 100644 FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`2.cs create mode 100644 FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`3.cs create mode 100644 FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`1.cs create mode 100644 FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`2.cs create mode 100644 FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`3.cs create mode 100644 FoundationDB.Client/Subspaces/FdbEncoderSubspace`1.cs create mode 100644 FoundationDB.Client/Subspaces/FdbEncoderSubspace`2.cs create mode 100644 FoundationDB.Client/Subspaces/FdbEncoderSubspace`3.cs rename FoundationDB.Client/Subspaces/{FdbSubspaceKeys.cs => FdbSubspaceKeys_OLD.cs} (97%) rename FoundationDB.Client/Subspaces/{FdbSubspaceTuples.cs => FdbSubspaceTuples_OLD.cs} (99%) create mode 100644 FoundationDB.Client/Subspaces/IFdbDynamicSubspace.cs create mode 100644 FoundationDB.Client/Subspaces/IFdbEncoderSubspace.cs create mode 100644 FoundationDB.Client/TypeSystem/IFdbTypeSystem.cs create mode 100644 FoundationDB.Client/TypeSystem/Tuples/Tuples.cs create mode 100644 FoundationDB.Client/TypeSystem/Tuples/Tupspace.cs create mode 100644 FoundationDB.Client/TypeSystem/TypeSystem.cs rename FoundationDB.Tests/{Layers => }/SubspaceFacts.cs (81%) diff --git a/FdbBurner/Program.cs b/FdbBurner/Program.cs index 3f6dff833..8e898300f 100644 --- a/FdbBurner/Program.cs +++ b/FdbBurner/Program.cs @@ -102,7 +102,7 @@ private static async Task BurnerThread(IFdbDatabase db, CancellationToken ct) ? 
rnd.Next() : pos + i; - tr.Set(folder.Tuples.EncodeKey(x, Suffix), Value); + tr.Set(folder.Keys.Encode(x, Suffix), Value); Interlocked.Increment(ref Keys); } pos += N; diff --git a/FdbShell/Commands/BasicCommands.cs b/FdbShell/Commands/BasicCommands.cs index 2f9faedc1..ecbeac4ab 100644 --- a/FdbShell/Commands/BasicCommands.cs +++ b/FdbShell/Commands/BasicCommands.cs @@ -68,7 +68,7 @@ public static async Task Dir(string[] path, IFdbTuple extras, DirectoryBrowseOpt { if (!(subfolder is FdbDirectoryPartition)) { - long count = await Fdb.System.EstimateCountAsync(db, subfolder.Tuples.ToRange(), ct); + long count = await Fdb.System.EstimateCountAsync(db, subfolder.Keys.ToRange(), ct); log.WriteLine(" {0,-12} {1,-12} {3,9:N0} {2}", FdbKey.Dump(FdbSubspace.Copy(subfolder).Key), subfolder.Layer.IsNullOrEmpty ? "-" : ("<" + subfolder.Layer.ToUnicode() + ">"), name, count); } else @@ -115,7 +115,7 @@ public static async Task CreateDirectory(string[] path, IFdbTuple extras, IFdbDa log.WriteLine("- Created under {0} [{1}]", FdbKey.Dump(folder.Key), folder.Key.ToHexaString(' ')); // look if there is already stuff under there - var stuff = await db.ReadAsync((tr) => tr.GetRange(folder.Tuples.ToRange()).FirstOrDefaultAsync(), cancellationToken: ct); + var stuff = await db.ReadAsync((tr) => tr.GetRange(folder.Keys.ToRange()).FirstOrDefaultAsync(), cancellationToken: ct); if (stuff.Key.IsPresent) { log.WriteLine("CAUTION: There is already some data under {0} !"); @@ -224,7 +224,7 @@ public static async Task Count(string[] path, IFdbTuple extras, IFdbDatabase db, log.Write("\r# Found {0:N0} keys...", state.Item1); }); - long count = await Fdb.System.EstimateCountAsync(db, copy.Tuples.ToRange(), progress, ct); + long count = await Fdb.System.EstimateCountAsync(db, copy.ToRange(), progress, ct); log.WriteLine("\r# Found {0:N0} keys in {1}", count, folder.FullName); } @@ -245,7 +245,7 @@ public static async Task Show(string[] path, IFdbTuple extras, bool reverse, IFd log.WriteLine("# 
Content of {0} [{1}]", FdbKey.Dump(folder.Key), folder.Key.ToHexaString(' ')); var keys = await db.QueryAsync((tr) => { - var query = tr.GetRange(folder.Tuples.ToRange()); + var query = tr.GetRange(folder.Keys.ToRange()); return reverse ? query.Reverse().Take(count) : query.Take(count + 1); @@ -329,7 +329,7 @@ public static async Task Map(string[] path, IFdbTuple extras, IFdbDatabase db, T return; } - var span = folder.DirectoryLayer.ContentSubspace.Tuples.ToRange(); + var span = folder.DirectoryLayer.ContentSubspace.Keys.ToRange(); // note: this may break in future versions of the DL! Maybe we need a custom API to get a flat list of all directories in a DL that span a specific range ? diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs deleted file mode 100644 index b6d3fb768..000000000 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`1.cs +++ /dev/null @@ -1,181 +0,0 @@ -#region BSD Licence -/* Copyright (c) 2013, Doxense SARL -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Doxense nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#endregion - -namespace FoundationDB.Client -{ - using FoundationDB.Layers.Tuples; - using JetBrains.Annotations; - using System; - using System.Collections.Generic; - using System.Linq; - using System.Threading.Tasks; - - /// Subspace that knows how to encode and decode its key - /// Type of the key handled by this subspace - public class FdbEncoderSubspace : FdbSubspace, IKeyEncoder - { - /// Reference to the wrapped subspace - private readonly IFdbSubspace m_base; - - /// Encoder used to handle keys - private readonly IKeyEncoder m_encoder; - - /// Wrap an existing subspace with a specific key encoder - /// Original subspace - /// Key encoder - public FdbEncoderSubspace([NotNull] IFdbSubspace subspace, [NotNull] IKeyEncoder encoder) - : base(subspace) - { - if (subspace == null) throw new ArgumentNullException("subspace"); - if (encoder == null) throw new ArgumentNullException("encoder"); - m_base = subspace; - m_encoder = encoder; - } - - /// Untyped version of this subspace - public IFdbSubspace Base - { - get { return m_base; } - } - - /// Encoder used by this subpsace to format keys - public IKeyEncoder Encoder - { - [NotNull] - get { return m_encoder; } - } - - #region Transaction Helpers... 
- - public void Set([NotNull] IFdbTransaction trans, T key, Slice value) - { - if (trans == null) throw new ArgumentNullException("trans"); - trans.Set(EncodeKey(key), value); - } - - public void SetValues([NotNull] IFdbTransaction trans, [NotNull] IEnumerable> items) - { - if (trans == null) throw new ArgumentNullException("trans"); - if (items == null) throw new ArgumentNullException("items"); - //TODO: find a way to mass convert all the keys using the same buffer? - trans.SetValues(items.Select(item => new KeyValuePair(EncodeKey(item.Key), item.Value))); - } - - public void Clear([NotNull] IFdbTransaction trans, T key) - { - trans.Clear(EncodeKey(key)); - } - - public Task GetAsync([NotNull] IFdbReadOnlyTransaction trans, T key) - { - return trans.GetAsync(EncodeKey(key)); - } - - public Task GetValuesAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] T[] keys) - { - return trans.GetValuesAsync(EncodeKeys(keys)); - } - - public Task GetValuesAsync([NotNull] IFdbReadOnlyTransaction trans, [NotNull] IEnumerable keys) - { - return trans.GetValuesAsync(EncodeKeys(keys)); - } - - #endregion - - #region Key Encoding/Decoding... 
- - public Slice EncodeKey(T key) - { - return this.Key + m_encoder.EncodeKey(key); - } - - [NotNull] - public Slice[] EncodeKeys([NotNull] IEnumerable keys) - { - return ConcatKeys(m_encoder.EncodeKeys(keys)); - } - - [NotNull] - public Slice[] EncodeKeys([NotNull] params T[] keys) - { - return ConcatKeys(m_encoder.EncodeKeys(keys)); - } - - [NotNull] - public Slice[] EncodeKeys([NotNull] IEnumerable elements, Func selector) - { - return ConcatKeys(m_encoder.EncodeKeys(elements, selector)); - } - - [NotNull] - public Slice[] EncodeKeys([NotNull] TElement[] elements, Func selector) - { - return ConcatKeys(m_encoder.EncodeKeys(elements, selector)); - } - - public T DecodeKey(Slice encoded) - { - return m_encoder.DecodeKey(ExtractKey(encoded, boundCheck: true)); - } - - [NotNull] - public T[] DecodeKeys([NotNull] IEnumerable encoded) - { - return m_encoder.DecodeKeys(ExtractKeys(encoded, boundCheck: true)); - } - - [NotNull] - public T[] DecodeKeys([NotNull] params Slice[] encoded) - { - return m_encoder.DecodeKeys(ExtractKeys(encoded, boundCheck: true)); - } - - public virtual FdbKeyRange ToRange(T key) - { - return FdbTuple.ToRange(EncodeKey(key)); - } - - [NotNull] - public FdbKeyRange[] ToRange([NotNull] T[] keys) - { - var packed = EncodeKeys(keys); - - var ranges = new FdbKeyRange[keys.Length]; - for (int i = 0; i < ranges.Length; i++) - { - ranges[i] = FdbTuple.ToRange(packed[i]); - } - return ranges; - } - - #endregion - - } - -} diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs deleted file mode 100644 index cf978f73f..000000000 --- a/FoundationDB.Client/Encoders/FdbEncoderSubspace`2.cs +++ /dev/null @@ -1,159 +0,0 @@ -#region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Doxense nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#endregion - -namespace FoundationDB.Client -{ - using FoundationDB.Layers.Tuples; - using JetBrains.Annotations; - using System; - using System.Threading.Tasks; - - /// Subspace that knows how to encode and decode its key - /// Type of the first item of the keys handled by this subspace - /// Type of the second item of the keys handled by this subspace - public class FdbEncoderSubspace : FdbSubspace, ICompositeKeyEncoder - { - /// Reference to the wrapped subspace - private readonly IFdbSubspace m_base; - - /// Encoder used to handle keys - private readonly ICompositeKeyEncoder m_encoder; - - /// Version of this subspace that encodes only the first key - private volatile FdbEncoderSubspace m_head; - - public FdbEncoderSubspace([NotNull] IFdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) - : base(subspace) - { - if (subspace == null) throw new ArgumentNullException("subspace"); - if (encoder == null) throw new ArgumentNullException("encoder"); - m_base = subspace; - m_encoder = encoder; - } - - /// Untyped version of this subspace - public IFdbSubspace Base - { - get { return m_base; } - } - - /// Gets the key encoder - public ICompositeKeyEncoder Encoder - { - [NotNull] - get { return m_encoder; } - } - - /// Returns a partial encoder for (T1,) - public FdbEncoderSubspace Partial - { - [NotNull] - get { return m_head ?? (m_head = new FdbEncoderSubspace(m_base, KeyValueEncoders.Head(m_encoder))); } - } - - #region Transaction Helpers... 
- - public void Set([NotNull] IFdbTransaction trans, T1 key1, T2 key2, Slice value) - { - trans.Set(EncodeKey(key1, key2), value); - } - - public void Set([NotNull] IFdbTransaction trans, FdbTuple key, Slice value) - { - trans.Set(EncodeKey(key), value); - } - - public void Clear([NotNull] IFdbTransaction trans, T1 key1, T2 key2) - { - trans.Clear(EncodeKey(key1, key2)); - } - - public void Clear([NotNull] IFdbTransaction trans, FdbTuple key) - { - trans.Clear(EncodeKey(key)); - } - - public Task GetAsync([NotNull] IFdbReadOnlyTransaction trans, T1 key1, T2 key2) - { - return trans.GetAsync(EncodeKey(key1, key2)); - } - - #endregion - - #region Key Encoding/Decoding... - - public virtual Slice EncodeKey(FdbTuple key) - { - return ConcatKey(m_encoder.EncodeKey(key)); - } - - public virtual Slice EncodeKey(T1 key1, T2 key2) - { - return ConcatKey(m_encoder.EncodeKey(key1, key2)); - } - - public virtual Slice EncodeKey(T1 key1) - { - return ConcatKey(m_encoder.EncodeComposite(FdbTuple.Create(key1, default(T2)), 1)); - } - - Slice ICompositeKeyEncoder>.EncodeComposite(FdbTuple key, int items) - { - return ConcatKey(m_encoder.EncodeComposite(key, items)); - } - - public virtual FdbTuple DecodeKey(Slice encoded) - { - return m_encoder.DecodeKey(ExtractKey(encoded, boundCheck: true)); - } - - FdbTuple ICompositeKeyEncoder>.DecodeComposite(Slice encoded, int items) - { - return m_encoder.DecodeComposite(ExtractKey(encoded, boundCheck: true), items); - } - - public virtual FdbKeyRange ToRange(FdbTuple key) - { - return FdbTuple.ToRange(EncodeKey(key)); - } - - public virtual FdbKeyRange ToRange(T1 key1, T2 key2) - { - return FdbTuple.ToRange(EncodeKey(key1, key2)); - } - - public virtual FdbKeyRange ToRange(T1 key1) - { - return FdbTuple.ToRange(EncodeKey(key1)); - } - - #endregion - - } - -} diff --git a/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs b/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs deleted file mode 100644 index 2078962ab..000000000 --- 
a/FoundationDB.Client/Encoders/FdbEncoderSubspace`3.cs +++ /dev/null @@ -1,155 +0,0 @@ -#region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Doxense nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#endregion - -namespace FoundationDB.Client -{ - using FoundationDB.Layers.Tuples; - using JetBrains.Annotations; - using System; - using System.Threading.Tasks; - - /// Subspace that knows how to encode and decode its key - /// Type of the first item of the keys handled by this subspace - /// Type of the second item of the keys handled by this subspace - /// Type of the thrid item of the keys handled by this subspace - public class FdbEncoderSubspace : FdbSubspace, ICompositeKeyEncoder - { - /// Reference to the wrapped subspace - private readonly IFdbSubspace m_base; - - /// Encoder used to handle keys - private readonly ICompositeKeyEncoder m_encoder; - - /// Version of this subspace that encodes only the first key - private volatile FdbEncoderSubspace m_head; - - /// Version of this subspace that encodes only the first and second keys - private volatile FdbEncoderSubspace m_partial; - - public FdbEncoderSubspace([NotNull] IFdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) - : base(subspace) - { - if (subspace == null) throw new ArgumentNullException("subspace"); - if (encoder == null) throw new ArgumentNullException("encoder"); - m_base = subspace; - m_encoder = encoder; - } - - /// Untyped version of this subspace - public IFdbSubspace Base - { - get { return m_base; } - } - - /// Gets the key encoder - public ICompositeKeyEncoder Encoder - { - [NotNull] - get { return m_encoder; } - } - - /// Returns a partial encoder for (T1,) - public FdbEncoderSubspace Head - { - [NotNull] - get { return m_head ?? (m_head = new FdbEncoderSubspace(m_base, KeyValueEncoders.Head(m_encoder))); } - } - - /// Returns a partial encoder for (T1,T2) - public FdbEncoderSubspace Partial - { - [NotNull] - get { return m_partial ?? (m_partial = new FdbEncoderSubspace(m_base, KeyValueEncoders.Pair(m_encoder))); } - } - - #region Transaction Helpers... 
- - public void Set([NotNull] IFdbTransaction trans, T1 key1, T2 key2, T3 key3, Slice value) - { - trans.Set(EncodeKey(key1, key2, key3), value); - } - - public void Set([NotNull] IFdbTransaction trans, FdbTuple key, Slice value) - { - trans.Set(EncodeKey(key), value); - } - - public void Clear([NotNull] IFdbTransaction trans, T1 key1, T2 key2, T3 key3) - { - trans.Clear(EncodeKey(key1, key2, key3)); - } - - public void Clear([NotNull] IFdbTransaction trans, FdbTuple key) - { - trans.Clear(EncodeKey(key)); - } - - public Task GetAsync([NotNull] IFdbReadOnlyTransaction trans, T1 key1, T2 key2, T3 key3) - { - return trans.GetAsync(EncodeKey(key1, key2, key3)); - } - - #endregion - - #region Key Encoding/Decoding... - - public virtual Slice EncodeKey(FdbTuple key) - { - return this.Key + m_encoder.EncodeKey(key); - } - - public virtual Slice EncodeKey(T1 key1, T2 key2, T3 key3) - { - return this.Key + m_encoder.EncodeKey(key1, key2, key3); - } - - Slice ICompositeKeyEncoder>.EncodeComposite(FdbTuple key, int items) - { - return this.Key + m_encoder.EncodeComposite(key, items); - } - - public virtual FdbTuple DecodeKey(Slice encoded) - { - return m_encoder.DecodeKey(ExtractKey(encoded, boundCheck: true)); - } - - FdbTuple ICompositeKeyEncoder>.DecodeComposite(Slice encoded, int items) - { - return m_encoder.DecodeComposite(ExtractKey(encoded, boundCheck: true), items); - } - - public virtual FdbKeyRange ToRange(T1 key1, T2 key2, T3 key3) - { - return FdbTuple.ToRange(EncodeKey(key1, key2, key3)); - } - - #endregion - - } - -} diff --git a/FoundationDB.Client/Encoders/IKeyValueEncoder.cs b/FoundationDB.Client/Encoders/IKeyValueEncoder.cs index 2055357b9..dbe75de91 100644 --- a/FoundationDB.Client/Encoders/IKeyValueEncoder.cs +++ b/FoundationDB.Client/Encoders/IKeyValueEncoder.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. 
Redistribution and use in source and binary forms, with or without diff --git a/FoundationDB.Client/Encoders/ISliceSerializable.cs b/FoundationDB.Client/Encoders/ISliceSerializable.cs index 46a9d65ef..5d648d87d 100644 --- a/FoundationDB.Client/Encoders/ISliceSerializable.cs +++ b/FoundationDB.Client/Encoders/ISliceSerializable.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/FoundationDB.Client/Encoders/KeyValueEncoders.cs b/FoundationDB.Client/Encoders/KeyValueEncoders.cs index afc24ae15..ba7c0defb 100644 --- a/FoundationDB.Client/Encoders/KeyValueEncoders.cs +++ b/FoundationDB.Client/Encoders/KeyValueEncoders.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/FoundationDB.Client/FdbCluster.cs b/FoundationDB.Client/FdbCluster.cs index 6b1a09225..c4a633c44 100644 --- a/FoundationDB.Client/FdbCluster.cs +++ b/FoundationDB.Client/FdbCluster.cs @@ -111,7 +111,7 @@ protected virtual void Dispose(bool disposing) /// If is anything other than 'DB' /// If the token is cancelled /// Any attempt to use a key outside the specified subspace will throw an exception - public async Task OpenDatabaseAsync(string databaseName, FdbSubspace subspace, bool readOnly, CancellationToken cancellationToken) + public async Task OpenDatabaseAsync(string databaseName, IFdbSubspace subspace, bool readOnly, CancellationToken cancellationToken) { if (subspace == null) throw new ArgumentNullException("subspace"); return await OpenDatabaseInternalAsync(databaseName, subspace, readOnly: readOnly, ownsCluster: false, cancellationToken: cancellationToken).ConfigureAwait(false); diff --git a/FoundationDB.Client/FdbDatabase.cs b/FoundationDB.Client/FdbDatabase.cs 
index 9e1fef64e..ebf52627e 100644 --- a/FoundationDB.Client/FdbDatabase.cs +++ b/FoundationDB.Client/FdbDatabase.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -26,6 +26,8 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY */ #endregion +using FoundationDB.Filters.Logging; + namespace FoundationDB.Client { using FoundationDB.Async; @@ -77,9 +79,9 @@ public class FdbDatabase : IFdbDatabase, IFdbRetryable /// Global namespace used to prefix ALL keys and subspaces accessible by this database instance (default is empty) /// This is readonly and is set when creating the database instance - private IFdbSubspace m_globalSpace; + private IFdbDynamicSubspace m_globalSpace; /// Copy of the namespace, that is exposed to the outside. - private IFdbSubspace m_globalSpaceCopy; + private IFdbDynamicSubspace m_globalSpaceCopy; /// Default Timeout value for all transactions private int m_defaultTimeout; @@ -459,14 +461,14 @@ internal void ChangeRoot(IFdbSubspace subspace, IFdbDirectory directory, bool re lock (this)//TODO: don't use this for locking { m_readOnly = readOnly; - m_globalSpace = FdbSubspace.Copy(subspace); - m_globalSpaceCopy = FdbSubspace.Copy(subspace); // keep another copy + m_globalSpace = FdbSubspace.CopyDynamic(subspace, TypeSystem.Tuples); + m_globalSpaceCopy = FdbSubspace.CopyDynamic(subspace, TypeSystem.Tuples); // keep another copy m_directory = directory == null ? null : new FdbDatabasePartition(this, directory); } } /// Returns the global namespace used by this database instance - public IFdbSubspace GlobalSpace + public IFdbDynamicSubspace GlobalSpace { //REVIEW: rename to just "Subspace" ? 
[NotNull] @@ -543,11 +545,21 @@ Slice IFdbSubspace.ConcatKey(Slice key) return m_globalSpace.ConcatKey(key); } + Slice IFdbSubspace.ConcatKey(TKey key) + { + return m_globalSpace.ConcatKey(key); + } + Slice[] IFdbSubspace.ConcatKeys(IEnumerable keys) { return m_globalSpace.ConcatKeys(keys); } + Slice[] IFdbSubspace.ConcatKeys(IEnumerable keys) + { + return m_globalSpace.ConcatKeys(keys); + } + /// Remove the database global subspace prefix from a binary key, or throw if the key is outside of the global subspace. Slice IFdbSubspace.ExtractKey(Slice key, bool boundCheck) { @@ -560,26 +572,61 @@ Slice[] IFdbSubspace.ExtractKeys(IEnumerable keys, bool boundCheck) return m_globalSpace.ExtractKeys(keys, boundCheck); } + SliceWriter IFdbSubspace.GetWriter(int capacity) + { + return m_globalSpace.GetWriter(capacity); + } + Slice IFdbSubspace.Key { get { return m_globalSpace.Key; } } - public FdbSubspacePartition Partition + IFdbSubspace IFdbSubspace.this[Slice suffix] + { + get + { + return m_globalSpace[suffix]; + } + } + + IFdbSubspace IFdbSubspace.this[IFdbKey key] + { + get + { + return m_globalSpace[key]; + } + } + + FdbKeyRange IFdbSubspace.ToRange() + { + return m_globalSpace.ToRange(); + } + + FdbKeyRange IFdbSubspace.ToRange(Slice suffix) + { + return m_globalSpace.ToRange(suffix); + } + + FdbKeyRange IFdbSubspace.ToRange(TKey key) + { + return m_globalSpace.ToRange(key); + } + + public FdbDynamicSubspacePartition Partition { //REVIEW: should we hide this on the main db? get { return m_globalSpace.Partition; } } - public FdbSubspaceKeys Keys + IFdbTypeSystem IFdbDynamicSubspace.Protocol { - get { return m_globalSpace.Keys; } + get { return m_globalSpace.Protocol; } } - public FdbSubspaceTuples Tuples + public FdbDynamicSubspaceKeys Keys { - //REVIEW: should we hide this on the main db? 
- get { return m_globalSpace.Tuples; } + get { return m_globalSpace.Keys; } } /// Returns true if the key is inside the system key space (starts with '\xFF') diff --git a/FoundationDB.Client/Filters/FdbDatabaseFilter.cs b/FoundationDB.Client/Filters/FdbDatabaseFilter.cs index 2d875824d..05fec0431 100644 --- a/FoundationDB.Client/Filters/FdbDatabaseFilter.cs +++ b/FoundationDB.Client/Filters/FdbDatabaseFilter.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -108,7 +108,7 @@ public CancellationToken Cancellation } /// Returns the global namespace used by this database instance - public virtual IFdbSubspace GlobalSpace + public virtual IFdbDynamicSubspace GlobalSpace { [NotNull] get { return m_database.GlobalSpace; } @@ -138,19 +138,39 @@ Slice IFdbSubspace.Key get { return this.GlobalSpace.Key; } } - public virtual FdbSubspacePartition Partition + FdbKeyRange IFdbSubspace.ToRange() { - get { return m_database.Partition; } + return this.GlobalSpace.ToRange(); } - public virtual FdbSubspaceKeys Keys + FdbKeyRange IFdbSubspace.ToRange(Slice suffix) { - get { return m_database.Keys; } + return this.GlobalSpace.ToRange(suffix); + } + + FdbKeyRange IFdbSubspace.ToRange(TKey key) + { + return this.GlobalSpace.ToRange(key); + } + + IFdbSubspace IFdbSubspace.this[Slice suffix] + { + get { return this.GlobalSpace[suffix]; } + } + + IFdbSubspace IFdbSubspace.this[IFdbKey key] + { + get { return this.GlobalSpace[key]; } + } + + public virtual FdbDynamicSubspacePartition Partition + { + get { return m_database.Partition; } } - public virtual FdbSubspaceTuples Tuples + public virtual FdbDynamicSubspaceKeys Keys { - get { return m_database.Tuples; } + get { return m_database.Keys; } } public virtual bool Contains(Slice key) @@ -168,12 +188,24 @@ public virtual Slice ConcatKey(Slice key) return m_database.ConcatKey(key); } 
+ public virtual Slice ConcatKey(TKey key) + where TKey : IFdbKey + { + return m_database.ConcatKey(key); + } + public virtual Slice[] ConcatKeys(IEnumerable keys) { return m_database.ConcatKeys(keys); } - public virtual Slice ExtractKey(Slice key, bool boundCheck = false) + public virtual Slice[] ConcatKeys(IEnumerable keys) + where TKey : IFdbKey + { + return m_database.ConcatKeys(keys); + } + + public virtual Slice ExtractKey(Slice key, bool boundCheck = false) { return m_database.ExtractKey(key, boundCheck); } @@ -183,6 +215,16 @@ public virtual Slice[] ExtractKeys(IEnumerable keys, bool boundCheck = fa return m_database.ExtractKeys(keys, boundCheck); } + public virtual SliceWriter GetWriter(int capacity = 0) + { + return m_database.GetWriter(capacity); + } + + public virtual IFdbTypeSystem Protocol + { + get { return m_database.Protocol; } + } + #endregion #region Transactionals... diff --git a/FoundationDB.Client/FoundationDB.Client.csproj b/FoundationDB.Client/FoundationDB.Client.csproj index 107d98ac4..3f3888ecf 100644 --- a/FoundationDB.Client/FoundationDB.Client.csproj +++ b/FoundationDB.Client/FoundationDB.Client.csproj @@ -65,10 +65,13 @@ - - + + + + + + - @@ -162,9 +165,14 @@ - - - + + + + + + + + @@ -206,8 +214,14 @@ + + + + + + diff --git a/FoundationDB.Client/IFdbCluster.cs b/FoundationDB.Client/IFdbCluster.cs index 032ee7cba..6c14420fa 100644 --- a/FoundationDB.Client/IFdbCluster.cs +++ b/FoundationDB.Client/IFdbCluster.cs @@ -59,7 +59,7 @@ public interface IFdbCluster : IDisposable /// If true, the database will only allow read operations. /// Cancellation Token (optionnal) for the connect operation /// Task that will return an FdbDatabase, or an exception - Task OpenDatabaseAsync(string databaseName, FdbSubspace subspace, bool readOnly, CancellationToken cancellationToken); + Task OpenDatabaseAsync(string databaseName, IFdbSubspace subspace, bool readOnly, CancellationToken cancellationToken); //REVIEW: we should return an IFdbDatabase instead ! 
} diff --git a/FoundationDB.Client/IFdbDatabase.cs b/FoundationDB.Client/IFdbDatabase.cs index 7640b7565..c3eca66c4 100644 --- a/FoundationDB.Client/IFdbDatabase.cs +++ b/FoundationDB.Client/IFdbDatabase.cs @@ -33,7 +33,7 @@ namespace FoundationDB.Client using System.Threading; /// Database connection context. - public interface IFdbDatabase : IFdbReadOnlyRetryable, IFdbRetryable, IFdbSubspace, IFdbKey, IDisposable + public interface IFdbDatabase : IFdbReadOnlyRetryable, IFdbRetryable, IFdbDynamicSubspace, IFdbKey, IDisposable { /// Name of the database string Name { [NotNull] get; } @@ -47,7 +47,7 @@ public interface IFdbDatabase : IFdbReadOnlyRetryable, IFdbRetryable, IFdbSubspa /// Returns the global namespace used by this database instance /// Makes a copy of the subspace tuple, so you should not call this property a lot. Use any of the Partition(..) methods to create a subspace of the database - IFdbSubspace GlobalSpace { [NotNull] get; } + IFdbDynamicSubspace GlobalSpace { [NotNull] get; } /// Directory partition of this database instance FdbDatabasePartition Directory { [NotNull] get; } diff --git a/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs b/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs index 20ddfa01e..0fc10e552 100644 --- a/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs +++ b/FoundationDB.Client/Layers/Directories/FdbDirectoryLayer.cs @@ -64,13 +64,13 @@ public class FdbDirectoryLayer : IFdbDirectory public static bool AnnotateTransactions { get; set; } /// Subspace where the content of each folder will be stored - public IFdbSubspace ContentSubspace { [NotNull] get; private set; } + public IFdbDynamicSubspace ContentSubspace { [NotNull] get; private set; } /// Subspace where all the metadata nodes for each folder will be stored - public IFdbSubspace NodeSubspace { [NotNull] get; private set; } + public IFdbDynamicSubspace NodeSubspace { [NotNull] get; private set; } /// Root node of the directory - internal 
IFdbSubspace RootNode { [NotNull] get; private set; } + internal IFdbDynamicSubspace RootNode { [NotNull] get; private set; } /// Allocated used to generated prefix for new content internal FdbHighContentionAllocator Allocator { [NotNull] get; private set; } @@ -129,7 +129,7 @@ Task IFdbDirectory.ChangeLayerAsync(IFdbTransaction trans, /// Subspace where all the node metadata will be stored ('\xFE' by default) /// Subspace where all automatically allocated directories will be stored (empty by default) /// Location of the root of all the directories managed by this Directory Layer. Ususally empty for the root partition of the database. - internal FdbDirectoryLayer(IFdbSubspace nodeSubspace, IFdbSubspace contentSubspace, IFdbTuple location) + internal FdbDirectoryLayer(IFdbDynamicSubspace nodeSubspace, IFdbDynamicSubspace contentSubspace, IFdbTuple location) { Contract.Requires(nodeSubspace != null && contentSubspace != null); @@ -156,7 +156,7 @@ internal FdbDirectoryLayer(IFdbSubspace nodeSubspace, IFdbSubspace contentSubspa [NotNull] public static FdbDirectoryLayer Create() { - return new FdbDirectoryLayer(new FdbSubspace(FdbKey.Directory), FdbSubspace.Empty, null); + return Create(Slice.Empty); } /// Create an instance of a Directory Layer located under a specific prefix and path @@ -165,7 +165,7 @@ public static FdbDirectoryLayer Create() [NotNull] public static FdbDirectoryLayer Create(Slice prefix, IEnumerable path = null) { - var subspace = FdbSubspace.Create(prefix); + var subspace = FdbSubspace.CreateDynamic(prefix, TypeSystem.Tuples); var location = path != null ? 
ParsePath(path) : FdbTuple.Empty; return new FdbDirectoryLayer(subspace.Partition[FdbKey.Directory], subspace, location); } @@ -179,7 +179,8 @@ public static FdbDirectoryLayer Create(IFdbSubspace subspace, IEnumerableCreate an instance of a Directory Layer located under a specific subpsace and path @@ -187,7 +188,7 @@ public static FdbDirectoryLayer Create(IFdbSubspace subspace, IEnumerableSubspace for the content of the Directory Layer. /// Optional path, if the Directory Layer is not located at the root of the database [NotNull] - public static FdbDirectoryLayer Create(IFdbSubspace nodeSubspace, IFdbSubspace contentSubspace, IEnumerable path = null) + public static FdbDirectoryLayer Create(IFdbDynamicSubspace nodeSubspace, IFdbDynamicSubspace contentSubspace, IEnumerable path = null) { if (nodeSubspace == null) throw new ArgumentNullException("nodeSubspace"); if (contentSubspace == null) throw new ArgumentNullException("contentSubspace"); @@ -476,7 +477,7 @@ public override string ToString() private struct Node { - public Node(IFdbSubspace subspace, IFdbTuple path, IFdbTuple targetPath, Slice layer) + public Node(IFdbDynamicSubspace subspace, IFdbTuple path, IFdbTuple targetPath, Slice layer) { this.Subspace = subspace; this.Path = path; @@ -484,7 +485,7 @@ public Node(IFdbSubspace subspace, IFdbTuple path, IFdbTuple targetPath, Slice l this.Layer = layer; } - public readonly IFdbSubspace Subspace; + public readonly IFdbDynamicSubspace Subspace; public readonly IFdbTuple Path; public readonly IFdbTuple TargetPath; public Slice Layer; //PERF: readonly struct @@ -500,10 +501,10 @@ public bool IsInPartition(bool includeEmptySubPath) } - private static void SetLayer([NotNull] IFdbTransaction trans, [NotNull] IFdbSubspace subspace, Slice layer) + private static void SetLayer([NotNull] IFdbTransaction trans, [NotNull] IFdbDynamicSubspace subspace, Slice layer) { if (layer.IsNull) layer = Slice.Empty; - trans.Set(subspace.Tuples.EncodeKey(LayerSuffix), layer); + 
trans.Set(subspace.Keys.Encode(LayerSuffix), layer); } [NotNull] @@ -623,7 +624,7 @@ internal async Task CreateOrOpenInternalAsync(IFdbReadOnly if (prefix == null) { // automatically allocate a new prefix inside the ContentSubspace long id = await this.Allocator.AllocateAsync(trans).ConfigureAwait(false); - prefix = this.ContentSubspace.Tuples.EncodeKey(id); + prefix = this.ContentSubspace.Keys.Encode(id); // ensure that there is no data already present under this prefix if (FdbDirectoryLayer.AnnotateTransactions) trans.Annotate("Ensure that there is no data already present under prefix {0}", prefix); @@ -650,7 +651,7 @@ internal async Task CreateOrOpenInternalAsync(IFdbReadOnly } // we need to recursively create any missing parents - IFdbSubspace parentNode; + IFdbDynamicSubspace parentNode; if (path.Count > 1) { var parentSubspace = await CreateOrOpenInternalAsync(readTrans, trans, path.Substring(0, path.Count - 1), Slice.Nil, Slice.Nil, true, true, true).ConfigureAwait(false); @@ -724,7 +725,7 @@ internal async Task MoveInternalAsync([NotNull] IFdbTransa } if (FdbDirectoryLayer.AnnotateTransactions) trans.Annotate("Register the prefix {0} to its new location in the folder sub-tree", oldNode.Subspace.Key); - trans.Set(GetSubDirKey(parentNode.Subspace, newPath.Get(-1)), this.NodeSubspace.Tuples.DecodeKey(oldNode.Subspace.Key)); + trans.Set(GetSubDirKey(parentNode.Subspace, newPath.Get(-1)), this.NodeSubspace.Keys.Decode(oldNode.Subspace.Key)); await RemoveFromParent(trans, oldPath).ConfigureAwait(false); return ContentsOfNode(oldNode.Subspace, newPath, oldNode.Layer); @@ -829,7 +830,7 @@ private async Task CheckReadVersionAsync([NotNull] IFdbReadOnlyTransaction trans { Contract.Requires(trans != null); - var value = await trans.GetAsync(this.RootNode.Tuples.EncodeKey(VersionKey)).ConfigureAwait(false); + var value = await trans.GetAsync(this.RootNode.Keys.Encode(VersionKey)).ConfigureAwait(false); if (!value.IsNullOrEmpty) { CheckVersion(value, false); @@ -840,7 
+841,7 @@ private async Task CheckWriteVersionAsync([NotNull] IFdbTransaction trans) { Contract.Requires(trans != null); - var value = await trans.GetAsync(this.RootNode.Tuples.EncodeKey(VersionKey)).ConfigureAwait(false); + var value = await trans.GetAsync(this.RootNode.Keys.Encode(VersionKey)).ConfigureAwait(false); if (value.IsNullOrEmpty) { InitializeDirectory(trans); @@ -872,7 +873,7 @@ private void InitializeDirectory([NotNull] IFdbTransaction trans) writer.WriteFixed32((uint)LayerVersion.Major); writer.WriteFixed32((uint)LayerVersion.Minor); writer.WriteFixed32((uint)LayerVersion.Build); - trans.Set(this.RootNode.Tuples.EncodeKey(VersionKey), writer.ToSlice()); + trans.Set(this.RootNode.Keys.Encode(VersionKey), writer.ToSlice()); } private async Task NodeContainingKey([NotNull] IFdbReadOnlyTransaction tr, Slice key) @@ -888,15 +889,15 @@ private async Task NodeContainingKey([NotNull] IFdbReadOnlyTransac var kvp = await tr .GetRange( - this.NodeSubspace.Tuples.ToRange().Begin, - this.NodeSubspace.Tuples.EncodeKey(key) + FdbKey.MinValue + this.NodeSubspace.Keys.ToRange().Begin, + this.NodeSubspace.Keys.Encode(key) + FdbKey.MinValue ) .LastOrDefaultAsync() .ConfigureAwait(false); if (kvp.Key.HasValue) { - var prevPrefix = this.NodeSubspace.Tuples.DecodeFirst(kvp.Key); + var prevPrefix = this.NodeSubspace.Keys.DecodeFirst(kvp.Key); if (key.StartsWith(prevPrefix)) { return NodeWithPrefix(prevPrefix); @@ -908,7 +909,7 @@ private async Task NodeContainingKey([NotNull] IFdbReadOnlyTransac /// Returns the subspace to a node metadata, given its prefix [CanBeNull] - private IFdbSubspace NodeWithPrefix(Slice prefix) + private IFdbDynamicSubspace NodeWithPrefix(Slice prefix) { if (prefix.IsNullOrEmpty) return null; return this.NodeSubspace.Partition.ByKey(prefix); @@ -921,14 +922,14 @@ private FdbDirectorySubspace ContentsOfNode([NotNull] IFdbSubspace node, [NotNul Contract.Requires(node != null); var path = this.Location.Concat(relativePath); - var prefix = 
this.NodeSubspace.Tuples.DecodeKey(node.Key); + var prefix = this.NodeSubspace.Keys.Decode(node.Key); if (layer == FdbDirectoryPartition.LayerId) { return new FdbDirectoryPartition(path, relativePath, prefix, this); } else { - return new FdbDirectorySubspace(path, relativePath, prefix, this, layer); + return new FdbDirectorySubspace(path, relativePath, prefix, this, layer, TypeSystem.Default); } } @@ -960,7 +961,7 @@ private async Task FindAsync([NotNull] IFdbReadOnlyTransaction tr, [NotNul } if (FdbDirectoryLayer.AnnotateTransactions) tr.Annotate("Reading Layer value for subfolder {0} found at {1}", path, n.Key); - layer = await tr.GetAsync(n.Tuples.EncodeKey(LayerSuffix)).ConfigureAwait(false); + layer = await tr.GetAsync(n.Keys.Encode(LayerSuffix)).ConfigureAwait(false); if (layer == FdbDirectoryPartition.LayerId) { // stop when reaching a partition return new Node(n, path.Substring(0, i + 1), path, FdbDirectoryPartition.LayerId); @@ -973,15 +974,15 @@ private async Task FindAsync([NotNull] IFdbReadOnlyTransaction tr, [NotNul /// Returns the list of names and nodes of all children of the specified node [NotNull] - private IFdbAsyncEnumerable> SubdirNamesAndNodes([NotNull] IFdbReadOnlyTransaction tr, [NotNull] IFdbSubspace node) + private IFdbAsyncEnumerable> SubdirNamesAndNodes([NotNull] IFdbReadOnlyTransaction tr, [NotNull] IFdbDynamicSubspace node) { Contract.Requires(tr != null && node != null); var sd = node.Partition.ByKey(SUBDIRS); return tr - .GetRange(sd.Tuples.ToRange()) - .Select(kvp => new KeyValuePair( - sd.Tuples.DecodeKey(kvp.Key), + .GetRange(sd.Keys.ToRange()) + .Select(kvp => new KeyValuePair( + sd.Keys.Decode(kvp.Key), NodeWithPrefix(kvp.Value) )); } @@ -1003,7 +1004,7 @@ private async Task RemoveFromParent([NotNull] IFdbTransaction tr, [NotNull } /// Resursively remove a node (including the content), all its children - private async Task RemoveRecursive([NotNull] IFdbTransaction tr, [NotNull] IFdbSubspace node) + private async Task 
RemoveRecursive([NotNull] IFdbTransaction tr, [NotNull] IFdbDynamicSubspace node) { Contract.Requires(tr != null && node != null); @@ -1012,10 +1013,10 @@ private async Task RemoveRecursive([NotNull] IFdbTransaction tr, [NotNull] IFdbS // remove ALL the contents if (FdbDirectoryLayer.AnnotateTransactions) tr.Annotate("Removing all content located under {0}", node.Key); - tr.ClearRange(ContentsOfNode(node, FdbTuple.Empty, Slice.Empty).Keys.ToRange()); + tr.ClearRange(ContentsOfNode(node, FdbTuple.Empty, Slice.Empty).ToRange()); // and all the metadata for this folder if (FdbDirectoryLayer.AnnotateTransactions) tr.Annotate("Removing all metadata for folder under {0}", node.Key); - tr.ClearRange(node.Tuples.ToRange()); + tr.ClearRange(node.Keys.ToRange()); } private async Task IsPrefixFree([NotNull] IFdbReadOnlyTransaction tr, Slice prefix) @@ -1031,20 +1032,20 @@ private async Task IsPrefixFree([NotNull] IFdbReadOnlyTransaction tr, Slic return await tr .GetRange( - this.NodeSubspace.Tuples.EncodeKey(prefix), - this.NodeSubspace.Tuples.EncodeKey(FdbKey.Increment(prefix)) + this.NodeSubspace.Keys.Encode(prefix), + this.NodeSubspace.Keys.Encode(FdbKey.Increment(prefix)) ) .NoneAsync() .ConfigureAwait(false); } - private static Slice GetSubDirKey([NotNull] IFdbSubspace parent, [NotNull] string path) + private static Slice GetSubDirKey([NotNull] IFdbDynamicSubspace parent, [NotNull] string path) { Contract.Requires(parent != null && path != null); // for a path equal to ("foo","bar","baz") and index = -1, we need to generate (parent, SUBDIRS, "baz") // but since the last item of path can be of any type, we will use tuple splicing to copy the last item without changing its type - return parent.Tuples.EncodeKey(SUBDIRS, path); + return parent.Keys.Encode(SUBDIRS, path); } #endregion diff --git a/FoundationDB.Client/Layers/Directories/FdbDirectoryPartition.cs b/FoundationDB.Client/Layers/Directories/FdbDirectoryPartition.cs index 080834c6f..3dcbf560b 100644 --- 
a/FoundationDB.Client/Layers/Directories/FdbDirectoryPartition.cs +++ b/FoundationDB.Client/Layers/Directories/FdbDirectoryPartition.cs @@ -40,7 +40,7 @@ public class FdbDirectoryPartition : FdbDirectorySubspace private readonly FdbDirectoryLayer m_parentDirectoryLayer; internal FdbDirectoryPartition(IFdbTuple location, IFdbTuple relativeLocation, Slice prefix, FdbDirectoryLayer directoryLayer) - : base(location, relativeLocation, prefix, new FdbDirectoryLayer(FdbSubspace.Create(prefix + FdbKey.Directory), FdbSubspace.Create(prefix), location), LayerId) + : base(location, relativeLocation, prefix, new FdbDirectoryLayer(FdbSubspace.CreateDynamic(prefix + FdbKey.Directory, TypeSystem.Tuples), FdbSubspace.CreateDynamic(prefix, TypeSystem.Tuples), location), LayerId, TypeSystem.Tuples) { m_parentDirectoryLayer = directoryLayer; } diff --git a/FoundationDB.Client/Layers/Directories/FdbDirectorySubspace.cs b/FoundationDB.Client/Layers/Directories/FdbDirectorySubspace.cs index 98901648b..02e8f9185 100644 --- a/FoundationDB.Client/Layers/Directories/FdbDirectorySubspace.cs +++ b/FoundationDB.Client/Layers/Directories/FdbDirectorySubspace.cs @@ -40,11 +40,11 @@ namespace FoundationDB.Layers.Directories /// A Directory Subspace represents the contents of a directory, but it also remembers the path with which it was opened and offers convenience methods to operate on the directory at that path. /// An instance of DirectorySubspace can be used for all the usual subspace operations. It can also be used to operate on the directory with which it was opened. 
[DebuggerDisplay("Path={this.FullName}, Prefix={InternalKey}, Layer={Layer}")] - public class FdbDirectorySubspace : FdbSubspace, IFdbDirectory + public class FdbDirectorySubspace : FdbDynamicSubspace, IFdbDirectory { - internal FdbDirectorySubspace(IFdbTuple location, IFdbTuple relativeLocation, Slice prefix, FdbDirectoryLayer directoryLayer, Slice layer) - : base(prefix) + internal FdbDirectorySubspace(IFdbTuple location, IFdbTuple relativeLocation, Slice prefix, FdbDirectoryLayer directoryLayer, Slice layer, IFdbTypeSystem protocol) + : base(prefix, protocol) { Contract.Requires(location != null && relativeLocation != null && prefix != null && directoryLayer != null); if (layer.IsNull) layer = Slice.Empty; @@ -151,7 +151,7 @@ public async Task ChangeLayerAsync([NotNull] IFdbTransacti // set the layer to the new value await this.DirectoryLayer.ChangeLayerInternalAsync(trans, this.RelativeLocation, newLayer).ConfigureAwait(false); // and return the new version of the subspace - return new FdbDirectorySubspace(this.Location, this.RelativeLocation, this.InternalKey, this.DirectoryLayer, newLayer); + return new FdbDirectorySubspace(this.Location, this.RelativeLocation, this.InternalKey, this.DirectoryLayer, newLayer, TypeSystem.Default); } /// Opens a subdirectory with the given . 
diff --git a/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs b/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs index dd6c2098a..cf9b268cc 100644 --- a/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs +++ b/FoundationDB.Client/Layers/Directories/FdbHighContentionAllocator.cs @@ -46,7 +46,7 @@ public sealed class FdbHighContentionAllocator /// Create an allocator operating under a specific location /// - public FdbHighContentionAllocator(IFdbSubspace subspace) + public FdbHighContentionAllocator(IFdbDynamicSubspace subspace) { if (subspace == null) throw new ArgumentException("subspace"); @@ -56,13 +56,13 @@ public FdbHighContentionAllocator(IFdbSubspace subspace) } /// Location of the allocator - public IFdbSubspace Subspace { [NotNull] get; private set; } + public IFdbDynamicSubspace Subspace { [NotNull] get; private set; } /// Subspace used to store the allocation count for the current window - private IFdbSubspace Counters { [NotNull] get; set; } + private IFdbDynamicSubspace Counters { [NotNull] get; set; } /// Subspace used to store the prefixes allocated in the current window - private IFdbSubspace Recent { [NotNull] get; set; } + private IFdbDynamicSubspace Recent { [NotNull] get; set; } /// Returns a 64-bit integer that /// 1) has never and will never be returned by another call to this @@ -77,12 +77,12 @@ public async Task AllocateAsync([NotNull] IFdbTransaction trans) long start = 0, count = 0; var kv = await trans .Snapshot - .GetRange(this.Counters.Tuples.ToRange()) + .GetRange(this.Counters.Keys.ToRange()) .LastOrDefaultAsync(); if (kv.Key.IsPresent) { - start = this.Counters.Tuples.DecodeKey(kv.Key); + start = this.Counters.Keys.Decode(kv.Key); count = kv.Value.ToInt64(); } @@ -91,14 +91,14 @@ public async Task AllocateAsync([NotNull] IFdbTransaction trans) if ((count + 1) * 2 >= window) { // advance the window if (FdbDirectoryLayer.AnnotateTransactions) trans.Annotate("Advance allocator 
window size to {0} starting at {1}", window, start + window); - trans.ClearRange(this.Counters.Key, this.Counters.Tuples.EncodeKey(start) + FdbKey.MinValue); + trans.ClearRange(this.Counters.Key, this.Counters.Keys.Encode(start) + FdbKey.MinValue); start += window; count = 0; - trans.ClearRange(this.Recent.Key, this.Recent.Tuples.EncodeKey(start)); + trans.ClearRange(this.Recent.Key, this.Recent.Keys.Encode(start)); } // Increment the allocation count for the current window - trans.AtomicAdd(this.Counters.Tuples.EncodeKey(start), Slice.FromFixed64(1)); + trans.AtomicAdd(this.Counters.Keys.Encode(start), Slice.FromFixed64(1)); // As of the snapshot being read from, the window is less than half // full, so this should be expected to take 2 tries. Under high @@ -114,7 +114,7 @@ public async Task AllocateAsync([NotNull] IFdbTransaction trans) } // test if the key is used - var key = this.Recent.Tuples.EncodeKey(candidate); + var key = this.Recent.Keys.Encode(candidate); var value = await trans.GetAsync(key).ConfigureAwait(false); if (value.IsNull) diff --git a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs index 6879a25c9..d3d99d5b8 100644 --- a/FoundationDB.Client/Layers/Tuples/FdbTuple.cs +++ b/FoundationDB.Client/Layers/Tuples/FdbTuple.cs @@ -354,6 +354,12 @@ public static Slice[] Pack([NotNull] IEnumerable tuples) // With prefix + public static void Pack(ref TupleWriter writer, [CanBeNull] IFdbTuple tuple) + { + if (tuple == null || tuple.Count == 0) return; + tuple.PackTo(ref writer); + } + /// Efficiently concatenate a prefix with the packed representation of a tuple public static Slice Pack(Slice prefix, [CanBeNull] IFdbTuple tuple) { @@ -788,6 +794,104 @@ public static T DecodeKey(Slice packedKey) return FdbTuplePacker.Deserialize(slice); } + /// Unpack a key containing two elements + /// Slice that should contain the packed representation of a tuple with two elements + /// Decoded value of the elements int the 
tuple. Throws an exception if the tuple is empty of has more than elements. + public static FdbTuple DecodeKey(Slice packedKey) + { + if (packedKey.IsNullOrEmpty) throw new InvalidOperationException("Cannot unpack an empty tuple"); + + var reader = new TupleReader(packedKey); + + T1 item1; + if (!DecodeNext(ref reader, out item1)) throw new FormatException("Failed to decode first item"); + + T2 item2; + if (!DecodeNext(ref reader, out item2)) throw new FormatException("Failed to decode second item"); + + if (reader.Input.HasMore) throw new FormatException("The key contains more than two items"); + + return Create(item1, item2); + } + + /// Unpack a key containing three elements + /// Slice that should contain the packed representation of a tuple with three elements + /// Decoded value of the elements int the tuple. Throws an exception if the tuple is empty of has more than elements. + public static FdbTuple DecodeKey(Slice packedKey) + { + if (packedKey.IsNullOrEmpty) throw new InvalidOperationException("Cannot unpack an empty tuple"); + + var reader = new TupleReader(packedKey); + + T1 item1; + if (!DecodeNext(ref reader, out item1)) throw new FormatException("Failed to decode first item"); + + T2 item2; + if (!DecodeNext(ref reader, out item2)) throw new FormatException("Failed to decode second item"); + + T3 item3; + if (!DecodeNext(ref reader, out item3)) throw new FormatException("Failed to decode third item"); + + if (reader.Input.HasMore) throw new FormatException("The key contains more than three items"); + + return Create(item1, item2, item3); + } + + /// Unpack a key containing four elements + /// Slice that should contain the packed representation of a tuple with four elements + /// Decoded value of the elements int the tuple. Throws an exception if the tuple is empty of has more than elements. 
+ public static FdbTuple DecodeKey(Slice packedKey) + { + if (packedKey.IsNullOrEmpty) throw new InvalidOperationException("Cannot unpack an empty tuple"); + + var reader = new TupleReader(packedKey); + + T1 item1; + if (!DecodeNext(ref reader, out item1)) throw new FormatException("Failed to decode first item"); + + T2 item2; + if (!DecodeNext(ref reader, out item2)) throw new FormatException("Failed to decode second item"); + + T3 item3; + if (!DecodeNext(ref reader, out item3)) throw new FormatException("Failed to decode third item"); + + T4 item4; + if (!DecodeNext(ref reader, out item4)) throw new FormatException("Failed to decode fourth item"); + + if (reader.Input.HasMore) throw new FormatException("The key contains more than four items"); + + return Create(item1, item2, item3, item4); + } + + /// Unpack a key containing five elements + /// Slice that should contain the packed representation of a tuple with five elements + /// Decoded value of the elements int the tuple. Throws an exception if the tuple is empty of has more than elements. 
+ public static FdbTuple DecodeKey(Slice packedKey) + { + if (packedKey.IsNullOrEmpty) throw new InvalidOperationException("Cannot unpack an empty tuple"); + + var reader = new TupleReader(packedKey); + + T1 item1; + if (!DecodeNext(ref reader, out item1)) throw new FormatException("Failed to decode first item"); + + T2 item2; + if (!DecodeNext(ref reader, out item2)) throw new FormatException("Failed to decode second item"); + + T3 item3; + if (!DecodeNext(ref reader, out item3)) throw new FormatException("Failed to decode third item"); + + T4 item4; + if (!DecodeNext(ref reader, out item4)) throw new FormatException("Failed to decode fourth item"); + + T5 item5; + if (!DecodeNext(ref reader, out item5)) throw new FormatException("Failed to decode fiftyh item"); + + if (reader.Input.HasMore) throw new FormatException("The key contains more than four items"); + + return Create(item1, item2, item3, item4, item5); + } + /// Unpack the next item in the tuple, and advance the cursor /// Type of the next value in the tuple /// Reader positionned at the start of the next item to read diff --git a/FoundationDB.Client/Subspaces/Fdb.Directory.cs b/FoundationDB.Client/Subspaces/Fdb.Directory.cs index 0f0cbbc63..61b50b0d5 100644 --- a/FoundationDB.Client/Subspaces/Fdb.Directory.cs +++ b/FoundationDB.Client/Subspaces/Fdb.Directory.cs @@ -72,7 +72,7 @@ public static async Task OpenNamedPartitionAsync(string clusterFil // By convention, all named databases will be under the "/Databases" folder FdbDatabase db = null; - FdbSubspace rootSpace = FdbSubspace.Empty; + var rootSpace = FdbSubspace.Empty; try { db = await Fdb.OpenInternalAsync(clusterFile, dbName, rootSpace, readOnly: false, cancellationToken: cancellationToken).ConfigureAwait(false); diff --git a/FoundationDB.Client/Subspaces/FdbDynamicSubspace.cs b/FoundationDB.Client/Subspaces/FdbDynamicSubspace.cs new file mode 100644 index 000000000..814018307 --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbDynamicSubspace.cs 
@@ -0,0 +1,80 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +using System; +using System.Diagnostics; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + public class FdbDynamicSubspace : FdbSubspace, IFdbDynamicSubspace + { + /// Protocol used to encode keys + private readonly IFdbTypeSystem m_protocol; + + /// Create a new subspace from a binary prefix + /// Prefix of the new subspace + /// If true, take a copy of the prefix + /// Type System used to encode keys in this subspace (optional, will use Tuple Encoding by default) + internal FdbDynamicSubspace(Slice rawPrefix, bool copy, IFdbTypeSystem protocol) + : base (rawPrefix, copy) + { + m_protocol = protocol ?? TypeSystem.Default; + } + + public FdbDynamicSubspace(Slice rawPrefix, IFdbTypeSystem protocol) + : this(rawPrefix, true, protocol) + { } + + protected override IFdbSubspace CreateChildren(Slice suffix) + { + return new FdbDynamicSubspace(ConcatKey(suffix), m_protocol); + } + + public IFdbTypeSystem Protocol + { + get { return m_protocol; } + } + + /// Return a view of all the possible binary keys of this subspace + public FdbDynamicSubspaceKeys Keys + { + [DebuggerStepThrough] + get { return new FdbDynamicSubspaceKeys(this, m_protocol); } + } + + /// Returns an helper object that knows how to create sub-partitions of this subspace + public FdbDynamicSubspacePartition Partition + { + //note: not cached, because this is probably not be called frequently (except in the init path) + [DebuggerStepThrough] + get { return new FdbDynamicSubspacePartition(this, m_protocol); } + } + + } +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/FdbDynamicSubspaceKeys.cs b/FoundationDB.Client/Subspaces/FdbDynamicSubspaceKeys.cs new file mode 100644 index 000000000..7b4fc1959 --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbDynamicSubspaceKeys.cs @@ -0,0 +1,463 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using FoundationDB.Client.Utils; +using FoundationDB.Layers.Tuples; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + + internal static class Batched + { + + public delegate void Handler(ref SliceWriter writer, TValue item, TState protocol); + + [NotNull] + public static Slice[] Convert(SliceWriter writer, [NotNull, ItemNotNull] IEnumerable values, Handler handler, TState state) + { + Contract.Requires(values != null && handler != null); + + //Note on performance: + // - we will reuse the same buffer for each temp key, and copy them into a slice buffer + // - doing it this way adds a memory copy (writer => buffer) but reduce the number of byte[] allocations (and reduce the GC overhead) + + int start = writer.Position; + + var buffer = new SliceBuffer(); + + var coll = values as ICollection; + if (coll != null) + { // pre-allocate the final array with the correct size + var res = new Slice[coll.Count]; + int p = 0; + foreach (var tuple in coll) + { + // reset position to just after the subspace prefix + writer.Position = start; + + handler(ref writer, tuple, state); + + // copy full key in the buffer + res[p++] = buffer.Intern(writer.ToSlice()); + } + Contract.Assert(p == res.Length); + return res; + } + else + { // we won't now the array size until the end... + var res = new List(); + foreach (var tuple in values) + { + // reset position to just after the subspace prefix + writer.Position = start; + + handler(ref writer, tuple, state); + + // copy full key in the buffer + res.Add(buffer.Intern(writer.ToSlice())); + } + return res.ToArray(); + } + } + } + + /// Key helper for a dynamic TypeSystem + public struct FdbDynamicSubspaceKeys + { + //NOTE: everytime an IFdbTuple is used here, it is as a container (vector of objects), and NOT as the Tuple Encoding scheme ! 
(separate concept) + + [NotNull] + public readonly IFdbSubspace Subspace; + + [NotNull] + public readonly IFdbTypeSystem Protocol; + + public FdbDynamicSubspaceKeys([NotNull] IFdbSubspace subspace, [NotNull] IFdbTypeSystem protocol) + { + Contract.Requires(subspace != null && protocol != null); + this.Subspace = subspace; + this.Protocol = protocol; + } + + public FdbKeyRange ToRange() + { + return this.Protocol.ToRange(this.Subspace.Key); + } + + public FdbKeyRange ToRange([NotNull] IFdbTuple tuple) + { + return this.Protocol.ToRange(Pack(tuple)); + } + + public FdbKeyRange ToRange([NotNull] ITupleFormattable tuple) + { + return this.Protocol.ToRange(Pack(tuple)); + } + + public Slice this[[NotNull] IFdbTuple tuple] + { + get { return Pack(tuple); } + } + + public Slice this[[NotNull] ITupleFormattable item] + { + get { return Pack(item); } + } + + public Slice Pack([NotNull] IFdbTuple tuple) + { + if (tuple == null) throw new ArgumentNullException("tuple"); + + var writer = this.Subspace.GetWriter(); + this.Protocol.PackKey(ref writer, tuple); + return writer.ToSlice(); + } + + public Slice Pack([NotNull] ITupleFormattable item) + { + if (item == null) throw new ArgumentNullException("item"); + return Pack(item.ToTuple()); + } + + public Slice[] Pack([NotNull, ItemNotNull] IEnumerable tuples) + { + if (tuples == null) throw new ArgumentNullException("tuples"); + + return Batched.Convert( + this.Subspace.GetWriter(), + tuples, + (ref SliceWriter writer, IFdbTuple tuple, IFdbTypeSystem protocol) => protocol.PackKey(ref writer, tuple), + this.Protocol + ); + } + + public Slice Encode(T item1) + { + var writer = this.Subspace.GetWriter(); + this.Protocol.EncodeKey(ref writer, item1); + return writer.ToSlice(); + } + + public Slice[] Encode(IEnumerable items) + { + return Batched.Convert( + this.Subspace.GetWriter(), + items, + (ref SliceWriter writer, T item, IFdbTypeSystem protocol) => protocol.EncodeKey(ref writer, item), + this.Protocol + ); + } + + public Slice[] 
Encode(IEnumerable items, Func selector) + { + return Batched.Convert( + this.Subspace.GetWriter(), + items, + (ref SliceWriter writer, TSource item, IFdbTypeSystem protocol) => protocol.EncodeKey(ref writer, selector(item)), + this.Protocol + ); + } + + public Slice Encode(T1 item1, T2 item2) + { + var writer = this.Subspace.GetWriter(); + this.Protocol.EncodeKey(ref writer, item1, item2); + return writer.ToSlice(); + } + + public Slice[] Encode(IEnumerable items, Func selector1, Func selector2) + { + return Batched.Convert( + this.Subspace.GetWriter(), + items, + (ref SliceWriter writer, TItem item, IFdbTypeSystem protocol) => protocol.EncodeKey(ref writer, selector1(item), selector2(item)), + this.Protocol + ); + } + + public Slice Encode(T1 item1, T2 item2, T3 item3) + { + var writer = this.Subspace.GetWriter(); + this.Protocol.EncodeKey(ref writer, item1, item2, item3); + return writer.ToSlice(); + } + + public Slice Encode(T1 item1, T2 item2, T3 item3, T4 item4) + { + var writer = this.Subspace.GetWriter(); + this.Protocol.EncodeKey(ref writer, item1, item2, item3, item4); + return writer.ToSlice(); + } + + public Slice Encode(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5) + { + var writer = this.Subspace.GetWriter(); + this.Protocol.EncodeKey(ref writer, item1, item2, item3, item4, item5); + return writer.ToSlice(); + } + + public Slice Encode(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6) + { + var writer = this.Subspace.GetWriter(); + this.Protocol.EncodeKey(ref writer, item1, item2, item3, item4, item5, item6); + return writer.ToSlice(); + } + + public Slice Encode(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7) + { + var writer = this.Subspace.GetWriter(); + this.Protocol.EncodeKey(ref writer, item1, item2, item3, item4, item5, item6, item7); + return writer.ToSlice(); + } + + public Slice Encode(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7, T8 item8) + { + var writer = 
this.Subspace.GetWriter(); + this.Protocol.EncodeKey(ref writer, item1, item2, item3, item4, item5, item6, item7, item8); + return writer.ToSlice(); + } + + public IFdbTuple Unpack(Slice packed) + { + return this.Protocol.UnpackKey(this.Subspace.ExtractKey(packed)); + } + + private static T[] BatchDecode(IEnumerable packed, IFdbSubspace subspace, IFdbTypeSystem protocol, Func decode) + { + var coll = packed as ICollection; + if (coll != null) + { + var res = new T[coll.Count]; + int p = 0; + foreach (var data in packed) + { + res[p++] = decode(subspace.ExtractKey(data), protocol); + } + Contract.Assert(p == res.Length); + return res; + } + else + { + var res = new List(); + foreach (var data in packed) + { + res.Add(decode(subspace.ExtractKey(data), protocol)); + } + return res.ToArray(); + } + } + + public IFdbTuple[] Unpack(IEnumerable packed) + { + return BatchDecode(packed, this.Subspace, this.Protocol, (data, protocol) => protocol.UnpackKey(data)); + } + + public T1 Decode(Slice packed) + { + return this.Protocol.DecodeKey(this.Subspace.ExtractKey(packed)); + } + + public IEnumerable Decode(IEnumerable packed) + { + return BatchDecode(packed, this.Subspace, this.Protocol, (data, protocol) => protocol.DecodeKey(data)); + } + + public FdbTuple Decode(Slice packed) + { + return this.Protocol.DecodeKey(this.Subspace.ExtractKey(packed)); + } + + public IEnumerable> Decode(IEnumerable packed) + { + return BatchDecode(packed, this.Subspace, this.Protocol, (data, protocol) => protocol.DecodeKey(data)); + } + + public FdbTuple Decode(Slice packed) + { + return this.Protocol.DecodeKey(this.Subspace.ExtractKey(packed)); + } + + public IEnumerable> Decode(IEnumerable packed) + { + return BatchDecode(packed, this.Subspace, this.Protocol, (data, protocol) => protocol.DecodeKey(data)); + } + + public FdbTuple Decode(Slice packed) + { + return this.Protocol.DecodeKey(this.Subspace.ExtractKey(packed)); + } + + public IEnumerable> Decode(IEnumerable packed) + { + return 
BatchDecode(packed, this.Subspace, this.Protocol, (data, protocol) => protocol.DecodeKey(data)); + } + + public FdbTuple Decode(Slice packed) + { + return this.Protocol.DecodeKey(this.Subspace.ExtractKey(packed)); + } + + public IEnumerable> Decode(IEnumerable packed) + { + return BatchDecode(packed, this.Subspace, this.Protocol, (data, protocol) => protocol.DecodeKey(data)); + } + + public T DecodeFirst(Slice packed) + { + return this.Protocol.DecodeKeyFirst(this.Subspace.ExtractKey(packed)); + } + public IEnumerable DecodeFirst(IEnumerable packed) + { + return BatchDecode(packed, this.Subspace, this.Protocol, (data, protocol) => protocol.DecodeKeyFirst(data)); + } + + public T DecodeLast(Slice packed) + { + return this.Protocol.DecodeKeyLast(this.Subspace.ExtractKey(packed)); + } + public IEnumerable DecodeLast(Slice[] packed) + { + return BatchDecode(packed, this.Subspace, this.Protocol, (data, protocol) => protocol.DecodeKeyLast(data)); + } + + #region Append: Subspace => Tuple + + /// Return an empty tuple that is attached to this subspace + /// Empty tuple that can be extended, and whose packed representation will always be prefixed by the subspace key + [NotNull] + public IFdbTuple ToTuple() + { + return new FdbPrefixedTuple(this.Subspace.Key, FdbTuple.Empty); + } + + /// Attach a tuple to an existing subspace. + /// Tuple whose items will be appended at the end of the current subspace + /// Tuple that wraps the items of and whose packed representation will always be prefixed by the subspace key. + [NotNull] + public IFdbTuple Concat([NotNull] IFdbTuple tuple) + { + return new FdbPrefixedTuple(this.Subspace.Key, tuple); + } + + /// Convert a formattable item into a tuple that is attached to this subspace. + /// Item that can be converted into a tuple + /// Tuple that is the logical representation of the item, and whose packed representation will always be prefixed by the subspace key. 
+ /// This is the equivalent of calling 'subspace.Create(formattable.ToTuple())' + [NotNull] + public IFdbTuple Concat([NotNull] ITupleFormattable formattable) + { + if (formattable == null) throw new ArgumentNullException("formattable"); + var tuple = formattable.ToTuple(); + if (tuple == null) throw new InvalidOperationException("Formattable item cannot return an empty tuple"); + return new FdbPrefixedTuple(this.Subspace.Key, tuple); + } + + /// Create a new 1-tuple that is attached to this subspace + /// Type of the value to append + /// Value that will be appended + /// Tuple of size 1 that contains , and whose packed representation will always be prefixed by the subspace key. + /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T>(value))' + [NotNull] + public IFdbTuple Append(T value) + { + return new FdbPrefixedTuple(this.Subspace.Key, FdbTuple.Create(value)); + } + + /// Create a new 2-tuple that is attached to this subspace + /// Type of the first value to append + /// Type of the second value to append + /// First value that will be appended + /// Second value that will be appended + /// Tuple of size 2 that contains and , and whose packed representation will always be prefixed by the subspace key. + /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2>(item1, item2))' + [NotNull] + public IFdbTuple Append(T1 item1, T2 item2) + { + return new FdbPrefixedTuple(this.Subspace.Key, FdbTuple.Create(item1, item2)); + } + + /// Create a new 3-tuple that is attached to this subspace + /// Type of the first value to append + /// Type of the second value to append + /// Type of the third value to append + /// First value that will be appended + /// Second value that will be appended + /// Third value that will be appended + /// Tuple of size 3 that contains , and , and whose packed representation will always be prefixed by the subspace key. 
+ /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2, T3>(item1, item2, item3))' + [NotNull] + public IFdbTuple Append(T1 item1, T2 item2, T3 item3) + { + return new FdbPrefixedTuple(this.Subspace.Key, FdbTuple.Create(item1, item2, item3)); + } + + /// Create a new 4-tuple that is attached to this subspace + /// Type of the first value to append + /// Type of the second value to append + /// Type of the third value to append + /// Type of the fourth value to append + /// First value that will be appended + /// Second value that will be appended + /// Third value that will be appended + /// Fourth value that will be appended + /// Tuple of size 4 that contains , , and , and whose packed representation will always be prefixed by the subspace key. + /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2, T3, T4>(item1, item2, item3, item4))' + [NotNull] + public IFdbTuple Append(T1 item1, T2 item2, T3 item3, T4 item4) + { + return new FdbPrefixedTuple(this.Subspace.Key, FdbTuple.Create(item1, item2, item3, item4)); + } + + /// Create a new 5-tuple that is attached to this subspace + /// Type of the first value to append + /// Type of the second value to append + /// Type of the third value to append + /// Type of the fourth value to append + /// Type of the fifth value to append + /// First value that will be appended + /// Second value that will be appended + /// Third value that will be appended + /// Fourth value that will be appended + /// Fifth value that will be appended + /// Tuple of size 5 that contains , , , and , and whose packed representation will always be prefixed by the subspace key. 
+ /// This is the equivalent of calling 'subspace.Create(FdbTuple.Create<T1, T2, T3, T4, T5>(item1, item2, item3, item4, item5))' + [NotNull] + public IFdbTuple Append(T1 item1, T2 item2, T3 item3, T4 item4, T5 item5) + { + return new FdbPrefixedTuple(this.Subspace.Key, FdbTuple.Create(item1, item2, item3, item4, item5)); + } + + #endregion + + } +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/FdbSubspacePartition.cs b/FoundationDB.Client/Subspaces/FdbDynamicSubspacePartition.cs similarity index 73% rename from FoundationDB.Client/Subspaces/FdbSubspacePartition.cs rename to FoundationDB.Client/Subspaces/FdbDynamicSubspacePartition.cs index 16cc324e1..b298a5b49 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspacePartition.cs +++ b/FoundationDB.Client/Subspaces/FdbDynamicSubspacePartition.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -31,42 +31,53 @@ namespace FoundationDB.Client using FoundationDB.Layers.Tuples; using JetBrains.Annotations; using System; - using System.Linq; - using System.Collections.Generic; - public struct FdbSubspacePartition + public struct FdbDynamicSubspacePartition { - private readonly IFdbSubspace m_subspace; + [NotNull] + public readonly IFdbDynamicSubspace Subspace; + + [NotNull] + public readonly IFdbTypeSystem Protocol; - public FdbSubspacePartition(IFdbSubspace subspace) + public FdbDynamicSubspacePartition([NotNull] IFdbDynamicSubspace subspace, [NotNull] IFdbTypeSystem protocol) { if (subspace == null) throw new ArgumentNullException("subspace"); - m_subspace = subspace; + if (protocol == null) throw new ArgumentNullException("protocol"); + this.Subspace = subspace; + this.Protocol = protocol; } - public IFdbSubspace Subspace + /// Returns the same view but using a different Type System + /// Type System that will code keys in this new view + 
/// Review that will partition this subspace using a different Type System + /// + /// This should only be used for one-off usages where creating a new subspace just to encode one key would be overkill. + /// If you are calling this in a loop, consider creating a new subspace using that protocol. + /// + public FdbDynamicSubspacePartition Using([NotNull] IFdbTypeSystem protocol) { - get { return m_subspace; } + return new FdbDynamicSubspacePartition(this.Subspace, protocol); } /// Create a new subspace by appdending a suffix to the current subspace /// Suffix of the new subspace /// New subspace with prefix equal to the current subspace's prefix, followed by - public IFdbSubspace this[Slice suffix] + public IFdbDynamicSubspace this[Slice suffix] { [NotNull] get { if (suffix.IsNull) throw new ArgumentException("Partition suffix cannot be null", "suffix"); //TODO: find a way to limit the number of copies of the key? - return new FdbSubspace(m_subspace.ConcatKey(suffix)); + return new FdbDynamicSubspace(this.Subspace.ConcatKey(suffix), false, this.Protocol); } } /// Create a new subspace by adding a to the current subspace's prefix /// Key that will be appended to the current prefix /// New subspace whose prefix is the concatenation of the parent prefix, and the packed representation of - public IFdbSubspace this[IFdbKey key] + public IFdbDynamicSubspace this[IFdbKey key] { [ContractAnnotation("null => halt; notnull => notnull")] get @@ -77,18 +88,18 @@ public IFdbSubspace this[IFdbKey key] } } - public IFdbSubspace this[IFdbTuple tuple] + public IFdbDynamicSubspace this[IFdbTuple tuple] { [ContractAnnotation("null => halt; notnull => notnull")] get { if (tuple == null) throw new ArgumentNullException("tuple"); //TODO: find a way to limit the number of copies of the packed tuple? 
- return new FdbSubspace(m_subspace.Tuples.Pack(tuple)); + return new FdbDynamicSubspace(this.Subspace.Keys.Pack(tuple), false, this.Protocol); } } - public IFdbSubspace this[ITupleFormattable item] + public IFdbDynamicSubspace this[ITupleFormattable item] { [ContractAnnotation("null => halt; notnull => notnull")] get @@ -109,9 +120,9 @@ public IFdbSubspace this[ITupleFormattable item] /// new FdbSubspace(["Users", ]).Partition("Contacts") == new FdbSubspace(["Users", "Contacts", ]) /// [NotNull] - public IFdbSubspace ByKey(T value) + public IFdbDynamicSubspace ByKey(T value) { - return this[FdbTuple.Create(value)]; + return new FdbDynamicSubspace(this.Subspace.Keys.Encode(value), false, this.Protocol); } /// Partition this subspace into a child subspace @@ -125,9 +136,9 @@ public IFdbSubspace ByKey(T value) /// new FdbSubspace(["Users", ]).Partition("Contacts", "Friends") == new FdbSubspace(["Users", "Contacts", "Friends", ]) /// [NotNull] - public IFdbSubspace ByKey(T1 value1, T2 value2) + public IFdbDynamicSubspace ByKey(T1 value1, T2 value2) { - return this[FdbTuple.Create(value1, value2)]; + return new FdbDynamicSubspace(this.Subspace.Keys.Encode(value1, value2), false, this.Protocol); } /// Partition this subspace into a child subspace @@ -142,9 +153,9 @@ public IFdbSubspace ByKey(T1 value1, T2 value2) /// new FdbSubspace(["Users", ]).Partition("John Smith", "Contacts", "Friends") == new FdbSubspace(["Users", "John Smith", "Contacts", "Friends", ]) /// [NotNull] - public IFdbSubspace ByKey(T1 value1, T2 value2, T3 value3) + public IFdbDynamicSubspace ByKey(T1 value1, T2 value2, T3 value3) { - return this[FdbTuple.Create(value1, value2, value3)]; + return new FdbDynamicSubspace(this.Subspace.Keys.Encode(value1, value2, value3), false, this.Protocol); } /// Partition this subspace into a child subspace @@ -161,9 +172,9 @@ public IFdbSubspace ByKey(T1 value1, T2 value2, T3 value3) /// new FdbSubspace(["Users", ]).Partition("John Smith", "Contacts", "Friends", 
"Messages") == new FdbSubspace(["Users", "John Smith", "Contacts", "Friends", "Messages", ]) /// [NotNull] - public IFdbSubspace ByKey(T1 value1, T2 value2, T3 value3, T4 value4) + public IFdbDynamicSubspace ByKey(T1 value1, T2 value2, T3 value3, T4 value4) { - return this[FdbTuple.Create(value1, value2, value3, value4)]; + return new FdbDynamicSubspace(this.Subspace.Keys.Encode(value1, value2, value3, value4), false, this.Protocol); } } diff --git a/FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`1.cs b/FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`1.cs new file mode 100644 index 000000000..e929a0bec --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`1.cs @@ -0,0 +1,81 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +using System; +using System.Collections.Generic; +using FoundationDB.Layers.Tuples; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + public struct FdbEncoderSubspaceKeys + { + + public readonly IFdbSubspace Subspace; + public readonly IKeyEncoder Encoder; + + public FdbEncoderSubspaceKeys([NotNull] IFdbSubspace subspace, [NotNull] IKeyEncoder encoder) + { + this.Subspace = subspace; + this.Encoder = encoder; + } + + public Slice this[T value] + { + get { return Encode(value); } + } + + public Slice Encode(T value) + { + return this.Subspace.ConcatKey(this.Encoder.EncodeKey(value)); + } + + public Slice[] Encode([NotNull] IEnumerable values) + { + if (values == null) throw new ArgumentNullException("values"); + return Batched>.Convert( + this.Subspace.GetWriter(), + values, + (ref SliceWriter writer, T value, IKeyEncoder encoder) => { writer.WriteBytes(encoder.EncodeKey(value)); }, + this.Encoder + ); + } + + public T Decode(Slice packed) + { + return this.Encoder.DecodeKey(this.Subspace.ExtractKey(packed)); + } + + public FdbKeyRange ToRange(T value) + { + //REVIEW: which semantic for ToRange() should we use? 
+ return FdbTuple.ToRange(Encode(value)); + } + + } +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`2.cs b/FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`2.cs new file mode 100644 index 000000000..7669ca180 --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`2.cs @@ -0,0 +1,81 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +using System; +using System.Collections.Generic; +using FoundationDB.Layers.Tuples; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + public struct FdbEncoderSubspaceKeys + { + + public readonly IFdbSubspace Subspace; + public readonly ICompositeKeyEncoder Encoder; + + public FdbEncoderSubspaceKeys([NotNull] IFdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) + { + this.Subspace = subspace; + this.Encoder = encoder; + } + + public Slice this[T1 value1, T2 value2] + { + get { return Encode(value1, value2); } + } + + public Slice Encode(T1 value1, T2 value2) + { + return this.Subspace.ConcatKey(this.Encoder.EncodeKey(value1, value2)); + } + + public Slice[] Encode([NotNull] IEnumerable values, Func selector1, Func selector2) + { + if (values == null) throw new ArgumentNullException("values"); + return Batched>.Convert( + this.Subspace.GetWriter(), + values, + (ref SliceWriter writer, TSource value, ICompositeKeyEncoder encoder) => writer.WriteBytes(encoder.EncodeKey(selector1(value), selector2(value))), + this.Encoder + ); + } + + public FdbTuple Decode(Slice packed) + { + return this.Encoder.DecodeKey(this.Subspace.ExtractKey(packed)); + } + + public FdbKeyRange ToRange(T1 value1, T2 value2) + { + //REVIEW: which semantic for ToRange() should we use? + return FdbTuple.ToRange(Encode(value1, value2)); + } + + } +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`3.cs b/FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`3.cs new file mode 100644 index 000000000..143de47e9 --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`3.cs @@ -0,0 +1,81 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +using System; +using System.Collections.Generic; +using FoundationDB.Layers.Tuples; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + public struct FdbEncoderSubspaceKeys + { + + public readonly IFdbSubspace Subspace; + public readonly ICompositeKeyEncoder Encoder; + + public FdbEncoderSubspaceKeys([NotNull] IFdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) + { + this.Subspace = subspace; + this.Encoder = encoder; + } + + public Slice this[T1 value1, T2 value2, T3 value3] + { + get { return Encode(value1, value2, value3); } + } + + public Slice Encode(T1 value1, T2 value2, T3 value3) + { + return this.Subspace.ConcatKey(this.Encoder.EncodeKey(value1, value2, value3)); + } + + public Slice[] Encode([NotNull] IEnumerable values, Func selector1, Func selector2, Func selector3) + { + if (values == null) throw new ArgumentNullException("values"); + return Batched>.Convert( + this.Subspace.GetWriter(), + values, + (ref SliceWriter writer, TSource value, ICompositeKeyEncoder encoder) => writer.WriteBytes(encoder.EncodeKey(selector1(value), selector2(value), selector3(value))), + this.Encoder + ); + } + + public FdbTuple Decode(Slice packed) + { + return this.Encoder.DecodeKey(this.Subspace.ExtractKey(packed)); + } + + public FdbKeyRange ToRange(T1 value1, T2 value2, T3 value3) + { + //REVIEW: which semantic for ToRange() should we use? + return FdbTuple.ToRange(Encode(value1, value2, value3)); + } + + } +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`1.cs b/FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`1.cs new file mode 100644 index 000000000..6112971aa --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`1.cs @@ -0,0 +1,61 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +using System; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + public struct FdbEncoderSubspacePartition + { + public readonly IFdbSubspace Subspace; + public readonly IKeyEncoder Encoder; + + public FdbEncoderSubspacePartition([NotNull] IFdbSubspace subspace, [NotNull] IKeyEncoder encoder) + { + this.Subspace = subspace; + this.Encoder = encoder; + } + + public IFdbSubspace ByKey(T value) + { + return this.Subspace[this.Encoder.EncodeKey(value)]; + } + + public IFdbDynamicSubspace ByKey(T value, IFdbTypeSystem protocol) + { + return new FdbDynamicSubspace(this.Subspace.ConcatKey(this.Encoder.EncodeKey(value)), protocol); + } + + public IFdbEncoderSubspace ByKey(T value, IKeyEncoder encoder) + { + return new FdbEncoderSubspace(this.Subspace.ConcatKey(this.Encoder.EncodeKey(value)), encoder); + } + + } +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`2.cs b/FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`2.cs new file mode 100644 index 000000000..2db335d21 --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`2.cs @@ -0,0 +1,66 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +using System; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + public struct FdbEncoderSubspacePartition + { + public readonly IFdbSubspace Subspace; + public readonly ICompositeKeyEncoder Encoder; + + public FdbEncoderSubspacePartition([NotNull] IFdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) + { + this.Subspace = subspace; + this.Encoder = encoder; + } + + public IFdbSubspace this[T1 value1, T2 value2] + { + get { return ByKey(value1, value2); } + } + + public IFdbSubspace ByKey(T1 value1, T2 value2) + { + return this.Subspace[this.Encoder.EncodeKey(value1, value2)]; + } + + public IFdbDynamicSubspace ByKey(T1 value1, T2 value2, IFdbTypeSystem protocol) + { + return new FdbDynamicSubspace(this.Subspace.ConcatKey(this.Encoder.EncodeKey(value1, value2)), protocol); + } + + public IFdbEncoderSubspace ByKey(T1 value1, T2 value2, IKeyEncoder encoder) + { + return new FdbEncoderSubspace(this.Subspace.ConcatKey(this.Encoder.EncodeKey(value1, value2)), encoder); + } + + } +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`3.cs b/FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`3.cs new file mode 100644 index 000000000..91e507c23 --- /dev/null 
+++ b/FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`3.cs @@ -0,0 +1,66 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +using System; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + public struct FdbEncoderSubspacePartition + { + public readonly IFdbSubspace Subspace; + public readonly ICompositeKeyEncoder Encoder; + + public FdbEncoderSubspacePartition([NotNull] IFdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) + { + this.Subspace = subspace; + this.Encoder = encoder; + } + + public IFdbSubspace this[T1 value1, T2 value2, T3 value3] + { + get { return ByKey(value1, value2, value3); } + } + + public IFdbSubspace ByKey(T1 value1, T2 value2, T3 value3) + { + return this.Subspace[this.Encoder.EncodeKey(value1, value2, value3)]; + } + + public IFdbDynamicSubspace ByKey(T1 value1, T2 value2, T3 value3, IFdbTypeSystem protocol) + { + return new FdbDynamicSubspace(this.Subspace.ConcatKey(this.Encoder.EncodeKey(value1, value2, value3)), protocol); + } + + public IFdbEncoderSubspace ByKey(T1 value1, T2 value2, T3 value3, IKeyEncoder encoder) + { + return new FdbEncoderSubspace(this.Subspace.ConcatKey(this.Encoder.EncodeKey(value1, value2, value3)), encoder); + } + + } +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/FdbEncoderSubspace`1.cs b/FoundationDB.Client/Subspaces/FdbEncoderSubspace`1.cs new file mode 100644 index 000000000..645c17fc1 --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbEncoderSubspace`1.cs @@ -0,0 +1,73 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +using System; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + + /// Subspace that knows how to encode and decode its key + /// Type of the key handled by this subspace + public class FdbEncoderSubspace : FdbSubspace, IFdbEncoderSubspace + { + private readonly IKeyEncoder m_encoder; + + // ReSharper disable once FieldCanBeMadeReadOnly.Local + private /*readonly*/ FdbEncoderSubspaceKeys m_keys; + + public FdbEncoderSubspace(Slice rawPrefix, [NotNull] IKeyEncoder encoder) + : this(rawPrefix, true, encoder) + { } + + internal FdbEncoderSubspace(Slice rawPrefix, bool copy, [NotNull] IKeyEncoder encoder) + : base(rawPrefix, copy) + { + if (encoder == null) throw new ArgumentNullException("encoder"); + m_encoder = encoder; + m_keys = new FdbEncoderSubspaceKeys(this, encoder); + } + + public IKeyEncoder Encoder + { + get { return m_encoder; } + } + + public FdbEncoderSubspaceKeys Keys + { + get { return m_keys; } + } + + public FdbEncoderSubspacePartition Partition + { + get { return new FdbEncoderSubspacePartition(this, m_encoder); } + 
} + + } + +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/FdbEncoderSubspace`2.cs b/FoundationDB.Client/Subspaces/FdbEncoderSubspace`2.cs new file mode 100644 index 000000000..64d522a43 --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbEncoderSubspace`2.cs @@ -0,0 +1,81 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +using System; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + + /// Subspace that knows how to encode and decode its key + /// Type of the first item of the keys handled by this subspace + /// Type of the second item of the keys handled by this subspace + public class FdbEncoderSubspace : FdbSubspace, IFdbEncoderSubspace + { + private readonly ICompositeKeyEncoder m_encoder; + + // ReSharper disable once FieldCanBeMadeReadOnly.Local + private /*readonly*/ FdbEncoderSubspaceKeys m_keys; + + public FdbEncoderSubspace(Slice rawPrefix, [NotNull] ICompositeKeyEncoder encoder) + : this(rawPrefix, true, encoder) + { } + + internal FdbEncoderSubspace(Slice rawPrefix, bool copy, [NotNull] ICompositeKeyEncoder encoder) + : base(rawPrefix, copy) + { + if (encoder == null) throw new ArgumentNullException("encoder"); + m_encoder = encoder; + m_keys = new FdbEncoderSubspaceKeys(this, encoder); + } + + private FdbEncoderSubspace m_partial; + + public IFdbEncoderSubspace Partial + { + get { return m_partial ?? (m_partial = new FdbEncoderSubspace(GetKeyPrefix(), false, KeyValueEncoders.Head(m_encoder))); } + } + + public ICompositeKeyEncoder Encoder + { + get { return m_encoder; } + } + + public FdbEncoderSubspaceKeys Keys + { + get { return m_keys; } + } + + public FdbEncoderSubspacePartition Partition + { + get { return new FdbEncoderSubspacePartition(this, m_encoder); } + } + + } + +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/FdbEncoderSubspace`3.cs b/FoundationDB.Client/Subspaces/FdbEncoderSubspace`3.cs new file mode 100644 index 000000000..444f91b9a --- /dev/null +++ b/FoundationDB.Client/Subspaces/FdbEncoderSubspace`3.cs @@ -0,0 +1,87 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +using System; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + + /// Subspace that knows how to encode and decode its key + /// Type of the first item of the keys handled by this subspace + /// Type of the second item of the keys handled by this subspace + /// Type of the third item of the keys handled by this subspace + public class FdbEncoderSubspace : FdbSubspace, IFdbEncoderSubspace + { + private readonly ICompositeKeyEncoder m_encoder; + + // ReSharper disable once FieldCanBeMadeReadOnly.Local + private /*readonly*/ FdbEncoderSubspaceKeys m_keys; + private FdbEncoderSubspace m_head; + private FdbEncoderSubspace m_partial; + + public FdbEncoderSubspace(Slice rawPrefix, [NotNull] ICompositeKeyEncoder encoder) + : this(rawPrefix, true, encoder) + { } + + internal FdbEncoderSubspace(Slice rawPrefix, bool copy, [NotNull] ICompositeKeyEncoder encoder) + : base(rawPrefix, copy) + { + if (encoder == null) throw new ArgumentNullException("encoder"); + m_encoder = encoder; + m_keys = new FdbEncoderSubspaceKeys(this, encoder); + } + + public IFdbEncoderSubspace Head + { + get { return m_head ?? (m_head = new FdbEncoderSubspace(GetKeyPrefix(), false, KeyValueEncoders.Head(m_encoder))); } + } + + public IFdbEncoderSubspace Partial + { + get { return m_partial ?? 
(m_partial = new FdbEncoderSubspace(GetKeyPrefix(), false, KeyValueEncoders.Pair(m_encoder))); } + } + + public ICompositeKeyEncoder Encoder + { + get { return m_encoder; } + } + + public FdbEncoderSubspaceKeys Keys + { + get { return m_keys; } + } + + public FdbEncoderSubspacePartition Partition + { + get { return new FdbEncoderSubspacePartition(this, m_encoder); } + } + + } + +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/FdbSubspace.cs b/FoundationDB.Client/Subspaces/FdbSubspace.cs index 3254a9d4a..ad2e31ffa 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspace.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspace.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -26,31 +26,25 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY */ #endregion +using System.Linq; + namespace FoundationDB.Client { using FoundationDB.Layers.Tuples; using JetBrains.Annotations; using System; - using System.Linq; using System.Collections.Generic; - using System.Globalization; using System.Diagnostics; /// Adds a prefix on every keys, to group them inside a common subspace public class FdbSubspace : IFdbSubspace, IFdbKey, IEquatable, IComparable { /// Empty subspace, that does not add any prefix to the keys - public static readonly FdbSubspace Empty = new FdbSubspace(Slice.Empty); + public static readonly IFdbSubspace Empty = new FdbSubspace(Slice.Empty); /// Binary prefix of this subspace private Slice m_rawPrefix; //PERF: readonly struct - /// Helper used to deal with keys in this subspace - private FdbSubspaceKeys m_keys; // cached for perf reasons - - /// Helper used to deal with keys in this subspace - private FdbSubspaceTuples m_tuples; // cached for perf reasons - /// Returns the key of this directory subspace /// This should only be used by methods that can use the key internally, even if it is 
not supposed to be exposed (as is the case for directory partitions) protected Slice InternalKey @@ -68,20 +62,16 @@ protected FdbSubspace([NotNull] IFdbSubspace copy) Slice key = sub != null ? sub.m_rawPrefix : copy.ToFoundationDbKey(); if (key.IsNull) throw new ArgumentException("The subspace key cannot be null. Use Slice.Empty if you want a subspace with no prefix.", "copy"); m_rawPrefix = key; - m_keys = new FdbSubspaceKeys(this); - m_tuples = new FdbSubspaceTuples(this); } /// Create a new subspace from a binary prefix /// Prefix of the new subspace /// If true, take a copy of the prefix - protected FdbSubspace(Slice rawPrefix, bool copy) + internal FdbSubspace(Slice rawPrefix, bool copy) { if (rawPrefix.IsNull) throw new ArgumentException("The subspace key cannot be null. Use Slice.Empty if you want a subspace with no prefix.", "rawPrefix"); if (copy) rawPrefix = rawPrefix.Memoize(); m_rawPrefix = rawPrefix.Memoize(); - m_keys = new FdbSubspaceKeys(this); - m_tuples = new FdbSubspaceTuples(this); } /// Create a new subspace from a binary prefix @@ -98,37 +88,113 @@ public FdbSubspace(Slice rawPrefix) /// Prefix of the new subspace /// New subspace that will use a copy of as its prefix [NotNull] - public static FdbSubspace Create(Slice slice) + public static IFdbSubspace Create(Slice slice) + { + return new FdbDynamicSubspace(slice, TypeSystem.Default); + } + + public static IFdbSubspace Create([NotNull] TKey key) + where TKey : IFdbKey + { + if (key == null) throw new ArgumentNullException("key"); + return new FdbSubspace(key.ToFoundationDbKey()); + } + + /// Create a new Subspace using a binary key as the prefix + /// Prefix of the new subspace + /// Type System used to encode the keys of this subspace + /// New subspace that will use a copy of as its prefix + [NotNull] + public static IFdbDynamicSubspace CreateDynamic(Slice slice, IFdbTypeSystem protocol = null) + { + return new FdbDynamicSubspace(slice, protocol); + } + + public static IFdbDynamicSubspace 
CreateDynamic([NotNull] TKey key, IFdbTypeSystem protocol = null) + where TKey : IFdbKey { - return new FdbSubspace(slice); + if (key == null) throw new ArgumentNullException("key"); + return new FdbDynamicSubspace(key.ToFoundationDbKey(), protocol); } /// Create a new Subspace using a tuples as the prefix /// Tuple that represents the prefix of the new subspace /// New subspace instance that will use the packed representation of as its prefix [NotNull] - public static FdbSubspace Create([NotNull] IFdbTuple tuple) + public static IFdbDynamicSubspace CreateDynamic([NotNull] IFdbTuple tuple) { if (tuple == null) throw new ArgumentNullException("tuple"); - return new FdbSubspace(tuple.ToSlice(), true); + return new FdbDynamicSubspace(tuple.ToSlice(), true, TypeSystem.Tuples); + } + + [NotNull] + public static IFdbEncoderSubspace CreateEncoder(Slice slice, IKeyEncoder encoder) + { + if (encoder == null) throw new ArgumentNullException("encoder"); + return new FdbEncoderSubspace(slice, encoder); + } + + [NotNull] + public static IFdbEncoderSubspace CreateEncoder(Slice slice, ICompositeKeyEncoder encoder) + { + if (encoder == null) throw new ArgumentNullException("encoder"); + return new FdbEncoderSubspace(slice, encoder); + } + + [NotNull] + public static IFdbEncoderSubspace CreateEncoder(Slice slice, ICompositeKeyEncoder encoder) + { + if (encoder == null) throw new ArgumentNullException("encoder"); + return new FdbEncoderSubspace(slice, encoder); } /// Clone this subspace /// New Subspace that uses the same prefix key /// Hint: Cloning a special Subspace like a or will not keep all the "special abilities" of the parent. 
[NotNull] - public static FdbSubspace Copy([NotNull] IFdbSubspace subspace) + public static IFdbSubspace Copy([NotNull] IFdbSubspace subspace) { + var dyn = subspace as FdbDynamicSubspace; + if (dyn != null) + { + return new FdbDynamicSubspace(dyn.InternalKey, true, dyn.Protocol); + } + var sub = subspace as FdbSubspace; if (sub != null) { //SPOILER WARNING: You didn't hear it from me, but some say that you can use this to bypass the fact that FdbDirectoryPartition.get_Key and ToRange() throws in v2.x ... If you bypass this protection and bork your database, don't come crying! return new FdbSubspace(sub.InternalKey, true); } - else - { - return new FdbSubspace(subspace.Key, true); - } + + return new FdbSubspace(subspace.Key, true); + } + + /// Create a copy of a subspace, using a specific Type System + /// New Subspace that uses the same prefix key, and the provided Type System + [NotNull] + public static IFdbDynamicSubspace CopyDynamic([NotNull] IFdbSubspace subspace, [NotNull] IFdbTypeSystem protocol) + { + if (protocol == null) throw new ArgumentNullException("protocol"); ; + return new FdbDynamicSubspace(subspace.Key, true, protocol); + } + + /// Create a copy of a subspace, using a specific Type System + /// New Subspace that uses the same prefix key, and the provided Type System + [NotNull] + public static IFdbEncoderSubspace CopyEncoder([NotNull] IFdbSubspace subspace, [NotNull] IKeyEncoder encoder) + { + if (encoder == null) throw new ArgumentNullException("encoder"); + return new FdbEncoderSubspace(subspace.Key, true, encoder); + } + + /// Create a copy of a subspace, using a specific Type System + /// New Subspace that uses the same prefix key, and the provided Type System + [NotNull] + public static IFdbEncoderSubspace CopyEncoder([NotNull] IFdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) + { + if (encoder == null) throw new ArgumentNullException("encoder"); + return new FdbEncoderSubspace(subspace.Key, true, encoder); } #endregion @@ 
-160,26 +226,40 @@ protected virtual Slice GetKeyPrefix() return m_rawPrefix; } - /// Return a view of all the possible binary keys of this subspace - public FdbSubspaceKeys Keys + protected virtual IFdbSubspace CreateChildren(Slice suffix) { - [DebuggerStepThrough] - get { return m_keys; } + return new FdbSubspace(ConcatKey(suffix)); } - /// Returns an helper object that knows how to create sub-partitions of this subspace - public FdbSubspacePartition Partition + public FdbKeyRange ToRange() { - //note: not cached, because this is probably not be called frequently (except in the init path) - [DebuggerStepThrough] - get { return new FdbSubspacePartition(this); } + return ToRange(Slice.Empty); } - /// Return a view of all the possible tuple-based keys of this subspace - public FdbSubspaceTuples Tuples + public virtual FdbKeyRange ToRange(Slice suffix) { - [DebuggerStepThrough] - get { return m_tuples; } + return FdbKeyRange.StartsWith(ConcatKey(suffix)); + } + + public virtual FdbKeyRange ToRange(TKey key) + where TKey : IFdbKey + { + if (key == null) throw new ArgumentNullException("key"); + return FdbKeyRange.StartsWith(ConcatKey(key.ToFoundationDbKey())); + } + + public IFdbSubspace this[Slice suffix] + { + get { return CreateChildren(suffix); } + } + + public IFdbSubspace this[IFdbKey key] + { + get + { + if (key == null) throw new ArgumentNullException("key"); + return CreateChildren(key.ToFoundationDbKey()); + } } /// Tests whether the specified starts with this Subspace's prefix, indicating that the Subspace logically contains . @@ -191,24 +271,42 @@ public virtual bool Contains(Slice key) } /// Append a key to the subspace key - /// This is the equivalent of calling 'subspace.Key + key' - public Slice ConcatKey(Slice key) + /// This is the equivalent of calling 'subspace.Key + suffix' + public Slice ConcatKey(Slice suffix) { //REVIEW: what to do with Slice.Nil? 
- return GetKeyPrefix().Concat(key); + return GetKeyPrefix().Concat(suffix); + } + + public Slice ConcatKey(TKey key) + where TKey : IFdbKey + { + if (key == null) throw new ArgumentNullException("key"); + var suffix = key.ToFoundationDbKey(); + return GetKeyPrefix().Concat(suffix); } /// Merge an array of keys with the subspace's prefix, all sharing the same buffer /// Array of keys to pack /// Array of slices (for all keys) that share the same underlying buffer - [NotNull] - public Slice[] ConcatKeys([NotNull] IEnumerable keys) + public Slice[] ConcatKeys(IEnumerable keys) { if (keys == null) throw new ArgumentNullException("keys"); //REVIEW: what to do with keys that are Slice.Nil ? return Slice.ConcatRange(GetKeyPrefix(), keys); } + /// Merge an array of keys with the subspace's prefix, all sharing the same buffer + /// Array of keys to pack + /// Array of slices (for all keys) that share the same underlying buffer + public Slice[] ConcatKeys(IEnumerable keys) + where TKey : IFdbKey + { + if (keys == null) throw new ArgumentNullException("keys"); + //REVIEW: what to do with keys that are Slice.Nil ? + return Slice.ConcatRange(GetKeyPrefix(), keys.Select(key => key.ToFoundationDbKey())); + } + /// Remove the subspace prefix from a binary key, and only return the tail, or Slice.Nil if the key does not fit inside the namespace /// Complete key that contains the current subspace prefix, and a binary suffix /// If true, verify that is inside the bounds of the subspace @@ -234,8 +332,7 @@ public Slice ExtractKey(Slice key, bool boundCheck = false) /// If true, verify that each key in is inside the bounds of the subspace /// Array of only the binary suffix of the keys, Slice.Empty for a key that is exactly equal to the subspace prefix, or Slice.Nil for a key that is outside of the subspace /// If is true and at least one key in is outside the current subspace. 
- [NotNull] - public Slice[] ExtractKeys([NotNull] IEnumerable keys, bool boundCheck = false) + public Slice[] ExtractKeys(IEnumerable keys, bool boundCheck = false) { if (keys == null) throw new ArgumentNullException("keys"); @@ -278,6 +375,19 @@ public Slice[] ExtractKeys([NotNull] IEnumerable keys, bool boundCheck = } } + public SliceWriter GetWriter(int capacity = 0) + { + if (capacity < 0) throw new ArgumentOutOfRangeException("capacity"); + + var prefix = GetKeyPrefix(); + if (capacity > 0) + { + capacity += prefix.Count; + //TODO: round up to multiple of 8? + } + return new SliceWriter(prefix, capacity); + } + #endregion #region IEquatable / IComparable... @@ -377,7 +487,7 @@ public virtual string DumpKey(Slice key) /// Printable representation of this subspace public override string ToString() { - return String.Format(CultureInfo.InvariantCulture, "Subspace({0})", this.InternalKey.ToString()); + return "Subspace(" + this.InternalKey.ToString() + ")"; } #endregion diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs b/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs index 1e3126afa..652e267c8 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceExtensions.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -29,11 +29,9 @@ DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY namespace FoundationDB.Client { using FoundationDB.Client.Utils; - using FoundationDB.Layers.Tuples; using JetBrains.Annotations; using System; using System.Collections.Generic; - using System.Linq; using System.Threading; using System.Threading.Tasks; @@ -41,6 +39,33 @@ namespace FoundationDB.Client public static class FdbSubspaceExtensions { + /// Return a version of this subspace, which uses a different type system to produces the keys and values + /// Instance of a generic subspace + /// If non-null, uses this specific instance of the TypeSystem. If null, uses the default instance for this particular TypeSystem + /// Subspace equivalent to , but augmented with a specific TypeSystem + public static IFdbDynamicSubspace Using([NotNull] this IFdbSubspace subspace, [NotNull] IFdbTypeSystem protocol) + { + return FdbSubspace.CopyDynamic(subspace, protocol); + } + + /// Return a version of this subspace, which uses a different type system to produces the keys and values + /// Instance of a generic subspace + /// Custom key encoder + /// Subspace equivalent to , but augmented with a specific TypeSystem + public static IFdbEncoderSubspace UsingEncoder([NotNull] this IFdbSubspace subspace, [NotNull] IKeyEncoder encoder) + { + return FdbSubspace.CopyEncoder(subspace, encoder); + } + + /// Return a version of this subspace, which uses a different type system to produces the keys and values + /// Instance of a generic subspace + /// Custom key encoder + /// Subspace equivalent to , but augmented with a specific TypeSystem + public static IFdbEncoderSubspace UsingEncoder([NotNull] this IFdbSubspace subspace, [NotNull] ICompositeKeyEncoder encoder) + { + return FdbSubspace.CopyEncoder(subspace, encoder); + } + /// Clear the entire content of a subspace public static void ClearRange(this IFdbTransaction trans, [NotNull] IFdbSubspace subspace) { @@ -70,6 +95,7 @@ public static FdbRangeQuery> GetRangeStartsWith(this } /// Tests whether the 
specified starts with this Subspace's prefix, indicating that the Subspace logically contains . + /// /// The key to be tested /// If is null public static bool Contains([NotNull] this IFdbSubspace subspace, [NotNull] TKey key) diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceKeys.cs b/FoundationDB.Client/Subspaces/FdbSubspaceKeys_OLD.cs similarity index 97% rename from FoundationDB.Client/Subspaces/FdbSubspaceKeys.cs rename to FoundationDB.Client/Subspaces/FdbSubspaceKeys_OLD.cs index 3ea23d66a..d30f41304 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceKeys.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceKeys_OLD.cs @@ -34,11 +34,12 @@ namespace FoundationDB.Client using System.Linq; using System.Collections.Generic; - public struct FdbSubspaceKeys + [Obsolete("REMOVE ME!")] + public struct FdbSubspaceKeys_OLD { private readonly IFdbSubspace m_subspace; - public FdbSubspaceKeys(IFdbSubspace subspace) + public FdbSubspaceKeys_OLD(IFdbSubspace subspace) { Contract.Requires(subspace != null); m_subspace = subspace; diff --git a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs b/FoundationDB.Client/Subspaces/FdbSubspaceTuples_OLD.cs similarity index 99% rename from FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs rename to FoundationDB.Client/Subspaces/FdbSubspaceTuples_OLD.cs index 0f600c01d..1721020bb 100644 --- a/FoundationDB.Client/Subspaces/FdbSubspaceTuples.cs +++ b/FoundationDB.Client/Subspaces/FdbSubspaceTuples_OLD.cs @@ -37,7 +37,8 @@ namespace FoundationDB.Client /// Provides of methods to encode and decodes keys using the Tuple Encoding format - public struct FdbSubspaceTuples + [Obsolete("REMOVE ME!")] + public struct FdbSubspaceTuples_OLD { /// Ref to the parent subspace @@ -45,7 +46,7 @@ public struct FdbSubspaceTuples /// Wraps an existing subspace /// - public FdbSubspaceTuples(IFdbSubspace subspace) + public FdbSubspaceTuples_OLD(IFdbSubspace subspace) { Contract.Requires(subspace != null); m_subspace = subspace; diff --git 
a/FoundationDB.Client/Subspaces/IFdbDynamicSubspace.cs b/FoundationDB.Client/Subspaces/IFdbDynamicSubspace.cs new file mode 100644 index 000000000..4f5e997cd --- /dev/null +++ b/FoundationDB.Client/Subspaces/IFdbDynamicSubspace.cs @@ -0,0 +1,49 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#endregion + +using System; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + + public interface IFdbDynamicSubspace : IFdbSubspace + { + + /// Type system used to encode and decode keys in this subspace + IFdbTypeSystem Protocol {[NotNull] get; } + + /// Return a view of all the possible keys of this subspace + FdbDynamicSubspaceKeys Keys { get; } + + /// Returns an helper object that knows how to create sub-partitions of this subspace + FdbDynamicSubspacePartition Partition { get; } + + } + +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/IFdbEncoderSubspace.cs b/FoundationDB.Client/Subspaces/IFdbEncoderSubspace.cs new file mode 100644 index 000000000..41f28789d --- /dev/null +++ b/FoundationDB.Client/Subspaces/IFdbEncoderSubspace.cs @@ -0,0 +1,79 @@ +#region BSD Licence +/* Copyright (c) 2013-2015, Doxense SAS +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Doxense nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#endregion + +using System; +using JetBrains.Annotations; + +namespace FoundationDB.Client +{ + + public interface IFdbEncoderSubspace : IFdbSubspace + { + + IKeyEncoder Encoder { [NotNull] get; } + + /// Return a view of all the possible keys of this subspace + FdbEncoderSubspaceKeys Keys { get; } + + /// Returns an helper object that knows how to create sub-partitions of this subspace + FdbEncoderSubspacePartition Partition { get; } + + } + + public interface IFdbEncoderSubspace : IFdbSubspace + { + + ICompositeKeyEncoder Encoder {[NotNull] get; } + + /// Return a view of all the possible keys of this subspace + FdbEncoderSubspaceKeys Keys { get; } + + /// Returns an helper object that knows how to create sub-partitions of this subspace + FdbEncoderSubspacePartition Partition { get; } + + IFdbEncoderSubspace Partial {[NotNull] get; } + + } + + public interface IFdbEncoderSubspace : IFdbSubspace + { + + ICompositeKeyEncoder Encoder {[NotNull] get; } + + /// Return a view of all the possible keys of this subspace + FdbEncoderSubspaceKeys Keys { get; } + + /// Returns an helper object that knows how to create sub-partitions of this subspace + FdbEncoderSubspacePartition Partition { get; } + + IFdbEncoderSubspace Head { [NotNull] get; } + + IFdbEncoderSubspace Partial {[NotNull] get; } + } + +} \ No newline at end of file diff --git a/FoundationDB.Client/Subspaces/IFdbSubspace.cs b/FoundationDB.Client/Subspaces/IFdbSubspace.cs index c2c35161d..980638326 100644 --- 
a/FoundationDB.Client/Subspaces/IFdbSubspace.cs +++ b/FoundationDB.Client/Subspaces/IFdbSubspace.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -40,19 +40,20 @@ public interface IFdbSubspace : IFdbKey /// Returns the prefix of this subspace Slice Key { get; } - /// Return a view of all the possible binary keys of this subspace - FdbSubspaceKeys Keys { get; } + /// Return a key range that contains all the keys in this subspace + FdbKeyRange ToRange(); + FdbKeyRange ToRange(Slice suffix); + FdbKeyRange ToRange([NotNull] TKey key) where TKey : IFdbKey; - /// Helper that can be used to partition this subspace into smaller subspaces - FdbSubspacePartition Partition { get; } + /// Create a new subspace by adding a suffix to the key of the current subspace. + /// Binary suffix that will be appended to the current prefix + /// New subspace whose prefix is the concatenation of the parent prefix, and + IFdbSubspace this[Slice suffix] { [NotNull] get; } - /// Return a view of all the possible tuple-based keys of this subspace - FdbSubspaceTuples Tuples { get; } - - ///// Create a new subspace by adding a suffix to the key of the current subspace. - ///// Binary suffix that will be appended to the current prefix - ///// New subspace whose prefix is the concatenation of the parent prefix, and - //IFdbSubspace this[Slice suffix] { [NotNull] get; } + /// Create a new subspace by adding a suffix to the key of the current subspace. 
+ /// Item that can serialize itself into a binary suffix, that will be appended to the current subspace's prefix + /// New subspace whose prefix is the concatenation of the parent prefix, and + IFdbSubspace this[[NotNull] IFdbKey key] { [NotNull] get; } /// Test if a key is inside the range of keys logically contained by this subspace /// Key to test @@ -68,18 +69,36 @@ public interface IFdbSubspace : IFdbKey Slice ConcatKey(Slice suffix); + Slice ConcatKey([NotNull] TKey key) where TKey : IFdbKey; + [NotNull] Slice[] ConcatKeys([NotNull] IEnumerable suffixes); - /// Remove the subspace prefix from a binary key, or throw if the key does not belong to this subspace - /// Complete key that contains the current subspace prefix, and a binary suffix. - /// Binary suffix of the key (or Slice.Empty is the key is exactly equal to the subspace prefix). If the key is equal to Slice.Nil, then it will be returned unmodified. If the key is outside of the subspace, the method throws. - /// If key is outside the current subspace. + [NotNull] + Slice[] ConcatKeys([NotNull, ItemNotNull] IEnumerable suffixes) where TKey : IFdbKey; + + /// Remove the subspace prefix from a binary key, and only return the tail, or Slice.Nil if the key does not fit inside the namespace + /// Complete key that contains the current subspace prefix, and a binary suffix + /// If true, verify that is inside the bounds of the subspace + /// Binary suffix of the key (or Slice.Empty if the key is exactly equal to the subspace prefix). If the key is outside of the subspace, returns Slice.Nil + /// This is the inverse operation of + /// If is true and is outside the current subspace. 
Slice ExtractKey(Slice key, bool boundCheck = false); + /// Remove the subspace prefix from a batch of binary keys, and only return the tail, or Slice.Nil if a key does not fit inside the namespace + /// Sequence of complete keys that contains the current subspace prefix, and a binary suffix + /// If true, verify that each key in is inside the bounds of the subspace + /// Array of only the binary suffix of the keys, Slice.Empty for a key that is exactly equal to the subspace prefix, or Slice.Nil for a key that is outside of the subspace + /// If is true and at least one key in is outside the current subspace. [NotNull] Slice[] ExtractKeys([NotNull] IEnumerable keys, bool boundCheck = false); + /// Return a new slice buffer, initialized with the subspace prefix, that can be used for custom key serialization + /// If non-zero, the expected buffer capacity. The size of the subspace prefix will be added to this value. + /// Instance of a SliceWriter with the prefix of this subspace already copied. + SliceWriter GetWriter(int capacity = 0); + + } } diff --git a/FoundationDB.Client/TypeSystem/FdbTypeCodec`1.cs b/FoundationDB.Client/TypeSystem/FdbTypeCodec`1.cs index 43e007cf7..ce29ddb12 100644 --- a/FoundationDB.Client/TypeSystem/FdbTypeCodec`1.cs +++ b/FoundationDB.Client/TypeSystem/FdbTypeCodec`1.cs @@ -26,6 +26,9 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY */ #endregion +using System.ComponentModel; +using System.Runtime.CompilerServices; + namespace FoundationDB.Client { using System; diff --git a/FoundationDB.Client/TypeSystem/IFdbTypeSystem.cs b/FoundationDB.Client/TypeSystem/IFdbTypeSystem.cs new file mode 100644 index 000000000..887385363 --- /dev/null +++ b/FoundationDB.Client/TypeSystem/IFdbTypeSystem.cs @@ -0,0 +1,175 @@ +using System; +using FoundationDB.Layers.Tuples; +using JetBrains.Annotations; + +namespace FoundationDB.Client //REVIEW: what namespace? 
+{ + /// Type system that handles values of arbitrary sizes and types + public interface IFdbTypeSystem + { + + FdbKeyRange ToRange(Slice key); + + /// Pack a tuple of arbitrary length into a binary slice + /// Buffer where to append the binary representation + /// Tuple of any size (0 to N) + /// If some elements in are not supported by this type system + void PackKey(ref SliceWriter writer, IFdbTuple items); + + /// Encode a key composed of a single element into a binary slice + /// Type of the element + /// Buffer where to append the binary representation + /// Element to encode + void EncodeKey(ref SliceWriter writer, T item1); + + /// Encode a key composed of two elements into a binary slice + /// Type of the first element + /// Type of the second element + /// Buffer where to append the binary representation + /// First element to encode + /// Second element to encode + void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2); + + /// Encode a key composed of a three elements into a binary slice + /// Type of the first element + /// Type of the second element + /// Type of the third element + /// Buffer where to append the binary representation + /// First element to encode + /// Second element to encode + /// Third element to encode + void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2, T3 item3); + + /// Encode a key composed of a four elements into a binary slice + /// Type of the first element + /// Type of the second element + /// Type of the third element + /// Type of the fourth element + /// Buffer where to append the binary representation + /// First element to encode + /// Second element to encode + /// Third element to encode + /// Fourth element to encode + void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2, T3 item3, T4 item4); + + /// Encode a key composed of a four elements into a binary slice + /// Type of the first element + /// Type of the second element + /// Type of the third element + /// Type of the fourth element + /// 
Type of the fifth element + /// Buffer where to append the binary representation + /// First element to encode + /// Second element to encode + /// Third element to encode + /// Fourth element to encode + /// Fifth element to encode + void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2, T3 item3, T4 item4, T5 item5); + + /// Encode a key composed of a four elements into a binary slice + /// Type of the first element + /// Type of the second element + /// Type of the third element + /// Type of the fourth element + /// Type of the fifth element + /// Type of the sixth element + /// Buffer where to append the binary representation + /// First element to encode + /// Second element to encode + /// Third element to encode + /// Fourth element to encode + /// Fifth element to encode + /// Sixth element to encode + void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6); + + /// Encode a key composed of a four elements into a binary slice + /// Type of the first element + /// Type of the second element + /// Type of the third element + /// Type of the fourth element + /// Type of the fifth element + /// Type of the sixth element + /// Type of the seventh element + /// Buffer where to append the binary representation + /// First element to encode + /// Second element to encode + /// Third element to encode + /// Fourth element to encode + /// Fifth element to encode + /// Sixth element to encode + /// Seventh element to encode + void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7); + + + /// Encode a key composed of a four elements into a binary slice + /// Type of the first element + /// Type of the second element + /// Type of the third element + /// Type of the fourth element + /// Type of the fifth element + /// Type of the sixth element + /// Type of the seventh element + /// Type of the eighth element + /// Buffer where to append the binary representation + /// 
First element to encode + /// Second element to encode + /// Third element to encode + /// Fourth element to encode + /// Fifth element to encode + /// Sixth element to encode + /// Seventh element to encode + /// Eighth element to encode + void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7, T8 item8); + + /// Decode a binary slice into a tuple or arbitrary length + /// Binary slice produced by a previous call to + /// Tuple of any size (0 to N) + IFdbTuple UnpackKey(Slice packed); + + /// Decode a binary slice containing exactly on element + /// Expected type of the element + /// Binary slice produced by a previous call to or + /// Tuple containing a single element, or an exception if the data is invalid, or the tuples has less or more than 1 element + T DecodeKey(Slice packed); + + T DecodeKeyFirst(Slice packed); + + T DecodeKeyLast(Slice packed); + + /// Decode a binary slice containing exactly two elements + /// Expected type of the first element + /// Expected type of the second element + /// Binary slice produced by a previous call to or + /// Tuple containing two elements, or an exception if the data is invalid, or the tuples has less or more than two elements + FdbTuple DecodeKey(Slice packed); + + /// Decode a binary slice containing exactly three elements + /// Expected type of the first element + /// Expected type of the second element + /// Expected type of the third element + /// Binary slice produced by a previous call to or + /// Tuple containing three elements, or an exception if the data is invalid, or the tuples has less or more than three elements + FdbTuple DecodeKey(Slice packed); + + /// Decode a binary slice containing exactly four elements + /// Expected type of the first element + /// Expected type of the second element + /// Expected type of the third element + /// Expected type of the fourth element + /// Binary slice produced by a previous call to or + /// Tuple containing four 
elements, or an exception if the data is invalid, or the tuples has less or more than four elements + FdbTuple DecodeKey(Slice packed); + + /// Decode a binary slice containing exactly five elements + /// Expected type of the first element + /// Expected type of the second element + /// Expected type of the third element + /// Expected type of the fourth element + /// Expected type of the fifth element + /// Binary slice produced by a previous call to or + /// Tuple containing five elements, or an exception if the data is invalid, or the tuples has less or more than five elements + FdbTuple DecodeKey(Slice packed); + + } + +} \ No newline at end of file diff --git a/FoundationDB.Client/TypeSystem/Tuples/Tuples.cs b/FoundationDB.Client/TypeSystem/Tuples/Tuples.cs new file mode 100644 index 000000000..a7f468fcb --- /dev/null +++ b/FoundationDB.Client/TypeSystem/Tuples/Tuples.cs @@ -0,0 +1,147 @@ +using System; +using FoundationDB.Layers.Tuples; + +namespace FoundationDB.Client +{ + + public sealed class TupleTypeSystem : IFdbTypeSystem + { + + public FdbKeyRange ToRange(Slice key) + { + return FdbTuple.ToRange(key); + } + + public void PackKey(ref SliceWriter writer, IFdbTuple items) + { + var tw = new TupleWriter(writer); + FdbTuple.Pack(ref tw, items); + writer = tw.Output; + } + + public void EncodeKey(ref SliceWriter writer, T1 item1) + { + var tw = new TupleWriter(writer); + FdbTuplePacker.SerializeTo(ref tw, item1); + writer = tw.Output; + } + + public void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2) + { + var tw = new TupleWriter(writer); + FdbTuplePacker.SerializeTo(ref tw, item1); + FdbTuplePacker.SerializeTo(ref tw, item2); + writer = tw.Output; + } + + public void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2, T3 item3) + { + var tw = new TupleWriter(writer); + FdbTuplePacker.SerializeTo(ref tw, item1); + FdbTuplePacker.SerializeTo(ref tw, item2); + FdbTuplePacker.SerializeTo(ref tw, item3); + writer = tw.Output; + } + + public void 
EncodeKey(ref SliceWriter writer, T1 item1, T2 item2, T3 item3, T4 item4) + { + var tw = new TupleWriter(writer); + FdbTuplePacker.SerializeTo(ref tw, item1); + FdbTuplePacker.SerializeTo(ref tw, item2); + FdbTuplePacker.SerializeTo(ref tw, item3); + FdbTuplePacker.SerializeTo(ref tw, item4); + writer = tw.Output; + } + + public void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2, T3 item3, T4 item4, T5 item5) + { + var tw = new TupleWriter(writer); + FdbTuplePacker.SerializeTo(ref tw, item1); + FdbTuplePacker.SerializeTo(ref tw, item2); + FdbTuplePacker.SerializeTo(ref tw, item3); + FdbTuplePacker.SerializeTo(ref tw, item4); + FdbTuplePacker.SerializeTo(ref tw, item5); + writer = tw.Output; + } + + public void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6) + { + var tw = new TupleWriter(writer); + FdbTuplePacker.SerializeTo(ref tw, item1); + FdbTuplePacker.SerializeTo(ref tw, item2); + FdbTuplePacker.SerializeTo(ref tw, item3); + FdbTuplePacker.SerializeTo(ref tw, item4); + FdbTuplePacker.SerializeTo(ref tw, item5); + FdbTuplePacker.SerializeTo(ref tw, item6); + writer = tw.Output; + } + + public void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7) + { + var tw = new TupleWriter(writer); + FdbTuplePacker.SerializeTo(ref tw, item1); + FdbTuplePacker.SerializeTo(ref tw, item2); + FdbTuplePacker.SerializeTo(ref tw, item3); + FdbTuplePacker.SerializeTo(ref tw, item4); + FdbTuplePacker.SerializeTo(ref tw, item5); + FdbTuplePacker.SerializeTo(ref tw, item6); + FdbTuplePacker.SerializeTo(ref tw, item7); + writer = tw.Output; + } + + public void EncodeKey(ref SliceWriter writer, T1 item1, T2 item2, T3 item3, T4 item4, T5 item5, T6 item6, T7 item7, T8 item8) + { + var tw = new TupleWriter(writer); + FdbTuplePacker.SerializeTo(ref tw, item1); + FdbTuplePacker.SerializeTo(ref tw, item2); + FdbTuplePacker.SerializeTo(ref tw, item3); + FdbTuplePacker.SerializeTo(ref tw, 
item4); + FdbTuplePacker.SerializeTo(ref tw, item5); + FdbTuplePacker.SerializeTo(ref tw, item6); + FdbTuplePacker.SerializeTo(ref tw, item7); + FdbTuplePacker.SerializeTo(ref tw, item8); + writer = tw.Output; + } + + public IFdbTuple UnpackKey(Slice packed) + { + return FdbTuple.Unpack(packed); + } + + public T DecodeKey(Slice packed) + { + return FdbTuple.DecodeKey(packed); + } + + public T DecodeKeyFirst(Slice packed) + { + return FdbTuple.DecodeFirst(packed); + } + + public T DecodeKeyLast(Slice packed) + { + return FdbTuple.DecodeLast(packed); + } + + public FdbTuple DecodeKey(Slice packed) + { + return FdbTuple.DecodeKey(packed); + } + + public FdbTuple DecodeKey(Slice packed) + { + return FdbTuple.DecodeKey(packed); + } + + public FdbTuple DecodeKey(Slice packed) + { + return FdbTuple.DecodeKey(packed); + } + + public FdbTuple DecodeKey(Slice packed) + { + return FdbTuple.DecodeKey(packed); + } + + } +} \ No newline at end of file diff --git a/FoundationDB.Client/TypeSystem/Tuples/Tupspace.cs b/FoundationDB.Client/TypeSystem/Tuples/Tupspace.cs new file mode 100644 index 000000000..66b0ca984 --- /dev/null +++ b/FoundationDB.Client/TypeSystem/Tuples/Tupspace.cs @@ -0,0 +1,31 @@ +using FoundationDB.Layers.Tuples; +using System; +using System.Runtime.CompilerServices; + +namespace FoundationDB.Client +{ + public static class Zobi + { + public static void Zoba() + { + var ts = FdbSubspace.CreateDynamic(FdbTuple.Create(42), TypeSystem.Tuples); + + var s0 = ts.Keys.Pack(FdbTuple.Create("hello", "world", 123, true)); + Console.WriteLine(s0); + var t0 = ts.Keys.Unpack(s0); + Console.WriteLine(t0); + + var s1 = ts.Keys.Encode("hello"); + Console.WriteLine(s1); + var t1 = ts.Keys.Decode(s1); + Console.WriteLine(t1); + + var s2 = ts.Keys.Encode("hello", 123); + Console.WriteLine(s2); + var t2 = ts.Keys.Decode(s2); + Console.WriteLine(t2); + } + } + + +} \ No newline at end of file diff --git a/FoundationDB.Client/TypeSystem/TypeSystem.cs 
b/FoundationDB.Client/TypeSystem/TypeSystem.cs new file mode 100644 index 000000000..a0d3ce6d6 --- /dev/null +++ b/FoundationDB.Client/TypeSystem/TypeSystem.cs @@ -0,0 +1,34 @@ +using System; +using JetBrains.Annotations; +using System.Runtime.CompilerServices; + +namespace FoundationDB.Client //REVIEW: what namespace? +{ + + public static class TypeSystem + { + public static readonly IFdbTypeSystem Default; + public static readonly IFdbTypeSystem Tuples; + + static TypeSystem() + { + var tuples = new TupleTypeSystem(); + Tuples = tuples; + + Default = tuples; + } + + public static IFdbTypeSystem FromName(string name) + { + //TODO: use a dictionary! + switch (name) + { + case "tuples": return Tuples; + } + + throw new InvalidOperationException("Type System '{0}' is not known. You must register a typesystem by calling TypeSystem.Register() in your initialization logic or in a static constructor, before using it."); + } + + } + +} \ No newline at end of file diff --git a/FoundationDB.Layers.Common/Blobs/FdbBlob.cs b/FoundationDB.Layers.Common/Blobs/FdbBlob.cs index 10dccd27e..8e96dc6f6 100644 --- a/FoundationDB.Layers.Common/Blobs/FdbBlob.cs +++ b/FoundationDB.Layers.Common/Blobs/FdbBlob.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -30,6 +30,7 @@ namespace FoundationDB.Layers.Blobs { using FoundationDB.Client; using FoundationDB.Client.Utils; + using JetBrains.Annotations; using System; using System.Diagnostics; using System.Globalization; @@ -51,15 +52,15 @@ public class FdbBlob /// Only keys within the subspace will be used by the object. /// Other clients of the database should refrain from modifying the subspace. 
/// Subspace to be used for storing the blob data and metadata - public FdbBlob(IFdbSubspace subspace) + public FdbBlob([NotNull] IFdbSubspace subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); - this.Subspace = subspace; + this.Subspace = subspace.Using(TypeSystem.Tuples); } /// Subspace used as a prefix for all items in this table - public IFdbSubspace Subspace { get; private set; } + public IFdbDynamicSubspace Subspace {[NotNull] get; private set; } /// Returns the key for data chunk at the specified offset /// @@ -67,24 +68,24 @@ public FdbBlob(IFdbSubspace subspace) protected virtual Slice DataKey(long offset) { //note: python code uses "%16d" % offset, which pads the value with spaces.. Not sure why ? - return this.Subspace.Tuples.EncodeKey(DataSuffix, offset.ToString("D16", CultureInfo.InvariantCulture)); + return this.Subspace.Keys.Encode(DataSuffix, offset.ToString("D16", CultureInfo.InvariantCulture)); } protected virtual long DataKeyOffset(Slice key) { - long offset = Int64.Parse(this.Subspace.Tuples.DecodeLast(key), CultureInfo.InvariantCulture); + long offset = Int64.Parse(this.Subspace.Keys.DecodeLast(key), CultureInfo.InvariantCulture); if (offset < 0) throw new InvalidOperationException("Chunk offset value cannot be less than zero"); return offset; } protected virtual Slice SizeKey() { - return this.Subspace.Tuples.EncodeKey(SizeSuffix); + return this.Subspace.Keys.Encode(SizeSuffix); } protected virtual Slice AttributeKey(string name) { - return this.Subspace.Tuples.EncodeKey(AttributesSuffix, name); + return this.Subspace.Keys.Encode(AttributesSuffix, name); } #region Internal Helpers... 
@@ -103,7 +104,7 @@ public Chunk(Slice key, Slice data, long offset) } } - private async Task GetChunkAtAsync(IFdbTransaction trans, long offset) + private async Task GetChunkAtAsync([NotNull] IFdbTransaction trans, long offset) { Contract.Requires(trans != null && offset >= 0); @@ -130,7 +131,7 @@ private async Task GetChunkAtAsync(IFdbTransaction trans, long offset) return new Chunk(chunkKey, chunkData, chunkOffset); } - private async Task MakeSplitPointAsync(IFdbTransaction trans, long offset) + private async Task MakeSplitPointAsync([NotNull] IFdbTransaction trans, long offset) { Contract.Requires(trans != null && offset >= 0); @@ -144,14 +145,14 @@ private async Task MakeSplitPointAsync(IFdbTransaction trans, long offset) trans.Set(DataKey(offset), chunk.Data.Substring(splitPoint)); } - private async Task MakeSparseAsync(IFdbTransaction trans, long start, long end) + private async Task MakeSparseAsync([NotNull] IFdbTransaction trans, long start, long end) { await MakeSplitPointAsync(trans, start).ConfigureAwait(false); await MakeSplitPointAsync(trans, end).ConfigureAwait(false); trans.ClearRange(DataKey(start), DataKey(end)); } - private async Task TryRemoteSplitPointAsync(IFdbTransaction trans, long offset) + private async Task TryRemoteSplitPointAsync([NotNull] IFdbTransaction trans, long offset) { Contract.Requires(trans != null && offset >= 0); @@ -169,7 +170,7 @@ private async Task TryRemoteSplitPointAsync(IFdbTransaction trans, long of return true; } - private void WriteToSparse(IFdbTransaction trans, long offset, Slice data) + private void WriteToSparse([NotNull] IFdbTransaction trans, long offset, Slice data) { Contract.Requires(trans != null && offset >= 0); @@ -184,7 +185,7 @@ private void WriteToSparse(IFdbTransaction trans, long offset, Slice data) } } - private void SetSize(IFdbTransaction trans, long size) + private void SetSize([NotNull] IFdbTransaction trans, long size) { Contract.Requires(trans != null && size >= 0); @@ -197,7 +198,7 @@ 
private void SetSize(IFdbTransaction trans, long size) /// /// Delete all key-value pairs associated with the blob. /// - public void Delete(IFdbTransaction trans) + public void Delete([NotNull] IFdbTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); @@ -208,7 +209,7 @@ public void Delete(IFdbTransaction trans) /// Get the size (in bytes) of the blob. /// /// Return null if the blob does not exists, 0 if is empty, or the size in bytes - public async Task GetSizeAsync(IFdbReadOnlyTransaction trans) + public async Task GetSizeAsync([NotNull] IFdbReadOnlyTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); @@ -225,7 +226,7 @@ public void Delete(IFdbTransaction trans) /// /// Read from the blob, starting at , retrieving up to bytes (fewer then n bytes are returned when the end of the blob is reached). /// - public async Task ReadAsync(IFdbReadOnlyTransaction trans, long offset, int n) + public async Task ReadAsync([NotNull] IFdbReadOnlyTransaction trans, long offset, int n) { if (trans == null) throw new ArgumentNullException("trans"); if (offset < 0) throw new ArgumentNullException("offset", "Offset cannot be less than zero"); @@ -278,7 +279,7 @@ await trans /// /// Write to the blob, starting at and overwriting any existing data at that location. The length of the blob is increased if necessary. /// - public async Task WriteAsync(IFdbTransaction trans, long offset, Slice data) + public async Task WriteAsync([NotNull] IFdbTransaction trans, long offset, Slice data) { if (trans == null) throw new ArgumentNullException("trans"); if (offset < 0) throw new ArgumentOutOfRangeException("offset", "Offset cannot be less than zero"); @@ -304,7 +305,7 @@ public async Task WriteAsync(IFdbTransaction trans, long offset, Slice data) /// /// Append the contents of onto the end of the blob. 
/// - public async Task AppendAsync(IFdbTransaction trans, Slice data) + public async Task AppendAsync([NotNull] IFdbTransaction trans, Slice data) { if (trans == null) throw new ArgumentNullException("trans"); @@ -319,7 +320,7 @@ public async Task AppendAsync(IFdbTransaction trans, Slice data) /// /// Change the blob length to , erasing any data when shrinking, and filling new bytes with 0 when growing. /// - public async Task TruncateAsync(IFdbTransaction trans, long newLength) + public async Task TruncateAsync([NotNull] IFdbTransaction trans, long newLength) { if (trans == null) throw new ArgumentNullException("trans"); if (newLength < 0) throw new ArgumentOutOfRangeException("newLength", "Length cannot be less than zero"); diff --git a/FoundationDB.Layers.Common/Collections/FdbMap`2.cs b/FoundationDB.Layers.Common/Collections/FdbMap`2.cs index 29cc3db71..12f61b461 100644 --- a/FoundationDB.Layers.Common/Collections/FdbMap`2.cs +++ b/FoundationDB.Layers.Common/Collections/FdbMap`2.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -56,7 +56,7 @@ public FdbMap([NotNull] string name, [NotNull] IFdbSubspace subspace, [NotNull] this.Name = name; this.Subspace = subspace; - this.Location = new FdbEncoderSubspace(subspace, keyEncoder); + this.Location = subspace.UsingEncoder(keyEncoder); this.ValueEncoder = valueEncoder; } @@ -70,7 +70,7 @@ public FdbMap([NotNull] string name, [NotNull] IFdbSubspace subspace, [NotNull] public IFdbSubspace Subspace { [NotNull] get; private set; } /// Subspace used to encoded the keys for the items - protected FdbEncoderSubspace Location { [NotNull] get; private set; } + protected IFdbEncoderSubspace Location { [NotNull] get; private set; } /// Class that can serialize/deserialize values into/from slices public IValueEncoder ValueEncoder { [NotNull] get; private set; } @@ -90,7 +90,7 @@ public async Task GetAsync([NotNull] IFdbReadOnlyTransaction trans, TKey if (trans == null) throw new ArgumentNullException("trans"); if (id == null) throw new ArgumentNullException("id"); - var data = await trans.GetAsync(this.Location.EncodeKey(id)).ConfigureAwait(false); + var data = await trans.GetAsync(this.Location.Keys.Encode(id)).ConfigureAwait(false); if (data.IsNull) throw new KeyNotFoundException("The given id was not present in the map."); return this.ValueEncoder.DecodeValue(data); @@ -105,7 +105,7 @@ public async Task> TryGetAsync([NotNull] IFdbReadOnlyTransactio if (trans == null) throw new ArgumentNullException("trans"); if (id == null) throw new ArgumentNullException("id"); - var data = await trans.GetAsync(this.Location.EncodeKey(id)).ConfigureAwait(false); + var data = await trans.GetAsync(this.Location.Keys.Encode(id)).ConfigureAwait(false); if (data.IsNull) return default(Optional); return this.ValueEncoder.DecodeValue(data); @@ -121,7 +121,7 @@ public void Set([NotNull] IFdbTransaction trans, TKey id, TValue value) if (trans == null) throw new ArgumentNullException("trans"); if (id == null) 
throw new ArgumentNullException("id"); - trans.Set(this.Location.EncodeKey(id), this.ValueEncoder.EncodeValue(value)); + trans.Set(this.Location.Keys.Encode(id), this.ValueEncoder.EncodeValue(value)); } /// Remove a single entry from the map @@ -133,7 +133,7 @@ public void Remove([NotNull] IFdbTransaction trans, TKey id) if (trans == null) throw new ArgumentNullException("trans"); if (id == null) throw new ArgumentNullException("id"); - trans.Clear(this.Location.EncodeKey(id)); + trans.Clear(this.Location.Keys.Encode(id)); } /// Create a query that will attempt to read all the entries in the map within a single transaction. @@ -146,7 +146,7 @@ public IFdbAsyncEnumerable> All([NotNull] IFdbReadOnl if (trans == null) throw new ArgumentNullException("trans"); return trans - .GetRange(this.Location.Tuples.ToRange(), options) + .GetRange(this.Location.ToRange(), options) .Select(this.DecodeItem); } @@ -159,7 +159,7 @@ public async Task[]> GetValuesAsync([NotNull] IFdbReadOnlyTrans if (trans == null) throw new ArgumentNullException("trans"); if (ids == null) throw new ArgumentNullException("ids"); - var results = await trans.GetValuesAsync(this.Location.EncodeKeys(ids)).ConfigureAwait(false); + var results = await trans.GetValuesAsync(this.Location.Keys.Encode(ids)).ConfigureAwait(false); return Optional.DecodeRange(this.ValueEncoder, results); } @@ -171,7 +171,7 @@ public async Task[]> GetValuesAsync([NotNull] IFdbReadOnlyTrans private KeyValuePair DecodeItem(KeyValuePair item) { return new KeyValuePair( - this.Location.DecodeKey(item.Key), + this.Location.Keys.Decode(item.Key), this.ValueEncoder.DecodeValue(item.Value) ); } @@ -181,14 +181,14 @@ private KeyValuePair[] DecodeItems(KeyValuePair[] ba { Contract.Requires(batch != null); - var keyEncoder = this.Location; + var keyEncoder = this.Location.Keys; var valueEncoder = this.ValueEncoder; var items = new KeyValuePair[batch.Length]; for (int i = 0; i < batch.Length; i++) { items[i] = new KeyValuePair( - 
keyEncoder.DecodeKey(batch[i].Key), + keyEncoder.Decode(batch[i].Key), valueEncoder.DecodeValue(batch[i].Value) ); } @@ -202,7 +202,7 @@ public void Clear([NotNull] IFdbTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); - trans.ClearRange(this.Location.Tuples.ToRange()); + trans.ClearRange(this.Location.ToRange()); } #region Export... @@ -220,7 +220,7 @@ public Task ExportAsync([NotNull] IFdbDatabase db, [NotNull] Action { foreach (var item in batch) @@ -246,7 +246,7 @@ public Task ExportAsync([NotNull] IFdbDatabase db, [NotNull] Func { foreach (var item in batch) @@ -271,7 +271,7 @@ public Task ExportAsync([NotNull] IFdbDatabase db, [NotNull] Action { if (batch.Length > 0) @@ -297,7 +297,7 @@ public Task ExportAsync([NotNull] IFdbDatabase db, [NotNull] Func handler(DecodeItems(batch), ct), cancellationToken ); @@ -323,7 +323,7 @@ public async Task AggregateAsync([NotNull] IFdbDatabase db, Fu await Fdb.Bulk.ExportAsync( db, - this.Location.Tuples.ToRange(), + this.Location.ToRange(), (batch, _, ct) => { state = handler(state, DecodeItems(batch)); @@ -356,7 +356,7 @@ public async Task AggregateAsync([NotNull] IFdbDatabas await Fdb.Bulk.ExportAsync( db, - this.Location.Tuples.ToRange(), + this.Location.ToRange(), (batch, _, ct) => { state = handler(state, DecodeItems(batch)); diff --git a/FoundationDB.Layers.Common/Collections/FdbMultimap`2.cs b/FoundationDB.Layers.Common/Collections/FdbMultimap`2.cs index 2085bdcf3..4739173a2 100644 --- a/FoundationDB.Layers.Common/Collections/FdbMultimap`2.cs +++ b/FoundationDB.Layers.Common/Collections/FdbMultimap`2.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2014, Doxense SAS +/* Copyright (c) 2014-2015, Doxense SAS All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -69,7 +69,7 @@ public FdbMultiMap(IFdbSubspace subspace, bool allowNegativeValues, ICompositeKe this.Subspace = subspace; this.AllowNegativeValues = allowNegativeValues; - this.Location = new FdbEncoderSubspace(subspace, encoder); + this.Location = subspace.UsingEncoder(encoder); } #region Public Properties... @@ -81,7 +81,7 @@ public FdbMultiMap(IFdbSubspace subspace, bool allowNegativeValues, ICompositeKe public bool AllowNegativeValues { get; private set; } /// Subspace used to encoded the keys for the items - protected FdbEncoderSubspace Location { [NotNull] get; private set; } + protected IFdbEncoderSubspace Location { [NotNull] get; private set; } #endregion @@ -98,7 +98,7 @@ public Task AddAsync([NotNull] IFdbTransaction trans, TKey key, TValue value) //note: this method does not need to be async, but subtract is, so it's better if both methods have the same shape. if (trans == null) throw new ArgumentNullException("trans"); - trans.AtomicAdd(this.Location.EncodeKey(key, value), PlusOne); + trans.AtomicAdd(this.Location.Keys.Encode(key, value), PlusOne); return FoundationDB.Async.TaskHelpers.CompletedTask; } @@ -111,7 +111,7 @@ public async Task SubtractAsync([NotNull] IFdbTransaction trans, TKey key, TValu { if (trans == null) throw new ArgumentNullException("trans"); - Slice k = this.Location.EncodeKey(key, value); + Slice k = this.Location.Keys.Encode(key, value); if (this.AllowNegativeValues) { trans.AtomicAdd(k, MinusOne); @@ -139,7 +139,7 @@ public async Task ContainsAsync([NotNull] IFdbReadOnlyTransaction trans, T { if (trans == null) throw new ArgumentNullException("trans"); - var v = await trans.GetAsync(this.Location.EncodeKey(key, value)).ConfigureAwait(false); + var v = await trans.GetAsync(this.Location.Keys.Encode(key, value)).ConfigureAwait(false); return this.AllowNegativeValues ? 
v.IsPresent : v.ToInt64() > 0; } @@ -153,7 +153,7 @@ public async Task ContainsAsync([NotNull] IFdbReadOnlyTransaction trans, T { if (trans == null) throw new ArgumentNullException("trans"); - Slice v = await trans.GetAsync(this.Location.EncodeKey(key, value)).ConfigureAwait(false); + Slice v = await trans.GetAsync(this.Location.Keys.Encode(key, value)).ConfigureAwait(false); if (v.IsNullOrEmpty) return null; long c = v.ToInt64(); return this.AllowNegativeValues || c > 0 ? c : default(long?); @@ -168,19 +168,19 @@ public IFdbAsyncEnumerable Get([NotNull] IFdbReadOnlyTransaction trans, { if (trans == null) throw new ArgumentNullException("trans"); - var range = FdbKeyRange.StartsWith(this.Location.Partial.EncodeKey(key)); + var range = FdbKeyRange.StartsWith(this.Location.Partial.Keys.Encode(key)); if (this.AllowNegativeValues) { return trans .GetRange(range) - .Select(kvp => this.Location.DecodeKey(kvp.Key).Item2); + .Select(kvp => this.Location.Keys.Decode(kvp.Key).Item2); } else { return trans .GetRange(range) .Where(kvp => kvp.Value.ToInt64() > 0) // we need to filter out zero or negative values (possible artefacts) - .Select(kvp => this.Location.DecodeKey(kvp.Key).Item2); + .Select(kvp => this.Location.Keys.Decode(kvp.Key).Item2); } } @@ -200,11 +200,11 @@ public Task> GetAsync([NotNull] IFdbReadOnlyTransaction trans, TKey [NotNull] public IFdbAsyncEnumerable> GetCounts([NotNull] IFdbReadOnlyTransaction trans, TKey key) { - var range = FdbKeyRange.StartsWith(this.Location.Partial.EncodeKey(key)); + var range = FdbKeyRange.StartsWith(this.Location.Partial.Keys.Encode(key)); var query = trans .GetRange(range) - .Select(kvp => new KeyValuePair(this.Location.DecodeKey(kvp.Key).Item2, kvp.Value.ToInt64())); + .Select(kvp => new KeyValuePair(this.Location.Keys.Decode(kvp.Key).Item2, kvp.Value.ToInt64())); if (this.AllowNegativeValues) { @@ -234,7 +234,7 @@ public void Remove([NotNull] IFdbTransaction trans, TKey key) { if (trans == null) throw new 
ArgumentNullException("trans"); - trans.ClearRange(FdbKeyRange.StartsWith(this.Location.Partial.EncodeKey(key))); + trans.ClearRange(FdbKeyRange.StartsWith(this.Location.Partial.Keys.Encode(key))); } /// Remove a value for a specific key @@ -246,7 +246,7 @@ public void Remove([NotNull] IFdbTransaction trans, TKey key, TValue value) { if (trans == null) throw new ArgumentNullException("trans"); - trans.Clear(this.Location.EncodeKey(key, value)); + trans.Clear(this.Location.Keys.Encode(key, value)); } #endregion diff --git a/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs b/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs index 8612c625f..86b791899 100644 --- a/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs +++ b/FoundationDB.Layers.Common/Collections/FdbQueue`1.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -72,18 +72,18 @@ public FdbQueue([NotNull] IFdbSubspace subspace, bool highContention, [NotNull] if (subspace == null) throw new ArgumentNullException("subspace"); if (encoder == null) throw new ArgumentNullException("encoder"); - this.Subspace = subspace; + this.Subspace = subspace.Using(TypeSystem.Tuples); this.HighContention = highContention; this.Encoder = encoder; //TODO: rewrite this, using FdbEncoderSubpsace<..> ! 
- this.ConflictedPop = subspace.Partition.ByKey(Slice.FromAscii("pop")); - this.ConflictedItem = subspace.Partition.ByKey(Slice.FromAscii("conflict")); - this.QueueItem = subspace.Partition.ByKey(Slice.FromAscii("item")); + this.ConflictedPop = this.Subspace.Partition.ByKey(Slice.FromAscii("pop")); + this.ConflictedItem = this.Subspace.Partition.ByKey(Slice.FromAscii("conflict")); + this.QueueItem = this.Subspace.Partition.ByKey(Slice.FromAscii("item")); } /// Subspace used as a prefix for all items in this table - public IFdbSubspace Subspace { [NotNull] get; private set; } + public IFdbDynamicSubspace Subspace { [NotNull] get; private set; } /// If true, the queue is operating in High Contention mode that will scale better with a lot of popping clients. public bool HighContention { get; private set; } @@ -91,11 +91,11 @@ public FdbQueue([NotNull] IFdbSubspace subspace, bool highContention, [NotNull] /// Serializer for the elements of the queue public IValueEncoder Encoder { [NotNull] get; private set; } - internal IFdbSubspace ConflictedPop { get; private set; } + internal IFdbDynamicSubspace ConflictedPop { get; private set; } - internal IFdbSubspace ConflictedItem { get; private set; } + internal IFdbDynamicSubspace ConflictedItem { get; private set; } - internal IFdbSubspace QueueItem { get; private set; } + internal IFdbDynamicSubspace QueueItem { get; private set; } /// Remove all items from the queue. 
public void Clear([NotNull] IFdbTransaction trans) @@ -174,7 +174,7 @@ public Task ExportAsync(IFdbDatabase db, Action handler, CancellationTo return Fdb.Bulk.ExportAsync( db, - this.QueueItem.Tuples.ToRange(), + this.QueueItem.Keys.ToRange(), (kvs, offset, ct) => { foreach(var kv in kvs) @@ -199,7 +199,7 @@ public Task ExportAsync(IFdbDatabase db, Func handler, Cancellati return Fdb.Bulk.ExportAsync( db, - this.QueueItem.Tuples.ToRange(), + this.QueueItem.Keys.ToRange(), async (kvs, offset, ct) => { foreach (var kv in kvs) @@ -223,7 +223,7 @@ public Task ExportAsync(IFdbDatabase db, Action handler, Cancellation return Fdb.Bulk.ExportAsync( db, - this.QueueItem.Tuples.ToRange(), + this.QueueItem.Keys.ToRange(), (kvs, offset, ct) => { handler(this.Encoder.DecodeValues(kvs), offset); @@ -242,7 +242,7 @@ public Task ExportAsync(IFdbDatabase db, Func handler, Cancella return Fdb.Bulk.ExportAsync( db, - this.QueueItem.Tuples.ToRange(), + this.QueueItem.Keys.ToRange(), (kvs, offset, ct) => handler(this.Encoder.DecodeValues(kvs), offset), cancellationToken ); @@ -254,7 +254,7 @@ public Task ExportAsync(IFdbDatabase db, Func handler, Cancella private Slice ConflictedItemKey(object subKey) { - return this.ConflictedItem.Tuples.EncodeKey(subKey); + return this.ConflictedItem.Keys.Encode(subKey); } private Slice RandId() @@ -272,14 +272,14 @@ private async Task PushAtAsync([NotNull] IFdbTransaction tr, T value, long index // This makes pushes fast and usually conflict free (unless the queue becomes empty // during the push) - Slice key = this.QueueItem.Tuples.EncodeKey(index, this.RandId()); + Slice key = this.QueueItem.Keys.Encode(index, this.RandId()); await tr.GetAsync(key).ConfigureAwait(false); tr.Set(key, this.Encoder.EncodeValue(value)); } - private async Task GetNextIndexAsync([NotNull] IFdbReadOnlyTransaction tr, IFdbSubspace subspace) + private async Task GetNextIndexAsync([NotNull] IFdbReadOnlyTransaction tr, IFdbDynamicSubspace subspace) { - var range = 
subspace.Tuples.ToRange(); + var range = subspace.Keys.ToRange(); var lastKey = await tr.GetKeyAsync(FdbKeySelector.LastLessThan(range.End)).ConfigureAwait(false); @@ -288,12 +288,12 @@ private async Task GetNextIndexAsync([NotNull] IFdbReadOnlyTransaction tr, return 0; } - return subspace.Tuples.DecodeFirst(lastKey) + 1; + return subspace.Keys.DecodeFirst(lastKey) + 1; } private Task> GetFirstItemAsync([NotNull] IFdbReadOnlyTransaction tr) { - var range = this.QueueItem.Tuples.ToRange(); + var range = this.QueueItem.Keys.ToRange(); return tr.GetRange(range).FirstOrDefaultAsync(); } @@ -324,7 +324,7 @@ private async Task AddConflictedPopAsync([NotNull] IFdbTransaction tr, bo return Slice.Nil; } - Slice waitKey = this.ConflictedPop.Tuples.EncodeKey(index, this.RandId()); + Slice waitKey = this.ConflictedPop.Keys.Encode(index, this.RandId()); await tr.GetAsync(waitKey).ConfigureAwait(false); tr.Set(waitKey, Slice.Empty); return waitKey; @@ -332,13 +332,13 @@ private async Task AddConflictedPopAsync([NotNull] IFdbTransaction tr, bo private Task>> GetWaitingPopsAsync([NotNull] IFdbReadOnlyTransaction tr, int numPops) { - var range = this.ConflictedPop.Tuples.ToRange(); + var range = this.ConflictedPop.Keys.ToRange(); return tr.GetRange(range, limit: numPops, reverse: false).ToListAsync(); } private Task>> GetItemsAsync([NotNull] IFdbReadOnlyTransaction tr, int numItems) { - var range = this.QueueItem.Tuples.ToRange(); + var range = this.QueueItem.Keys.ToRange(); return tr.GetRange(range, limit: numItems, reverse: false).ToListAsync(); } @@ -372,7 +372,7 @@ private async Task FulfillConflictedPops([NotNull] IFdbDatabase db, Cancel var pop = pops[i]; var kvp = items[i]; - var key = this.ConflictedPop.Tuples.Unpack(pop.Key); + var key = this.ConflictedPop.Keys.Unpack(pop.Key); var storageKey = this.ConflictedItemKey(key[1]); tr.Set(storageKey, kvp.Value); @@ -447,7 +447,7 @@ private async Task> PopHighContentionAsync([NotNull] IFdbDatabase db } // The result of the pop 
will be stored at this key once it has been fulfilled - var resultKey = ConflictedItemKey(this.ConflictedPop.Tuples.DecodeLast(waitKey)); + var resultKey = ConflictedItemKey(this.ConflictedPop.Keys.DecodeLast(waitKey)); tr.Reset(); diff --git a/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs b/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs index 29e27a666..f72d5b84d 100644 --- a/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs +++ b/FoundationDB.Layers.Common/Collections/FdbRankedSet.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -30,9 +30,6 @@ namespace FoundationDB.Layers.Collections { using FoundationDB.Client; using FoundationDB.Client.Utils; -#if DEBUG - using FoundationDB.Filters.Logging; -#endif using FoundationDB.Linq; using JetBrains.Annotations; using System; @@ -58,7 +55,7 @@ public FdbRankedSet([NotNull] IFdbSubspace subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); - this.Subspace = subspace; + this.Subspace = subspace.Using(TypeSystem.Tuples); } public Task OpenAsync([NotNull] IFdbTransaction trans) @@ -68,7 +65,7 @@ public Task OpenAsync([NotNull] IFdbTransaction trans) } /// Subspace used as a prefix for all items in this table - public IFdbSubspace Subspace { [NotNull] get; private set; } + public IFdbDynamicSubspace Subspace { [NotNull] get; private set; } /// Returns the number of items in the set. 
/// @@ -78,7 +75,7 @@ public Task SizeAsync([NotNull] IFdbReadOnlyTransaction trans) if (trans == null) throw new ArgumentNullException("trans"); return trans - .GetRange(this.Subspace.Partition.ByKey(MAX_LEVELS - 1).Tuples.ToRange()) + .GetRange(this.Subspace.Partition.ByKey(MAX_LEVELS - 1).Keys.ToRange()) .Select(kv => DecodeCount(kv.Value)) .SumAsync(); } @@ -109,14 +106,14 @@ public async Task InsertAsync([NotNull] IFdbTransaction trans, Slice key) // Insert into this level by looking at the count of the previous // key in the level and recounting the next lower level to correct // the counts - var prevCount = DecodeCount(await trans.GetAsync(this.Subspace.Tuples.EncodeKey(level, prevKey)).ConfigureAwait(false)); + var prevCount = DecodeCount(await trans.GetAsync(this.Subspace.Keys.Encode(level, prevKey)).ConfigureAwait(false)); var newPrevCount = await SlowCountAsync(trans, level - 1, prevKey, key); var count = checked((prevCount - newPrevCount) + 1); // print "insert", key, "level", level, "count", count, // "splits", prevKey, "oldC", prevCount, "newC", newPrevCount - trans.Set(this.Subspace.Tuples.EncodeKey(level, prevKey), EncodeCount(newPrevCount)); - trans.Set(this.Subspace.Tuples.EncodeKey(level, key), EncodeCount(count)); + trans.Set(this.Subspace.Keys.Encode(level, prevKey), EncodeCount(newPrevCount)); + trans.Set(this.Subspace.Keys.Encode(level, key), EncodeCount(count)); } } } @@ -126,7 +123,7 @@ public async Task ContainsAsync([NotNull] IFdbReadOnlyTransaction trans, S if (trans == null) throw new ArgumentNullException("trans"); if (key.IsNull) throw new ArgumentException("Empty key not allowed in set", "key"); - return (await trans.GetAsync(this.Subspace.Tuples.EncodeKey(0, key)).ConfigureAwait(false)).HasValue; + return (await trans.GetAsync(this.Subspace.Keys.Encode(0, key)).ConfigureAwait(false)).HasValue; } public async Task EraseAsync([NotNull] IFdbTransaction trans, Slice key) @@ -151,7 +148,7 @@ public async Task EraseAsync([NotNull] 
IFdbTransaction trans, Slice key) long countChange = -1; if (c.HasValue) countChange += DecodeCount(c); - trans.AtomicAdd(this.Subspace.Tuples.EncodeKey(level, prevKey), EncodeCount(countChange)); + trans.AtomicAdd(this.Subspace.Keys.Encode(level, prevKey), EncodeCount(countChange)); } } @@ -172,12 +169,12 @@ public async Task EraseAsync([NotNull] IFdbTransaction trans, Slice key) var lss = this.Subspace.Partition.ByKey(level); long lastCount = 0; var kcs = await trans.GetRange( - FdbKeySelector.FirstGreaterOrEqual(lss.Tuples.EncodeKey(rankKey)), - FdbKeySelector.FirstGreaterThan(lss.Tuples.EncodeKey(key)) + FdbKeySelector.FirstGreaterOrEqual(lss.Keys.Encode(rankKey)), + FdbKeySelector.FirstGreaterThan(lss.Keys.Encode(key)) ).ToListAsync().ConfigureAwait(false); foreach (var kc in kcs) { - rankKey = lss.Tuples.DecodeKey(kc.Key); + rankKey = lss.Keys.Decode(kc.Key); lastCount = DecodeCount(kc.Value); r += lastCount; } @@ -199,13 +196,13 @@ public async Task GetNthAsync([NotNull] IFdbReadOnlyTransaction trans, lo for (int level = MAX_LEVELS - 1; level >= 0; level--) { var lss = this.Subspace.Partition.ByKey(level); - var kcs = await trans.GetRange(lss.Tuples.EncodeKey(key), lss.Tuples.ToRange().End).ToListAsync().ConfigureAwait(false); + var kcs = await trans.GetRange(lss.Keys.Encode(key), lss.Keys.ToRange().End).ToListAsync().ConfigureAwait(false); if (kcs.Count == 0) break; foreach(var kc in kcs) { - key = lss.Tuples.DecodeKey(kc.Key); + key = lss.Keys.Decode(kc.Key); long count = DecodeCount(kc.Value); if (key.IsPresent && r == 0) { @@ -226,7 +223,7 @@ public async Task GetNthAsync([NotNull] IFdbReadOnlyTransaction trans, lo /// Clears the entire set. 
public Task ClearAllAsync([NotNull] IFdbTransaction trans) { - trans.ClearRange(this.Subspace.Tuples.ToRange()); + trans.ClearRange(this.Subspace.Keys.ToRange()); return SetupLevelsAsync(trans); } @@ -250,7 +247,7 @@ private Task SlowCountAsync(IFdbReadOnlyTransaction trans, int level, Slic } return trans - .GetRange(this.Subspace.Tuples.EncodeKey(level, beginKey), this.Subspace.Tuples.EncodeKey(level, endKey)) + .GetRange(this.Subspace.Keys.Encode(level, beginKey), this.Subspace.Keys.Encode(level, endKey)) .Select(kv => DecodeCount(kv.Value)) .SumAsync(); } @@ -258,7 +255,7 @@ private Task SlowCountAsync(IFdbReadOnlyTransaction trans, int level, Slic private async Task SetupLevelsAsync(IFdbTransaction trans) { var ks = Enumerable.Range(0, MAX_LEVELS) - .Select((l) => this.Subspace.Tuples.EncodeKey(l, Slice.Empty)) + .Select((l) => this.Subspace.Keys.Encode(l, Slice.Empty)) .ToList(); var res = await trans.GetValuesAsync(ks).ConfigureAwait(false); @@ -278,7 +275,7 @@ private async Task GetPreviousNodeAsync(IFdbTransaction trans, int level, // a transaction conflict. We also add a conflict key on the found previous // key in level 0. This allows detection of erasures. 
- var k = this.Subspace.Tuples.EncodeKey(level, key); + var k = this.Subspace.Keys.Encode(level, key); //Console.WriteLine(k); //Console.WriteLine("GetPreviousNode(" + level + ", " + key + ")"); //Console.WriteLine(FdbKeySelector.LastLessThan(k) + " <= x < " + FdbKeySelector.FirstGreaterOrEqual(k)); @@ -292,14 +289,12 @@ private async Task GetPreviousNodeAsync(IFdbTransaction trans, int level, .ConfigureAwait(false); //Console.WriteLine("Found " + FdbKey.Dump(kv.Key)); - var prevKey = this.Subspace.Tuples.DecodeLast(kv.Key); + var prevKey = this.Subspace.Keys.DecodeLast(kv.Key); trans.AddReadConflictRange(kv.Key + FdbKey.MinValue, k); - trans.AddReadConflictKey(this.Subspace.Tuples.EncodeKey(0, (Slice)prevKey)); + trans.AddReadConflictKey(this.Subspace.Keys.Encode(0, (Slice)prevKey)); return prevKey; } - - #endregion } diff --git a/FoundationDB.Layers.Common/Collections/FdbVector`1.cs b/FoundationDB.Layers.Common/Collections/FdbVector`1.cs index 8cd034581..6ab0d44ed 100644 --- a/FoundationDB.Layers.Common/Collections/FdbVector`1.cs +++ b/FoundationDB.Layers.Common/Collections/FdbVector`1.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -66,22 +66,22 @@ public FdbVector([NotNull] FdbSubspace subspace) /// Create a new sparse Vector /// Subspace where the vector will be stored /// Default value for sparse entries - public FdbVector([NotNull] FdbSubspace subspace, T defaultValue) + public FdbVector([NotNull] IFdbSubspace subspace, T defaultValue) : this(subspace, defaultValue, KeyValueEncoders.Tuples.Value()) { } - public FdbVector([NotNull] FdbSubspace subspace, T defaultValue, [NotNull] IValueEncoder encoder) + public FdbVector([NotNull] IFdbSubspace subspace, T defaultValue, [NotNull] IValueEncoder encoder) { if (subspace == null) throw new ArgumentNullException("subspace"); if (encoder == null) throw new ArgumentNullException("encoder"); - this.Subspace = subspace; + this.Subspace = subspace.Using(TypeSystem.Tuples); this.DefaultValue = defaultValue; this.Encoder = encoder; } /// Subspace used as a prefix for all items in this vector - public FdbSubspace Subspace { [NotNull] get; private set; } + public IFdbDynamicSubspace Subspace { [NotNull] get; private set; } /// Default value for sparse entries public T DefaultValue { get; private set; } @@ -112,7 +112,7 @@ public Task BackAsync([NotNull] IFdbReadOnlyTransaction tr) if (tr == null) throw new ArgumentNullException("tr"); return tr - .GetRange(this.Subspace.Tuples.ToRange()) + .GetRange(this.Subspace.Keys.ToRange()) .Select((kvp) => this.Encoder.DecodeValue(kvp.Value)) .LastOrDefaultAsync(); } @@ -128,7 +128,7 @@ public async Task> PopAsync([NotNull] IFdbTransaction tr) { if (tr == null) throw new ArgumentNullException("tr"); - var keyRange = this.Subspace.Tuples.ToRange(); + var keyRange = this.Subspace.Keys.ToRange(); // Read the last two entries so we can check if the second to last item // is being represented sparsely. 
If so, we will be required to set it @@ -142,7 +142,7 @@ public async Task> PopAsync([NotNull] IFdbTransaction tr) if (lastTwo.Count == 0) return default(Optional); //note: keys are reversed so indices[0] = last, indices[1] = second to last - var indices = lastTwo.Select(kvp => this.Subspace.Tuples.DecodeFirst(kvp.Key)).ToList(); + var indices = lastTwo.Select(kvp => this.Subspace.Keys.DecodeFirst(kvp.Key)).ToList(); if (indices[0] == 0) { // Vector has size one @@ -202,7 +202,7 @@ public async Task GetAsync([NotNull] IFdbReadOnlyTransaction tr, long index) if (index < 0) throw new IndexOutOfRangeException(String.Format("Index {0} must be positive", index)); var start = GetKeyAt(index); - var end = this.Subspace.Tuples.ToRange().End; + var end = this.Subspace.Keys.ToRange().End; var output = await tr .GetRange(start, end) @@ -259,7 +259,7 @@ public async Task ResizeAsync([NotNull] IFdbTransaction tr, long length) if (length < currentSize) { - tr.ClearRange(GetKeyAt(length), this.Subspace.Tuples.ToRange().End); + tr.ClearRange(GetKeyAt(length), this.Subspace.Keys.ToRange().End); // Check if the new end of the vector was being sparsely represented if (await ComputeSizeAsync(tr).ConfigureAwait(false) < length) @@ -287,7 +287,7 @@ private async Task ComputeSizeAsync(IFdbReadOnlyTransaction tr) { Contract.Requires(tr != null); - var keyRange = this.Subspace.Tuples.ToRange(); + var keyRange = this.Subspace.Keys.ToRange(); var lastKey = await tr.GetKeyAsync(FdbKeySelector.LastLessOrEqual(keyRange.End)).ConfigureAwait(false); @@ -296,12 +296,12 @@ private async Task ComputeSizeAsync(IFdbReadOnlyTransaction tr) return 0; } - return this.Subspace.Tuples.DecodeFirst(lastKey) + 1; + return this.Subspace.Keys.DecodeFirst(lastKey) + 1; } private Slice GetKeyAt(long index) { - return this.Subspace.Tuples.EncodeKey(index); + return this.Subspace.Keys.Encode(index); } #endregion diff --git a/FoundationDB.Layers.Common/Counters/FdbCounterMap.cs 
b/FoundationDB.Layers.Common/Counters/FdbCounterMap.cs index b88d36fb0..5e774248a 100644 --- a/FoundationDB.Layers.Common/Counters/FdbCounterMap.cs +++ b/FoundationDB.Layers.Common/Counters/FdbCounterMap.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -41,19 +41,19 @@ public sealed class FdbCounterMap private static readonly Slice MinusOne = Slice.FromFixed64(-1); /// Create a new counter map. - public FdbCounterMap(IFdbSubspace subspace) + public FdbCounterMap([NotNull] IFdbSubspace subspace) : this(subspace, KeyValueEncoders.Tuples.Key()) { } /// Create a new counter map, using a specific key encoder. - public FdbCounterMap(IFdbSubspace subspace, IKeyEncoder keyEncoder) + public FdbCounterMap([NotNull] IFdbSubspace subspace, [NotNull] IKeyEncoder keyEncoder) { if (subspace == null) throw new ArgumentNullException("subspace"); if (keyEncoder == null) throw new ArgumentNullException("keyEncoder"); this.Subspace = subspace; this.KeyEncoder = keyEncoder; - this.Location = new FdbEncoderSubspace(subspace, keyEncoder); + this.Location = subspace.UsingEncoder(keyEncoder); } /// Subspace used as a prefix for all items in this counter list @@ -62,7 +62,7 @@ public FdbCounterMap(IFdbSubspace subspace, IKeyEncoder keyEncoder) /// Encoder for the keys of the counter map public IKeyEncoder KeyEncoder { [NotNull] get; private set; } - internal FdbEncoderSubspace Location { [NotNull] get; private set; } + internal IFdbEncoderSubspace Location { [NotNull] get; private set; } /// Add a value to a counter in one atomic operation /// @@ -76,7 +76,7 @@ public void Add([NotNull] IFdbTransaction transaction, [NotNull] TKey counterKey //REVIEW: we could no-op if value == 0 but this may change conflict behaviour for other transactions... Slice param = value == 1 ? PlusOne : value == -1 ? 
MinusOne : Slice.FromFixed64(value); - transaction.AtomicAdd(this.Location.EncodeKey(counterKey), param); + transaction.AtomicAdd(this.Location.Keys.Encode(counterKey), param); } /// Subtract a value from a counter in one atomic operation @@ -116,7 +116,7 @@ public void Decrement([NotNull] IFdbTransaction transaction, [NotNull] TKey coun if (transaction == null) throw new ArgumentNullException("transaction"); if (counterKey == null) throw new ArgumentNullException("counterKey"); - var data = await transaction.GetAsync(this.Location.EncodeKey(counterKey)).ConfigureAwait(false); + var data = await transaction.GetAsync(this.Location.Keys.Encode(counterKey)).ConfigureAwait(false); if (data.IsNullOrEmpty) return default(long?); return data.ToInt64(); } @@ -131,7 +131,7 @@ public async Task AddThenReadAsync([NotNull] IFdbTransaction transaction, if (transaction == null) throw new ArgumentNullException("transaction"); if (counterKey == null) throw new ArgumentNullException("counterKey"); - var key = this.Location.EncodeKey(counterKey); + var key = this.Location.Keys.Encode(counterKey); var res = await transaction.GetAsync(key).ConfigureAwait(false); if (!res.IsNullOrEmpty) value += res.ToInt64(); @@ -165,7 +165,7 @@ public async Task ReadThenAddAsync([NotNull] IFdbTransaction transaction, if (transaction == null) throw new ArgumentNullException("transaction"); if (counterKey == null) throw new ArgumentNullException("counterKey"); - var key = this.Location.EncodeKey(counterKey); + var key = this.Location.Keys.Encode(counterKey); var res = await transaction.GetAsync(key).ConfigureAwait(false); long previous = res.IsNullOrEmpty ? 
0 : res.ToInt64(); diff --git a/FoundationDB.Layers.Common/Counters/FdbHighContentionCounter.cs b/FoundationDB.Layers.Common/Counters/FdbHighContentionCounter.cs index 61720d939..cb8e7c21b 100644 --- a/FoundationDB.Layers.Common/Counters/FdbHighContentionCounter.cs +++ b/FoundationDB.Layers.Common/Counters/FdbHighContentionCounter.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -29,6 +29,7 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY namespace FoundationDB.Layers.Counters { using FoundationDB.Client; + using JetBrains.Annotations; using System; using System.Threading; using System.Threading.Tasks; @@ -53,7 +54,7 @@ public class FdbHighContentionCounter /// Create a new High Contention counter. /// Database used by this layer /// Subspace to be used for storing the counter - public FdbHighContentionCounter(IFdbDatabase db, FdbSubspace subspace) + public FdbHighContentionCounter([NotNull] IFdbDatabase db, [NotNull] IFdbSubspace subspace) : this(db, subspace, KeyValueEncoders.Tuples.Value()) { } @@ -61,25 +62,25 @@ public FdbHighContentionCounter(IFdbDatabase db, FdbSubspace subspace) /// Database used by this layer /// Subspace to be used for storing the counter /// Encoder for the counter values - public FdbHighContentionCounter(IFdbDatabase db, FdbSubspace subspace, IValueEncoder encoder) + public FdbHighContentionCounter([NotNull] IFdbDatabase db, [NotNull] IFdbSubspace subspace, [NotNull] IValueEncoder encoder) { if (db == null) throw new ArgumentNullException("db"); if (subspace == null) throw new ArgumentNullException("subspace"); if (encoder == null) throw new ArgumentNullException("encoder"); this.Database = db; - this.Subspace = subspace; + this.Subspace = subspace.Using(TypeSystem.Tuples); this.Encoder = encoder; } /// Subspace used as a prefix for all items in this table - public 
FdbSubspace Subspace { get; private set; } + public IFdbDynamicSubspace Subspace {[NotNull] get; private set; } /// Database instance that is used to perform background coalescing of the counter - public IFdbDatabase Database { get; private set; } + public IFdbDatabase Database {[NotNull] get; private set; } /// Encoder for the integer values of the counter - public IValueEncoder Encoder { get; private set; } + public IValueEncoder Encoder {[NotNull] get; private set; } /// Generate a new random slice protected virtual Slice RandomId() @@ -99,13 +100,13 @@ private async Task Coalesce(int N, CancellationToken ct) try { // read N writes from a random place in ID space - var loc = this.Subspace.Tuples.EncodeKey(RandomId()); + var loc = this.Subspace.Keys.Encode(RandomId()); bool right; lock(this.Rng) { right = this.Rng.NextDouble() < 0.5; } var query = right - ? tr.Snapshot.GetRange(loc, this.Subspace.Tuples.ToRange().End, limit: N, reverse: false) - : tr.Snapshot.GetRange(this.Subspace.Tuples.ToRange().Begin, loc, limit: N, reverse: true); + ? tr.Snapshot.GetRange(loc, this.Subspace.Keys.ToRange().End, limit: N, reverse: false) + : tr.Snapshot.GetRange(this.Subspace.Keys.ToRange().Begin, loc, limit: N, reverse: true); var shards = await query.ToListAsync().ConfigureAwait(false); if (shards.Count > 0) @@ -118,7 +119,7 @@ private async Task Coalesce(int N, CancellationToken ct) tr.Clear(shard.Key); } - tr.Set(this.Subspace.Tuples.EncodeKey(RandomId()), this.Encoder.EncodeValue(total)); + tr.Set(this.Subspace.Keys.Encode(RandomId()), this.Encoder.EncodeValue(total)); // note: contrary to the python impl, we will await the commit, and rely on the caller to not wait to the Coalesce task itself to complete. // That way, the transaction will live as long as the task, and we ensure that it gets disposed at some time @@ -169,13 +170,13 @@ private void BackgroundCoalesce(int n, CancellationToken ct) /// Get the value of the counter. 
/// Not recommended for use with read/write transactions when the counter is being frequently updated (conflicts will be very likely). /// - public async Task GetTransactional(IFdbReadOnlyTransaction trans) + public async Task GetTransactional([NotNull] IFdbReadOnlyTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); long total = 0; await trans - .GetRange(this.Subspace.Tuples.ToRange()) + .GetRange(this.Subspace.Keys.ToRange()) .ForEachAsync((kvp) => { checked { total += this.Encoder.DecodeValue(kvp.Value); } }) .ConfigureAwait(false); @@ -183,7 +184,7 @@ await trans } /// Get the value of the counter with snapshot isolation (no transaction conflicts). - public Task GetSnapshot(IFdbReadOnlyTransaction trans) + public Task GetSnapshot([NotNull] IFdbReadOnlyTransaction trans) { if (trans == null) throw new ArgumentNullException("trans"); @@ -191,11 +192,11 @@ public Task GetSnapshot(IFdbReadOnlyTransaction trans) } /// Add the value x to the counter. - public void Add(IFdbTransaction trans, long x) + public void Add([NotNull] IFdbTransaction trans, long x) { if (trans == null) throw new ArgumentNullException("trans"); - trans.Set(this.Subspace.Tuples.EncodeKey(RandomId()), this.Encoder.EncodeValue(x)); + trans.Set(this.Subspace.Keys.Encode(RandomId()), this.Encoder.EncodeValue(x)); // decide if we must coalesce //note: Random() is not thread-safe so we must lock @@ -209,7 +210,7 @@ public void Add(IFdbTransaction trans, long x) } /// Set the counter to value x. 
- public async Task SetTotal(IFdbTransaction trans, long x) + public async Task SetTotal([NotNull] IFdbTransaction trans, long x) { if (trans == null) throw new ArgumentNullException("trans"); diff --git a/FoundationDB.Layers.Common/Indexes/FdbIndex`2.cs b/FoundationDB.Layers.Common/Indexes/FdbIndex`2.cs index e25619b96..b6276d998 100644 --- a/FoundationDB.Layers.Common/Indexes/FdbIndex`2.cs +++ b/FoundationDB.Layers.Common/Indexes/FdbIndex`2.cs @@ -58,14 +58,14 @@ public FdbIndex([NotNull] string name, [NotNull] IFdbSubspace subspace, IEqualit this.Subspace = subspace; this.ValueComparer = valueComparer ?? EqualityComparer.Default; this.IndexNullValues = indexNullValues; - this.Location = new FdbEncoderSubspace(subspace, encoder); + this.Location = subspace.UsingEncoder(encoder); } public string Name { [NotNull] get; private set; } public IFdbSubspace Subspace { [NotNull] get; private set; } - protected FdbEncoderSubspace Location { [NotNull] get; private set; } + protected IFdbEncoderSubspace Location { [NotNull] get; private set; } public IEqualityComparer ValueComparer { [NotNull] get; private set; } @@ -82,7 +82,7 @@ public bool Add([NotNull] IFdbTransaction trans, TId id, TValue value) { if (this.IndexNullValues || value != null) { - trans.Set(this.Location.EncodeKey(value, id), Slice.Empty); + trans.Set(this.Location.Keys.Encode(value, id), Slice.Empty); return true; } return false; @@ -102,13 +102,13 @@ public bool Update([NotNull] IFdbTransaction trans, TId id, TValue newValue, TVa // remove previous value if (this.IndexNullValues || previousValue != null) { - this.Location.Clear(trans, FdbTuple.Create(previousValue, id)); + trans.Clear(this.Location.Keys.Encode(previousValue, id)); } // add new value if (this.IndexNullValues || newValue != null) { - this.Location.Set(trans, FdbTuple.Create(newValue, id), Slice.Empty); + trans.Set(this.Location.Keys.Encode(newValue, id), Slice.Empty); } // cannot be both null, so we did at least something) @@ -125,7 +125,7 
@@ public void Remove([NotNull] IFdbTransaction trans, TId id, TValue value) { if (trans == null) throw new ArgumentNullException("trans"); - this.Location.Clear(trans, FdbTuple.Create(value, id)); + trans.Clear(this.Location.Keys.Encode(value, id)); } /// Returns a list of ids matching a specific value @@ -148,48 +148,48 @@ public Task> LookupAsync([NotNull] IFdbReadOnlyTransaction trans, TVal [NotNull] public FdbRangeQuery Lookup(IFdbReadOnlyTransaction trans, TValue value, bool reverse = false) { - var prefix = this.Location.Partial.EncodeKey(value); + var prefix = this.Location.Partial.Keys.Encode(value); return trans .GetRange(FdbKeyRange.StartsWith(prefix), new FdbRangeOptions { Reverse = reverse }) - .Select((kvp) => this.Location.DecodeKey(kvp.Key).Item2); + .Select((kvp) => this.Location.Keys.Decode(kvp.Key).Item2); } [NotNull] public FdbRangeQuery LookupGreaterThan([NotNull] IFdbReadOnlyTransaction trans, TValue value, bool orEqual, bool reverse = false) { - var prefix = this.Location.Partial.EncodeKey(value); + var prefix = this.Location.Partial.Keys.Encode(value); if (!orEqual) prefix = FdbKey.Increment(prefix); var space = new FdbKeySelectorPair( FdbKeySelector.FirstGreaterThan(prefix), - FdbKeySelector.FirstGreaterOrEqual(this.Location.Tuples.ToRange().End) + FdbKeySelector.FirstGreaterOrEqual(this.Location.ToRange().End) ); return trans .GetRange(space, new FdbRangeOptions { Reverse = reverse }) - .Select((kvp) => this.Location.DecodeKey(kvp.Key).Item2); + .Select((kvp) => this.Location.Keys.Decode(kvp.Key).Item2); } [NotNull] public FdbRangeQuery LookupLessThan([NotNull] IFdbReadOnlyTransaction trans, TValue value, bool orEqual, bool reverse = false) { - var prefix = this.Location.Partial.EncodeKey(value); + var prefix = this.Location.Partial.Keys.Encode(value); if (orEqual) prefix = FdbKey.Increment(prefix); var space = new FdbKeySelectorPair( - FdbKeySelector.FirstGreaterOrEqual(this.Location.Tuples.ToRange().Begin), + 
FdbKeySelector.FirstGreaterOrEqual(this.Location.ToRange().Begin), FdbKeySelector.FirstGreaterThan(prefix) ); return trans .GetRange(space, new FdbRangeOptions { Reverse = reverse }) - .Select((kvp) => this.Location.DecodeKey(kvp.Key).Item2); + .Select((kvp) => this.Location.Keys.Decode(kvp.Key).Item2); } public override string ToString() { - return String.Format(CultureInfo.InvariantCulture, "Index['{0}']", this.Name); + return "Index[" + this.Name + "]"; } } diff --git a/FoundationDB.Layers.Common/Interning/FdbStringIntern.cs b/FoundationDB.Layers.Common/Interning/FdbStringIntern.cs index 8f25515cc..0ff97aeed 100644 --- a/FoundationDB.Layers.Common/Interning/FdbStringIntern.cs +++ b/FoundationDB.Layers.Common/Interning/FdbStringIntern.cs @@ -95,22 +95,22 @@ public FdbStringIntern(IFdbSubspace subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); - this.Subspace = subspace; + this.Subspace = subspace.Using(TypeSystem.Tuples); } - public IFdbSubspace Subspace { get; private set; } + public IFdbDynamicSubspace Subspace { get; private set; } #region Private Helpers... 
protected virtual Slice UidKey(Slice uid) { - return this.Subspace.Tuples.EncodeKey(Uid2StringKey, uid); + return this.Subspace.Keys.Encode(Uid2StringKey, uid); } protected virtual Slice StringKey(string value) { - return this.Subspace.Tuples.EncodeKey(String2UidKey, value); + return this.Subspace.Keys.Encode(String2UidKey, value); } /// Evict a random value from the cache diff --git a/FoundationDB.Layers.Experimental/Documents/FdbDocumentCollection.cs b/FoundationDB.Layers.Experimental/Documents/FdbDocumentCollection.cs index 49681f970..76611e893 100644 --- a/FoundationDB.Layers.Experimental/Documents/FdbDocumentCollection.cs +++ b/FoundationDB.Layers.Experimental/Documents/FdbDocumentCollection.cs @@ -60,12 +60,12 @@ public FdbDocumentCollection(FdbSubspace subspace, Func selector this.Subspace = subspace; this.IdSelector = selector; this.ValueEncoder = valueEncoder; - this.Location = new FdbEncoderSubspace(subspace, keyEncoder); + this.Location = subspace.UsingEncoder(keyEncoder); } protected virtual Task> LoadPartsAsync(IFdbReadOnlyTransaction trans, TId id) { - var key = this.Location.Partial.EncodeKey(id); + var key = this.Location.Partial.Keys.Encode(id); return trans .GetRange(FdbKeyRange.StartsWith(key)) //TODO: options ? 
@@ -82,7 +82,7 @@ protected virtual TDocument DecodeParts(List parts) /// Subspace used as a prefix for all hashsets in this collection public FdbSubspace Subspace { get; private set; } - protected FdbEncoderSubspace Location { get; private set; } + protected IFdbEncoderSubspace Location { get; private set; } /// Encoder that packs/unpacks the documents public IValueEncoder ValueEncoder { get; private set; } @@ -106,7 +106,7 @@ public void Insert(IFdbTransaction trans, TDocument document) var packed = this.ValueEncoder.EncodeValue(document); // Key Prefix = ...(id,) - var key = this.Location.Partial.EncodeKey(id); + var key = this.Location.Partial.Keys.Encode(id); // clear previous value trans.ClearRange(FdbKeyRange.StartsWith(key)); @@ -127,7 +127,7 @@ public void Insert(IFdbTransaction trans, TDocument document) while (remaining > 0) { int sz = Math.Max(remaining, this.ChunkSize); - this.Location.Set(trans, FdbTuple.Create(id, index), packed.Substring(p, sz)); + trans.Set(this.Location.Keys.Encode(id, index), packed.Substring(p, sz)); ++index; p += sz; remaining -= sz; @@ -171,7 +171,7 @@ public void Delete(IFdbTransaction trans, TId id) if (trans == null) throw new ArgumentNullException("trans"); if (id == null) throw new ArgumentNullException("id"); - var key = this.Location.Partial.EncodeKey(id); + var key = this.Location.Partial.Keys.Encode(id); trans.ClearRange(FdbKeyRange.StartsWith(key)); } @@ -184,7 +184,7 @@ public void DeleteMultiple(IFdbTransaction trans, IEnumerable ids) if (trans == null) throw new ArgumentNullException("trans"); if (ids == null) throw new ArgumentNullException("ids"); - foreach (var key in this.Location.Partial.EncodeKeys(ids)) + foreach (var key in this.Location.Partial.Keys.Encode(ids)) { trans.ClearRange(FdbKeyRange.StartsWith(key)); } diff --git a/FoundationDB.Layers.Experimental/Documents/FdbHashSetCollection.cs b/FoundationDB.Layers.Experimental/Documents/FdbHashSetCollection.cs index 7d8d27057..bd41e125f 100644 --- 
a/FoundationDB.Layers.Experimental/Documents/FdbHashSetCollection.cs +++ b/FoundationDB.Layers.Experimental/Documents/FdbHashSetCollection.cs @@ -51,11 +51,11 @@ public FdbHashSetCollection(IFdbSubspace subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); - this.Subspace = subspace; + this.Subspace = subspace.Using(TypeSystem.Tuples); } /// Subspace used as a prefix for all hashsets in this collection - public IFdbSubspace Subspace { get; private set; } + public IFdbDynamicSubspace Subspace { get; private set; } /// Returns the key prefix of an HashSet: (subspace, id, ) /// @@ -63,7 +63,7 @@ public FdbHashSetCollection(IFdbSubspace subspace) protected virtual Slice GetKey(IFdbTuple id) { //REVIEW: should the id be encoded as a an embedded tuple or not? - return this.Subspace.Tuples.Pack(id); + return this.Subspace.Keys.Pack(id); } /// Returns the key of a specific field of an HashSet: (subspace, id, field, ) @@ -73,7 +73,7 @@ protected virtual Slice GetKey(IFdbTuple id) protected virtual Slice GetFieldKey(IFdbTuple id, string field) { //REVIEW: should the id be encoded as a an embedded tuple or not? 
- return this.Subspace.Tuples.Pack(id.Append(field)); + return this.Subspace.Keys.Pack(id.Append(field)); } protected virtual string ParseFieldKey(IFdbTuple key) @@ -113,7 +113,7 @@ await trans .GetRange(FdbKeyRange.StartsWith(prefix)) .ForEachAsync((kvp) => { - string field = this.Subspace.Tuples.DecodeLast(kvp.Key); + string field = this.Subspace.Keys.DecodeLast(kvp.Key); results[field] = kvp.Value; }) .ConfigureAwait(false); diff --git a/FoundationDB.Layers.Experimental/Indexes/FdbCompressedBitmapIndex.cs b/FoundationDB.Layers.Experimental/Indexes/FdbCompressedBitmapIndex.cs index 8b7865464..cafdf3d70 100644 --- a/FoundationDB.Layers.Experimental/Indexes/FdbCompressedBitmapIndex.cs +++ b/FoundationDB.Layers.Experimental/Indexes/FdbCompressedBitmapIndex.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013-2014, Doxense SAS +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -29,16 +29,12 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY namespace FoundationDB.Layers.Experimental.Indexing { using FoundationDB.Client; - using FoundationDB.Client.Utils; - using FoundationDB.Layers.Tuples; using JetBrains.Annotations; using System; using System.Collections.Generic; using System.Diagnostics; using System.Globalization; - using System.IO; using System.Linq; - using System.Text; using System.Threading.Tasks; /// Simple index that maps values of type into lists of ids of type @@ -62,14 +58,14 @@ public FdbCompressedBitmapIndex([NotNull] string name, [NotNull] FdbSubspace sub this.Subspace = subspace; this.ValueComparer = valueComparer ?? 
EqualityComparer.Default; this.IndexNullValues = indexNullValues; - this.Location = new FdbEncoderSubspace(subspace, encoder); + this.Location = subspace.UsingEncoder(encoder); } public string Name { [NotNull] get; private set; } public FdbSubspace Subspace { [NotNull] get; private set; } - protected FdbEncoderSubspace Location { [NotNull] get; private set; } + protected IFdbEncoderSubspace Location { [NotNull] get; private set; } public IEqualityComparer ValueComparer { [NotNull] get; private set; } @@ -88,7 +84,7 @@ public async Task AddAsync([NotNull] IFdbTransaction trans, long id, TValu if (this.IndexNullValues || value != null) { - var key = this.Location.EncodeKey(value); + var key = this.Location.Keys.Encode(value); var data = await trans.GetAsync(key).ConfigureAwait(false); var builder = data.HasValue ? new CompressedBitmapBuilder(data) : CompressedBitmapBuilder.Empty; @@ -118,7 +114,7 @@ public async Task UpdateAsync([NotNull] IFdbTransaction trans, long id, TV // remove previous value if (this.IndexNullValues || previousValue != null) { - var key = this.Location.EncodeKey(previousValue); + var key = this.Location.Keys.Encode(previousValue); var data = await trans.GetAsync(key).ConfigureAwait(false); if (data.HasValue) { @@ -131,7 +127,7 @@ public async Task UpdateAsync([NotNull] IFdbTransaction trans, long id, TV // add new value if (this.IndexNullValues || newValue != null) { - var key = this.Location.EncodeKey(newValue); + var key = this.Location.Keys.Encode(newValue); var data = await trans.GetAsync(key).ConfigureAwait(false); var builder = data.HasValue ? new CompressedBitmapBuilder(data) : CompressedBitmapBuilder.Empty; builder.Set((int)id); //BUGBUG: 64 bit id! 
@@ -152,7 +148,7 @@ public async Task RemoveAsync([NotNull] IFdbTransaction trans, long id, TV { if (trans == null) throw new ArgumentNullException("trans"); - var key = this.Location.EncodeKey(value); + var key = this.Location.Keys.Encode(value); var data = await trans.GetAsync(key).ConfigureAwait(false); if (data.HasValue) { @@ -171,7 +167,7 @@ public async Task RemoveAsync([NotNull] IFdbTransaction trans, long id, TV /// List of document ids matching this value for this particular index (can be empty if no document matches) public async Task> LookupAsync([NotNull] IFdbReadOnlyTransaction trans, TValue value, bool reverse = false) { - var key = this.Location.EncodeKey(value); + var key = this.Location.Keys.Encode(value); var data = await trans.GetAsync(key).ConfigureAwait(false); if (data.IsNull) return null; if (data.IsEmpty) return Enumerable.Empty(); diff --git a/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs b/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs index 213b63d55..a9c44da08 100644 --- a/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs +++ b/FoundationDB.Layers.Experimental/Messaging/FdbWorkerPool.cs @@ -64,15 +64,15 @@ public class FdbWorkerPool private readonly RandomNumberGenerator m_rng = RandomNumberGenerator.Create(); - public IFdbSubspace Subspace { get; private set; } + public IFdbDynamicSubspace Subspace { get; private set; } - internal IFdbSubspace TaskStore { get; private set; } + internal IFdbDynamicSubspace TaskStore { get; private set; } - internal IFdbSubspace IdleRing { get; private set; } + internal IFdbDynamicSubspace IdleRing { get; private set; } - internal IFdbSubspace BusyRing { get; private set; } + internal IFdbDynamicSubspace BusyRing { get; private set; } - internal IFdbSubspace UnassignedTaskRing { get; private set; } + internal IFdbDynamicSubspace UnassignedTaskRing { get; private set; } internal FdbCounterMap Counters { get; private set; } @@ -112,22 +112,22 @@ public 
FdbWorkerPool(IFdbSubspace subspace) { if (subspace == null) throw new ArgumentNullException("subspace"); - this.Subspace = subspace; + this.Subspace = subspace.Using(TypeSystem.Tuples); - this.TaskStore = subspace.Partition.ByKey(Slice.FromChar('T')); - this.IdleRing = subspace.Partition.ByKey(Slice.FromChar('I')); - this.BusyRing = subspace.Partition.ByKey(Slice.FromChar('B')); - this.UnassignedTaskRing = subspace.Partition.ByKey(Slice.FromChar('U')); + this.TaskStore = this.Subspace.Partition.ByKey(Slice.FromChar('T')); + this.IdleRing = this.Subspace.Partition.ByKey(Slice.FromChar('I')); + this.BusyRing = this.Subspace.Partition.ByKey(Slice.FromChar('B')); + this.UnassignedTaskRing = this.Subspace.Partition.ByKey(Slice.FromChar('U')); - this.Counters = new FdbCounterMap(subspace.Partition.ByKey(Slice.FromChar('C'))); + this.Counters = new FdbCounterMap(this.Subspace.Partition.ByKey(Slice.FromChar('C'))); } - private async Task> FindRandomItem(IFdbTransaction tr, IFdbSubspace ring) + private async Task> FindRandomItem(IFdbTransaction tr, IFdbDynamicSubspace ring) { - var range = ring.Tuples.ToRange(); + var range = ring.Keys.ToRange(); // start from a random position around the ring - Slice key = ring.Tuples.EncodeKey(GetRandomId()); + Slice key = ring.Keys.Encode(GetRandomId()); // We want to find the next item in the clockwise direction. If we reach the end of the ring, we "wrap around" by starting again from the start // => So we do find_next(key <= x < MAX) and if that does not produce any result, we do a find_next(MIN <= x < key) @@ -154,19 +154,19 @@ private Slice GetRandomId() } } - private async Task PushQueueAsync(IFdbTransaction tr, IFdbSubspace queue, Slice taskId) + private async Task PushQueueAsync(IFdbTransaction tr, IFdbDynamicSubspace queue, Slice taskId) { //TODO: use a high contention algo ? 
// - must support Push and Pop // - an empty queue must correspond to an empty subspace // get the current size of the queue - var range = queue.Tuples.ToRange(); + var range = queue.Keys.ToRange(); var lastKey = await tr.Snapshot.GetKeyAsync(FdbKeySelector.LastLessThan(range.End)).ConfigureAwait(false); - int count = lastKey < range.Begin ? 0 : queue.Tuples.DecodeFirst(lastKey) + 1; + int count = lastKey < range.Begin ? 0 : queue.Keys.DecodeFirst(lastKey) + 1; // set the value - tr.Set(queue.Tuples.EncodeKey(count, GetRandomId()), taskId); + tr.Set(queue.Keys.Encode(count, GetRandomId()), taskId); } private void StoreTask(IFdbTransaction tr, Slice taskId, DateTime scheduledUtc, Slice taskBody) @@ -177,7 +177,7 @@ private void StoreTask(IFdbTransaction tr, Slice taskId, DateTime scheduledUtc, // store task body and timestamp tr.Set(prefix.Key, taskBody); - tr.Set(prefix.Tuples.EncodeKey(TASK_META_SCHEDULED), Slice.FromInt64(scheduledUtc.Ticks)); + tr.Set(prefix.Keys.Encode(TASK_META_SCHEDULED), Slice.FromInt64(scheduledUtc.Ticks)); // increment total and pending number of tasks this.Counters.Increment(tr, COUNTER_TOTAL_TASKS); this.Counters.Increment(tr, COUNTER_PENDING_TASKS); @@ -188,7 +188,7 @@ private void ClearTask(IFdbTransaction tr, Slice taskId) tr.Annotate("Deleting task {0}", taskId.ToAsciiOrHexaString()); // clear all metadata about the task - tr.ClearRange(FdbKeyRange.StartsWith(this.TaskStore.Tuples.EncodeKey(taskId))); + tr.ClearRange(FdbKeyRange.StartsWith(this.TaskStore.Keys.Encode(taskId))); // decrement pending number of tasks this.Counters.Decrement(tr, COUNTER_PENDING_TASKS); } @@ -217,16 +217,16 @@ await db.ReadWriteAsync(async (tr) => if (randomWorkerKey.Key != null) { - Slice workerId = this.IdleRing.Tuples.DecodeKey(randomWorkerKey.Key); + Slice workerId = this.IdleRing.Keys.Decode(randomWorkerKey.Key); tr.Annotate("Assigning {0} to {1}", taskId.ToAsciiOrHexaString(), workerId.ToAsciiOrHexaString()); // remove worker from the idle ring - 
tr.Clear(this.IdleRing.Tuples.EncodeKey(workerId)); + tr.Clear(this.IdleRing.Keys.Encode(workerId)); this.Counters.Decrement(tr, COUNTER_IDLE); // assign task to the worker - tr.Set(this.BusyRing.Tuples.EncodeKey(workerId), taskId); + tr.Set(this.BusyRing.Keys.Encode(workerId), taskId); this.Counters.Increment(tr, COUNTER_BUSY); } else @@ -283,7 +283,7 @@ await db.ReadWriteAsync( else if (myId.IsPresent) { // look for an already assigned task tr.Annotate("Look for already assigned task"); - msg.Id = await tr.GetAsync(this.BusyRing.Tuples.EncodeKey(myId)).ConfigureAwait(false); + msg.Id = await tr.GetAsync(this.BusyRing.Keys.Encode(myId)).ConfigureAwait(false); } if (!msg.Id.IsPresent) @@ -292,7 +292,7 @@ await db.ReadWriteAsync( tr.Annotate("Look for next queued item"); // Find the next task on the queue - var item = await tr.GetRange(this.UnassignedTaskRing.Tuples.ToRange()).FirstOrDefaultAsync().ConfigureAwait(false); + var item = await tr.GetRange(this.UnassignedTaskRing.Keys.ToRange()).FirstOrDefaultAsync().ConfigureAwait(false); if (item.Key != null) { // pop the Task from the queue @@ -305,7 +305,7 @@ await db.ReadWriteAsync( // note: we need a random id so generate one if it is the first time... if (!myId.IsPresent) myId = GetRandomId(); tr.Annotate("Found {0}, switch to busy with id {1}", msg.Id.ToAsciiOrHexaString(), myId.ToAsciiOrHexaString()); - tr.Set(this.BusyRing.Tuples.EncodeKey(myId), msg.Id); + tr.Set(this.BusyRing.Keys.Encode(myId), msg.Id); this.Counters.Increment(tr, COUNTER_BUSY); } else if (myId.IsPresent) @@ -323,7 +323,7 @@ await db.ReadWriteAsync( //TODO: replace this with a get_range ? 
var data = await tr.GetValuesAsync(new [] { prefix.ToFoundationDbKey(), - prefix.Tuples.EncodeKey(TASK_META_SCHEDULED) + prefix.Keys.Encode(TASK_META_SCHEDULED) }).ConfigureAwait(false); msg.Body = data[0]; @@ -336,7 +336,7 @@ await db.ReadWriteAsync( // remove us from the busy ring if (myId.IsPresent) { - tr.Clear(this.BusyRing.Tuples.EncodeKey(myId)); + tr.Clear(this.BusyRing.Keys.Encode(myId)); this.Counters.Decrement(tr, COUNTER_BUSY); } @@ -344,7 +344,7 @@ await db.ReadWriteAsync( myId = GetRandomId(); // the idle key will also be used as the watch key to wake us up - var watchKey = this.IdleRing.Tuples.EncodeKey(myId); + var watchKey = this.IdleRing.Keys.Encode(myId); tr.Annotate("Will start watching on key {0} with id {1}", watchKey.ToAsciiOrHexaString(), myId.ToAsciiOrHexaString()); tr.Set(watchKey, Slice.Empty); this.Counters.Increment(tr, COUNTER_IDLE); diff --git a/FoundationDB.Layers.Experimental/Messaging/WorkerPoolTest.cs b/FoundationDB.Layers.Experimental/Messaging/WorkerPoolTest.cs index f7d5bdfc0..41ea9f191 100644 --- a/FoundationDB.Layers.Experimental/Messaging/WorkerPoolTest.cs +++ b/FoundationDB.Layers.Experimental/Messaging/WorkerPoolTest.cs @@ -1,7 +1,6 @@ using FoundationDB.Async; using FoundationDB.Client; using FoundationDB.Filters.Logging; -using FoundationDB.Layers.Tuples; using System; using System.Diagnostics; using System.Linq; @@ -59,7 +58,7 @@ public void Main() } } - private async Task RunAsync(IFdbDatabase db, FdbSubspace location, CancellationToken ct, Action done, int N, int K, int W) + private async Task RunAsync(IFdbDatabase db, IFdbDynamicSubspace location, CancellationToken ct, Action done, int N, int K, int W) { if (db == null) throw new ArgumentNullException("db"); @@ -142,7 +141,7 @@ await tr.Snapshot .GetRange(FdbKeyRange.StartsWith(location.Key)) .ForEachAsync((kvp) => { - Console.WriteLine(" - " + location.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); + Console.WriteLine(" - " + 
location.Keys.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }).ConfigureAwait(false); } Console.WriteLine(""); diff --git a/FoundationDB.Samples/Benchmarks/BenchRunner.cs b/FoundationDB.Samples/Benchmarks/BenchRunner.cs index fafd119d6..9e1d67694 100644 --- a/FoundationDB.Samples/Benchmarks/BenchRunner.cs +++ b/FoundationDB.Samples/Benchmarks/BenchRunner.cs @@ -46,7 +46,7 @@ public BenchRunner(BenchMode mode, int value = 1) public BenchMode Mode { get; private set; } - public FdbSubspace Subspace { get; private set; } + public IFdbDynamicSubspace Subspace { get; private set; } public RobustHistogram Histo { get; private set; } @@ -86,7 +86,7 @@ public async Task Run(IFdbDatabase db, TextWriter log, CancellationToken ct) var duration = Stopwatch.StartNew(); - var foo = this.Subspace.Tuples.EncodeKey("foo"); + var foo = this.Subspace.Keys.Encode("foo"); var bar = Slice.FromString("bar"); var barf = Slice.FromString("barf"); diff --git a/FoundationDB.Samples/Benchmarks/LeakTest.cs b/FoundationDB.Samples/Benchmarks/LeakTest.cs index 0f5f99439..54e180f30 100644 --- a/FoundationDB.Samples/Benchmarks/LeakTest.cs +++ b/FoundationDB.Samples/Benchmarks/LeakTest.cs @@ -27,7 +27,7 @@ public LeakTest(int k, int m, int n, TimeSpan delay) public int N { get; private set; } public TimeSpan Delay { get; private set; } - public IFdbSubspace Subspace { get; private set; } + public IFdbDynamicSubspace Subspace { get; private set; } /// /// Setup the initial state of the database @@ -43,8 +43,8 @@ public async Task Init(IFdbDatabase db, CancellationToken ct) // insert all the classes await db.WriteAsync((tr) => { - tr.Set(this.Subspace.Keys[FdbKey.MinValue], Slice.FromString("BEGIN")); - tr.Set(this.Subspace.Keys[FdbKey.MaxValue], Slice.FromString("END")); + tr.Set(this.Subspace.Key + FdbKey.MinValue, Slice.FromString("BEGIN")); + tr.Set(this.Subspace.Key + FdbKey.MaxValue, Slice.FromString("END")); }, ct); } @@ -81,7 +81,7 @@ await db.WriteAsync((tr) => if 
(tr.Context.Retries > 0) Console.Write("!"); for (int j = 0; j < values.Length; j++) { - tr.Set(location.Tuples.EncodeKey(j, now), Slice.FromString(values[j] + new string('A', 100))); + tr.Set(location.Keys.Encode(j, now), Slice.FromString(values[j] + new string('A', 100))); } }, ct); Console.Write("."); diff --git a/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs b/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs index 683f0657b..3b3753f05 100644 --- a/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs +++ b/FoundationDB.Samples/MessageQueue/MessageQueueRunner.cs @@ -149,7 +149,7 @@ public async Task RunStatus(IFdbDatabase db, CancellationToken ct) using(var tr = db.BeginTransaction(ct)) { - var counters = await tr.Snapshot.GetRange(countersLocation.Tuples.ToRange()).Select(kvp => new KeyValuePair(countersLocation.Tuples.DecodeLast(kvp.Key), kvp.Value.ToInt64())).ToListAsync().ConfigureAwait(false); + var counters = await tr.Snapshot.GetRange(countersLocation.Keys.ToRange()).Select(kvp => new KeyValuePair(countersLocation.Keys.DecodeLast(kvp.Key), kvp.Value.ToInt64())).ToListAsync().ConfigureAwait(false); Console.WriteLine("Status at " + DateTimeOffset.Now.ToString("O")); foreach(var counter in counters) @@ -159,24 +159,24 @@ public async Task RunStatus(IFdbDatabase db, CancellationToken ct) Console.WriteLine("Dump:"); Console.WriteLine("> Idle"); - await tr.Snapshot.GetRange(idleLocation.Tuples.ToRange()).ForEachAsync((kvp) => + await tr.Snapshot.GetRange(idleLocation.Keys.ToRange()).ForEachAsync((kvp) => { - Console.WriteLine("- Idle." + idleLocation.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); + Console.WriteLine("- Idle." 
+ idleLocation.Keys.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("> Busy"); - await tr.Snapshot.GetRange(busyLocation.Tuples.ToRange()).ForEachAsync((kvp) => + await tr.Snapshot.GetRange(busyLocation.Keys.ToRange()).ForEachAsync((kvp) => { - Console.WriteLine("- Busy." + busyLocation.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); + Console.WriteLine("- Busy." + busyLocation.Keys.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("> Unassigned"); - await tr.Snapshot.GetRange(unassignedLocation.Tuples.ToRange()).ForEachAsync((kvp) => + await tr.Snapshot.GetRange(unassignedLocation.Keys.ToRange()).ForEachAsync((kvp) => { - Console.WriteLine("- Unassigned." + unassignedLocation.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); + Console.WriteLine("- Unassigned." + unassignedLocation.Keys.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("> Tasks"); - await tr.Snapshot.GetRange(tasksLocation.Tuples.ToRange()).ForEachAsync((kvp) => + await tr.Snapshot.GetRange(tasksLocation.Keys.ToRange()).ForEachAsync((kvp) => { - Console.WriteLine("- Tasks." + tasksLocation.Tuples.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); + Console.WriteLine("- Tasks." 
+ tasksLocation.Keys.Unpack(kvp.Key) + " = " + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("<"); } diff --git a/FoundationDB.Samples/Tutorials/ClassScheduling.cs b/FoundationDB.Samples/Tutorials/ClassScheduling.cs index 7bdeb13ae..4b237ad70 100644 --- a/FoundationDB.Samples/Tutorials/ClassScheduling.cs +++ b/FoundationDB.Samples/Tutorials/ClassScheduling.cs @@ -34,21 +34,21 @@ public ClassScheduling() public string[] ClassNames { get; private set; } - public FdbSubspace Subspace { get; private set; } + public IFdbDynamicSubspace Subspace { get; private set; } protected Slice ClassKey(string c) { - return this.Subspace.Tuples.EncodeKey("class", c); + return this.Subspace.Keys.Encode("class", c); } protected Slice AttendsKey(string s, string c) { - return this.Subspace.Tuples.EncodeKey("attends", s, c); + return this.Subspace.Keys.Encode("attends", s, c); } protected FdbKeyRange AttendsKeys(string s) { - return this.Subspace.Tuples.ToRange(FdbTuple.Create("attends", s)); + return this.Subspace.Keys.ToRange(FdbTuple.Create("attends", s)); } /// @@ -78,9 +78,9 @@ await db.WriteAsync((tr) => /// public Task> AvailableClasses(IFdbReadOnlyTransaction tr) { - return tr.GetRange(this.Subspace.Tuples.ToRange(FdbTuple.Create("class"))) + return tr.GetRange(this.Subspace.Keys.ToRange(FdbTuple.Create("class"))) .Where(kvp => { int _; return Int32.TryParse(kvp.Value.ToAscii(), out _); }) // (step 3) - .Select(kvp => this.Subspace.Tuples.DecodeKey(kvp.Key)) + .Select(kvp => this.Subspace.Keys.Decode(kvp.Key)) .ToListAsync(); } diff --git a/FoundationDB.Storage.Memory.Test/Transactions/Benchmarks.cs b/FoundationDB.Storage.Memory.Test/Transactions/Benchmarks.cs index e0b8cdf37..c4804a046 100644 --- a/FoundationDB.Storage.Memory.Test/Transactions/Benchmarks.cs +++ b/FoundationDB.Storage.Memory.Test/Transactions/Benchmarks.cs @@ -62,7 +62,7 @@ public async Task MiniBench() //WARMUP using (var db = MemoryDatabase.CreateNew("FOO")) { - await db.WriteAsync((tr) => 
tr.Set(db.Tuples.EncodeKey("hello"), Slice.FromString("world")), this.Cancellation); + await db.WriteAsync((tr) => tr.Set(db.Keys.Encode("hello"), Slice.FromString("world")), this.Cancellation); Slice.Random(rnd, KEYSIZE); Slice.Random(rnd, VALUESIZE); } diff --git a/FoundationDB.Storage.Memory.Test/Transactions/Comparisons.cs b/FoundationDB.Storage.Memory.Test/Transactions/Comparisons.cs index 573257d99..733213ca9 100644 --- a/FoundationDB.Storage.Memory.Test/Transactions/Comparisons.cs +++ b/FoundationDB.Storage.Memory.Test/Transactions/Comparisons.cs @@ -24,11 +24,11 @@ private async Task Scenario1(IFdbTransaction tr) private Task Scenario2(IFdbTransaction tr) { - var location = FdbSubspace.Create(Slice.FromAscii("TEST")); + var location = FdbSubspace.CreateDynamic(Slice.FromAscii("TEST")); tr.ClearRange(FdbKeyRange.StartsWith(location.Key)); for (int i = 0; i < 10; i++) { - tr.Set(location.Tuples.EncodeKey(i), Slice.FromString("value of " + i)); + tr.Set(location.Keys.Encode(i), Slice.FromString("value of " + i)); } return Task.FromResult(null); } @@ -59,26 +59,26 @@ private Task Scenario4(IFdbTransaction tr) private async Task Scenario5(IFdbTransaction tr) { - var location = FdbSubspace.Create(Slice.FromAscii("TEST")); + var location = FdbSubspace.CreateDynamic(Slice.FromAscii("TEST")); //tr.Set(location.Pack(42), Slice.FromString("42")); //tr.Set(location.Pack(50), Slice.FromString("50")); //tr.Set(location.Pack(60), Slice.FromString("60")); - var x = await tr.GetKeyAsync(FdbKeySelector.LastLessThan(location.Tuples.EncodeKey(49))); + var x = await tr.GetKeyAsync(FdbKeySelector.LastLessThan(location.Keys.Encode(49))); Console.WriteLine(x); - tr.Set(location.Tuples.EncodeKey("FOO"), Slice.FromString("BAR")); + tr.Set(location.Keys.Encode("FOO"), Slice.FromString("BAR")); } private async Task Scenario6(IFdbTransaction tr) { - var location = FdbSubspace.Create(Slice.FromAscii("TEST")); + var location = FdbSubspace.CreateDynamic(Slice.FromAscii("TEST")); - 
tr.AtomicAdd(location.Tuples.EncodeKey("ATOMIC"), Slice.FromFixed32(0x55555555)); + tr.AtomicAdd(location.Keys.Encode("ATOMIC"), Slice.FromFixed32(0x55555555)); - var x = await tr.GetAsync(location.Tuples.EncodeKey("ATOMIC")); + var x = await tr.GetAsync(location.Keys.Encode("ATOMIC")); Console.WriteLine(x.ToInt32().ToString("x")); } diff --git a/FoundationDB.Storage.Memory.Test/Transactions/MemoryTransactionFacts.cs b/FoundationDB.Storage.Memory.Test/Transactions/MemoryTransactionFacts.cs index 7289ffd94..43de8d3a2 100644 --- a/FoundationDB.Storage.Memory.Test/Transactions/MemoryTransactionFacts.cs +++ b/FoundationDB.Storage.Memory.Test/Transactions/MemoryTransactionFacts.cs @@ -26,7 +26,7 @@ public async Task Test_Hello_World() { using (var db = MemoryDatabase.CreateNew("DB", FdbSubspace.Empty, false)) { - var key = db.Tuples.EncodeKey("hello"); + var key = db.Keys.Encode("hello"); // v1 await db.WriteAsync((tr) => tr.Set(key, Slice.FromString("World!")), this.Cancellation); @@ -70,15 +70,15 @@ public async Task Test_GetKey() using (var db = MemoryDatabase.CreateNew("DB")) { - var location = db.Tuples; + var location = db.Keys; using (var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(location.EncodeKey(0), Slice.FromString("first")); - tr.Set(location.EncodeKey(10), Slice.FromString("ten")); - tr.Set(location.EncodeKey(20), Slice.FromString("ten ten")); - tr.Set(location.EncodeKey(42), Slice.FromString("narf!")); - tr.Set(location.EncodeKey(100), Slice.FromString("a hundred missipis")); + tr.Set(location.Encode(0), Slice.FromString("first")); + tr.Set(location.Encode(10), Slice.FromString("ten")); + tr.Set(location.Encode(20), Slice.FromString("ten ten")); + tr.Set(location.Encode(42), Slice.FromString("narf!")); + tr.Set(location.Encode(100), Slice.FromString("a hundred missipis")); await tr.CommitAsync(); } @@ -87,35 +87,35 @@ public async Task Test_GetKey() using (var tr = db.BeginTransaction(this.Cancellation)) { - value = await 
tr.GetAsync(location.EncodeKey(42)); + value = await tr.GetAsync(location.Encode(42)); Console.WriteLine(value); Assert.That(value.ToString(), Is.EqualTo("narf!")); - key = await tr.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(42))); - Assert.That(key, Is.EqualTo(location.EncodeKey(42))); + key = await tr.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(location.Encode(42))); + Assert.That(key, Is.EqualTo(location.Encode(42))); - key = await tr.GetKeyAsync(FdbKeySelector.FirstGreaterThan(location.EncodeKey(42))); - Assert.That(key, Is.EqualTo(location.EncodeKey(100))); + key = await tr.GetKeyAsync(FdbKeySelector.FirstGreaterThan(location.Encode(42))); + Assert.That(key, Is.EqualTo(location.Encode(100))); - key = await tr.GetKeyAsync(FdbKeySelector.LastLessOrEqual(location.EncodeKey(42))); - Assert.That(key, Is.EqualTo(location.EncodeKey(42))); + key = await tr.GetKeyAsync(FdbKeySelector.LastLessOrEqual(location.Encode(42))); + Assert.That(key, Is.EqualTo(location.Encode(42))); - key = await tr.GetKeyAsync(FdbKeySelector.LastLessThan(location.EncodeKey(42))); - Assert.That(key, Is.EqualTo(location.EncodeKey(20))); + key = await tr.GetKeyAsync(FdbKeySelector.LastLessThan(location.Encode(42))); + Assert.That(key, Is.EqualTo(location.Encode(20))); var keys = await tr.GetKeysAsync(new[] { - FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(42)), - FdbKeySelector.FirstGreaterThan(location.EncodeKey(42)), - FdbKeySelector.LastLessOrEqual(location.EncodeKey(42)), - FdbKeySelector.LastLessThan(location.EncodeKey(42)) + FdbKeySelector.FirstGreaterOrEqual(location.Encode(42)), + FdbKeySelector.FirstGreaterThan(location.Encode(42)), + FdbKeySelector.LastLessOrEqual(location.Encode(42)), + FdbKeySelector.LastLessThan(location.Encode(42)) }); Assert.That(keys.Length, Is.EqualTo(4)); - Assert.That(keys[0], Is.EqualTo(location.EncodeKey(42))); - Assert.That(keys[1], Is.EqualTo(location.EncodeKey(100))); - Assert.That(keys[2], 
Is.EqualTo(location.EncodeKey(42))); - Assert.That(keys[3], Is.EqualTo(location.EncodeKey(20))); + Assert.That(keys[0], Is.EqualTo(location.Encode(42))); + Assert.That(keys[1], Is.EqualTo(location.Encode(100))); + Assert.That(keys[2], Is.EqualTo(location.Encode(42))); + Assert.That(keys[3], Is.EqualTo(location.Encode(20))); await tr.CommitAsync(); } @@ -131,13 +131,13 @@ public async Task Test_GetKey_ReadConflicts() using (var db = MemoryDatabase.CreateNew("FOO")) { - var location = db.Tuples; + var location = db.Keys; using (var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(location.EncodeKey(42), Slice.FromString("42")); - tr.Set(location.EncodeKey(50), Slice.FromString("50")); - tr.Set(location.EncodeKey(60), Slice.FromString("60")); + tr.Set(location.Encode(42), Slice.FromString("42")); + tr.Set(location.Encode(50), Slice.FromString("50")); + tr.Set(location.Encode(60), Slice.FromString("60")); await tr.CommitAsync(); } db.Debug_Dump(); @@ -153,39 +153,39 @@ public async Task Test_GetKey_ReadConflicts() }; await check( - FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(50)), - location.EncodeKey(50) + FdbKeySelector.FirstGreaterOrEqual(location.Encode(50)), + location.Encode(50) ); await check( - FdbKeySelector.FirstGreaterThan(location.EncodeKey(50)), - location.EncodeKey(60) + FdbKeySelector.FirstGreaterThan(location.Encode(50)), + location.Encode(60) ); await check( - FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(49)), - location.EncodeKey(50) + FdbKeySelector.FirstGreaterOrEqual(location.Encode(49)), + location.Encode(50) ); await check( - FdbKeySelector.FirstGreaterThan(location.EncodeKey(49)), - location.EncodeKey(50) + FdbKeySelector.FirstGreaterThan(location.Encode(49)), + location.Encode(50) ); await check( - FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(49)) + 1, - location.EncodeKey(60) + FdbKeySelector.FirstGreaterOrEqual(location.Encode(49)) + 1, + location.Encode(60) ); await check( - 
FdbKeySelector.FirstGreaterThan(location.EncodeKey(49)) + 1, - location.EncodeKey(60) + FdbKeySelector.FirstGreaterThan(location.Encode(49)) + 1, + location.Encode(60) ); await check( - FdbKeySelector.LastLessOrEqual(location.EncodeKey(49)), - location.EncodeKey(42) + FdbKeySelector.LastLessOrEqual(location.Encode(49)), + location.Encode(42) ); await check( - FdbKeySelector.LastLessThan(location.EncodeKey(49)), - location.EncodeKey(42) + FdbKeySelector.LastLessThan(location.Encode(49)), + location.Encode(42) ); } } @@ -197,13 +197,13 @@ public async Task Test_GetRangeAsync() using (var db = MemoryDatabase.CreateNew("DB")) { - var location = db.Tuples; + var location = db.Keys; using (var tr = db.BeginTransaction(this.Cancellation)) { for (int i = 0; i <= 100; i++) { - tr.Set(location.EncodeKey(i), Slice.FromString("value of " + i)); + tr.Set(location.Encode(i), Slice.FromString("value of " + i)); } await tr.CommitAsync(); } @@ -218,15 +218,15 @@ public async Task Test_GetRangeAsync() key = await tr.GetKeyAsync(FdbKeySelector.LastLessOrEqual(FdbKey.MaxValue)); if (key != FdbKey.MaxValue) Assert.Inconclusive("Key selectors are buggy: lLE(max)"); key = await tr.GetKeyAsync(FdbKeySelector.LastLessThan(FdbKey.MaxValue)); - if (key != location.EncodeKey(100)) Assert.Inconclusive("Key selectors are buggy: lLT(max)"); + if (key != location.Encode(100)) Assert.Inconclusive("Key selectors are buggy: lLT(max)"); } using (var tr = db.BeginTransaction(this.Cancellation)) { var chunk = await tr.GetRangeAsync( - FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(0)), - FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(50)) + FdbKeySelector.FirstGreaterOrEqual(location.Encode(0)), + FdbKeySelector.FirstGreaterOrEqual(location.Encode(50)) ); #if DEBUG for (int i = 0; i < chunk.Count; i++) @@ -242,7 +242,7 @@ public async Task Test_GetRangeAsync() for (int i = 0; i < 50; i++) { - Assert.That(chunk.Chunk[i].Key, Is.EqualTo(location.EncodeKey(i)), "[{0}].Key", i); + 
Assert.That(chunk.Chunk[i].Key, Is.EqualTo(location.Encode(i)), "[{0}].Key", i); Assert.That(chunk.Chunk[i].Value.ToString(), Is.EqualTo("value of " + i), "[{0}].Value", i); } @@ -253,8 +253,8 @@ public async Task Test_GetRangeAsync() { var chunk = await tr.GetRangeAsync( - FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(0)), - FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(50)), + FdbKeySelector.FirstGreaterOrEqual(location.Encode(0)), + FdbKeySelector.FirstGreaterOrEqual(location.Encode(50)), new FdbRangeOptions { Reverse = true } ); #if DEBUG @@ -271,7 +271,7 @@ public async Task Test_GetRangeAsync() for (int i = 0; i < 50; i++) { - Assert.That(chunk.Chunk[i].Key, Is.EqualTo(location.EncodeKey(49 - i)), "[{0}].Key", i); + Assert.That(chunk.Chunk[i].Key, Is.EqualTo(location.Encode(49 - i)), "[{0}].Key", i); Assert.That(chunk.Chunk[i].Value.ToString(), Is.EqualTo("value of " + (49 - i)), "[{0}].Value", i); } @@ -282,7 +282,7 @@ public async Task Test_GetRangeAsync() { var chunk = await tr.GetRangeAsync( - FdbKeySelector.FirstGreaterOrEqual(location.EncodeKey(0)), + FdbKeySelector.FirstGreaterOrEqual(location.Encode(0)), FdbKeySelector.FirstGreaterOrEqual(FdbKey.MaxValue), new FdbRangeOptions { Reverse = true, Limit = 1 } ); @@ -311,13 +311,13 @@ public async Task Test_GetRange() using (var db = MemoryDatabase.CreateNew("DB")) { - var location = db.Tuples; + var location = db.Keys; using (var tr = db.BeginTransaction(this.Cancellation)) { for (int i = 0; i <= 100; i++) { - tr.Set(location.EncodeKey(i), Slice.FromString("value of " + i)); + tr.Set(location.Encode(i), Slice.FromString("value of " + i)); } await tr.CommitAsync(); } @@ -328,7 +328,7 @@ public async Task Test_GetRange() { var results = await tr - .GetRange(location.EncodeKey(0), location.EncodeKey(50)) + .GetRange(location.Encode(0), location.Encode(50)) .ToListAsync(); Assert.That(results, Is.Not.Null); @@ -342,7 +342,7 @@ public async Task Test_GetRange() Assert.That(results.Count, 
Is.EqualTo(50)); for (int i = 0; i < 50; i++) { - Assert.That(results[i].Key, Is.EqualTo(location.EncodeKey(i)), "[{0}].Key", i); + Assert.That(results[i].Key, Is.EqualTo(location.Encode(i)), "[{0}].Key", i); Assert.That(results[i].Value.ToString(), Is.EqualTo("value of " + i), "[{0}].Value", i); } @@ -353,7 +353,7 @@ public async Task Test_GetRange() { var results = await tr - .GetRange(location.EncodeKey(0), location.EncodeKey(50), new FdbRangeOptions { Reverse = true }) + .GetRange(location.Encode(0), location.Encode(50), new FdbRangeOptions { Reverse = true }) .ToListAsync(); Assert.That(results, Is.Not.Null); #if DEBUG @@ -366,7 +366,7 @@ public async Task Test_GetRange() Assert.That(results.Count, Is.EqualTo(50)); for (int i = 0; i < 50; i++) { - Assert.That(results[i].Key, Is.EqualTo(location.EncodeKey(49 - i)), "[{0}].Key", i); + Assert.That(results[i].Key, Is.EqualTo(location.Encode(49 - i)), "[{0}].Key", i); Assert.That(results[i].Value.ToString(), Is.EqualTo("value of " + (49 - i)), "[{0}].Value", i); } @@ -376,13 +376,13 @@ public async Task Test_GetRange() using (var tr = db.BeginTransaction(this.Cancellation)) { var result = await tr - .GetRange(location.EncodeKey(0), FdbKey.MaxValue, new FdbRangeOptions { Reverse = true }) + .GetRange(location.Encode(0), FdbKey.MaxValue, new FdbRangeOptions { Reverse = true }) .FirstOrDefaultAsync(); #if DEBUG Console.WriteLine(result.Key + " = " + result.Value); #endif - Assert.That(result.Key, Is.EqualTo(location.EncodeKey(100))); + Assert.That(result.Key, Is.EqualTo(location.Encode(100))); Assert.That(result.Value.ToString(), Is.EqualTo("value of 100")); await tr.CommitAsync(); @@ -399,14 +399,14 @@ public async Task Test_CommittedVersion_On_ReadOnly_Transactions() using (var db = MemoryDatabase.CreateNew("DB")) { - var location = db.Tuples; + var location = db.Keys; using (var tr = db.BeginTransaction(this.Cancellation)) { long ver = tr.GetCommittedVersion(); Assert.That(ver, Is.EqualTo(-1), "Initial committed 
version"); - var _ = await tr.GetAsync(location.EncodeKey("foo")); + var _ = await tr.GetAsync(location.Encode("foo")); // until the transction commits, the committed version will stay -1 ver = tr.GetCommittedVersion(); @@ -431,7 +431,7 @@ public async Task Test_CommittedVersion_On_Write_Transactions() using (var db = MemoryDatabase.CreateNew("DB")) { - var location = db.Tuples; + var location = db.Keys; using (var tr = db.BeginTransaction(this.Cancellation)) { @@ -441,7 +441,7 @@ public async Task Test_CommittedVersion_On_Write_Transactions() long ver = tr.GetCommittedVersion(); Assert.That(ver, Is.EqualTo(-1), "Initial committed version"); - tr.Set(location.EncodeKey("foo"), Slice.FromString("bar")); + tr.Set(location.Encode("foo"), Slice.FromString("bar")); // until the transction commits, the committed version should still be -1 ver = tr.GetCommittedVersion(); @@ -466,14 +466,14 @@ public async Task Test_CommittedVersion_After_Reset() using (var db = MemoryDatabase.CreateNew("DB")) { - var location = db.Tuples; + var location = db.Keys; using (var tr = db.BeginTransaction(this.Cancellation)) { // take the read version (to compare with the committed version below) long rv1 = await tr.GetReadVersionAsync(); // do something and commit - tr.Set(location.EncodeKey("foo"), Slice.FromString("bar")); + tr.Set(location.Encode("foo"), Slice.FromString("bar")); await tr.CommitAsync(); long cv1 = tr.GetCommittedVersion(); Console.WriteLine("COMMIT: " + rv1 + " / " + cv1); @@ -489,7 +489,7 @@ public async Task Test_CommittedVersion_After_Reset() //Assert.That(cv2, Is.EqualTo(-1), "Committed version should go back to -1 after reset"); // read-only + commit - await tr.GetAsync(location.EncodeKey("foo")); + await tr.GetAsync(location.Encode("foo")); await tr.CommitAsync(); cv2 = tr.GetCommittedVersion(); Console.WriteLine("COMMIT2: " + rv2 + " / " + cv2); @@ -506,18 +506,18 @@ public async Task Test_Conflicts() // this SHOULD NOT conflict using (var db = 
MemoryDatabase.CreateNew("DB")) { - var location = db.Tuples; + var location = db.Keys; using (var tr1 = db.BeginTransaction(this.Cancellation)) { using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(location.EncodeKey("foo"), Slice.FromString("changed")); + tr2.Set(location.Encode("foo"), Slice.FromString("changed")); await tr2.CommitAsync(); } - var x = await tr1.GetAsync(location.EncodeKey("foo")); - tr1.Set(location.EncodeKey("bar"), Slice.FromString("other")); + var x = await tr1.GetAsync(location.Encode("foo")); + tr1.Set(location.Encode("bar"), Slice.FromString("other")); await tr1.CommitAsync(); } @@ -527,19 +527,19 @@ public async Task Test_Conflicts() // this SHOULD conflict using (var db = MemoryDatabase.CreateNew("DB")) { - var location = db.Tuples; + var location = db.Keys; using (var tr1 = db.BeginTransaction(this.Cancellation)) { - var x = await tr1.GetAsync(location.EncodeKey("foo")); + var x = await tr1.GetAsync(location.Encode("foo")); using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(location.EncodeKey("foo"), Slice.FromString("changed")); + tr2.Set(location.Encode("foo"), Slice.FromString("changed")); await tr2.CommitAsync(); } - tr1.Set(location.EncodeKey("bar"), Slice.FromString("other")); + tr1.Set(location.Encode("bar"), Slice.FromString("other")); Assert.That(async () => await tr1.CommitAsync(), Throws.InstanceOf().With.Property("Code").EqualTo(FdbError.NotCommitted)); } @@ -549,7 +549,7 @@ public async Task Test_Conflicts() // this SHOULD conflict using (var db = MemoryDatabase.CreateNew("DB")) { - var location = db.Tuples; + var location = db.Keys; using (var tr1 = db.BeginTransaction(this.Cancellation)) { @@ -557,12 +557,12 @@ public async Task Test_Conflicts() using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(location.EncodeKey("foo"), Slice.FromString("changed")); + tr2.Set(location.Encode("foo"), Slice.FromString("changed")); await tr2.CommitAsync(); } - var x = await 
tr1.GetAsync(location.EncodeKey("foo")); - tr1.Set(location.EncodeKey("bar"), Slice.FromString("other")); + var x = await tr1.GetAsync(location.Encode("foo")); + tr1.Set(location.Encode("bar"), Slice.FromString("other")); Assert.That(async () => await tr1.CommitAsync(), Throws.InstanceOf().With.Property("Code").EqualTo(FdbError.NotCommitted)); } @@ -572,19 +572,19 @@ public async Task Test_Conflicts() // this SHOULD NOT conflict using (var db = MemoryDatabase.CreateNew("DB")) { - var location = db.Tuples; + var location = db.Keys; using (var tr1 = db.BeginTransaction(this.Cancellation)) { - var x = await tr1.Snapshot.GetAsync(location.EncodeKey("foo")); + var x = await tr1.Snapshot.GetAsync(location.Encode("foo")); using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(location.EncodeKey("foo"), Slice.FromString("changed")); + tr2.Set(location.Encode("foo"), Slice.FromString("changed")); await tr2.CommitAsync(); } - tr1.Set(location.EncodeKey("bar"), Slice.FromString("other")); + tr1.Set(location.Encode("bar"), Slice.FromString("other")); await tr1.CommitAsync(); } @@ -597,7 +597,7 @@ public async Task Test_Write_Then_Read() { using (var db = MemoryDatabase.CreateNew("FOO")) { - var location = db.Tuples; + var location = db.Keys; using (var tr = db.BeginTransaction(this.Cancellation)) { @@ -665,11 +665,11 @@ public async Task Test_Atomic() { using (var db = MemoryDatabase.CreateNew("DB")) { - var location = db.Tuples; + var location = db.Keys; - var key1 = location.EncodeKey(1); - var key2 = location.EncodeKey(2); - var key16 = location.EncodeKey(16); + var key1 = location.Encode(1); + var key2 = location.Encode(2); + var key16 = location.Encode(16); for (int i = 0; i < 10; i++) { @@ -839,19 +839,19 @@ public async Task Test_Can_BulkLoad_Data_Ordered() Console.WriteLine("Warmup..."); using (var db = MemoryDatabase.CreateNew("WARMUP")) { - await db.BulkLoadAsync(Enumerable.Range(0, 100).Select(i => new KeyValuePair(db.Tuples.EncodeKey(i), 
Slice.FromFixed32(i))).ToList(), ordered: true); + await db.BulkLoadAsync(Enumerable.Range(0, 100).Select(i => new KeyValuePair(db.Keys.Encode(i), Slice.FromFixed32(i))).ToList(), ordered: true); } using(var db = MemoryDatabase.CreateNew("FOO")) { - var location = db.Tuples; + var location = db.Keys; Console.WriteLine("Generating " + N.ToString("N0") + " keys..."); var data = new KeyValuePair[N]; for (int i = 0; i < N; i++) { data[i] = new KeyValuePair( - location.EncodeKey(i), + location.Encode(i), Slice.FromFixed32(i) ); } @@ -870,7 +870,7 @@ public async Task Test_Can_BulkLoad_Data_Ordered() int x = rnd.Next(N); using (var tx = db.BeginReadOnlyTransaction(this.Cancellation)) { - var res = await tx.GetAsync(location.EncodeKey(x)).ConfigureAwait(false); + var res = await tx.GetAsync(location.Encode(x)).ConfigureAwait(false); Assert.That(res.ToInt32(), Is.EqualTo(x)); } } @@ -888,12 +888,12 @@ public async Task Test_Can_BulkLoad_Data_Sequential_Unordered() Console.WriteLine("Warmup..."); using(var db = MemoryDatabase.CreateNew("WARMUP")) { - await db.BulkLoadAsync(Enumerable.Range(0, 100).Select(i => new KeyValuePair(db.Tuples.EncodeKey(i), Slice.FromFixed32(i))).ToList(), ordered: false); + await db.BulkLoadAsync(Enumerable.Range(0, 100).Select(i => new KeyValuePair(db.Keys.Encode(i), Slice.FromFixed32(i))).ToList(), ordered: false); } using (var db = MemoryDatabase.CreateNew("FOO")) { - var location = db.Tuples; + var location = db.Keys; Console.WriteLine("Generating " + N.ToString("N0") + " keys..."); var data = new KeyValuePair[N]; @@ -901,7 +901,7 @@ public async Task Test_Can_BulkLoad_Data_Sequential_Unordered() for (int i = 0; i < N; i++) { data[i] = new KeyValuePair( - location.EncodeKey(i), + location.Encode(i), Slice.FromFixed32(i) ); } @@ -919,7 +919,7 @@ public async Task Test_Can_BulkLoad_Data_Sequential_Unordered() int x = rnd.Next(N); using (var tx = db.BeginReadOnlyTransaction(this.Cancellation)) { - var res = await 
tx.GetAsync(location.EncodeKey(x)).ConfigureAwait(false); + var res = await tx.GetAsync(location.Encode(x)).ConfigureAwait(false); Assert.That(res.ToInt32(), Is.EqualTo(x)); } } @@ -937,12 +937,12 @@ public async Task Test_Can_BulkLoad_Data_Random_Unordered() Console.WriteLine("Warmup..."); using (var db = MemoryDatabase.CreateNew("WARMUP")) { - await db.BulkLoadAsync(Enumerable.Range(0, 100).Select(i => new KeyValuePair(db.Tuples.EncodeKey(i), Slice.FromFixed32(i))).ToList(), ordered: false); + await db.BulkLoadAsync(Enumerable.Range(0, 100).Select(i => new KeyValuePair(db.Keys.Encode(i), Slice.FromFixed32(i))).ToList(), ordered: false); } using (var db = MemoryDatabase.CreateNew("FOO")) { - var location = db.Tuples; + var location = db.Keys; Console.WriteLine("Generating " + N.ToString("N0") + " keys..."); var data = new KeyValuePair[N]; @@ -951,7 +951,7 @@ public async Task Test_Can_BulkLoad_Data_Random_Unordered() for (int i = 0; i < N; i++) { data[i] = new KeyValuePair( - location.EncodeKey(i), + location.Encode(i), Slice.FromFixed32(i) ); ints[i] = rnd.Next(int.MaxValue); @@ -973,7 +973,7 @@ public async Task Test_Can_BulkLoad_Data_Random_Unordered() int x = rnd.Next(N); using (var tx = db.BeginReadOnlyTransaction(this.Cancellation)) { - var res = await tx.GetAsync(location.EncodeKey(x)).ConfigureAwait(false); + var res = await tx.GetAsync(location.Encode(x)).ConfigureAwait(false); Assert.That(res.ToInt32(), Is.EqualTo(x)); } } diff --git a/FoundationDB.Storage.Memory/API/MemoryDatabase.cs b/FoundationDB.Storage.Memory/API/MemoryDatabase.cs index 4fb7d5dc0..8f47adcdf 100644 --- a/FoundationDB.Storage.Memory/API/MemoryDatabase.cs +++ b/FoundationDB.Storage.Memory/API/MemoryDatabase.cs @@ -30,7 +30,7 @@ public static MemoryDatabase CreateNew(string name) return CreateNew(name, FdbSubspace.Empty, false); } - public static MemoryDatabase CreateNew(string name, FdbSubspace globalSpace, bool readOnly) + public static MemoryDatabase CreateNew(string name, 
IFdbSubspace globalSpace, bool readOnly) { globalSpace = globalSpace ?? FdbSubspace.Empty; var uid = Guid.NewGuid(); @@ -84,7 +84,7 @@ public static async Task LoadFromAsync(string path, Cancellation private readonly MemoryDatabaseHandler m_handler; - private MemoryDatabase(IFdbCluster cluster, MemoryDatabaseHandler handler, string name, FdbSubspace globalSpace, IFdbDirectory directory, bool readOnly, bool ownsCluster) + private MemoryDatabase(IFdbCluster cluster, MemoryDatabaseHandler handler, string name, IFdbSubspace globalSpace, IFdbDirectory directory, bool readOnly, bool ownsCluster) : base(cluster, handler, name, globalSpace, directory, readOnly, ownsCluster) { m_handler = handler; diff --git a/FoundationDB.Tests.Sandbox/Program.cs b/FoundationDB.Tests.Sandbox/Program.cs index 7492abfe0..7e3eb6559 100644 --- a/FoundationDB.Tests.Sandbox/Program.cs +++ b/FoundationDB.Tests.Sandbox/Program.cs @@ -289,19 +289,19 @@ private static async Task TestSimpleTransactionAsync(IFdbDatabase db, Cancellati Console.WriteLine("> Read Version = " + readVersion); Console.WriteLine("Getting 'hello'..."); - var result = await trans.GetAsync(location.Tuples.EncodeKey("hello")); + var result = await trans.GetAsync(location.Keys.Encode("hello")); if (result.IsNull) Console.WriteLine("> hello NOT FOUND"); else Console.WriteLine("> hello = " + result.ToString()); Console.WriteLine("Setting 'Foo' = 'Bar'"); - trans.Set(location.Tuples.EncodeKey("Foo"), Slice.FromString("Bar")); + trans.Set(location.Keys.Encode("Foo"), Slice.FromString("Bar")); Console.WriteLine("Setting 'TopSecret' = rnd(512)"); var data = new byte[512]; new Random(1234).NextBytes(data); - trans.Set(location.Tuples.EncodeKey("TopSecret"), Slice.Create(data)); + trans.Set(location.Keys.Encode("TopSecret"), Slice.Create(data)); Console.WriteLine("Committing transaction..."); await trans.CommitAsync(); @@ -335,7 +335,7 @@ private static async Task BenchInsertSmallKeysAsync(IFdbDatabase db, int N, int tmp[1] = (byte)(i >> 
8); // (Batch, 1) = [......] // (Batch, 2) = [......] - trans.Set(subspace.Tuples.EncodeKey(k * N + i), Slice.Create(tmp)); + trans.Set(subspace.Keys.Encode(k * N + i), Slice.Create(tmp)); } await trans.CommitAsync(); } @@ -395,7 +395,7 @@ private static async Task BenchConcurrentInsert(IFdbDatabase db, int k, int N, i tmp[1] = (byte)(i >> 8); // ("Batch", batch_index, i) = [..random..] - trans.Set(subspace.Tuples.EncodeKey(i), Slice.Create(tmp)); + trans.Set(subspace.Keys.Encode(i), Slice.Create(tmp)); } x.Stop(); Console.WriteLine("> [" + offset + "] packaged " + n + " keys (" + trans.Size.ToString("N0", CultureInfo.InvariantCulture) + " bytes) in " + FormatTimeMilli(x.Elapsed.TotalMilliseconds)); @@ -438,7 +438,7 @@ private static async Task BenchSerialWriteAsync(IFdbDatabase db, int N, Cancella for (int i = 0; i < N; i++) { if (trans == null) trans = db.BeginTransaction(ct); - trans.Set(location.Tuples.EncodeKey(i), Slice.FromInt32(i)); + trans.Set(location.Keys.Encode(i), Slice.FromInt32(i)); if (trans.Size > 100 * 1024) { await trans.CommitAsync(); @@ -473,7 +473,7 @@ private static async Task BenchSerialReadAsync(IFdbDatabase db, int N, Cancellat { for (int i = k; i < N && i < k + 1000; i++) { - var result = await trans.GetAsync(location.Tuples.EncodeKey(i)); + var result = await trans.GetAsync(location.Keys.Encode(i)); } } Console.Write("."); @@ -491,7 +491,7 @@ private static async Task BenchConcurrentReadAsync(IFdbDatabase db, int N, Cance var location = db.Partition.ByKey("hello"); - var keys = Enumerable.Range(0, N).Select(i => location.Tuples.EncodeKey(i)).ToArray(); + var keys = Enumerable.Range(0, N).Select(i => location.Keys.Encode(i)).ToArray(); var sw = Stopwatch.StartNew(); using (var trans = db.BeginTransaction(ct)) @@ -524,7 +524,7 @@ private static async Task BenchClearAsync(IFdbDatabase db, int N, CancellationTo { for (int i = 0; i < N; i++) { - trans.Clear(location.Tuples.EncodeKey(i)); + trans.Clear(location.Keys.Encode(i)); } await 
trans.CommitAsync(); @@ -541,7 +541,7 @@ private static async Task BenchUpdateSameKeyLotsOfTimesAsync(IFdbDatabase db, in var list = new byte[N]; var update = Stopwatch.StartNew(); - var key = db.GlobalSpace.Tuples.EncodeKey("list"); + var key = db.GlobalSpace.Keys.Encode("list"); for (int i = 0; i < N; i++) { list[i] = (byte)i; @@ -564,7 +564,7 @@ private static async Task BenchUpdateLotsOfKeysAsync(IFdbDatabase db, int N, Can var location = db.Partition.ByKey("lists"); var rnd = new Random(); - var keys = Enumerable.Range(0, N).Select(x => location.Tuples.EncodeKey(x)).ToArray(); + var keys = Enumerable.Range(0, N).Select(x => location.Keys.Encode(x)).ToArray(); Console.WriteLine("> creating " + N + " half filled keys"); var segment = new byte[60]; @@ -646,7 +646,7 @@ private static async Task BenchBulkInsertThenBulkReadAsync(IFdbDatabase db, int int z = 0; foreach (int i in Enumerable.Range(chunk.Key, chunk.Value)) { - tr.Set(subspace.Tuples.EncodeKey(i), Slice.Create(new byte[256])); + tr.Set(subspace.Keys.Encode(i), Slice.Create(new byte[256])); z++; } @@ -717,7 +717,7 @@ private static async Task BenchMergeSortAsync(IFdbDatabase db, int N, int K, int var list = location.Partition.ByKey(source); for (int i = 0; i < N; i++) { - tr.Set(list.Tuples.EncodeKey(rnd.Next()), Slice.FromInt32(i)); + tr.Set(list.Keys.Encode(rnd.Next()), Slice.FromInt32(i)); } await tr.CommitAsync(); } @@ -730,11 +730,11 @@ private static async Task BenchMergeSortAsync(IFdbDatabase db, int N, int K, int { var mergesort = tr .MergeSort( - sources.Select(source => FdbKeySelectorPair.StartsWith(location.Tuples.EncodeKey(source))), - (kvp) => location.Tuples.DecodeLast(kvp.Key) + sources.Select(source => FdbKeySelectorPair.StartsWith(location.Keys.Encode(source))), + (kvp) => location.Keys.DecodeLast(kvp.Key) ) .Take(B) - .Select(kvp => location.Tuples.Unpack(kvp.Key)); + .Select(kvp => location.Keys.Unpack(kvp.Key)); Console.Write("> MergeSort with limit " + B + "... 
"); var sw = Stopwatch.StartNew(); diff --git a/FoundationDB.Tests/DatabaseBulkFacts.cs b/FoundationDB.Tests/DatabaseBulkFacts.cs index 69c47c9c4..e6e754e8d 100644 --- a/FoundationDB.Tests/DatabaseBulkFacts.cs +++ b/FoundationDB.Tests/DatabaseBulkFacts.cs @@ -60,7 +60,7 @@ public async Task Test_Can_Bulk_Insert_Raw_Data() var rnd = new Random(2403); var data = Enumerable.Range(0, N) - .Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x.ToString("x8")), Slice.Random(rnd, 16 + rnd.Next(240)))) + .Select((x) => new KeyValuePair(location.Keys.Encode(x.ToString("x8")), Slice.Random(rnd, 16 + rnd.Next(240)))) .ToArray(); Log("Total data size is {0:N0} bytes", data.Sum(x => x.Key.Count + x.Value.Count)); @@ -143,7 +143,7 @@ public async Task Test_Can_Bulk_Insert_Items() ++called; uniqueKeys.Add(kv.Key); tr.Set( - location.Tuples.EncodeKey(kv.Key), + location.Keys.Encode(kv.Key), Slice.FromString(new string('A', kv.Value)) ); }, @@ -168,13 +168,13 @@ public async Task Test_Can_Bulk_Insert_Items() var stored = await db.ReadAsync((tr) => { - return tr.GetRange(location.Tuples.ToRange()).ToArrayAsync(); + return tr.GetRange(location.Keys.ToRange()).ToArrayAsync(); }, this.Cancellation); Assert.That(stored.Length, Is.EqualTo(N), "DB contains less or more items than expected"); for (int i = 0; i < stored.Length;i++) { - Assert.That(stored[i].Key, Is.EqualTo(location.Tuples.EncodeKey(data[i].Key)), "Key #{0}", i); + Assert.That(stored[i].Key, Is.EqualTo(location.Keys.Encode(data[i].Key)), "Key #{0}", i); Assert.That(stored[i].Value.Count, Is.EqualTo(data[i].Value), "Value #{0}", i); } @@ -198,7 +198,7 @@ public async Task Test_Can_Batch_ForEach_AsyncWithContextAndState() await Fdb.Bulk.WriteAsync( db, - Enumerable.Range(1, N).Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x), Slice.FromInt32(x))), + Enumerable.Range(1, N).Select((x) => new KeyValuePair(location.Keys.Encode(x), Slice.FromInt32(x))), this.Cancellation ); @@ -210,7 +210,7 @@ await 
Fdb.Bulk.WriteAsync( var sw = Stopwatch.StartNew(); await Fdb.Bulk.ForEachAsync( db, - Enumerable.Range(1, N).Select(x => location.Tuples.EncodeKey(x)), + Enumerable.Range(1, N).Select(x => location.Keys.Encode(x)), () => FdbTuple.Create(0L, 0L), async (xs, ctx, state) => { @@ -284,7 +284,7 @@ public async Task Test_Can_Bulk_Batched_Insert_Items() { uniqueKeys.Add(kv.Key); tr.Set( - location.Tuples.EncodeKey(kv.Key), + location.Keys.Encode(kv.Key), Slice.FromString(new string('A', kv.Value)) ); } @@ -317,7 +317,7 @@ public async Task Test_Can_Bulk_Batched_Insert_Items() var stored = await db.ReadAsync((tr) => { - return tr.GetRange(location.Tuples.ToRange()).ToArrayAsync(); + return tr.GetRange(location.Keys.ToRange()).ToArrayAsync(); }, this.Cancellation); Log("Read {0:N0} keys", stored.Length); @@ -325,7 +325,7 @@ public async Task Test_Can_Bulk_Batched_Insert_Items() Assert.That(stored.Length, Is.EqualTo(N), "DB contains less or more items than expected"); for (int i = 0; i < stored.Length; i++) { - Assert.That(stored[i].Key, Is.EqualTo(location.Tuples.EncodeKey(data[i].Key)), "Key #{0}", i); + Assert.That(stored[i].Key, Is.EqualTo(location.Keys.Encode(data[i].Key)), "Key #{0}", i); Assert.That(stored[i].Value.Count, Is.EqualTo(data[i].Value), "Value #{0}", i); } @@ -349,7 +349,7 @@ public async Task Test_Can_Batch_ForEach_WithContextAndState() await Fdb.Bulk.WriteAsync( db, - Enumerable.Range(1, N).Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x), Slice.FromInt32(x))), + Enumerable.Range(1, N).Select((x) => new KeyValuePair(location.Keys.Encode(x), Slice.FromInt32(x))), this.Cancellation ); @@ -361,7 +361,7 @@ await Fdb.Bulk.WriteAsync( var sw = Stopwatch.StartNew(); await Fdb.Bulk.ForEachAsync( db, - Enumerable.Range(1, N).Select(x => location.Tuples.EncodeKey(x)), + Enumerable.Range(1, N).Select(x => location.Keys.Encode(x)), () => FdbTuple.Create(0L, 0L), // (sum, count) (xs, ctx, state) => { @@ -414,7 +414,7 @@ public async Task 
Test_Can_Batch_ForEach_AsyncWithContext() await Fdb.Bulk.WriteAsync( db, - Enumerable.Range(1, N).Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x), Slice.FromInt32(x))), + Enumerable.Range(1, N).Select((x) => new KeyValuePair(location.Keys.Encode(x), Slice.FromInt32(x))), this.Cancellation ); @@ -426,7 +426,7 @@ await Fdb.Bulk.WriteAsync( var sw = Stopwatch.StartNew(); await Fdb.Bulk.ForEachAsync( db, - Enumerable.Range(1, N).Select(x => location.Tuples.EncodeKey(x)), + Enumerable.Range(1, N).Select(x => location.Keys.Encode(x)), async (xs, ctx) => { Interlocked.Increment(ref chunks); @@ -474,7 +474,7 @@ public async Task Test_Can_Batch_Aggregate() await Fdb.Bulk.WriteAsync( db, - source.Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x.Key), Slice.FromInt32(x.Value))), + source.Select((x) => new KeyValuePair(location.Keys.Encode(x.Key), Slice.FromInt32(x.Value))), this.Cancellation ); @@ -484,7 +484,7 @@ await Fdb.Bulk.WriteAsync( var sw = Stopwatch.StartNew(); long total = await Fdb.Bulk.AggregateAsync( db, - source.Select(x => location.Tuples.EncodeKey(x.Key)), + source.Select(x => location.Keys.Encode(x.Key)), () => 0L, async (xs, ctx, sum) => { @@ -535,7 +535,7 @@ public async Task Test_Can_Batch_Aggregate_With_Transformed_Result() await Fdb.Bulk.WriteAsync( db, - source.Select((x) => new KeyValuePair(location.Tuples.EncodeKey(x.Key), Slice.FromInt32(x.Value))), + source.Select((x) => new KeyValuePair(location.Keys.Encode(x.Key), Slice.FromInt32(x.Value))), this.Cancellation ); @@ -545,7 +545,7 @@ await Fdb.Bulk.WriteAsync( var sw = Stopwatch.StartNew(); double average = await Fdb.Bulk.AggregateAsync( db, - source.Select(x => location.Tuples.EncodeKey(x.Key)), + source.Select(x => location.Keys.Encode(x.Key)), () => FdbTuple.Create(0L, 0L), async (xs, ctx, state) => { @@ -604,7 +604,7 @@ public async Task Test_Can_Export_To_Disk() await Fdb.Bulk.WriteAsync( db.WithoutLogging(), - source.Select((x) => new 
KeyValuePair(location.Tuples.EncodeKey(x.Key), x.Value)), + source.Select((x) => new KeyValuePair(location.Keys.Encode(x.Key), x.Value)), this.Cancellation ); @@ -616,7 +616,7 @@ await Fdb.Bulk.WriteAsync( { double average = await Fdb.Bulk.ExportAsync( db, - location.Tuples.ToRange(), + location.Keys.ToRange(), async (xs, pos, ct) => { Assert.That(xs, Is.Not.Null); @@ -631,7 +631,7 @@ await Fdb.Bulk.WriteAsync( var sb = new StringBuilder(4096); foreach(var x in xs) { - sb.AppendFormat("{0} = {1}\r\n", location.Tuples.DecodeKey(x.Key), x.Value.ToBase64()); + sb.AppendFormat("{0} = {1}\r\n", location.Keys.Decode(x.Key), x.Value.ToBase64()); } await file.WriteAsync(sb.ToString()); }, diff --git a/FoundationDB.Tests/Filters/LoggingFilterFacts.cs b/FoundationDB.Tests/Filters/LoggingFilterFacts.cs index 6fd0f5f3c..de985b762 100644 --- a/FoundationDB.Tests/Filters/LoggingFilterFacts.cs +++ b/FoundationDB.Tests/Filters/LoggingFilterFacts.cs @@ -48,37 +48,37 @@ public async Task Test_Can_Log_A_Transaction() using (var db = await OpenTestPartitionAsync()) { // get a tuple view of the directory - var location = (await GetCleanDirectory(db, "Logging")).Tuples; + var location = (await GetCleanDirectory(db, "Logging")).Keys; // note: ensure that all methods are JITed await db.ReadWriteAsync(async (tr) => { await tr.GetReadVersionAsync(); - tr.Set(location.EncodeKey("Warmup", 0), Slice.FromInt32(1)); - tr.Clear(location.EncodeKey("Warmup", 1)); - await tr.GetAsync(location.EncodeKey("Warmup", 2)); - await tr.GetRange(FdbKeyRange.StartsWith(location.EncodeKey("Warmup", 3))).ToListAsync(); - tr.ClearRange(location.EncodeKey("Warmup", 4), location.EncodeKey("Warmup", 5)); + tr.Set(location.Encode("Warmup", 0), Slice.FromInt32(1)); + tr.Clear(location.Encode("Warmup", 1)); + await tr.GetAsync(location.Encode("Warmup", 2)); + await tr.GetRange(FdbKeyRange.StartsWith(location.Encode("Warmup", 3))).ToListAsync(); + tr.ClearRange(location.Encode("Warmup", 4), location.Encode("Warmup", 
5)); }, this.Cancellation); await db.WriteAsync((tr) => { var rnd = new Random(); - tr.Set(location.EncodeKey("One"), Slice.FromString("111111")); - tr.Set(location.EncodeKey("Two"), Slice.FromString("222222")); + tr.Set(location.Encode("One"), Slice.FromString("111111")); + tr.Set(location.Encode("Two"), Slice.FromString("222222")); for (int j = 0; j < 4; j++) { for (int i = 0; i < 100; i++) { - tr.Set(location.EncodeKey("Range", j, rnd.Next(1000)), Slice.Empty); + tr.Set(location.Encode("Range", j, rnd.Next(1000)), Slice.Empty); } } for (int j = 0; j < N; j++) { - tr.Set(location.EncodeKey("X", j), Slice.FromInt32(j)); - tr.Set(location.EncodeKey("Y", j), Slice.FromInt32(j)); - tr.Set(location.EncodeKey("Z", j), Slice.FromInt32(j)); - tr.Set(location.EncodeKey("W", j), Slice.FromInt32(j)); + tr.Set(location.Encode("X", j), Slice.FromInt32(j)); + tr.Set(location.Encode("Y", j), Slice.FromInt32(j)); + tr.Set(location.Encode("Z", j), Slice.FromInt32(j)); + tr.Set(location.Encode("W", j), Slice.FromInt32(j)); } }, this.Cancellation); @@ -110,10 +110,10 @@ await logged.ReadWriteAsync(async (tr) => long ver = await tr.GetReadVersionAsync().ConfigureAwait(false); - await tr.GetAsync(location.EncodeKey("One")).ConfigureAwait(false); - await tr.GetAsync(location.EncodeKey("NotFound")).ConfigureAwait(false); + await tr.GetAsync(location.Encode("One")).ConfigureAwait(false); + await tr.GetAsync(location.Encode("NotFound")).ConfigureAwait(false); - tr.Set(location.EncodeKey("Write"), Slice.FromString("abcdef" + k.ToString())); + tr.Set(location.Encode("Write"), Slice.FromString("abcdef" + k.ToString())); //tr.Annotate("BEFORE"); //await Task.Delay(TimeSpan.FromMilliseconds(10)); @@ -126,33 +126,33 @@ await logged.ReadWriteAsync(async (tr) => //await tr.GetRangeAsync(FdbKeySelector.LastLessOrEqual(location.Pack("A")), FdbKeySelector.FirstGreaterThan(location.Pack("Z"))).ConfigureAwait(false); await Task.WhenAll( - tr.GetRange(FdbKeyRange.StartsWith(location.EncodeKey("Range", 
0))).ToListAsync(), - tr.GetRange(location.EncodeKey("Range", 1, 0), location.EncodeKey("Range", 1, 200)).ToListAsync(), - tr.GetRange(location.EncodeKey("Range", 2, 400), location.EncodeKey("Range", 2, 600)).ToListAsync(), - tr.GetRange(location.EncodeKey("Range", 3, 800), location.EncodeKey("Range", 3, 1000)).ToListAsync() + tr.GetRange(FdbKeyRange.StartsWith(location.Encode("Range", 0))).ToListAsync(), + tr.GetRange(location.Encode("Range", 1, 0), location.Encode("Range", 1, 200)).ToListAsync(), + tr.GetRange(location.Encode("Range", 2, 400), location.Encode("Range", 2, 600)).ToListAsync(), + tr.GetRange(location.Encode("Range", 3, 800), location.Encode("Range", 3, 1000)).ToListAsync() ).ConfigureAwait(false); - await tr.GetAsync(location.EncodeKey("Two")).ConfigureAwait(false); + await tr.GetAsync(location.Encode("Two")).ConfigureAwait(false); - await tr.GetValuesAsync(Enumerable.Range(0, N).Select(x => location.EncodeKey("X", x))).ConfigureAwait(false); + await tr.GetValuesAsync(Enumerable.Range(0, N).Select(x => location.Encode("X", x))).ConfigureAwait(false); for (int i = 0; i < N; i++) { - await tr.GetAsync(location.EncodeKey("Z", i)).ConfigureAwait(false); + await tr.GetAsync(location.Encode("Z", i)).ConfigureAwait(false); } - await Task.WhenAll(Enumerable.Range(0, N / 2).Select(x => tr.GetAsync(location.EncodeKey("Y", x)))).ConfigureAwait(false); - await Task.WhenAll(Enumerable.Range(N / 2, N / 2).Select(x => tr.GetAsync(location.EncodeKey("Y", x)))).ConfigureAwait(false); + await Task.WhenAll(Enumerable.Range(0, N / 2).Select(x => tr.GetAsync(location.Encode("Y", x)))).ConfigureAwait(false); + await Task.WhenAll(Enumerable.Range(N / 2, N / 2).Select(x => tr.GetAsync(location.Encode("Y", x)))).ConfigureAwait(false); await Task.WhenAll( - tr.GetAsync(location.EncodeKey("W", 1)), - tr.GetAsync(location.EncodeKey("W", 2)), - tr.GetAsync(location.EncodeKey("W", 3)) + tr.GetAsync(location.Encode("W", 1)), + tr.GetAsync(location.Encode("W", 2)), + 
tr.GetAsync(location.Encode("W", 3)) ).ConfigureAwait(false); - tr.Set(location.EncodeKey("Write2"), Slice.FromString("ghijkl" + k.ToString())); - tr.Clear(location.EncodeKey("Clear", "0")); - tr.ClearRange(location.EncodeKey("Clear", "A"), location.EncodeKey("Clear", "Z")); + tr.Set(location.Encode("Write2"), Slice.FromString("ghijkl" + k.ToString())); + tr.Clear(location.Encode("Clear", "0")); + tr.ClearRange(location.Encode("Clear", "A"), location.Encode("Clear", "Z")); if (tr.Context.Retries == 0) { diff --git a/FoundationDB.Tests/FoundationDB.Tests.csproj b/FoundationDB.Tests/FoundationDB.Tests.csproj index 06bb6ffbe..26b9c4288 100644 --- a/FoundationDB.Tests/FoundationDB.Tests.csproj +++ b/FoundationDB.Tests/FoundationDB.Tests.csproj @@ -96,7 +96,7 @@ - + diff --git a/FoundationDB.Tests/Layers/DirectoryFacts.cs b/FoundationDB.Tests/Layers/DirectoryFacts.cs index 2893084ed..eca7abedc 100644 --- a/FoundationDB.Tests/Layers/DirectoryFacts.cs +++ b/FoundationDB.Tests/Layers/DirectoryFacts.cs @@ -193,7 +193,7 @@ public async Task Test_CreateOrOpen_With_Layer() Assert.That(directory.ContentSubspace, Is.Not.Null); Assert.That(directory.ContentSubspace, Is.EqualTo(location)); Assert.That(directory.NodeSubspace, Is.Not.Null); - Assert.That(directory.NodeSubspace.Key, Is.EqualTo(location.Keys[Slice.FromByte(254)])); + Assert.That(directory.NodeSubspace.Key, Is.EqualTo(location.Key + Slice.FromByte(254))); // first call should create a new subspace (with a random prefix) var foo = await directory.CreateOrOpenAsync(logged, new[] { "Foo" }, Slice.FromString("AcmeLayer"), this.Cancellation); @@ -885,8 +885,6 @@ public async Task Test_Directory_Partitions_Should_Disallow_Creation_Of_Direct_K shouldFail(() => partition.ExtractKey(barKey, boundCheck: false)); shouldFail(() => partition.ExtractKey(barKey, boundCheck: true)); shouldFail(() => partition.ExtractKeys(new[] { barKey, barKey + FdbKey.MinValue })); - shouldFail(() => partition.Keys.Extract(barKey)); - shouldFail(() 
=> partition.Keys.Extract(barKey, barKey + FdbKey.MinValue)); // Partition shouldFail(() => partition.Partition.ByKey(123)); @@ -900,55 +898,52 @@ public async Task Test_Directory_Partitions_Should_Disallow_Creation_Of_Direct_K shouldFail(() => partition.ConcatKey(location.Key)); shouldFail(() => partition.ConcatKeys(new[] { Slice.FromString("hello"), Slice.FromString("world"), Slice.FromString("!") })); - shouldFail(() => partition.Keys.Concat(Slice.FromString("hello"))); - shouldFail(() => partition.Keys.Concat(location.Key)); - shouldFail(() => partition.Keys.Concat(location)); - shouldFail(() => partition.Keys.Concat(new[] { Slice.FromString("hello"), Slice.FromString("world"), Slice.FromString("!") })); - shouldFail(() => partition.Keys.Concat(new[] { location, location })); - - shouldFail(() => { var _ = partition.Keys[Slice.FromString("hello")]; }); - shouldFail(() => { var _ = partition.Keys[location.Key]; }); - shouldFail(() => { var _ = partition.Keys[location]; }); + shouldFail(() => { var _ = partition[Slice.FromString("hello")]; }); + shouldFail(() => { var _ = partition[location.Key]; }); + shouldFail(() => { var _ = partition[location]; }); - shouldFail(() => partition.Keys.ToRange()); - shouldFail(() => partition.Keys.ToRange(Slice.FromString("hello"))); - shouldFail(() => partition.Keys.ToRange(FdbTuple.EncodeKey("hello"))); - shouldFail(() => partition.Keys.ToRange(location)); + shouldFail(() => partition.ToRange()); + shouldFail(() => partition.ToRange(Slice.FromString("hello"))); + shouldFail(() => partition.ToRange(FdbTuple.EncodeKey("hello"))); + shouldFail(() => partition.ToRange(location)); // Tuples - shouldFail(() => partition.Tuples.EncodeKey(123)); - shouldFail(() => partition.Tuples.EncodeKey(123, "hello")); - shouldFail(() => partition.Tuples.EncodeKey(123, "hello", false)); - shouldFail(() => partition.Tuples.EncodeKey(123, "hello", false, "world")); - shouldFail(() => partition.Tuples.EncodeKey(123)); - - shouldFail(() => 
partition.Tuples.EncodeKeys(new[] { 123, 456, 789 })); - shouldFail(() => partition.Tuples.EncodeKeys((IEnumerable)new[] { 123, 456, 789 })); - shouldFail(() => partition.Tuples.EncodeKeys(new object[] { 123, "hello", true })); - shouldFail(() => partition.Tuples.EncodeKeys((IEnumerable)new object[] { 123, "hello", true })); - - shouldFail(() => partition.Tuples.Unpack(barKey)); - shouldFail(() => partition.Tuples.Unpack(new[] { barKey, barKey + FdbTuple.EncodeKey(123) })); - shouldFail(() => partition.Tuples.DecodeKey(barKey)); - shouldFail(() => partition.Tuples.DecodeKeys(new[] { barKey, barKey })); - shouldFail(() => partition.Tuples.DecodeLast(barKey)); - shouldFail(() => partition.Tuples.DecodeKeysLast(new[] { barKey, barKey + FdbTuple.EncodeKey(123) })); - shouldFail(() => partition.Tuples.DecodeFirst(barKey)); - shouldFail(() => partition.Tuples.DecodeKeysFirst(new[] { barKey, barKey + FdbTuple.EncodeKey(123) })); - - shouldFail(() => partition.Tuples.ToTuple()); - - shouldFail(() => partition.Tuples.Append(123)); - shouldFail(() => partition.Tuples.Append(123, "hello")); - shouldFail(() => partition.Tuples.Append(123, "hello", false)); - shouldFail(() => partition.Tuples.Append(123, "hello", false, "world")); - shouldFail(() => partition.Tuples.Concat(FdbTuple.Create(123, "hello", false, "world"))); - shouldFail(() => partition.Tuples.Append(new object[] { 123, "hello", false, "world" })); - - shouldFail(() => partition.Tuples.ToRange()); - shouldFail(() => partition.Tuples.ToRange(Slice.FromString("hello"))); - shouldFail(() => partition.Tuples.ToRange(FdbTuple.Create("hello"))); + shouldFail(() => partition.Keys.Encode(123)); + shouldFail(() => partition.Keys.Encode(123, "hello")); + shouldFail(() => partition.Keys.Encode(123, "hello", false)); + shouldFail(() => partition.Keys.Encode(123, "hello", false, "world")); + shouldFail(() => partition.Keys.Encode(123)); + + shouldFail(() => partition.Keys.Encode(new[] { 123, 456, 789 })); + shouldFail(() => 
partition.Keys.Encode((IEnumerable)new[] { 123, 456, 789 })); + shouldFail(() => partition.Keys.Encode(new object[] { 123, "hello", true })); + shouldFail(() => partition.Keys.Encode((IEnumerable)new object[] { 123, "hello", true })); + + shouldFail(() => partition.Keys.Unpack(barKey)); + shouldFail(() => partition.Keys.Unpack(new[] { barKey, barKey + FdbTuple.EncodeKey(123) })); + shouldFail(() => partition.Keys.Decode(barKey)); + shouldFail(() => partition.Keys.Decode(new[] { barKey, barKey })); + shouldFail(() => partition.Keys.DecodeLast(barKey)); + shouldFail(() => partition.Keys.DecodeLast(new[] { barKey, barKey + FdbTuple.EncodeKey(123) })); + shouldFail(() => partition.Keys.DecodeFirst(barKey)); + shouldFail(() => partition.Keys.DecodeFirst(new[] { barKey, barKey + FdbTuple.EncodeKey(123) })); + + //FIXME: need to re-enable this code! +#if REFACTORING_IN_PROGRESS + shouldFail(() => partition.Keys.ToTuple()); + + shouldFail(() => partition.Keys.Append(123)); + shouldFail(() => partition.Keys.Append(123, "hello")); + shouldFail(() => partition.Keys.Append(123, "hello", false)); + shouldFail(() => partition.Keys.Append(123, "hello", false, "world")); + shouldFail(() => partition.Keys.Concat(FdbTuple.Create(123, "hello", false, "world"))); + shouldFail(() => partition.Keys.Append(new object[] { 123, "hello", false, "world" })); +#endif + + shouldFail(() => partition.Keys.ToRange()); + shouldFail(() => partition.ToRange(Slice.FromString("hello"))); + shouldFail(() => partition.Keys.ToRange(FdbTuple.Create("hello"))); } } @@ -1042,10 +1037,10 @@ await Task.WhenAll( tr2.GetReadVersionAsync() ); - var first = await directory.RegisterAsync(tr1, new[] { "First" }, Slice.Nil, location.Tuples.EncodeKey("abc")); + var first = await directory.RegisterAsync(tr1, new[] { "First" }, Slice.Nil, location.Keys.Encode("abc")); tr1.Set(first.Key, Slice.FromString("This belongs to the first directory")); - var second = await directory.RegisterAsync(tr2, new[] { "Second" }, 
Slice.Nil, location.Tuples.EncodeKey("def")); + var second = await directory.RegisterAsync(tr2, new[] { "Second" }, Slice.Nil, location.Keys.Encode("def")); tr2.Set(second.Key, Slice.FromString("This belongs to the second directory")); Console.WriteLine("Committing T1..."); diff --git a/FoundationDB.Tests/Layers/MapFacts.cs b/FoundationDB.Tests/Layers/MapFacts.cs index bb32e35c6..bde98dbf7 100644 --- a/FoundationDB.Tests/Layers/MapFacts.cs +++ b/FoundationDB.Tests/Layers/MapFacts.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -88,7 +88,7 @@ public async Task Test_FdbMap_Read_Write_Delete() // directly read the value, behind the table's back using (var tr = db.BeginTransaction(this.Cancellation)) { - var value = await tr.GetAsync(location.Tuples.EncodeKey("Foos", "hello")); + var value = await tr.GetAsync(location.Keys.Encode("Foos", "hello")); Assert.That(value, Is.Not.EqualTo(Slice.Nil)); Assert.That(value.ToString(), Is.EqualTo(secret)); } @@ -113,7 +113,7 @@ public async Task Test_FdbMap_Read_Write_Delete() Assert.That(value.HasValue, Is.False); // also check directly - var data = await tr.GetAsync(location.Tuples.EncodeKey("Foos", "hello")); + var data = await tr.GetAsync(location.Keys.Encode("Foos", "hello")); Assert.That(data, Is.EqualTo(Slice.Nil)); } diff --git a/FoundationDB.Tests/Layers/MultiMapFacts.cs b/FoundationDB.Tests/Layers/MultiMapFacts.cs index 0f9bbf33c..af11ff7ee 100644 --- a/FoundationDB.Tests/Layers/MultiMapFacts.cs +++ b/FoundationDB.Tests/Layers/MultiMapFacts.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -89,7 +89,7 @@ public async Task Test_FdbMultiMap_Read_Write_Delete() // directly read the value, behind the table's back using (var tr = db.BeginTransaction(this.Cancellation)) { - var value = await tr.GetAsync(map.Subspace.Tuples.EncodeKey("hello", "world")); + var value = await tr.GetAsync(map.Subspace[FdbTuple.Create("hello", "world")]); Assert.That(value, Is.Not.EqualTo(Slice.Nil)); Assert.That(value.ToInt64(), Is.EqualTo(1)); } @@ -112,7 +112,7 @@ public async Task Test_FdbMultiMap_Read_Write_Delete() Assert.That(count, Is.Null); // also check directly - var data = await tr.GetAsync(map.Subspace.Tuples.EncodeKey("hello", "world")); + var data = await tr.GetAsync(map.Subspace[FdbTuple.Create("hello", "world")]); Assert.That(data, Is.EqualTo(Slice.Nil)); } diff --git a/FoundationDB.Tests/Layers/RankedSetFacts.cs b/FoundationDB.Tests/Layers/RankedSetFacts.cs index 8ef481895..c3980cf25 100644 --- a/FoundationDB.Tests/Layers/RankedSetFacts.cs +++ b/FoundationDB.Tests/Layers/RankedSetFacts.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -80,9 +80,9 @@ private static async Task PrintRankedSet(FdbRankedSet rs, IFdbReadOnlyTransactio for (int l = 0; l < 6; l++) { sb.AppendFormat("Level {0}:\r\n", l); - await tr.GetRange(rs.Subspace.Partition.ByKey(l).Tuples.ToRange()).ForEachAsync((kvp) => + await tr.GetRange(rs.Subspace.Partition.ByKey(l).Keys.ToRange()).ForEachAsync((kvp) => { - sb.AppendFormat("\t{0} = {1}\r\n", rs.Subspace.Tuples.Unpack(kvp.Key), kvp.Value.ToInt64()); + sb.AppendFormat("\t{0} = {1}\r\n", rs.Subspace.Keys.Unpack(kvp.Key), kvp.Value.ToInt64()); }); } Console.WriteLine(sb.ToString()); diff --git a/FoundationDB.Tests/Layers/StringInternFacts.cs b/FoundationDB.Tests/Layers/StringInternFacts.cs index c58307647..4cd5b2460 100644 --- a/FoundationDB.Tests/Layers/StringInternFacts.cs +++ b/FoundationDB.Tests/Layers/StringInternFacts.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -55,11 +55,11 @@ public async Task Test_StringIntern_Example() // insert a bunch of strings using (var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(dataSpace.Tuples.EncodeKey("a"), await stringTable.InternAsync(tr, "testing 123456789")); - tr.Set(dataSpace.Tuples.EncodeKey("b"), await stringTable.InternAsync(tr, "dog")); - tr.Set(dataSpace.Tuples.EncodeKey("c"), await stringTable.InternAsync(tr, "testing 123456789")); - tr.Set(dataSpace.Tuples.EncodeKey("d"), await stringTable.InternAsync(tr, "cat")); - tr.Set(dataSpace.Tuples.EncodeKey("e"), await stringTable.InternAsync(tr, "cat")); + tr.Set(dataSpace.Keys.Encode("a"), await stringTable.InternAsync(tr, "testing 123456789")); + tr.Set(dataSpace.Keys.Encode("b"), await stringTable.InternAsync(tr, "dog")); + tr.Set(dataSpace.Keys.Encode("c"), await stringTable.InternAsync(tr, "testing 123456789")); + tr.Set(dataSpace.Keys.Encode("d"), await stringTable.InternAsync(tr, "cat")); + tr.Set(dataSpace.Keys.Encode("e"), await stringTable.InternAsync(tr, "cat")); await tr.CommitAsync(); } @@ -72,11 +72,11 @@ public async Task Test_StringIntern_Example() // check the contents of the data using (var tr = db.BeginTransaction(this.Cancellation)) { - var uid_a = await tr.GetAsync(dataSpace.Tuples.EncodeKey("a")); - var uid_b = await tr.GetAsync(dataSpace.Tuples.EncodeKey("b")); - var uid_c = await tr.GetAsync(dataSpace.Tuples.EncodeKey("c")); - var uid_d = await tr.GetAsync(dataSpace.Tuples.EncodeKey("d")); - var uid_e = await tr.GetAsync(dataSpace.Tuples.EncodeKey("e")); + var uid_a = await tr.GetAsync(dataSpace.Keys.Encode("a")); + var uid_b = await tr.GetAsync(dataSpace.Keys.Encode("b")); + var uid_c = await tr.GetAsync(dataSpace.Keys.Encode("c")); + var uid_d = await tr.GetAsync(dataSpace.Keys.Encode("d")); + var uid_e = await tr.GetAsync(dataSpace.Keys.Encode("e")); // a, b, d should be different Assert.That(uid_b, Is.Not.EqualTo(uid_a)); diff 
--git a/FoundationDB.Tests/Layers/VectorFacts.cs b/FoundationDB.Tests/Layers/VectorFacts.cs index eb5001275..6a9e8a6a7 100644 --- a/FoundationDB.Tests/Layers/VectorFacts.cs +++ b/FoundationDB.Tests/Layers/VectorFacts.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -172,10 +172,10 @@ private static async Task PrintVector(FdbVector vector, IFdbReadOnlyTransa bool first = true; var sb = new StringBuilder(); - await tr.GetRange(vector.Subspace.Tuples.ToRange()).ForEachAsync((kvp) => + await tr.GetRange(vector.Subspace.Keys.ToRange()).ForEachAsync((kvp) => { if (!first) sb.Append(", "); else first = false; - sb.Append(vector.Subspace.Tuples.DecodeLast(kvp.Key) + ":" + kvp.Value.ToAsciiOrHexaString()); + sb.Append(vector.Subspace.Keys.DecodeLast(kvp.Key) + ":" + kvp.Value.ToAsciiOrHexaString()); }); Console.WriteLine("> Vector: (" + sb.ToString() + ")"); diff --git a/FoundationDB.Tests/Linq/FdbAsyncQueryableFacts.cs b/FoundationDB.Tests/Linq/FdbAsyncQueryableFacts.cs index 5058530fc..836913402 100644 --- a/FoundationDB.Tests/Linq/FdbAsyncQueryableFacts.cs +++ b/FoundationDB.Tests/Linq/FdbAsyncQueryableFacts.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -58,8 +58,8 @@ public async Task Test_AsyncQueryable_Basics() await db.WriteAsync((tr) => { - tr.Set(location.Tuples.EncodeKey("Hello"), Slice.FromString("World!")); - tr.Set(location.Tuples.EncodeKey("Narf"), Slice.FromString("Zort")); + tr.Set(location.Keys.Encode("Hello"), Slice.FromString("World!")); + tr.Set(location.Keys.Encode("Narf"), Slice.FromString("Zort")); }, this.Cancellation); var range = db.Query().RangeStartsWith(location.Key); diff --git a/FoundationDB.Tests/RangeQueryFacts.cs b/FoundationDB.Tests/RangeQueryFacts.cs index ce6f1ea9f..b5848c27d 100644 --- a/FoundationDB.Tests/RangeQueryFacts.cs +++ b/FoundationDB.Tests/RangeQueryFacts.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -64,7 +64,7 @@ public async Task Test_Can_Get_Range() { foreach (int i in Enumerable.Range(0, N)) { - tr.Set(location.Tuples.EncodeKey(i), Slice.FromInt32(i)); + tr.Set(location.Keys.Encode(i), Slice.FromInt32(i)); } await tr.CommitAsync(); @@ -77,11 +77,11 @@ public async Task Test_Can_Get_Range() using (var tr = db.BeginTransaction(this.Cancellation)) { - var query = tr.GetRange(location.Tuples.EncodeKey(0), location.Tuples.EncodeKey(N)); + var query = tr.GetRange(location.Keys.Encode(0), location.Keys.Encode(N)); Assert.That(query, Is.Not.Null); Assert.That(query.Transaction, Is.SameAs(tr)); - Assert.That(query.Begin.Key, Is.EqualTo(location.Tuples.EncodeKey(0))); - Assert.That(query.End.Key, Is.EqualTo(location.Tuples.EncodeKey(N))); + Assert.That(query.Begin.Key, Is.EqualTo(location.Keys.Encode(0))); + Assert.That(query.End.Key, Is.EqualTo(location.Keys.Encode(N))); Assert.That(query.Limit, Is.Null); Assert.That(query.TargetBytes, Is.Null); Assert.That(query.Reversed, Is.False); @@ -105,7 +105,7 @@ public async Task Test_Can_Get_Range() var 
kvp = items[i]; // key should be a tuple in the correct order - var key = location.Tuples.Unpack(kvp.Key); + var key = location.Keys.Unpack(kvp.Key); if (i % 128 == 0) Log("... {0} = {1}", key, kvp.Value); @@ -137,9 +137,9 @@ await db.WriteAsync((tr) => { for (int i = 0; i < 10; i++) { - tr.Set(a.Tuples.EncodeKey(i), Slice.FromInt32(i)); + tr.Set(a.Keys.Encode(i), Slice.FromInt32(i)); } - tr.Set(b.Tuples.EncodeKey(0), Slice.FromInt32(42)); + tr.Set(b.Keys.Encode(0), Slice.FromInt32(42)); }, this.Cancellation); KeyValuePair res; @@ -147,26 +147,26 @@ await db.WriteAsync((tr) => // A: more then one item using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(a.Tuples.ToRange()); + var query = tr.GetRange(a.Keys.ToRange()); // should return the first one res = await query.FirstOrDefaultAsync(); - Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(0))); + Assert.That(res.Key, Is.EqualTo(a.Keys.Encode(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(0))); // should return the first one res = await query.FirstAsync(); - Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(0))); + Assert.That(res.Key, Is.EqualTo(a.Keys.Encode(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(0))); // should return the last one res = await query.LastOrDefaultAsync(); - Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(9))); + Assert.That(res.Key, Is.EqualTo(a.Keys.Encode(9))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(9))); // should return the last one res = await query.LastAsync(); - Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(9))); + Assert.That(res.Key, Is.EqualTo(a.Keys.Encode(9))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(9))); // should fail because there is more than one @@ -179,43 +179,43 @@ await db.WriteAsync((tr) => // B: exactly one item using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(b.Tuples.ToRange()); + var query = tr.GetRange(b.Keys.ToRange()); // should 
return the first one res = await query.FirstOrDefaultAsync(); - Assert.That(res.Key, Is.EqualTo(b.Tuples.EncodeKey(0))); + Assert.That(res.Key, Is.EqualTo(b.Keys.Encode(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(42))); // should return the first one res = await query.FirstAsync(); - Assert.That(res.Key, Is.EqualTo(b.Tuples.EncodeKey(0))); + Assert.That(res.Key, Is.EqualTo(b.Keys.Encode(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(42))); // should return the last one res = await query.LastOrDefaultAsync(); - Assert.That(res.Key, Is.EqualTo(b.Tuples.EncodeKey(0))); + Assert.That(res.Key, Is.EqualTo(b.Keys.Encode(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(42))); // should return the last one res = await query.LastAsync(); - Assert.That(res.Key, Is.EqualTo(b.Tuples.EncodeKey(0))); + Assert.That(res.Key, Is.EqualTo(b.Keys.Encode(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(42))); // should return the first one res = await query.SingleOrDefaultAsync(); - Assert.That(res.Key, Is.EqualTo(b.Tuples.EncodeKey(0))); + Assert.That(res.Key, Is.EqualTo(b.Keys.Encode(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(42))); // should return the first one res = await query.SingleAsync(); - Assert.That(res.Key, Is.EqualTo(b.Tuples.EncodeKey(0))); + Assert.That(res.Key, Is.EqualTo(b.Keys.Encode(0))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(42))); } // C: no items using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(c.Tuples.ToRange()); + var query = tr.GetRange(c.Keys.ToRange()); // should return nothing res = await query.FirstOrDefaultAsync(); @@ -245,32 +245,32 @@ await db.WriteAsync((tr) => // A: with a size limit using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(a.Tuples.ToRange()).Take(5); + var query = tr.GetRange(a.Keys.ToRange()).Take(5); // should return the fifth one res = await query.LastOrDefaultAsync(); - Assert.That(res.Key, 
Is.EqualTo(a.Tuples.EncodeKey(4))); + Assert.That(res.Key, Is.EqualTo(a.Keys.Encode(4))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(4))); // should return the fifth one res = await query.LastAsync(); - Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(4))); + Assert.That(res.Key, Is.EqualTo(a.Keys.Encode(4))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(4))); } // A: with an offset using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(a.Tuples.ToRange()).Skip(5); + var query = tr.GetRange(a.Keys.ToRange()).Skip(5); // should return the fifth one res = await query.FirstOrDefaultAsync(); - Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(5))); + Assert.That(res.Key, Is.EqualTo(a.Keys.Encode(5))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(5))); // should return the fifth one res = await query.FirstAsync(); - Assert.That(res.Key, Is.EqualTo(a.Tuples.EncodeKey(5))); + Assert.That(res.Key, Is.EqualTo(a.Keys.Encode(5))); Assert.That(res.Value, Is.EqualTo(Slice.FromInt32(5))); } @@ -293,7 +293,7 @@ await db.WriteAsync((tr) => { for (int i = 0; i < 10; i++) { - tr.Set(a.Tuples.EncodeKey(i), Slice.FromInt32(i)); + tr.Set(a.Keys.Encode(i), Slice.FromInt32(i)); } // add guard keys tr.Set(location.Key, Slice.FromInt32(-1)); @@ -304,7 +304,7 @@ await db.WriteAsync((tr) => using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(a.Tuples.ToRange()).Take(5); + var query = tr.GetRange(a.Keys.ToRange()).Take(5); Assert.That(query, Is.Not.Null); Assert.That(query.Limit, Is.EqualTo(5)); @@ -313,7 +313,7 @@ await db.WriteAsync((tr) => Assert.That(elements.Count, Is.EqualTo(5)); for (int i = 0; i < 5; i++) { - Assert.That(elements[i].Key, Is.EqualTo(a.Tuples.EncodeKey(i))); + Assert.That(elements[i].Key, Is.EqualTo(a.Keys.Encode(i))); Assert.That(elements[i].Value, Is.EqualTo(Slice.FromInt32(i))); } } @@ -322,7 +322,7 @@ await db.WriteAsync((tr) => using (var tr = 
db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(a.Tuples.ToRange()).Take(12); + var query = tr.GetRange(a.Keys.ToRange()).Take(12); Assert.That(query, Is.Not.Null); Assert.That(query.Limit, Is.EqualTo(12)); @@ -331,7 +331,7 @@ await db.WriteAsync((tr) => Assert.That(elements.Count, Is.EqualTo(10)); for (int i = 0; i < 10; i++) { - Assert.That(elements[i].Key, Is.EqualTo(a.Tuples.EncodeKey(i))); + Assert.That(elements[i].Key, Is.EqualTo(a.Keys.Encode(i))); Assert.That(elements[i].Value, Is.EqualTo(Slice.FromInt32(i))); } } @@ -340,7 +340,7 @@ await db.WriteAsync((tr) => using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(a.Tuples.ToRange()).Take(0); + var query = tr.GetRange(a.Keys.ToRange()).Take(0); Assert.That(query, Is.Not.Null); Assert.That(query.Limit, Is.EqualTo(0)); @@ -361,13 +361,13 @@ public async Task Test_Can_Skip() var location = await GetCleanDirectory(db, "Queries", "Range"); // import test data - var data = Enumerable.Range(0, 100).Select(x => new KeyValuePair(location.Tuples.EncodeKey(x), Slice.FromFixed32(x))); + var data = Enumerable.Range(0, 100).Select(x => new KeyValuePair(location.Keys.Encode(x), Slice.FromFixed32(x))); await Fdb.Bulk.WriteAsync(db, data, this.Cancellation); // from the start using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(location.Tuples.ToRange()); + var query = tr.GetRange(location.Keys.ToRange()); // |>>>>>>>>>>>>(50---------->99)| var res = await query.Skip(50).ToListAsync(); @@ -394,7 +394,7 @@ public async Task Test_Can_Skip() // from the end using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(location.Tuples.ToRange()); + var query = tr.GetRange(location.Keys.ToRange()); // |(0 <--------- 49)<<<<<<<<<<<<<| var res = await query.Reverse().Skip(50).ToListAsync(); @@ -421,7 +421,7 @@ public async Task Test_Can_Skip() // from both sides using (var tr = 
db.BeginReadOnlyTransaction(this.Cancellation)) { - var query = tr.GetRange(location.Tuples.ToRange()); + var query = tr.GetRange(location.Keys.ToRange()); // |>>>>>>>>>(25<------------74)<<<<<<<<| var res = await query.Skip(25).Reverse().Skip(25).ToListAsync(); @@ -443,13 +443,13 @@ public async Task Test_Original_Range_Does_Not_Overflow() var location = await GetCleanDirectory(db, "Queries", "Range"); // import test data - var data = Enumerable.Range(0, 30).Select(x => new KeyValuePair(location.Tuples.EncodeKey(x), Slice.FromFixed32(x))); + var data = Enumerable.Range(0, 30).Select(x => new KeyValuePair(location.Keys.Encode(x), Slice.FromFixed32(x))); await Fdb.Bulk.WriteAsync(db, data, this.Cancellation); using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { var query = tr - .GetRange(location.Tuples.EncodeKey(10), location.Tuples.EncodeKey(20)) // 10 -> 19 + .GetRange(location.Keys.Encode(10), location.Keys.Encode(20)) // 10 -> 19 .Take(20) // 10 -> 19 (limit 20) .Reverse(); // 19 -> 10 (limit 20) Log("query: {0}", query); @@ -462,7 +462,7 @@ public async Task Test_Original_Range_Does_Not_Overflow() using (var tr = db.BeginReadOnlyTransaction(this.Cancellation)) { var query = tr - .GetRange(location.Tuples.EncodeKey(10), location.Tuples.EncodeKey(20)) // 10 -> 19 + .GetRange(location.Keys.Encode(10), location.Keys.Encode(20)) // 10 -> 19 .Reverse() // 19 -> 10 .Take(20) // 19 -> 10 (limit 20) .Reverse(); // 10 -> 19 (limit 20) @@ -502,7 +502,7 @@ public async Task Test_Can_MergeSort() { for (int i = 0; i < N; i++) { - tr.Set(lists[k].Tuples.EncodeKey((i * K) + k), FdbTuple.EncodeKey(k, i)); + tr.Set(lists[k].Keys.Encode((i * K) + k), FdbTuple.EncodeKey(k, i)); } await tr.CommitAsync(); } @@ -514,8 +514,8 @@ public async Task Test_Can_MergeSort() using (var tr = db.BeginTransaction(this.Cancellation)) { var merge = tr.MergeSort( - lists.Select(list => FdbKeySelectorPair.Create(list.Tuples.ToRange())), - kvp => location.Tuples.DecodeLast(kvp.Key) + 
lists.Select(list => FdbKeySelectorPair.Create(list.Keys.ToRange())), + kvp => location.Keys.DecodeLast(kvp.Key) ); Assert.That(merge, Is.Not.Null); @@ -566,7 +566,7 @@ public async Task Test_Range_Intersect() { for (int i = 0; i < N; i++) { - var key = lists[k].Tuples.EncodeKey(series[k][i]); + var key = lists[k].Keys.Encode(series[k][i]); var value = FdbTuple.EncodeKey(k, i); //Console.WriteLine("> " + key + " = " + value); tr.Set(key, value); @@ -584,8 +584,8 @@ public async Task Test_Range_Intersect() using (var tr = db.BeginTransaction(this.Cancellation)) { var merge = tr.Intersect( - lists.Select(list => FdbKeySelectorPair.Create(list.Tuples.ToRange())), - kvp => location.Tuples.DecodeLast(kvp.Key) + lists.Select(list => FdbKeySelectorPair.Create(list.Keys.ToRange())), + kvp => location.Keys.DecodeLast(kvp.Key) ); Assert.That(merge, Is.Not.Null); @@ -598,7 +598,7 @@ public async Task Test_Range_Intersect() for (int i = 0; i < results.Count; i++) { - Assert.That(location.Tuples.DecodeLast(results[i].Key), Is.EqualTo(expected[i])); + Assert.That(location.Keys.DecodeLast(results[i].Key), Is.EqualTo(expected[i])); } } } @@ -638,7 +638,7 @@ public async Task Test_Range_Except() { for (int i = 0; i < N; i++) { - var key = lists[k].Tuples.EncodeKey(series[k][i]); + var key = lists[k].Keys.Encode(series[k][i]); var value = FdbTuple.EncodeKey(k, i); //Console.WriteLine("> " + key + " = " + value); tr.Set(key, value); @@ -656,8 +656,8 @@ public async Task Test_Range_Except() using (var tr = db.BeginTransaction(this.Cancellation)) { var merge = tr.Except( - lists.Select(list => FdbKeySelectorPair.Create(list.Tuples.ToRange())), - kvp => location.Tuples.DecodeLast(kvp.Key) + lists.Select(list => FdbKeySelectorPair.Create(list.Keys.ToRange())), + kvp => location.Keys.DecodeLast(kvp.Key) ); Assert.That(merge, Is.Not.Null); @@ -670,7 +670,7 @@ public async Task Test_Range_Except() for (int i = 0; i < results.Count; i++) { - 
Assert.That(location.Tuples.DecodeLast(results[i].Key), Is.EqualTo(expected[i])); + Assert.That(location.Keys.DecodeLast(results[i].Key), Is.EqualTo(expected[i])); } } @@ -697,11 +697,11 @@ public async Task Test_Range_Except_Composite_Key() await db.WriteAsync((tr) => { // Items - tr.Set(locItems.Tuples.EncodeKey("userA", 10093), Slice.Empty); - tr.Set(locItems.Tuples.EncodeKey("userA", 19238), Slice.Empty); - tr.Set(locItems.Tuples.EncodeKey("userB", 20003), Slice.Empty); + tr.Set(locItems.Keys.Encode("userA", 10093), Slice.Empty); + tr.Set(locItems.Keys.Encode("userA", 19238), Slice.Empty); + tr.Set(locItems.Keys.Encode("userB", 20003), Slice.Empty); // Processed - tr.Set(locProcessed.Tuples.EncodeKey("userA", 19238), Slice.Empty); + tr.Set(locProcessed.Keys.Encode("userA", 19238), Slice.Empty); }, this.Cancellation); // the query (Items ∩ Processed) should return (userA, 10093) and (userB, 20003) @@ -711,14 +711,14 @@ await db.WriteAsync((tr) => var results = await db.QueryAsync((tr) => { var query = tr.Except( - new[] { locItems.Tuples.ToRange(), locProcessed.Tuples.ToRange() }, + new[] { locItems.Keys.ToRange(), locProcessed.Keys.ToRange() }, (kv) => FdbTuple.Unpack(kv.Key).Substring(-2), // note: keys come from any of the two ranges, so we must only keep the last 2 elements of the tuple FdbTupleComparisons.Composite() // compares t[0] as a string, and t[1] as an int ); // problem: Except() still returns the original (Slice,Slice) pairs from the first range, // meaning that we still need to unpack agin the key (this time knowing the location) - return query.Select(kv => locItems.Tuples.Unpack(kv.Key)); + return query.Select(kv => locItems.Keys.Unpack(kv.Key)); }, this.Cancellation); foreach(var r in results) @@ -734,12 +734,12 @@ await db.WriteAsync((tr) => results = await db.QueryAsync((tr) => { var items = tr - .GetRange(locItems.Tuples.ToRange()) - .Select(kv => locItems.Tuples.Unpack(kv.Key)); + .GetRange(locItems.Keys.ToRange()) + .Select(kv => 
locItems.Keys.Unpack(kv.Key)); var processed = tr - .GetRange(locProcessed.Tuples.ToRange()) - .Select(kv => locProcessed.Tuples.Unpack(kv.Key)); + .GetRange(locProcessed.Keys.ToRange()) + .Select(kv => locProcessed.Keys.Unpack(kv.Key)); // items and processed are lists of (string, int) tuples, we can compare them directly var query = items.Except(processed, FdbTupleComparisons.Composite()); diff --git a/FoundationDB.Tests/Layers/SubspaceFacts.cs b/FoundationDB.Tests/SubspaceFacts.cs similarity index 81% rename from FoundationDB.Tests/Layers/SubspaceFacts.cs rename to FoundationDB.Tests/SubspaceFacts.cs index 937cc3a65..fb7b54de9 100644 --- a/FoundationDB.Tests/Layers/SubspaceFacts.cs +++ b/FoundationDB.Tests/SubspaceFacts.cs @@ -1,5 +1,5 @@ #region BSD Licence -/* Copyright (c) 2013, Doxense SARL +/* Copyright (c) 2013-2015, Doxense SAS All rights reserved. Redistribution and use in source and binary forms, with or without @@ -29,11 +29,9 @@ DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY namespace FoundationDB.Layers.Tuples.Tests { using FoundationDB.Client; - using FoundationDB.Client.Tests; using FoundationDB.Layers.Tuples; using NUnit.Framework; using System; - using System.Threading.Tasks; [TestFixture] public class SubspaceFacts @@ -56,7 +54,7 @@ public void Test_Empty_Subspace_Is_Empty() [Category("LocalCluster")] public void Test_Subspace_With_Binary_Prefix() { - var subspace = new FdbSubspace(Slice.Create(new byte[] { 42, 255, 0, 127 })); + var subspace = FdbSubspace.CreateDynamic(Slice.Create(new byte[] { 42, 255, 0, 127 })); Assert.That(subspace.Key.ToString(), Is.EqualTo("*<00><7F>")); Assert.That(FdbSubspace.Copy(subspace), Is.Not.SameAs(subspace)); @@ -67,13 +65,13 @@ public void Test_Subspace_With_Binary_Prefix() Assert.That(subspace.ConcatKey(Slice.FromAscii("hello")).ToString(), Is.EqualTo("*<00><7F>hello")); // pack(...) 
should use tuple serialization - Assert.That(subspace.Tuples.EncodeKey(123).ToString(), Is.EqualTo("*<00><7F><15>{")); - Assert.That(subspace.Tuples.EncodeKey("hello").ToString(), Is.EqualTo("*<00><7F><02>hello<00>")); - Assert.That(subspace.Tuples.EncodeKey(Slice.FromAscii("world")).ToString(), Is.EqualTo("*<00><7F><01>world<00>")); - Assert.That(subspace.Tuples.Pack(FdbTuple.Create("hello", 123)).ToString(), Is.EqualTo("*<00><7F><02>hello<00><15>{")); + Assert.That(subspace.Keys.Encode(123).ToString(), Is.EqualTo("*<00><7F><15>{")); + Assert.That(subspace.Keys.Encode("hello").ToString(), Is.EqualTo("*<00><7F><02>hello<00>")); + Assert.That(subspace.Keys.Encode(Slice.FromAscii("world")).ToString(), Is.EqualTo("*<00><7F><01>world<00>")); + Assert.That(subspace.Keys.Pack(FdbTuple.Create("hello", 123)).ToString(), Is.EqualTo("*<00><7F><02>hello<00><15>{")); // if we derive a tuple from this subspace, it should keep the binary prefix when converted to a key - var t = subspace.Tuples.Append("world", 123, false); + var t = subspace.Keys.Append("world", 123, false); Assert.That(t, Is.Not.Null); Assert.That(t.Count, Is.EqualTo(3)); Assert.That(t.Get(0), Is.EqualTo("world")); @@ -83,7 +81,7 @@ public void Test_Subspace_With_Binary_Prefix() Assert.That(k.ToString(), Is.EqualTo("*<00><7F><02>world<00><15>{<14>")); // if we unpack the key with the binary prefix, we should get a valid tuple - var t2 = subspace.Tuples.Unpack(k); + var t2 = subspace.Keys.Unpack(k); Assert.That(t2, Is.Not.Null); Assert.That(t2.Count, Is.EqualTo(3)); Assert.That(t2.Get(0), Is.EqualTo("world")); @@ -111,23 +109,25 @@ public void Test_Cannot_Create_Or_Partition_Subspace_With_Slice_Nil() { Assert.That(() => new FdbSubspace(Slice.Nil), Throws.ArgumentException); Assert.That(() => FdbSubspace.Create(Slice.Nil), Throws.ArgumentException); - Assert.That(() => FdbSubspace.Empty.Partition[Slice.Nil], Throws.ArgumentException); - Assert.That(() => FdbSubspace.Create(FdbKey.Directory).Partition[Slice.Nil], 
Throws.ArgumentException); + //FIXME: typed subspaces refactoring ! + //Assert.That(() => FdbSubspace.Empty.Partition[Slice.Nil], Throws.ArgumentException); + //Assert.That(() => FdbSubspace.Create(FdbKey.Directory).Partition[Slice.Nil], Throws.ArgumentException); } [Test] public void Test_Cannot_Create_Or_Partition_Subspace_With_Null_Tuple() { Assert.That(() => FdbSubspace.Create(default(IFdbTuple)), Throws.InstanceOf()); - Assert.That(() => FdbSubspace.Empty.Partition[default(IFdbTuple)], Throws.InstanceOf()); - Assert.That(() => FdbSubspace.Create(FdbKey.Directory).Partition[default(IFdbTuple)], Throws.InstanceOf()); + //FIXME: typed subspaces refactoring ! + //Assert.That(() => FdbSubspace.Empty.Partition[default(IFdbTuple)], Throws.InstanceOf()); + //Assert.That(() => FdbSubspace.Create(FdbKey.Directory).Partition[default(IFdbTuple)], Throws.InstanceOf()); } [Test] [Category("LocalCluster")] public void Test_Subspace_With_Tuple_Prefix() { - var subspace = FdbSubspace.Create(FdbTuple.Create("hello")); + var subspace = FdbSubspace.CreateDynamic(FdbTuple.Create("hello")); Assert.That(subspace.Key.ToString(), Is.EqualTo("<02>hello<00>")); Assert.That(FdbSubspace.Copy(subspace), Is.Not.SameAs(subspace)); @@ -138,11 +138,11 @@ public void Test_Subspace_With_Tuple_Prefix() Assert.That(subspace.ConcatKey(Slice.FromAscii("world")).ToString(), Is.EqualTo("<02>hello<00>world")); // pack(...) 
should use tuple serialization - Assert.That(subspace.Tuples.EncodeKey(123).ToString(), Is.EqualTo("<02>hello<00><15>{")); - Assert.That(subspace.Tuples.EncodeKey("world").ToString(), Is.EqualTo("<02>hello<00><02>world<00>")); + Assert.That(subspace.Keys.Encode(123).ToString(), Is.EqualTo("<02>hello<00><15>{")); + Assert.That(subspace.Keys.Encode("world").ToString(), Is.EqualTo("<02>hello<00><02>world<00>")); // even though the subspace prefix is a tuple, appending to it will only return the new items - var t = subspace.Tuples.Append("world", 123, false); + var t = subspace.Keys.Append("world", 123, false); Assert.That(t, Is.Not.Null); Assert.That(t.Count, Is.EqualTo(3)); Assert.That(t.Get(0), Is.EqualTo("world")); @@ -153,7 +153,7 @@ public void Test_Subspace_With_Tuple_Prefix() Assert.That(k.ToString(), Is.EqualTo("<02>hello<00><02>world<00><15>{<14>")); // if we unpack the key with the binary prefix, we should get a valid tuple - var t2 = subspace.Tuples.Unpack(k); + var t2 = subspace.Keys.Unpack(k); Assert.That(t2, Is.Not.Null); Assert.That(t2.Count, Is.EqualTo(3)); Assert.That(t2.Get(0), Is.EqualTo("world")); @@ -166,7 +166,7 @@ public void Test_Subspace_With_Tuple_Prefix() public void Test_Subspace_Partitioning_With_Binary_Suffix() { // start from a parent subspace - var parent = FdbSubspace.Empty; + var parent = FdbSubspace.Empty.Using(TypeSystem.Tuples); Assert.That(parent.Key.ToString(), Is.EqualTo("")); // create a child subspace using a tuple @@ -195,16 +195,16 @@ public void Test_Subspace_Partitioning_With_Binary_Suffix() public void Test_Subspace_Partitioning_With_Tuple_Suffix() { // start from a parent subspace - var parent = new FdbSubspace(Slice.Create(new byte[] { 254 })); + var parent = FdbSubspace.CreateDynamic(Slice.FromByte(254), TypeSystem.Tuples); Assert.That(parent.Key.ToString(), Is.EqualTo("")); // create a child subspace using a tuple - var child = parent.Partition[FdbTuple.Create("hca")]; + var child = parent.Partition.ByKey("hca"); 
Assert.That(child, Is.Not.Null); Assert.That(child.Key.ToString(), Is.EqualTo("<02>hca<00>")); // create a tuple from this child subspace - var tuple = child.Tuples.Append(123); + var tuple = child.Keys.Append(123); Assert.That(tuple, Is.Not.Null); Assert.That(tuple.ToSlice().ToString(), Is.EqualTo("<02>hca<00><15>{")); @@ -213,7 +213,7 @@ public void Test_Subspace_Partitioning_With_Tuple_Suffix() Assert.That(t1.ToSlice().ToString(), Is.EqualTo("<02>hca<00><15>{<14>")); // check that we could also create the same tuple starting from the parent subspace - var t2 = parent.Tuples.Append("hca", 123, false); + var t2 = parent.Keys.Append("hca", 123, false); Assert.That(t2.ToSlice(), Is.EqualTo(t1.ToSlice())); // cornercase diff --git a/FoundationDB.Tests/TransactionFacts.cs b/FoundationDB.Tests/TransactionFacts.cs index d88acc78d..de4d4e3cb 100644 --- a/FoundationDB.Tests/TransactionFacts.cs +++ b/FoundationDB.Tests/TransactionFacts.cs @@ -104,16 +104,16 @@ public async Task Test_Creating_A_ReadOnly_Transaction_Throws_When_Writing() Assert.That(tr, Is.Not.Null); // reading should not fail - await tr.GetAsync(db.Tuples.EncodeKey("Hello")); + await tr.GetAsync(db.Keys.Encode("Hello")); // any attempt to recast into a writeable transaction should fail! 
var tr2 = (IFdbTransaction)tr; Assert.That(tr2.IsReadOnly, Is.True, "Transaction should be marked as readonly"); var location = db.Partition.ByKey("ReadOnly"); - Assert.That(() => tr2.Set(location.Tuples.EncodeKey("Hello"), Slice.Empty), Throws.InvalidOperationException); - Assert.That(() => tr2.Clear(location.Tuples.EncodeKey("Hello")), Throws.InvalidOperationException); - Assert.That(() => tr2.ClearRange(location.Tuples.EncodeKey("ABC"), location.Tuples.EncodeKey("DEF")), Throws.InvalidOperationException); - Assert.That(() => tr2.Atomic(location.Tuples.EncodeKey("Counter"), Slice.FromFixed32(1), FdbMutationType.Add), Throws.InvalidOperationException); + Assert.That(() => tr2.Set(location.Keys.Encode("Hello"), Slice.Empty), Throws.InvalidOperationException); + Assert.That(() => tr2.Clear(location.Keys.Encode("Hello")), Throws.InvalidOperationException); + Assert.That(() => tr2.ClearRange(location.Keys.Encode("ABC"), location.Keys.Encode("DEF")), Throws.InvalidOperationException); + Assert.That(() => tr2.Atomic(location.Keys.Encode("Counter"), Slice.FromFixed32(1), FdbMutationType.Add), Throws.InvalidOperationException); } } } @@ -227,7 +227,7 @@ public async Task Test_Cancelling_Transaction_Before_Commit_Should_Throw_Immedia using (var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(location.Tuples.EncodeKey(1), Slice.FromString("hello")); + tr.Set(location.Keys.Encode(1), Slice.FromString("hello")); tr.Cancel(); await TestHelpers.AssertThrowsFdbErrorAsync( @@ -259,7 +259,7 @@ public async Task Test_Cancelling_Transaction_During_Commit_Should_Abort_Task() // Writes about 5 MB of stuff in 100k chunks for (int i = 0; i < 50; i++) { - tr.Set(location.Tuples.EncodeKey(i), Slice.Random(rnd, 100 * 1000)); + tr.Set(location.Keys.Encode(i), Slice.Random(rnd, 100 * 1000)); } // start commiting @@ -300,7 +300,7 @@ public async Task Test_Cancelling_Token_During_Commit_Should_Abort_Task() // Writes about 5 MB of stuff in 100k chunks for (int i = 0; i < 50; i++) { - 
tr.Set(location.Tuples.EncodeKey(i), Slice.Random(rnd, 100 * 1000)); + tr.Set(location.Keys.Encode(i), Slice.Random(rnd, 100 * 1000)); } // start commiting with a cancellation token @@ -350,9 +350,9 @@ public async Task Test_Write_And_Read_Simple_Keys() // write a bunch of keys using (var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(location.Tuples.EncodeKey("hello"), Slice.FromString("World!")); - tr.Set(location.Tuples.EncodeKey("timestamp"), Slice.FromInt64(ticks)); - tr.Set(location.Tuples.EncodeKey("blob"), Slice.Create(new byte[] { 42, 123, 7 })); + tr.Set(location.Keys.Encode("hello"), Slice.FromString("World!")); + tr.Set(location.Keys.Encode("timestamp"), Slice.FromInt64(ticks)); + tr.Set(location.Keys.Encode("blob"), Slice.Create(new byte[] { 42, 123, 7 })); await tr.CommitAsync(); @@ -368,15 +368,15 @@ public async Task Test_Write_And_Read_Simple_Keys() readVersion = await tr.GetReadVersionAsync(); Assert.That(readVersion, Is.GreaterThan(0), "Read version should be > 0"); - bytes = await tr.GetAsync(location.Tuples.EncodeKey("hello")); // => 1007 "past_version" + bytes = await tr.GetAsync(location.Keys.Encode("hello")); // => 1007 "past_version" Assert.That(bytes.Array, Is.Not.Null); Assert.That(Encoding.UTF8.GetString(bytes.Array, bytes.Offset, bytes.Count), Is.EqualTo("World!")); - bytes = await tr.GetAsync(location.Tuples.EncodeKey("timestamp")); + bytes = await tr.GetAsync(location.Keys.Encode("timestamp")); Assert.That(bytes.Array, Is.Not.Null); Assert.That(bytes.ToInt64(), Is.EqualTo(ticks)); - bytes = await tr.GetAsync(location.Tuples.EncodeKey("blob")); + bytes = await tr.GetAsync(location.Keys.Encode("blob")); Assert.That(bytes.Array, Is.Not.Null); Assert.That(bytes.Array, Is.EqualTo(new byte[] { 42, 123, 7 })); } @@ -406,7 +406,7 @@ public async Task Test_Can_Resolve_Key_Selector() tr.Set(minKey, Slice.FromString("min")); for (int i = 0; i < 20; i++) { - tr.Set(location.Tuples.EncodeKey(i), Slice.FromString(i.ToString())); + 
tr.Set(location.Keys.Encode(i), Slice.FromString(i.ToString())); } tr.Set(maxKey, Slice.FromString("max")); await tr.CommitAsync(); @@ -418,43 +418,43 @@ public async Task Test_Can_Resolve_Key_Selector() FdbKeySelector sel; // >= 0 - sel = FdbKeySelector.FirstGreaterOrEqual(location.Tuples.EncodeKey(0)); - Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Tuples.EncodeKey(0)), "fGE(0) should return 0"); + sel = FdbKeySelector.FirstGreaterOrEqual(location.Keys.Encode(0)); + Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Keys.Encode(0)), "fGE(0) should return 0"); Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(minKey), "fGE(0)-1 should return minKey"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Tuples.EncodeKey(1)), "fGE(0)+1 should return 1"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Keys.Encode(1)), "fGE(0)+1 should return 1"); // > 0 - sel = FdbKeySelector.FirstGreaterThan(location.Tuples.EncodeKey(0)); - Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Tuples.EncodeKey(1)), "fGT(0) should return 1"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Tuples.EncodeKey(0)), "fGT(0)-1 should return 0"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Tuples.EncodeKey(2)), "fGT(0)+1 should return 2"); + sel = FdbKeySelector.FirstGreaterThan(location.Keys.Encode(0)); + Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Keys.Encode(1)), "fGT(0) should return 1"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Keys.Encode(0)), "fGT(0)-1 should return 0"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Keys.Encode(2)), "fGT(0)+1 should return 2"); // <= 10 - sel = FdbKeySelector.LastLessOrEqual(location.Tuples.EncodeKey(10)); - Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Tuples.EncodeKey(10)), "lLE(10) should return 10"); - Assert.That(await tr.GetKeyAsync(sel - 1), 
Is.EqualTo(location.Tuples.EncodeKey(9)), "lLE(10)-1 should return 9"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Tuples.EncodeKey(11)), "lLE(10)+1 should return 11"); + sel = FdbKeySelector.LastLessOrEqual(location.Keys.Encode(10)); + Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Keys.Encode(10)), "lLE(10) should return 10"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Keys.Encode(9)), "lLE(10)-1 should return 9"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Keys.Encode(11)), "lLE(10)+1 should return 11"); // < 10 - sel = FdbKeySelector.LastLessThan(location.Tuples.EncodeKey(10)); - Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Tuples.EncodeKey(9)), "lLT(10) should return 9"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Tuples.EncodeKey(8)), "lLT(10)-1 should return 8"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Tuples.EncodeKey(10)), "lLT(10)+1 should return 10"); + sel = FdbKeySelector.LastLessThan(location.Keys.Encode(10)); + Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(location.Keys.Encode(9)), "lLT(10) should return 9"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Keys.Encode(8)), "lLT(10)-1 should return 8"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Keys.Encode(10)), "lLT(10)+1 should return 10"); // < 0 - sel = FdbKeySelector.LastLessThan(location.Tuples.EncodeKey(0)); + sel = FdbKeySelector.LastLessThan(location.Keys.Encode(0)); Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(minKey), "lLT(0) should return minKey"); - Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Tuples.EncodeKey(0)), "lLT(0)+1 should return 0"); + Assert.That(await tr.GetKeyAsync(sel + 1), Is.EqualTo(location.Keys.Encode(0)), "lLT(0)+1 should return 0"); // >= 20 - sel = FdbKeySelector.FirstGreaterOrEqual(location.Tuples.EncodeKey(20)); + sel = 
FdbKeySelector.FirstGreaterOrEqual(location.Keys.Encode(20)); Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(maxKey), "fGE(20) should return maxKey"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Tuples.EncodeKey(19)), "fGE(20)-1 should return 19"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Keys.Encode(19)), "fGE(20)-1 should return 19"); // > 19 - sel = FdbKeySelector.FirstGreaterThan(location.Tuples.EncodeKey(19)); + sel = FdbKeySelector.FirstGreaterThan(location.Keys.Encode(19)); Assert.That(await tr.GetKeyAsync(sel), Is.EqualTo(maxKey), "fGT(19) should return maxKey"); - Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Tuples.EncodeKey(19)), "fGT(19)-1 should return 19"); + Assert.That(await tr.GetKeyAsync(sel - 1), Is.EqualTo(location.Keys.Encode(19)), "fGT(19)-1 should return 19"); } } } @@ -548,14 +548,14 @@ public async Task Test_Get_Multiple_Values() { for (int i = 0; i < ids.Length; i++) { - tr.Set(location.Tuples.EncodeKey(i), Slice.FromString("#" + i.ToString())); + tr.Set(location.Keys.Encode(i), Slice.FromString("#" + i.ToString())); } await tr.CommitAsync(); } using (var tr = db.BeginTransaction(this.Cancellation)) { - var keys = ids.Select(id => location.Tuples.EncodeKey(id)).ToArray(); + var keys = ids.Select(id => location.Keys.Encode(id)).ToArray(); var results = await tr.GetValuesAsync(keys); @@ -598,7 +598,7 @@ public async Task Test_Get_Multiple_Keys() tr.Set(minKey, Slice.FromString("min")); for (int i = 0; i < 20; i++) { - tr.Set(location.Tuples.EncodeKey(i), Slice.FromString(i.ToString())); + tr.Set(location.Keys.Encode(i), Slice.FromString(i.ToString())); } tr.Set(maxKey, Slice.FromString("max")); await tr.CommitAsync(); @@ -607,7 +607,7 @@ public async Task Test_Get_Multiple_Keys() using (var tr = db.BeginTransaction(this.Cancellation)) { - var selectors = Enumerable.Range(0, N).Select((i) => FdbKeySelector.FirstGreaterOrEqual(location.Tuples.EncodeKey(i))).ToArray(); + var 
selectors = Enumerable.Range(0, N).Select((i) => FdbKeySelector.FirstGreaterOrEqual(location.Keys.Encode(i))).ToArray(); // GetKeysAsync([]) var results = await tr.GetKeysAsync(selectors); @@ -615,7 +615,7 @@ public async Task Test_Get_Multiple_Keys() Assert.That(results.Length, Is.EqualTo(20)); for (int i = 0; i < N; i++) { - Assert.That(results[i], Is.EqualTo(location.Tuples.EncodeKey(i))); + Assert.That(results[i], Is.EqualTo(location.Keys.Encode(i))); } // GetKeysAsync(cast to enumerable) @@ -680,7 +680,7 @@ public async Task Test_Can_Perform_Atomic_Operations() Slice key; - key = location.Tuples.EncodeKey("add"); + key = location.Keys.Encode("add"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Add, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Add, 1); await PerformAtomicOperationAndCheck(db, key, 1, FdbMutationType.Add, 0); @@ -688,21 +688,21 @@ public async Task Test_Can_Perform_Atomic_Operations() await PerformAtomicOperationAndCheck(db, key, -1, FdbMutationType.Add, 1); await PerformAtomicOperationAndCheck(db, key, 123456789, FdbMutationType.Add, 987654321); - key = location.Tuples.EncodeKey("and"); + key = location.Keys.Encode("and"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitAnd, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitAnd, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, -1, FdbMutationType.BitAnd, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, 0x00FF00FF, FdbMutationType.BitAnd, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, 0x0F0F0F0F, FdbMutationType.BitAnd, 0x018055AA); - key = location.Tuples.EncodeKey("or"); + key = location.Keys.Encode("or"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitOr, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitOr, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, -1, FdbMutationType.BitOr, 0x018055AA); await 
PerformAtomicOperationAndCheck(db, key, 0x00FF00FF, FdbMutationType.BitOr, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, 0x0F0F0F0F, FdbMutationType.BitOr, 0x018055AA); - key = location.Tuples.EncodeKey("xor"); + key = location.Keys.Encode("xor"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitXor, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.BitXor, 0x018055AA); await PerformAtomicOperationAndCheck(db, key, -1, FdbMutationType.BitXor, 0x018055AA); @@ -711,14 +711,14 @@ public async Task Test_Can_Perform_Atomic_Operations() if (Fdb.ApiVersion >= 300) { - key = location.Tuples.EncodeKey("max"); + key = location.Keys.Encode("max"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Max, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Max, 1); await PerformAtomicOperationAndCheck(db, key, 1, FdbMutationType.Max, 0); await PerformAtomicOperationAndCheck(db, key, 2, FdbMutationType.Max, 1); await PerformAtomicOperationAndCheck(db, key, 123456789, FdbMutationType.Max, 987654321); - key = location.Tuples.EncodeKey("min"); + key = location.Keys.Encode("min"); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Min, 0); await PerformAtomicOperationAndCheck(db, key, 0, FdbMutationType.Min, 1); await PerformAtomicOperationAndCheck(db, key, 1, FdbMutationType.Min, 0); @@ -730,7 +730,7 @@ public async Task Test_Can_Perform_Atomic_Operations() // calling with an unsupported mutation type should fail using (var tr = db.BeginTransaction(this.Cancellation)) { - key = location.Tuples.EncodeKey("invalid"); + key = location.Keys.Encode("invalid"); Assert.That(() => tr.Atomic(key, Slice.FromFixed32(42), FdbMutationType.Max), Throws.InstanceOf().With.Property("Code").EqualTo(FdbError.InvalidMutationType)); } } @@ -738,7 +738,7 @@ public async Task Test_Can_Perform_Atomic_Operations() // calling with an invalid mutation type should fail using (var tr = 
db.BeginTransaction(this.Cancellation)) { - key = location.Tuples.EncodeKey("invalid"); + key = location.Keys.Encode("invalid"); Assert.That(() => tr.Atomic(key, Slice.FromFixed32(42), (FdbMutationType)42), Throws.InstanceOf().With.Property("Code").EqualTo(FdbError.InvalidMutationType)); } } @@ -757,8 +757,8 @@ public async Task Test_Can_Snapshot_Read() // write a bunch of keys await db.WriteAsync((tr) => { - tr.Set(location.Tuples.EncodeKey("hello"), Slice.FromString("World!")); - tr.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("bar")); + tr.Set(location.Keys.Encode("hello"), Slice.FromString("World!")); + tr.Set(location.Keys.Encode("foo"), Slice.FromString("bar")); }, this.Cancellation); // read them using snapshot @@ -766,10 +766,10 @@ await db.WriteAsync((tr) => { Slice bytes; - bytes = await tr.Snapshot.GetAsync(location.Tuples.EncodeKey("hello")); + bytes = await tr.Snapshot.GetAsync(location.Keys.Encode("hello")); Assert.That(bytes.ToUnicode(), Is.EqualTo("World!")); - bytes = await tr.Snapshot.GetAsync(location.Tuples.EncodeKey("foo")); + bytes = await tr.Snapshot.GetAsync(location.Keys.Encode("foo")); Assert.That(bytes.ToUnicode(), Is.EqualTo("bar")); } @@ -790,7 +790,7 @@ public async Task Test_CommittedVersion_On_ReadOnly_Transactions() long ver = tr.GetCommittedVersion(); Assert.That(ver, Is.EqualTo(-1), "Initial committed version"); - var _ = await tr.GetAsync(db.Tuples.EncodeKey("foo")); + var _ = await tr.GetAsync(db.Keys.Encode("foo")); // until the transction commits, the committed version will stay -1 ver = tr.GetCommittedVersion(); @@ -821,7 +821,7 @@ public async Task Test_CommittedVersion_On_Write_Transactions() long ver = tr.GetCommittedVersion(); Assert.That(ver, Is.EqualTo(-1), "Initial committed version"); - tr.Set(db.Tuples.EncodeKey("foo"), Slice.FromString("bar")); + tr.Set(db.Keys.Encode("foo"), Slice.FromString("bar")); // until the transction commits, the committed version should still be -1 ver = tr.GetCommittedVersion(); 
@@ -849,7 +849,7 @@ public async Task Test_CommittedVersion_After_Reset() // take the read version (to compare with the committed version below) long rv1 = await tr.GetReadVersionAsync(); // do something and commit - tr.Set(db.Tuples.EncodeKey("foo"), Slice.FromString("bar")); + tr.Set(db.Keys.Encode("foo"), Slice.FromString("bar")); await tr.CommitAsync(); long cv1 = tr.GetCommittedVersion(); Log("COMMIT: {0} / {1}", rv1, cv1); @@ -865,7 +865,7 @@ public async Task Test_CommittedVersion_After_Reset() //Assert.That(cv2, Is.EqualTo(-1), "Committed version should go back to -1 after reset"); // read-only + commit - await tr.GetAsync(db.Tuples.EncodeKey("foo")); + await tr.GetAsync(db.Keys.Encode("foo")); await tr.CommitAsync(); cv2 = tr.GetCommittedVersion(); Log("COMMIT2: {0} / {1}", rv2, cv2); @@ -888,18 +888,18 @@ public async Task Test_Regular_Read_With_Concurrent_Change_Should_Conflict() await db.WriteAsync((tr) => { - tr.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("foo")); + tr.Set(location.Keys.Encode("foo"), Slice.FromString("foo")); }, this.Cancellation); using (var trA = db.BeginTransaction(this.Cancellation)) using (var trB = db.BeginTransaction(this.Cancellation)) { // regular read - var foo = await trA.GetAsync(location.Tuples.EncodeKey("foo")); - trA.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("bar")); + var foo = await trA.GetAsync(location.Keys.Encode("foo")); + trA.Set(location.Keys.Encode("foo"), Slice.FromString("bar")); // this will conflict with our read - trB.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("bar")); + trB.Set(location.Keys.Encode("foo"), Slice.FromString("bar")); await trB.CommitAsync(); // should fail with a "not_comitted" error @@ -926,18 +926,18 @@ public async Task Test_Snapshot_Read_With_Concurrent_Change_Should_Not_Conflict( await db.WriteAsync((tr) => { - tr.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("foo")); + tr.Set(location.Keys.Encode("foo"), Slice.FromString("foo")); }, 
this.Cancellation); using (var trA = db.BeginTransaction(this.Cancellation)) using (var trB = db.BeginTransaction(this.Cancellation)) { // reading with snapshot mode should not conflict - var foo = await trA.Snapshot.GetAsync(location.Tuples.EncodeKey("foo")); - trA.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("bar")); + var foo = await trA.Snapshot.GetAsync(location.Keys.Encode("foo")); + trA.Set(location.Keys.Encode("foo"), Slice.FromString("bar")); // this would normally conflicts with the previous read if it wasn't a snapshot read - trB.Set(location.Tuples.EncodeKey("foo"), Slice.FromString("bar")); + trB.Set(location.Keys.Encode("foo"), Slice.FromString("bar")); await trB.CommitAsync(); // should succeed @@ -958,7 +958,7 @@ public async Task Test_GetRange_With_Concurrent_Change_Should_Conflict() await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); }, this.Cancellation); // we will read the first key from [0, 100), expected 50 @@ -969,19 +969,19 @@ await db.WriteAsync((tr) => { // [0, 100) limit 1 => 50 var kvp = await tr1 - .GetRange(loc.Tuples.EncodeKey("foo"), loc.Tuples.EncodeKey("foo", 100)) + .GetRange(loc.Keys.Encode("foo"), loc.Keys.Encode("foo", 100)) .FirstOrDefaultAsync(); - Assert.That(kvp.Key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 50))); + Assert.That(kvp.Key, Is.EqualTo(loc.Keys.Encode("foo", 50))); // 42 < 50 > conflict !!! 
using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Tuples.EncodeKey("foo", 42), Slice.FromAscii("forty-two")); + tr2.Set(loc.Keys.Encode("foo", 42), Slice.FromAscii("forty-two")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); await TestHelpers.AssertThrowsFdbErrorAsync(() => tr1.CommitAsync(), FdbError.NotCommitted, "The Set(42) in TR2 should have conflicted with the GetRange(0, 100) in TR1"); } @@ -993,26 +993,26 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // [0, 100) limit 1 => 50 var kvp = await tr1 - .GetRange(loc.Tuples.EncodeKey("foo"), loc.Tuples.EncodeKey("foo", 100)) + .GetRange(loc.Keys.Encode("foo"), loc.Keys.Encode("foo", 100)) .FirstOrDefaultAsync(); - Assert.That(kvp.Key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 50))); + Assert.That(kvp.Key, Is.EqualTo(loc.Keys.Encode("foo", 50))); // 77 > 50 => no conflict using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Tuples.EncodeKey("foo", 77), Slice.FromAscii("docm")); + tr2.Set(loc.Keys.Encode("foo", 77), Slice.FromAscii("docm")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); // should not conflict! 
await tr1.CommitAsync(); @@ -1032,7 +1032,7 @@ public async Task Test_GetKey_With_Concurrent_Change_Should_Conflict() await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); }, this.Cancellation); // we will ask for the first key from >= 0, expecting 50, but if another transaction inserts something BEFORE 50, our key selector would have returned a different result, causing a conflict @@ -1040,18 +1040,18 @@ await db.WriteAsync((tr) => using (var tr1 = db.BeginTransaction(this.Cancellation)) { // fGE{0} => 50 - var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Tuples.EncodeKey("foo", 0))); - Assert.That(key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 50))); + var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Keys.Encode("foo", 0))); + Assert.That(key, Is.EqualTo(loc.Keys.Encode("foo", 50))); // 42 < 50 => conflict !!! using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Tuples.EncodeKey("foo", 42), Slice.FromAscii("forty-two")); + tr2.Set(loc.Keys.Encode("foo", 42), Slice.FromAscii("forty-two")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); await TestHelpers.AssertThrowsFdbErrorAsync(() => tr1.CommitAsync(), FdbError.NotCommitted, "The Set(42) in TR2 should have conflicted with the GetKey(fGE{0}) in TR1"); } @@ -1061,24 +1061,24 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // fGE{0} => 50 - var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Tuples.EncodeKey("foo", 0))); - Assert.That(key, 
Is.EqualTo(loc.Tuples.EncodeKey("foo", 50))); + var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Keys.Encode("foo", 0))); + Assert.That(key, Is.EqualTo(loc.Keys.Encode("foo", 50))); // 77 > 50 => no conflict using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Tuples.EncodeKey("foo", 77), Slice.FromAscii("docm")); + tr2.Set(loc.Keys.Encode("foo", 77), Slice.FromAscii("docm")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); // should not conflict! await tr1.CommitAsync(); @@ -1089,25 +1089,25 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); - tr.Set(loc.Tuples.EncodeKey("foo", 100), Slice.FromAscii("one hundred")); + tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 100), Slice.FromAscii("one hundred")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // fGE{50} + 1 => 100 - var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Tuples.EncodeKey("foo", 50)) + 1); - Assert.That(key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 100))); + var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterOrEqual(loc.Keys.Encode("foo", 50)) + 1); + Assert.That(key, Is.EqualTo(loc.Keys.Encode("foo", 100))); // 77 between 50 and 100 => conflict !!! using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Tuples.EncodeKey("foo", 77), Slice.FromAscii("docm")); + tr2.Set(loc.Keys.Encode("foo", 77), Slice.FromAscii("docm")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); // should conflict! 
await TestHelpers.AssertThrowsFdbErrorAsync(() => tr1.CommitAsync(), FdbError.NotCommitted, "The Set(77) in TR2 should have conflicted with the GetKey(fGE{50} + 1) in TR1"); @@ -1118,25 +1118,25 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); - tr.Set(loc.Tuples.EncodeKey("foo", 100), Slice.FromAscii("one hundred")); + tr.Set(loc.Keys.Encode("foo", 50), Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 100), Slice.FromAscii("one hundred")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // fGT{50} => 100 - var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterThan(loc.Tuples.EncodeKey("foo", 50))); - Assert.That(key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 100))); + var key = await tr1.GetKeyAsync(FdbKeySelector.FirstGreaterThan(loc.Keys.Encode("foo", 50))); + Assert.That(key, Is.EqualTo(loc.Keys.Encode("foo", 100))); // another transaction changes the VALUE of 50 and 100 (but does not change the fact that they exist nor add keys in between) using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(loc.Tuples.EncodeKey("foo", 100), Slice.FromAscii("cent")); + tr2.Set(loc.Keys.Encode("foo", 100), Slice.FromAscii("cent")); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); // this causes a conflict in the current version of FDB await TestHelpers.AssertThrowsFdbErrorAsync(() => tr1.CommitAsync(), FdbError.NotCommitted, "The Set(100) in TR2 should have conflicted with the GetKey(fGT{50}) in TR1"); @@ -1147,25 +1147,25 @@ await db.WriteAsync((tr) => await db.WriteAsync((tr) => { tr.ClearRange(loc); - tr.Set(loc.Tuples.EncodeKey("foo", 50), Slice.FromAscii("fifty")); - tr.Set(loc.Tuples.EncodeKey("foo", 100), Slice.FromAscii("one hundred")); + tr.Set(loc.Keys.Encode("foo", 50), 
Slice.FromAscii("fifty")); + tr.Set(loc.Keys.Encode("foo", 100), Slice.FromAscii("one hundred")); }, this.Cancellation); using (var tr1 = db.BeginTransaction(this.Cancellation)) { // lLT{100} => 50 - var key = await tr1.GetKeyAsync(FdbKeySelector.LastLessThan(loc.Tuples.EncodeKey("foo", 100))); - Assert.That(key, Is.EqualTo(loc.Tuples.EncodeKey("foo", 50))); + var key = await tr1.GetKeyAsync(FdbKeySelector.LastLessThan(loc.Keys.Encode("foo", 100))); + Assert.That(key, Is.EqualTo(loc.Keys.Encode("foo", 50))); // another transaction changes the VALUE of 50 and 100 (but does not change the fact that they exist nor add keys in between) using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Clear(loc.Tuples.EncodeKey("foo", 100)); + tr2.Clear(loc.Keys.Encode("foo", 100)); await tr2.CommitAsync(); } // we need to write something to force a conflict - tr1.Set(loc.Tuples.EncodeKey("bar"), Slice.Empty); + tr1.Set(loc.Keys.Encode("bar"), Slice.Empty); // this causes a conflict in the current version of FDB await tr1.CommitAsync(); @@ -1191,7 +1191,7 @@ public async Task Test_Read_Isolation() using (var db = await OpenTestPartitionAsync()) { var location = db.Partition.ByKey("test"); - var key = location.Tuples.EncodeKey("A"); + var key = location.Keys.Encode("A"); await db.ClearRangeAsync(location, this.Cancellation); @@ -1259,10 +1259,10 @@ public async Task Test_Read_Isolation_From_Writes() var location = db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); - var a = location.Tuples.EncodeKey("A"); - var b = location.Tuples.EncodeKey("B"); - var c = location.Tuples.EncodeKey("C"); - var d = location.Tuples.EncodeKey("D"); + var a = location.Keys.Encode("A"); + var b = location.Keys.Encode("B"); + var c = location.Keys.Encode("C"); + var d = location.Keys.Encode("D"); // Reads (before and after): // - A and B will use regular reads @@ -1320,7 +1320,7 @@ public async Task Test_ReadYourWritesDisable_Isolation() var location = 
db.Partition.ByKey("test"); await db.ClearRangeAsync(location, this.Cancellation); - var a = location.Tuples.EncodeKey("A"); + var a = location.Keys.Encode("A"); var b = location.Partition.ByKey("B"); #region Default behaviour... @@ -1330,23 +1330,23 @@ public async Task Test_ReadYourWritesDisable_Isolation() await db.WriteAsync((tr) => { tr.Set(a, Slice.FromString("a")); - tr.Set(b.Tuples.EncodeKey(10), Slice.FromString("PRINT \"HELLO\"")); - tr.Set(b.Tuples.EncodeKey(20), Slice.FromString("GOTO 10")); + tr.Set(b.Keys.Encode(10), Slice.FromString("PRINT \"HELLO\"")); + tr.Set(b.Keys.Encode(20), Slice.FromString("GOTO 10")); }, this.Cancellation); using(var tr = db.BeginTransaction(this.Cancellation)) { var data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("a")); - var res = await tr.GetRange(b.Tuples.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); + var res = await tr.GetRange(b.Keys.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); Assert.That(res, Is.EqualTo(new [] { "PRINT \"HELLO\"", "GOTO 10" })); tr.Set(a, Slice.FromString("aa")); - tr.Set(b.Tuples.EncodeKey(15), Slice.FromString("PRINT \"WORLD\"")); + tr.Set(b.Keys.Encode(15), Slice.FromString("PRINT \"WORLD\"")); data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("aa"), "The transaction own writes should be visible by default"); - res = await tr.GetRange(b.Tuples.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); + res = await tr.GetRange(b.Keys.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); Assert.That(res, Is.EqualTo(new[] { "PRINT \"HELLO\"", "PRINT \"WORLD\"", "GOTO 10" }), "The transaction own writes should be visible by default"); //note: don't commit @@ -1364,15 +1364,15 @@ await db.WriteAsync((tr) => var data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("a")); - var res = await tr.GetRange(b.Tuples.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); + var res = await 
tr.GetRange(b.Keys.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); Assert.That(res, Is.EqualTo(new[] { "PRINT \"HELLO\"", "GOTO 10" })); tr.Set(a, Slice.FromString("aa")); - tr.Set(b.Tuples.EncodeKey(15), Slice.FromString("PRINT \"WORLD\"")); + tr.Set(b.Keys.Encode(15), Slice.FromString("PRINT \"WORLD\"")); data = await tr.GetAsync(a); Assert.That(data.ToUnicode(), Is.EqualTo("a"), "The transaction own writes should not be seen with ReadYourWritesDisable option enabled"); - res = await tr.GetRange(b.Tuples.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); + res = await tr.GetRange(b.Keys.ToRange()).Select(kvp => kvp.Value.ToString()).ToArrayAsync(); Assert.That(res, Is.EqualTo(new[] { "PRINT \"HELLO\"", "GOTO 10" }), "The transaction own writes should not be seen with ReadYourWritesDisable option enabled"); //note: don't commit @@ -1399,7 +1399,7 @@ public async Task Test_Can_Set_Read_Version() // create first version using (var tr1 = db.BeginTransaction(this.Cancellation)) { - tr1.Set(location.Tuples.EncodeKey("concurrent"), Slice.FromByte(1)); + tr1.Set(location.Keys.Encode("concurrent"), Slice.FromByte(1)); await tr1.CommitAsync(); // get this version @@ -1409,7 +1409,7 @@ public async Task Test_Can_Set_Read_Version() // mutate in another transaction using (var tr2 = db.BeginTransaction(this.Cancellation)) { - tr2.Set(location.Tuples.EncodeKey("concurrent"), Slice.FromByte(2)); + tr2.Set(location.Keys.Encode("concurrent"), Slice.FromByte(2)); await tr2.CommitAsync(); } @@ -1421,7 +1421,7 @@ public async Task Test_Can_Set_Read_Version() long ver = await tr3.GetReadVersionAsync(); Assert.That(ver, Is.EqualTo(commitedVersion), "GetReadVersion should return the same value as SetReadVersion!"); - var bytes = await tr3.GetAsync(location.Tuples.EncodeKey("concurrent")); + var bytes = await tr3.GetAsync(location.Keys.Encode("concurrent")); Assert.That(bytes.GetBytes(), Is.EqualTo(new byte[] { 1 }), "Should have seen the first version!"); } @@ 
-1613,8 +1613,8 @@ public async Task Test_Can_Add_Read_Conflict_Range() await db.ClearRangeAsync(location, this.Cancellation); - var key1 = location.Tuples.EncodeKey(1); - var key2 = location.Tuples.EncodeKey(2); + var key1 = location.Keys.Encode(1); + var key2 = location.Keys.Encode(2); using (var tr1 = db.BeginTransaction(this.Cancellation)) { @@ -1652,9 +1652,9 @@ public async Task Test_Can_Add_Write_Conflict_Range() await db.ClearRangeAsync(location, this.Cancellation); - var keyConflict = location.Tuples.EncodeKey(0); - var key1 = location.Tuples.EncodeKey(1); - var key2 = location.Tuples.EncodeKey(2); + var keyConflict = location.Keys.Encode(0); + var key1 = location.Keys.Encode(1); + var key2 = location.Keys.Encode(2); using (var tr1 = db.BeginTransaction(this.Cancellation)) { @@ -1694,8 +1694,8 @@ public async Task Test_Can_Setup_And_Cancel_Watches() await db.ClearRangeAsync(location, this.Cancellation); - var key1 = location.Tuples.EncodeKey("watched"); - var key2 = location.Tuples.EncodeKey("witness"); + var key1 = location.Keys.Encode("watched"); + var key2 = location.Keys.Encode("witness"); await db.WriteAsync((tr) => { @@ -1753,8 +1753,8 @@ public async Task Test_Can_Get_Addresses_For_Key() await db.ClearRangeAsync(location, this.Cancellation); - var key1 = location.Tuples.EncodeKey(1); - var key404 = location.Tuples.EncodeKey(404); + var key1 = location.Keys.Encode(1); + var key404 = location.Keys.Encode(404); await db.WriteAsync((tr) => { @@ -1942,7 +1942,7 @@ await db.WriteAsync((tr) => { for (int i = 0; i < R; i++) { - tr.Set(location.Tuples.EncodeKey(i), Slice.FromInt32(i)); + tr.Set(location.Keys.Encode(i), Slice.FromInt32(i)); } }, this.Cancellation); @@ -2005,7 +2005,7 @@ await db.WriteAsync((tr) => int x = rnd.Next(R); try { - var res = await tr.GetAsync(location.Tuples.EncodeKey(x)); + var res = await tr.GetAsync(location.Keys.Encode(x)); } catch (FdbException) { @@ -2023,7 +2023,7 @@ await db.WriteAsync((tr) => var tr = m_alive[p]; int x = 
rnd.Next(R); - var t = tr.GetAsync(location.Tuples.EncodeKey(x)).ContinueWith((_) => Console.Write('!'), TaskContinuationOptions.NotOnRanToCompletion); + var t = tr.GetAsync(location.Keys.Encode(x)).ContinueWith((_) => Console.Write('!'), TaskContinuationOptions.NotOnRanToCompletion); // => t is not stored break; } diff --git a/FoundationDB.Tests/TransactionalFacts.cs b/FoundationDB.Tests/TransactionalFacts.cs index b1674c1a1..f3a55e4d3 100644 --- a/FoundationDB.Tests/TransactionalFacts.cs +++ b/FoundationDB.Tests/TransactionalFacts.cs @@ -51,7 +51,7 @@ public async Task Test_ReadAsync_Should_Normally_Execute_Only_Once() using(var tr = db.BeginTransaction(this.Cancellation)) { - tr.Set(location.Tuples.EncodeKey("Hello"), Slice.FromString(secret)); + tr.Set(location.Keys.Encode("Hello"), Slice.FromString(secret)); await tr.CommitAsync(); } @@ -64,7 +64,7 @@ public async Task Test_ReadAsync_Should_Normally_Execute_Only_Once() Assert.That(tr.Context.Database, Is.SameAs(db)); Assert.That(tr.Context.Shared, Is.True); - return tr.GetAsync(location.Tuples.EncodeKey("Hello")); + return tr.GetAsync(location.Keys.Encode("Hello")); }, this.Cancellation); Assert.That(called, Is.EqualTo(1)); // note: if this assert fails, first ensure that you did not get a transient error while running this test! 
@@ -161,7 +161,7 @@ public async Task Test_Transactionals_Retries_Do_Not_Leak_When_Reading_Too_Much( var sw = Stopwatch.StartNew(); Console.WriteLine("Inserting test data (this may take a few minutes)..."); var rnd = new Random(); - await Fdb.Bulk.WriteAsync(db, Enumerable.Range(0, 100 * 1000).Select(i => new KeyValuePair(location.Tuples.EncodeKey(i), Slice.Random(rnd, 4096))), this.Cancellation); + await Fdb.Bulk.WriteAsync(db, Enumerable.Range(0, 100 * 1000).Select(i => new KeyValuePair(location.Keys.Encode(i), Slice.Random(rnd, 4096))), this.Cancellation); sw.Stop(); Console.WriteLine("> done in " + sw.Elapsed); @@ -172,7 +172,7 @@ public async Task Test_Transactionals_Retries_Do_Not_Leak_When_Reading_Too_Much( var result = await db.ReadAsync((tr) => { Console.WriteLine("Retry #" + tr.Context.Retries + " @ " + tr.Context.ElapsedTotal); - return tr.GetRange(location.Tuples.ToRange()).ToListAsync(); + return tr.GetRange(location.Keys.ToRange()).ToListAsync(); }, this.Cancellation); Assert.Fail("Too fast! increase the amount of inserted data, or slow down the system!"); @@ -246,7 +246,7 @@ public async Task Test_Transactionals_ReadOnly_Should_Deny_Write_Attempts() Assume.That(hijack, Is.Not.Null, "This test requires the transaction to implement IFdbTransaction !"); // this call should fail ! 
- hijack.Set(location.Tuples.EncodeKey("Hello"), Slice.FromString("Hijacked")); + hijack.Set(location.Keys.Encode("Hello"), Slice.FromString("Hijacked")); Assert.Fail("Calling Set() on a read-only transaction should fail"); return Task.FromResult(123); @@ -271,7 +271,7 @@ await db.WriteAsync((tr) => { for (int i = 0; i < 10; i++) { - tr.Set(location.Tuples.EncodeKey(i), Slice.FromInt32(i)); + tr.Set(location.Keys.Encode(i), Slice.FromInt32(i)); } }, this.Cancellation); @@ -285,25 +285,25 @@ await db.WriteAsync((tr) => // read 0..2 for (int i = 0; i < 3; i++) { - values[i] = (await tr.GetAsync(location.Tuples.EncodeKey(i))).ToInt32(); + values[i] = (await tr.GetAsync(location.Keys.Encode(i))).ToInt32(); } // another transaction commits a change to 3 before we read it - await db.WriteAsync((tr2) => tr2.Set(location.Tuples.EncodeKey(3), Slice.FromInt32(42)), this.Cancellation); + await db.WriteAsync((tr2) => tr2.Set(location.Keys.Encode(3), Slice.FromInt32(42)), this.Cancellation); // read 3 to 7 for (int i = 3; i < 7; i++) { - values[i] = (await tr.GetAsync(location.Tuples.EncodeKey(i))).ToInt32(); + values[i] = (await tr.GetAsync(location.Keys.Encode(i))).ToInt32(); } // another transaction commits a change to 6 after it has been read - await db.WriteAsync((tr2) => tr2.Set(location.Tuples.EncodeKey(6), Slice.FromInt32(66)), this.Cancellation); + await db.WriteAsync((tr2) => tr2.Set(location.Keys.Encode(6), Slice.FromInt32(66)), this.Cancellation); // read 7 to 9 for (int i = 7; i < 10; i++) { - values[i] = (await tr.GetAsync(location.Tuples.EncodeKey(i))).ToInt32(); + values[i] = (await tr.GetAsync(location.Keys.Encode(i))).ToInt32(); } return values; From 4678625fc8404413b07674a5d3133befbb87b65f Mon Sep 17 00:00:00 2001 From: Christophe Chevalier Date: Fri, 6 Feb 2015 15:03:18 +0100 Subject: [PATCH 47/63] Introduced the notion of KeyEncoding and KeyEncoder - The Type System is a "Key Encoding" which has methods like GetDynamicEncoder(), GetEncoder(..) 
and returns specialized encoders - Key Encoders have a specific shape (dynamic vs static) and produce keys using the correct encoding scheme - Added subspace encoders up to T4 - cleaned up some old classes, moved some files around --- FoundationDB.Client/FdbDatabase.cs | 4 +- .../Filters/FdbDatabaseFilter.cs | 4 +- .../FoundationDB.Client.csproj | 23 +- .../Layers/Directories/FdbDirectoryLayer.cs | 2 +- .../Directories/FdbDirectoryPartition.cs | 2 +- .../Directories/FdbDirectorySubspace.cs | 6 +- FoundationDB.Client/Layers/Tuples/FdbTuple.cs | 16 + .../Tuples/TypeSystem/TupleKeyEncoder.cs} | 96 ++- .../Tuples/TypeSystem/TupleKeyEncoding.cs | 62 ++ .../Subspaces/FdbDynamicSubspace.cs | 24 +- .../Subspaces/FdbDynamicSubspaceKeys.cs | 96 +-- .../Subspaces/FdbDynamicSubspacePartition.cs | 42 +- .../Subspaces/FdbEncoderSubspaceKeys`1.cs | 5 + .../Subspaces/FdbEncoderSubspaceKeys`2.cs | 7 +- .../Subspaces/FdbEncoderSubspaceKeys`3.cs | 2 +- .../Subspaces/FdbEncoderSubspaceKeys`4.cs | 87 +++ .../FdbEncoderSubspacePartition`1.cs | 29 +- .../FdbEncoderSubspacePartition`2.cs | 28 +- .../FdbEncoderSubspacePartition`3.cs | 27 +- .../FdbEncoderSubspacePartition`4.cs | 87 +++ .../Subspaces/FdbEncoderSubspace`4.cs | 87 +++ FoundationDB.Client/Subspaces/FdbSubspace.cs | 145 +++- .../Subspaces/FdbSubspaceExtensions.cs | 89 ++- .../Subspaces/FdbSubspaceKeys_OLD.cs | 163 ---- .../Subspaces/FdbSubspaceTuples_OLD.cs | 736 ------------------ .../Subspaces/IFdbDynamicSubspace.cs | 6 +- .../Subspaces/IFdbEncoderSubspace.cs | 18 + FoundationDB.Client/Subspaces/IFdbSubspace.cs | 24 +- .../Encoders/DynamicKeyEncoderBase.cs | 142 ++++ .../Encoders/KeyValueEncoders.cs | 102 ++- .../TypeSystem/FdbTypeCodec`1.cs | 2 +- .../ICompositeKeyEncoder.cs} | 49 +- ...FdbTypeSystem.cs => IDynamicKeyEncoder.cs} | 170 +++- .../TypeSystem/IFdbKeyEncoding.cs | 80 ++ .../IKeyValueEncoder.cs} | 12 +- .../TypeSystem/IValueEncoder.cs | 47 ++ .../TypeSystem/Tuples/Tupspace.cs | 31 - 
FoundationDB.Client/TypeSystem/TypeSystem.cs | 54 +- .../Experimental/JsonNetCodec.cs | 11 +- .../Experimental/ProtobufCodec.cs | 3 +- 40 files changed, 1479 insertions(+), 1141 deletions(-) rename FoundationDB.Client/{TypeSystem/Tuples/Tuples.cs => Layers/Tuples/TypeSystem/TupleKeyEncoder.cs} (54%) create mode 100644 FoundationDB.Client/Layers/Tuples/TypeSystem/TupleKeyEncoding.cs create mode 100644 FoundationDB.Client/Subspaces/FdbEncoderSubspaceKeys`4.cs create mode 100644 FoundationDB.Client/Subspaces/FdbEncoderSubspacePartition`4.cs create mode 100644 FoundationDB.Client/Subspaces/FdbEncoderSubspace`4.cs delete mode 100644 FoundationDB.Client/Subspaces/FdbSubspaceKeys_OLD.cs delete mode 100644 FoundationDB.Client/Subspaces/FdbSubspaceTuples_OLD.cs create mode 100644 FoundationDB.Client/TypeSystem/Encoders/DynamicKeyEncoderBase.cs rename FoundationDB.Client/{ => TypeSystem}/Encoders/KeyValueEncoders.cs (91%) rename FoundationDB.Client/{Encoders/IKeyValueEncoder.cs => TypeSystem/ICompositeKeyEncoder.cs} (77%) rename FoundationDB.Client/TypeSystem/{IFdbTypeSystem.cs => IDynamicKeyEncoder.cs} (52%) create mode 100644 FoundationDB.Client/TypeSystem/IFdbKeyEncoding.cs rename FoundationDB.Client/{Encoders/ISliceSerializable.cs => TypeSystem/IKeyValueEncoder.cs} (82%) create mode 100644 FoundationDB.Client/TypeSystem/IValueEncoder.cs delete mode 100644 FoundationDB.Client/TypeSystem/Tuples/Tupspace.cs diff --git a/FoundationDB.Client/FdbDatabase.cs b/FoundationDB.Client/FdbDatabase.cs index ebf52627e..d2d001755 100644 --- a/FoundationDB.Client/FdbDatabase.cs +++ b/FoundationDB.Client/FdbDatabase.cs @@ -619,9 +619,9 @@ public FdbDynamicSubspacePartition Partition get { return m_globalSpace.Partition; } } - IFdbTypeSystem IFdbDynamicSubspace.Protocol + IDynamicKeyEncoder IFdbDynamicSubspace.Encoder { - get { return m_globalSpace.Protocol; } + get { return m_globalSpace.Encoder; } } public FdbDynamicSubspaceKeys Keys diff --git 
a/FoundationDB.Client/Filters/FdbDatabaseFilter.cs b/FoundationDB.Client/Filters/FdbDatabaseFilter.cs index 05fec0431..0592a017d 100644 --- a/FoundationDB.Client/Filters/FdbDatabaseFilter.cs +++ b/FoundationDB.Client/Filters/FdbDatabaseFilter.cs @@ -220,9 +220,9 @@ public virtual SliceWriter GetWriter(int capacity = 0) return m_database.GetWriter(capacity); } - public virtual IFdbTypeSystem Protocol + public virtual IDynamicKeyEncoder Encoder { - get { return m_database.Protocol; } + get { return m_database.Encoder; } } #endregion diff --git a/FoundationDB.Client/FoundationDB.Client.csproj b/FoundationDB.Client/FoundationDB.Client.csproj index 3f3888ecf..cea93251c 100644 --- a/FoundationDB.Client/FoundationDB.Client.csproj +++ b/FoundationDB.Client/FoundationDB.Client.csproj @@ -65,13 +65,17 @@ + + + - + + @@ -170,8 +174,6 @@ - - @@ -216,17 +218,19 @@ - + + + - - + + + - + - @@ -265,6 +269,9 @@ + + +