diff --git a/README.md b/README.md
index 20f9b2f6..92f251b6 100644
--- a/README.md
+++ b/README.md
@@ -17,7 +17,7 @@ This is a fork of [DotNetty](https://github.com/azure/dotnetty).
| .NET Netstandard (Windows) Unit Tests | [](https://ci.appveyor.com/project/cuteant/SpanNetty/branch/main) |
## Features
- - Align with [Netty-4.1.51.Final](https://github.com/netty/netty/tree/netty-4.1.51.Final)
+ - Align with [Netty-4.1.52.Final](https://github.com/netty/netty/tree/netty-4.1.52.Final)
- ArrayPooledByteBuffer
- Support **Span<byte>** and **Memory<byte>** in Buffer/Common APIs
- Add support for IBufferWriter<byte> to the **IByteBuffer**
diff --git a/src/DotNetty.Buffers/AbstractArrayPooledDerivedByteBuffer.cs b/src/DotNetty.Buffers/AbstractArrayPooledDerivedByteBuffer.cs
index d4a9ea40..d58f253a 100644
--- a/src/DotNetty.Buffers/AbstractArrayPooledDerivedByteBuffer.cs
+++ b/src/DotNetty.Buffers/AbstractArrayPooledDerivedByteBuffer.cs
@@ -136,14 +136,16 @@ protected IByteBuffer Duplicate0()
sealed class ArrayPooledNonRetainedDuplicateByteBuffer : UnpooledDuplicatedByteBuffer
{
- readonly IReferenceCounted _referenceCountDelegate;
+ readonly IByteBuffer _referenceCountDelegate;
- internal ArrayPooledNonRetainedDuplicateByteBuffer(IReferenceCounted referenceCountDelegate, AbstractByteBuffer buffer)
+ internal ArrayPooledNonRetainedDuplicateByteBuffer(IByteBuffer referenceCountDelegate, AbstractByteBuffer buffer)
: base(buffer)
{
_referenceCountDelegate = referenceCountDelegate;
}
+ protected override bool IsAccessible0() => _referenceCountDelegate.IsAccessible;
+
protected override int ReferenceCount0() => _referenceCountDelegate.ReferenceCount;
protected override IByteBuffer Retain0()
@@ -192,14 +194,16 @@ public override IByteBuffer Slice(int index, int length)
sealed class ArrayPooledNonRetainedSlicedByteBuffer : UnpooledSlicedByteBuffer
{
- readonly IReferenceCounted _referenceCountDelegate;
+ readonly IByteBuffer _referenceCountDelegate;
- public ArrayPooledNonRetainedSlicedByteBuffer(IReferenceCounted referenceCountDelegate, AbstractByteBuffer buffer, int index, int length)
+ public ArrayPooledNonRetainedSlicedByteBuffer(IByteBuffer referenceCountDelegate, AbstractByteBuffer buffer, int index, int length)
: base(buffer, index, length)
{
_referenceCountDelegate = referenceCountDelegate;
}
+ protected override bool IsAccessible0() => _referenceCountDelegate.IsAccessible;
+
protected override int ReferenceCount0() => _referenceCountDelegate.ReferenceCount;
protected override IByteBuffer Retain0()
diff --git a/src/DotNetty.Buffers/AbstractDerivedByteBuffer.cs b/src/DotNetty.Buffers/AbstractDerivedByteBuffer.cs
index d3d24952..c0d0ffe4 100644
--- a/src/DotNetty.Buffers/AbstractDerivedByteBuffer.cs
+++ b/src/DotNetty.Buffers/AbstractDerivedByteBuffer.cs
@@ -40,7 +40,9 @@ protected AbstractDerivedByteBuffer(int maxCapacity)
{
}
- public sealed override bool IsAccessible => Unwrap().IsAccessible;
+ public sealed override bool IsAccessible => IsAccessible0();
+
+ protected virtual bool IsAccessible0() => Unwrap().IsAccessible;
public sealed override int ReferenceCount => ReferenceCount0();
diff --git a/src/DotNetty.Buffers/AbstractPooledDerivedByteBuffer.cs b/src/DotNetty.Buffers/AbstractPooledDerivedByteBuffer.cs
index 9d45a2a7..03b47ad3 100644
--- a/src/DotNetty.Buffers/AbstractPooledDerivedByteBuffer.cs
+++ b/src/DotNetty.Buffers/AbstractPooledDerivedByteBuffer.cs
@@ -138,14 +138,16 @@ protected IByteBuffer Duplicate0()
sealed class PooledNonRetainedDuplicateByteBuffer : UnpooledDuplicatedByteBuffer
{
- readonly IReferenceCounted _referenceCountDelegate;
+ readonly IByteBuffer _referenceCountDelegate;
- internal PooledNonRetainedDuplicateByteBuffer(IReferenceCounted referenceCountDelegate, AbstractByteBuffer buffer)
+ internal PooledNonRetainedDuplicateByteBuffer(IByteBuffer referenceCountDelegate, AbstractByteBuffer buffer)
: base(buffer)
{
_referenceCountDelegate = referenceCountDelegate;
}
+ protected override bool IsAccessible0() => _referenceCountDelegate.IsAccessible;
+
protected override int ReferenceCount0() => _referenceCountDelegate.ReferenceCount;
protected override IByteBuffer Retain0()
@@ -198,14 +200,16 @@ public override IByteBuffer Slice(int index, int length)
sealed class PooledNonRetainedSlicedByteBuffer : UnpooledSlicedByteBuffer
{
- readonly IReferenceCounted _referenceCountDelegate;
+ readonly IByteBuffer _referenceCountDelegate;
- public PooledNonRetainedSlicedByteBuffer(IReferenceCounted referenceCountDelegate, AbstractByteBuffer buffer, int index, int length)
+ public PooledNonRetainedSlicedByteBuffer(IByteBuffer referenceCountDelegate, AbstractByteBuffer buffer, int index, int length)
: base(buffer, index, length)
{
_referenceCountDelegate = referenceCountDelegate;
}
+ protected override bool IsAccessible0() => _referenceCountDelegate.IsAccessible;
+
protected override int ReferenceCount0() => _referenceCountDelegate.ReferenceCount;
protected override IByteBuffer Retain0()
diff --git a/src/DotNetty.Buffers/ByteBufferUtil.cs b/src/DotNetty.Buffers/ByteBufferUtil.cs
index aed62214..6f993e0e 100644
--- a/src/DotNetty.Buffers/ByteBufferUtil.cs
+++ b/src/DotNetty.Buffers/ByteBufferUtil.cs
@@ -75,12 +75,33 @@ static ByteBufferUtil()
AsciiByteProcessor = new FindNonAscii();
}
+ [MethodImpl(InlineMethod.AggressiveOptimization)]
public static bool EnsureWritableSuccess(int ensureWritableResult)
{
var nresult = (uint)ensureWritableResult;
return 0u >= nresult || 2u == nresult;
}
+ /// Returns whether the specified buffer has a nonzero ref count.
+ [MethodImpl(InlineMethod.AggressiveOptimization)]
+ public static bool IsAccessible(IByteBuffer buffer)
+ {
+ //if (buffer is null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.buffer); }
+ return buffer.IsAccessible;
+ }
+
+ /// throws IllegalReferenceCountException if the buffer has a zero ref count.
+ [MethodImpl(InlineMethod.AggressiveOptimization)]
+ public static IByteBuffer EnsureAccessible(IByteBuffer buffer)
+ {
+ //if (buffer is null) { ThrowHelper.ThrowArgumentNullException(ExceptionArgument.buffer); }
+ if (!buffer.IsAccessible)
+ {
+ ThrowHelper.ThrowIllegalReferenceCountException(buffer.ReferenceCount);
+ }
+ return buffer;
+ }
+
///
/// Read the given amount of bytes into a new that is allocated from the .
///
@@ -148,16 +169,17 @@ public static byte[] GetBytes(IByteBuffer buf, int start, int length, bool copy)
if (buf.HasArray)
{
- if (copy || start != 0 || length != capacity)
+ int baseOffset = buf.ArrayOffset + start;
+ var bytes = buf.Array;
+ if (copy || baseOffset != 0 || length != bytes.Length)
{
- int baseOffset = buf.ArrayOffset + start;
- var bytes = new byte[length];
- PlatformDependent.CopyMemory(buf.Array, baseOffset, bytes, 0, length);
- return bytes;
+ var result = new byte[length];
+ PlatformDependent.CopyMemory(bytes, baseOffset, result, 0, length);
+ return result;
}
else
{
- return buf.Array;
+ return bytes;
}
}
diff --git a/src/DotNetty.Buffers/DefaultByteBufferHolder.cs b/src/DotNetty.Buffers/DefaultByteBufferHolder.cs
index 701d1b97..83b6ede7 100644
--- a/src/DotNetty.Buffers/DefaultByteBufferHolder.cs
+++ b/src/DotNetty.Buffers/DefaultByteBufferHolder.cs
@@ -39,19 +39,7 @@ public DefaultByteBufferHolder(IByteBuffer data)
_data = data;
}
- public IByteBuffer Content
- {
- get
- {
- var refCnt = _data.ReferenceCount;
- if ((uint)(refCnt - 1) > SharedConstants.TooBigOrNegative) // <= 0
- {
- ThrowHelper.ThrowIllegalReferenceCountException(refCnt);
- }
-
- return _data;
- }
- }
+ public IByteBuffer Content => ByteBufferUtil.EnsureAccessible(_data);
public virtual IByteBufferHolder Copy() => Replace(_data.Copy());
diff --git a/src/DotNetty.Buffers/IPoolArenaMetric.cs b/src/DotNetty.Buffers/IPoolArenaMetric.cs
index 850b8198..11c72057 100644
--- a/src/DotNetty.Buffers/IPoolArenaMetric.cs
+++ b/src/DotNetty.Buffers/IPoolArenaMetric.cs
@@ -25,77 +25,84 @@
namespace DotNetty.Buffers
{
+ using System;
using System.Collections.Generic;
- public interface IPoolArenaMetric
+ /// Expose metrics for an arena.
+ public interface IPoolArenaMetric : ISizeClassesMetric
{
- /// Returns the number of thread caches backed by this arena.
+ /// Returns the number of thread caches backed by this arena.
int NumThreadCaches { get; }
- /// Returns the number of tiny sub-pages for the arena.
+ /// Returns the number of tiny sub-pages for the arena.
+ [Obsolete("Tiny sub-pages have been merged into small sub-pages.")]
int NumTinySubpages { get; }
- /// Returns the number of small sub-pages for the arena.
+ /// Returns the number of small sub-pages for the arena.
int NumSmallSubpages { get; }
- /// Returns the number of chunk lists for the arena.
+ /// Returns the number of chunk lists for the arena.
int NumChunkLists { get; }
- /// Returns an unmodifiable {@link List} which holds {@link PoolSubpageMetric}s for tiny sub-pages.
+ /// Returns an unmodifiable which holds s for tiny sub-pages.
+ [Obsolete("Tiny sub-pages have been merged into small sub-pages.")]
IReadOnlyList TinySubpages { get; }
- /// Returns an unmodifiable {@link List} which holds {@link PoolSubpageMetric}s for small sub-pages.
+ /// Returns an unmodifiable which holds s for small sub-pages.
IReadOnlyList SmallSubpages { get; }
- /// Returns an unmodifiable {@link List} which holds {@link PoolChunkListMetric}s.
+ /// Returns an unmodifiable which holds s.
IReadOnlyList ChunkLists { get; }
- /// Return the number of allocations done via the arena. This includes all sizes.
+ /// Return the number of allocations done via the arena. This includes all sizes.
long NumAllocations { get; }
- /// Return the number of tiny allocations done via the arena.
+ /// Return the number of tiny allocations done via the arena.
+ [Obsolete("Tiny allocations have been merged into small allocations.")]
long NumTinyAllocations { get; }
- /// Return the number of small allocations done via the arena.
+ /// Return the number of small allocations done via the arena.
long NumSmallAllocations { get; }
- /// Return the number of normal allocations done via the arena.
+ /// Return the number of normal allocations done via the arena.
long NumNormalAllocations { get; }
- /// Return the number of huge allocations done via the arena.
+ /// Return the number of huge allocations done via the arena.
long NumHugeAllocations { get; }
- /// Return the number of deallocations done via the arena. This includes all sizes.
+ /// Return the number of deallocations done via the arena. This includes all sizes.
long NumDeallocations { get; }
- /// Return the number of tiny deallocations done via the arena.
+ /// Return the number of tiny deallocations done via the arena.
+ [Obsolete("Tiny deallocations have been merged into small deallocations.")]
long NumTinyDeallocations { get; }
- /// Return the number of small deallocations done via the arena.
+ /// Return the number of small deallocations done via the arena.
long NumSmallDeallocations { get; }
- /// Return the number of normal deallocations done via the arena.
+ /// Return the number of normal deallocations done via the arena.
long NumNormalDeallocations { get; }
- /// Return the number of huge deallocations done via the arena.
+ /// Return the number of huge deallocations done via the arena.
long NumHugeDeallocations { get; }
- /// Return the number of currently active allocations.
+ /// Return the number of currently active allocations.
long NumActiveAllocations { get; }
- /// Return the number of currently active tiny allocations.
+ /// Return the number of currently active tiny allocations.
+ [Obsolete("Tiny allocations have been merged into small allocations.")]
long NumActiveTinyAllocations { get; }
- /// Return the number of currently active small allocations.
+ /// Return the number of currently active small allocations.
long NumActiveSmallAllocations { get; }
- /// Return the number of currently active normal allocations.
+ /// Return the number of currently active normal allocations.
long NumActiveNormalAllocations { get; }
- /// Return the number of currently active huge allocations.
+ /// Return the number of currently active huge allocations.
long NumActiveHugeAllocations { get; }
- /// Return the number of active bytes that are currently allocated by the arena.
+ /// Return the number of active bytes that are currently allocated by the arena.
long NumActiveBytes { get; }
}
}
\ No newline at end of file
diff --git a/src/DotNetty.Buffers/IPoolChunkListMetric.cs b/src/DotNetty.Buffers/IPoolChunkListMetric.cs
index 5695cbeb..06daddf2 100644
--- a/src/DotNetty.Buffers/IPoolChunkListMetric.cs
+++ b/src/DotNetty.Buffers/IPoolChunkListMetric.cs
@@ -29,10 +29,10 @@ namespace DotNetty.Buffers
public interface IPoolChunkListMetric : IEnumerable
{
- /// Return the minum usage of the chunk list before which chunks are promoted to the previous list.
+ /// Return the minimum usage of the chunk list before which chunks are promoted to the previous list.
int MinUsage { get; }
- /// Return the minum usage of the chunk list after which chunks are promoted to the next list.
+ /// Return the minimum usage of the chunk list after which chunks are promoted to the next list.
int MaxUsage { get; }
}
}
\ No newline at end of file
diff --git a/src/DotNetty.Buffers/IPoolChunkMetric.cs b/src/DotNetty.Buffers/IPoolChunkMetric.cs
index 5bf87ace..ed8fde03 100644
--- a/src/DotNetty.Buffers/IPoolChunkMetric.cs
+++ b/src/DotNetty.Buffers/IPoolChunkMetric.cs
@@ -27,13 +27,13 @@ namespace DotNetty.Buffers
{
public interface IPoolChunkMetric
{
- /// Return the percentage of the current usage of the chunk.
+ /// Return the percentage of the current usage of the chunk.
int Usage { get; }
- /// Return the size of the chunk in bytes, this is the maximum of bytes that can be served out of the chunk.
+ /// Return the size of the chunk in bytes, this is the maximum of bytes that can be served out of the chunk.
int ChunkSize { get; }
- /// Return the number of free bytes in the chunk.
+ /// Return the number of free bytes in the chunk.
int FreeBytes { get; }
}
}
\ No newline at end of file
diff --git a/src/DotNetty.Buffers/IPoolSubpageMetric.cs b/src/DotNetty.Buffers/IPoolSubpageMetric.cs
index 9844f96b..3f90fe79 100644
--- a/src/DotNetty.Buffers/IPoolSubpageMetric.cs
+++ b/src/DotNetty.Buffers/IPoolSubpageMetric.cs
@@ -24,16 +24,16 @@ namespace DotNetty.Buffers
{
public interface IPoolSubpageMetric
{
- /// Return the number of maximal elements that can be allocated out of the sub-page.
+ /// Return the number of maximal elements that can be allocated out of the sub-page.
int MaxNumElements { get; }
- /// Return the number of available elements to be allocated.
+ /// Return the number of available elements to be allocated.
int NumAvailable { get; }
- /// Return the size (in bytes) of the elements that will be allocated.
+ /// Return the size (in bytes) of the elements that will be allocated.
int ElementSize { get; }
- /// Return the size (in bytes) of this page.
+ /// Return the page size (in bytes) of this page.
int PageSize { get; }
}
}
\ No newline at end of file
diff --git a/src/DotNetty.Buffers/ISizeClassesMetric.cs b/src/DotNetty.Buffers/ISizeClassesMetric.cs
new file mode 100644
index 00000000..9cd1edfe
--- /dev/null
+++ b/src/DotNetty.Buffers/ISizeClassesMetric.cs
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012 The Netty Project
+ *
+ * The Netty Project licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ * Copyright (c) Microsoft. All rights reserved.
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
+ *
+ * Copyright (c) 2020 The Dotnetty-Span-Fork Project (cuteant@outlook.com)
+ *
+ * https://github.com/cuteant/spannetty
+ *
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
+ */
+
+namespace DotNetty.Buffers
+{
+ /// Expose metrics for a SizeClasses.
+ public interface ISizeClassesMetric
+ {
+ /// Computes size from lookup table according to sizeIdx.
+ /// TBD
+ /// size
+ int SizeIdx2Size(int sizeIdx);
+
+ /// Computes size according to sizeIdx.
+ /// TBD
+ /// size
+ int SizeIdx2SizeCompute(int sizeIdx);
+
+ /// Computes size from lookup table according to pageIdx.
+ /// TBD
+ /// size which is multiples of pageSize.
+ long PageIdx2Size(int pageIdx);
+
+ /// Computes size according to pageIdx.
+ /// TBD
+ /// size which is multiples of pageSize
+ long PageIdx2SizeCompute(int pageIdx);
+
+ /// Normalizes request size up to the nearest size class.
+ /// request size
+ /// sizeIdx of the size class
+ int Size2SizeIdx(int size);
+
+ /// Normalizes request size up to the nearest pageSize class.
+ /// multiples of pageSizes
+ /// pageIdx of the pageSize class
+ int Pages2PageIdx(int pages);
+
+ /// Normalizes request size down to the nearest pageSize class.
+ /// multiples of pageSizes
+ /// pageIdx of the pageSize class
+ int Pages2PageIdxFloor(int pages);
+
+ /// Normalizes usable size that would result from allocating an object with the
+ /// specified size and alignment.
+ /// request size
+ /// normalized size
+ int NormalizeSize(int size);
+ }
+}
\ No newline at end of file
diff --git a/src/DotNetty.Buffers/Internal/ThrowHelper.Extensions.cs b/src/DotNetty.Buffers/Internal/ThrowHelper.Extensions.cs
index 4bb91fea..de0ebce1 100644
--- a/src/DotNetty.Buffers/Internal/ThrowHelper.Extensions.cs
+++ b/src/DotNetty.Buffers/Internal/ThrowHelper.Extensions.cs
@@ -225,17 +225,6 @@ static ArgumentException GetArgumentException()
}
}
- [MethodImpl(MethodImplOptions.NoInlining)]
- internal static void ThrowArgumentException_CheckMaxOrder30(int maxOrder)
- {
- throw GetArgumentException(maxOrder);
-
- static ArgumentException GetArgumentException(int maxOrder)
- {
- return new ArgumentException("maxOrder should be < 30, but is: " + maxOrder, nameof(maxOrder));
- }
- }
-
[MethodImpl(MethodImplOptions.NoInlining)]
internal static void ThrowArgumentException_CheckMaxOrder14(int maxOrder)
{
@@ -327,9 +316,9 @@ static ArgumentException GetArgumentException(int pageSize, int maxOrder, int ma
[MethodImpl(MethodImplOptions.NoInlining)]
internal static void ThrowArgumentException_InvalidOffLen()
{
- throw GetArgumentOutOfRangeException();
+ throw GetArgumentException();
- static ArgumentException GetArgumentOutOfRangeException()
+ static ArgumentException GetArgumentException()
{
return new ArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection.");
}
@@ -338,9 +327,9 @@ static ArgumentException GetArgumentOutOfRangeException()
[MethodImpl(MethodImplOptions.NoInlining)]
internal static void ThrowArgumentException_FailedToGetLargerSpan()
{
- throw GetArgumentOutOfRangeException();
+ throw GetArgumentException();
- static ArgumentException GetArgumentOutOfRangeException()
+ static ArgumentException GetArgumentException()
{
return new ArgumentException("The 'IByteBuffer' could not provide an output buffer that is large enough to continue writing.");
}
@@ -349,14 +338,25 @@ static ArgumentException GetArgumentOutOfRangeException()
[MethodImpl(MethodImplOptions.NoInlining)]
internal static void ThrowArgumentException_FailedToGetMinimumSizeSpan(int minimumSize)
{
- throw GetArgumentOutOfRangeException(minimumSize);
+ throw GetArgumentException(minimumSize);
- static ArgumentException GetArgumentOutOfRangeException(int minimumSize)
+ static ArgumentException GetArgumentException(int minimumSize)
{
return new ArgumentException($"The 'IByteBuffer' could not provide an output buffer that is large enough to continue writing. Need at least {minimumSize} bytes.");
}
}
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ internal static void ThrowArgumentException_NoValueCannotBeAdded()
+ {
+ throw GetArgumentException();
+
+ static ArgumentException GetArgumentException()
+ {
+ return new ArgumentException("The NO_VALUE (" + LongPriorityQueue.NO_VALUE + ") cannot be added to the queue.");
+ }
+ }
+
#endregion
#region -- InvalidOperationException --
diff --git a/src/DotNetty.Buffers/LongLongHashMap.cs b/src/DotNetty.Buffers/LongLongHashMap.cs
new file mode 100644
index 00000000..1f827cf0
--- /dev/null
+++ b/src/DotNetty.Buffers/LongLongHashMap.cs
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2012 The Netty Project
+ *
+ * The Netty Project licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ * Copyright (c) 2020 The Dotnetty-Span-Fork Project (cuteant@outlook.com)
+ *
+ * https://github.com/cuteant/dotnetty-span-fork
+ *
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
+ */
+
+namespace DotNetty.Buffers
+{
+ using System;
+ using DotNetty.Common.Utilities;
+
+ internal sealed class LongLongHashMap
+ {
+ private const int MASK_TEMPLATE = ~1;
+ private int _mask;
+ private long[] _array;
+ private int _maxProbe;
+ private long _zeroVal;
+ private readonly long _emptyVal;
+
+ public LongLongHashMap(long emptyVal)
+ {
+ _emptyVal = emptyVal;
+ _zeroVal = emptyVal;
+ int initialSize = 32;
+ _array = new long[initialSize];
+ _mask = initialSize - 1;
+ ComputeMaskAndProbe();
+ }
+
+ public long Put(long key, long value)
+ {
+ if (0ul >= (ulong)key)
+ {
+ long prev = _zeroVal;
+ _zeroVal = value;
+ return prev;
+ }
+
+ for (; ; )
+ {
+ int index = Index(key);
+ for (int i = 0; i < _maxProbe; i++)
+ {
+ long existing = _array[index];
+ if (existing == key || 0ul >= (ulong)existing)
+ {
+ long prev = 0ul >= (ulong)existing ? _emptyVal : _array[index + 1];
+ _array[index] = key;
+ _array[index + 1] = value;
+ for (; i < _maxProbe; i++)
+ { // Nerf any existing misplaced entries.
+ index = index + 2 & _mask;
+ if (_array[index] == key)
+ {
+ _array[index] = 0;
+ prev = _array[index + 1];
+ break;
+ }
+ }
+ return prev;
+ }
+ index = index + 2 & _mask;
+ }
+ Expand(); // Grow array and re-hash.
+ }
+ }
+
+ public void Remove(long key)
+ {
+ if (0ul >= (ulong)key)
+ {
+ _zeroVal = _emptyVal;
+ return;
+ }
+ int index = Index(key);
+ for (int i = 0; i < _maxProbe; i++)
+ {
+ long existing = _array[index];
+ if (existing == key)
+ {
+ _array[index] = 0;
+ break;
+ }
+ index = index + 2 & _mask;
+ }
+ }
+
+ public long Get(long key)
+ {
+ if (0ul >= (ulong)key)
+ {
+ return _zeroVal;
+ }
+ int index = Index(key);
+ for (int i = 0; i < _maxProbe; i++)
+ {
+ long existing = _array[index];
+ if (existing == key)
+ {
+ return _array[index + 1];
+ }
+ index = index + 2 & _mask;
+ }
+ return _emptyVal;
+ }
+
+ private int Index(long key)
+ {
+ // Hash with murmur64, and mask.
+ key ^= key.RightUShift(33);
+ key *= -49064778989728563L; // 0xff51afd7ed558ccdL
+ key ^= key.RightUShift(33);
+ key *= -4265267296055464877L; // 0xc4ceb9fe1a85ec53L
+ key ^= key.RightUShift(33);
+ return (int)key & _mask;
+ }
+
+ private void Expand()
+ {
+ long[] prev = _array;
+ _array = new long[prev.Length * 2];
+ ComputeMaskAndProbe();
+ for (int i = 0; i < prev.Length; i += 2)
+ {
+ long key = prev[i];
+ if (key != 0)
+ {
+ long val = prev[i + 1];
+ Put(key, val);
+ }
+ }
+ }
+
+ private void ComputeMaskAndProbe()
+ {
+ int length = _array.Length;
+ _mask = length - 1 & MASK_TEMPLATE;
+ _maxProbe = (int)Math.Log(length);
+ }
+ }
+}
diff --git a/src/DotNetty.Buffers/LongPriorityQueue.cs b/src/DotNetty.Buffers/LongPriorityQueue.cs
new file mode 100644
index 00000000..5d043f83
--- /dev/null
+++ b/src/DotNetty.Buffers/LongPriorityQueue.cs
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2012 The Netty Project
+ *
+ * The Netty Project licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ * Copyright (c) 2020 The Dotnetty-Span-Fork Project (cuteant@outlook.com)
+ *
+ * https://github.com/cuteant/dotnetty-span-fork
+ *
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
+ */
+
+namespace DotNetty.Buffers
+{
+ using System;
+
+ internal sealed class LongPriorityQueue
+ {
+ private const int DefaultCapacity = 9;
+ public const long NO_VALUE = -1L;
+
+ private long[] _array = new long[DefaultCapacity];
+ private int _size;
+
+ public void Offer(long handle)
+ {
+ if (0ul >= (ulong)(NO_VALUE - handle))
+ {
+ ThrowHelper.ThrowArgumentException_NoValueCannotBeAdded();
+ }
+ _size++;
+ if (_size == _array.Length)
+ {
+ // Grow queue capacity.
+ Array.Resize(ref _array, 2 * _array.Length);
+ }
+ _array[_size] = handle;
+ Lift(_size);
+ }
+
+ public void Remove(long value)
+ {
+ for (int i = 1; i <= _size; i++)
+ {
+ if (_array[i] == value)
+ {
+ _array[i] = _array[_size--];
+ Lift(i);
+ Sink(i);
+ return;
+ }
+ }
+ }
+
+ public long Peek()
+ {
+ if (0u >= (uint)_size)
+ {
+ return NO_VALUE;
+ }
+ return _array[1];
+ }
+
+ public long Poll()
+ {
+ if (0u >= (uint)_size)
+ {
+ return NO_VALUE;
+ }
+ long val = _array[1];
+ _array[1] = _array[_size];
+ _array[_size] = 0;
+ _size--;
+ Sink(1);
+ return val;
+ }
+
+ public bool IsEmpty()
+ {
+ return 0u >= (uint)_size;
+ }
+
+ private void Lift(int index)
+ {
+ int parentIndex;
+ while (index > 1 && Subord(parentIndex = index >> 1, index))
+ {
+ Swap(index, parentIndex);
+ index = parentIndex;
+ }
+ }
+
+ private void Sink(int index)
+ {
+ int child;
+ while ((child = index << 1) <= _size)
+ {
+ if (child < _size && Subord(child, child + 1))
+ {
+ child++;
+ }
+ if (!Subord(index, child))
+ {
+ break;
+ }
+ Swap(index, child);
+ index = child;
+ }
+ }
+
+ private bool Subord(int a, int b)
+ {
+ return _array[a] > _array[b];
+ }
+
+ private void Swap(int a, int b)
+ {
+ long value = _array[a];
+ _array[a] = _array[b];
+ _array[b] = value;
+ }
+ }
+}
diff --git a/src/DotNetty.Buffers/PoolArena.cs b/src/DotNetty.Buffers/PoolArena.cs
index 1ca6fc55..0a19c2de 100644
--- a/src/DotNetty.Buffers/PoolArena.cs
+++ b/src/DotNetty.Buffers/PoolArena.cs
@@ -36,26 +36,17 @@ namespace DotNetty.Buffers
using DotNetty.Common.Internal;
using DotNetty.Common.Utilities;
- enum SizeClass
+ internal enum SizeClass
{
- Tiny,
Small,
Normal
}
- abstract class PoolArena : IPoolArenaMetric
+ internal abstract class PoolArena : SizeClasses, IPoolArenaMetric
{
- internal const int NumTinySubpagePools = 512 >> 4;
-
internal readonly PooledByteBufferAllocator Parent;
- private readonly int _maxOrder;
- internal readonly int PageSize;
- internal readonly int PageShifts;
- internal readonly int ChunkSize;
- internal readonly int SubpageOverflowMask;
internal readonly int NumSmallSubpagePools;
- private readonly PoolSubpage[] _tinySubpagePools;
private readonly PoolSubpage[] _smallSubpagePools;
private readonly PoolChunkList _q050;
@@ -71,13 +62,10 @@ abstract class PoolArena : IPoolArenaMetric
private long _allocationsNormal;
// We need to use the LongCounter here as this is not guarded via synchronized block.
- private long _allocationsTiny;
-
private long _allocationsSmall;
private long _allocationsHuge;
private long _activeBytesHuge;
- private long _deallocationsTiny;
private long _deallocationsSmall;
private long _deallocationsNormal;
@@ -90,30 +78,16 @@ abstract class PoolArena : IPoolArenaMetric
// TODO: Test if adding padding helps under contention
//private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
- protected PoolArena(
- PooledByteBufferAllocator parent,
- int pageSize,
- int maxOrder,
- int pageShifts,
- int chunkSize)
+ protected PoolArena(PooledByteBufferAllocator parent, int pageSize, int pageShifts, int chunkSize)
+ : base(pageSize, pageShifts, chunkSize, 0)
{
Parent = parent;
- PageSize = pageSize;
- _maxOrder = maxOrder;
- PageShifts = pageShifts;
- ChunkSize = chunkSize;
- SubpageOverflowMask = ~(pageSize - 1);
- _tinySubpagePools = NewSubpagePoolArray(NumTinySubpagePools);
- for (int i = 0; i < _tinySubpagePools.Length; i++)
- {
- _tinySubpagePools[i] = NewSubpagePoolHead(pageSize);
- }
- NumSmallSubpagePools = pageShifts - 9;
+ NumSmallSubpagePools = _nSubpages;
_smallSubpagePools = NewSubpagePoolArray(NumSmallSubpagePools);
for (int i = 0; i < _smallSubpagePools.Length; i++)
{
- _smallSubpagePools[i] = NewSubpagePoolHead(pageSize);
+ _smallSubpagePools[i] = NewSubpagePoolHead();
}
_q100 = new PoolChunkList(this, null, 100, int.MaxValue, chunkSize);
@@ -140,15 +114,15 @@ protected PoolArena(
_chunkListMetrics = metrics;
}
- PoolSubpage NewSubpagePoolHead(int pageSize)
+ private PoolSubpage NewSubpagePoolHead()
{
- var head = new PoolSubpage(pageSize);
+ var head = new PoolSubpage();
head.Prev = head;
head.Next = head;
return head;
}
- PoolSubpage[] NewSubpagePoolArray(int size) => new PoolSubpage[size];
+ private PoolSubpage[] NewSubpagePoolArray(int size) => new PoolSubpage[size];
internal abstract bool IsDirect { get; }
@@ -159,137 +133,103 @@ internal PooledByteBuffer Allocate(PoolThreadCache cache, int reqCapacity,
return buf;
}
- internal static int TinyIdx(int normCapacity) => normCapacity.RightUShift(4);
-
- internal static int SmallIdx(int normCapacity)
+ private void Allocate(PoolThreadCache cache, PooledByteBuffer buf, int reqCapacity)
{
- int tableIdx = 0;
- int i = normCapacity.RightUShift(10);
- while (i != 0)
+ int sizeIdx = Size2SizeIdx(reqCapacity);
+
+ if (sizeIdx <= _smallMaxSizeIdx)
{
- i = i.RightUShift(1);
- tableIdx++;
+ TCacheAllocateSmall(cache, buf, reqCapacity, sizeIdx);
+ }
+ else if (sizeIdx < _nSizes)
+ {
+ TCacheAllocateNormal(cache, buf, reqCapacity, sizeIdx);
+ }
+ else
+ {
+ //int normCapacity = directMemoryCacheAlignment > 0
+ // ? NormalizeSize(reqCapacity) : reqCapacity;
+ int normCapacity = reqCapacity;
+ // Huge allocations are never served via the cache so just call allocateHuge
+ AllocateHuge(buf, normCapacity);
}
- return tableIdx;
}
- // capacity < pageSize
- internal bool IsTinyOrSmall(int normCapacity) => 0u >= (uint)(normCapacity & SubpageOverflowMask);
-
- // normCapacity < 512
- internal static bool IsTiny(int normCapacity) => 0u >= (uint)(normCapacity & 0xFFFFFE00);
-
- void Allocate(PoolThreadCache cache, PooledByteBuffer buf, int reqCapacity)
+ private void TCacheAllocateSmall(PoolThreadCache cache, PooledByteBuffer buf, int reqCapacity, int sizeIdx)
{
- int normCapacity = NormalizeCapacity(reqCapacity);
- if (IsTinyOrSmall(normCapacity))
+ if (cache.AllocateSmall(this, buf, reqCapacity, sizeIdx))
{
- // capacity < pageSize
- int tableIdx;
- PoolSubpage[] table;
- bool tiny = IsTiny(normCapacity);
- if (tiny)
- {
- // < 512
- if (cache.AllocateTiny(this, buf, reqCapacity, normCapacity))
- {
- // was able to allocate out of the cache so move on
- return;
- }
- tableIdx = TinyIdx(normCapacity);
- table = _tinySubpagePools;
- }
- else
- {
- if (cache.AllocateSmall(this, buf, reqCapacity, normCapacity))
- {
- // was able to allocate out of the cache so move on
- return;
- }
- tableIdx = SmallIdx(normCapacity);
- table = _smallSubpagePools;
- }
-
- PoolSubpage head = table[tableIdx];
-
- //
- // Synchronize on the head. This is needed as {@link PoolSubpage#allocate()} and
- // {@link PoolSubpage#free(int)} may modify the doubly linked list as well.
- //
- lock (head)
- {
- PoolSubpage s = head.Next;
- if (s != head)
- {
- Debug.Assert(s.DoNotDestroy && s.ElemSize == normCapacity);
- long handle = s.Allocate();
- Debug.Assert(handle >= 0);
- s.Chunk.InitBufWithSubpage(buf, handle, reqCapacity, cache);
- IncTinySmallAllocation(tiny);
- return;
- }
- }
-
- lock (this)
- {
- AllocateNormal(buf, reqCapacity, normCapacity, cache);
- }
-
- IncTinySmallAllocation(tiny);
+ // was able to allocate out of the cache so move on
return;
}
- if (normCapacity <= ChunkSize)
+
+ // Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and
+ // {@link PoolChunk#free(long)} may modify the doubly linked list as well.
+ PoolSubpage head = _smallSubpagePools[sizeIdx];
+ bool needsNormalAllocation;
+ lock (head)
{
- if (cache.AllocateNormal(this, buf, reqCapacity, normCapacity))
+ PoolSubpage s = head.Next;
+ needsNormalAllocation = s == head;
+ if (!needsNormalAllocation)
{
- // was able to allocate out of the cache so move on
- return;
+ Debug.Assert(s.DoNotDestroy && s.ElemSize == SizeIdx2Size(sizeIdx));
+ long handle = s.Allocate();
+ Debug.Assert(handle >= 0);
+ s.Chunk.InitBufWithSubpage(buf, handle, reqCapacity, cache);
}
+ }
+ if (needsNormalAllocation)
+ {
lock (this)
{
- AllocateNormal(buf, reqCapacity, normCapacity, cache);
- _allocationsNormal++;
+ AllocateNormal(buf, reqCapacity, sizeIdx, cache);
}
}
- else
+
+ IncSmallAllocation();
+ }
+
+ private void TCacheAllocateNormal(PoolThreadCache cache, PooledByteBuffer buf, int reqCapacity, int sizeIdx)
+ {
+ if (cache.AllocateNormal(this, buf, reqCapacity, sizeIdx))
{
- // Huge allocations are never served via the cache so just call allocateHuge
- AllocateHuge(buf, reqCapacity);
+ // was able to allocate out of the cache so move on
+ return;
+ }
+ lock (this)
+ {
+ AllocateNormal(buf, reqCapacity, sizeIdx, cache);
+ ++_allocationsNormal;
}
}
- void AllocateNormal(PooledByteBuffer buf, int reqCapacity, int normCapacity, PoolThreadCache threadCache)
+ // Method must be called inside synchronized(this) { ... } block
+ private void AllocateNormal(PooledByteBuffer buf, int reqCapacity, int sizeIdx, PoolThreadCache threadCache)
{
- if (_q050.Allocate(buf, reqCapacity, normCapacity, threadCache) ||
- _q025.Allocate(buf, reqCapacity, normCapacity, threadCache) ||
- _q000.Allocate(buf, reqCapacity, normCapacity, threadCache) ||
- _qInit.Allocate(buf, reqCapacity, normCapacity, threadCache) ||
- _q075.Allocate(buf, reqCapacity, normCapacity, threadCache))
+ if (_q050.Allocate(buf, reqCapacity, sizeIdx, threadCache) ||
+ _q025.Allocate(buf, reqCapacity, sizeIdx, threadCache) ||
+ _q000.Allocate(buf, reqCapacity, sizeIdx, threadCache) ||
+ _qInit.Allocate(buf, reqCapacity, sizeIdx, threadCache) ||
+ _q075.Allocate(buf, reqCapacity, sizeIdx, threadCache))
{
return;
}
// Add a new chunk.
- PoolChunk c = NewChunk(PageSize, _maxOrder, PageShifts, ChunkSize);
- bool success = c.Allocate(buf, reqCapacity, normCapacity, threadCache);
+ PoolChunk c = NewChunk(PageSize, _nPSizes, PageShifts, ChunkSize);
+ bool success = c.Allocate(buf, reqCapacity, sizeIdx, threadCache);
Debug.Assert(success);
_qInit.Add(c);
}
- void IncTinySmallAllocation(bool tiny)
+ private void IncSmallAllocation()
{
- if (tiny)
- {
- _ = Interlocked.Increment(ref _allocationsTiny);
- }
- else
- {
- _ = Interlocked.Increment(ref _allocationsSmall);
- }
+ Interlocked.Increment(ref _allocationsSmall);
}
- void AllocateHuge(PooledByteBuffer buf, int reqCapacity)
+ private void AllocateHuge(PooledByteBuffer buf, int reqCapacity)
{
PoolChunk chunk = NewUnpooledChunk(reqCapacity);
_ = Interlocked.Add(ref _activeBytesHuge, chunk.ChunkSize);
@@ -308,28 +248,23 @@ internal void Free(PoolChunk chunk, long handle, int normCapacity, PoolThread
}
else
{
- SizeClass sizeClass = SizeClass(normCapacity);
+ SizeClass sizeClass = SizeClass(handle);
if (cache is object && cache.Add(this, chunk, handle, normCapacity, sizeClass))
{
// cached so not free it.
return;
}
- FreeChunk(chunk, handle, sizeClass, false);
+ FreeChunk(chunk, handle, normCapacity, sizeClass, false);
}
}
- SizeClass SizeClass(int normCapacity)
+ private SizeClass SizeClass(long handle)
{
- if (!IsTinyOrSmall(normCapacity))
- {
- return Buffers.SizeClass.Normal;
- }
-
- return IsTiny(normCapacity) ? Buffers.SizeClass.Tiny : Buffers.SizeClass.Small;
+ return PoolChunk.IsSubpage(handle) ? Buffers.SizeClass.Small : Buffers.SizeClass.Normal;
}
- internal void FreeChunk(PoolChunk chunk, long handle, SizeClass sizeClass, bool finalizer)
+ internal void FreeChunk(PoolChunk chunk, long handle, int normCapacity, SizeClass sizeClass, bool finalizer)
{
bool destroyChunk;
lock (this)
@@ -344,14 +279,11 @@ internal void FreeChunk(PoolChunk chunk, long handle, SizeClass sizeClass, bo
case Buffers.SizeClass.Small:
++_deallocationsSmall;
break;
- case Buffers.SizeClass.Tiny:
- ++_deallocationsTiny;
- break;
default:
ThrowHelper.ThrowArgumentOutOfRangeException(); break;
}
}
- destroyChunk = !chunk.Parent.Free(chunk, handle);
+ destroyChunk = !chunk.Parent.Free(chunk, handle, normCapacity);
}
if (destroyChunk)
{
@@ -360,64 +292,9 @@ internal void FreeChunk(PoolChunk chunk, long handle, SizeClass sizeClass, bo
}
}
- internal PoolSubpage FindSubpagePoolHead(int elemSize)
+ internal PoolSubpage FindSubpagePoolHead(int sizeIdx)
{
- int tableIdx;
- PoolSubpage[] table;
- if (IsTiny(elemSize))
- {
- // < 512
- tableIdx = TinyIdx(elemSize);
- table = _tinySubpagePools;
- }
- else
- {
- tableIdx = SmallIdx(elemSize);
- table = _smallSubpagePools;
- }
-
- return table[tableIdx];
- }
-
- internal int NormalizeCapacity(int reqCapacity)
- {
- uint ureqCapacity = (uint)reqCapacity;
- if (ureqCapacity > SharedConstants.TooBigOrNegative) { ThrowHelper.ThrowArgumentException_PositiveOrZero(reqCapacity, ExceptionArgument.reqCapacity); }
-
- if (ureqCapacity >= (uint)ChunkSize)
- {
- return reqCapacity;
- }
-
- if (!IsTiny(reqCapacity))
- {
- // >= 512
- // Doubled
-
- int normalizedCapacity = reqCapacity;
- normalizedCapacity--;
- normalizedCapacity |= normalizedCapacity.RightUShift(1);
- normalizedCapacity |= normalizedCapacity.RightUShift(2);
- normalizedCapacity |= normalizedCapacity.RightUShift(4);
- normalizedCapacity |= normalizedCapacity.RightUShift(8);
- normalizedCapacity |= normalizedCapacity.RightUShift(16);
- normalizedCapacity++;
-
- if (normalizedCapacity < 0)
- {
- normalizedCapacity = normalizedCapacity.RightUShift(1);
- }
-
- return normalizedCapacity;
- }
-
- // Quantum-spaced
- if (0u >= (uint)(reqCapacity & 15))
- {
- return reqCapacity;
- }
-
- return (reqCapacity & ~15) + 16;
+ return _smallSubpagePools[sizeIdx];
}
internal void Reallocate(PooledByteBuffer buf, int newCapacity, bool freeOldMemory)
@@ -462,19 +339,19 @@ internal void Reallocate(PooledByteBuffer buf, int newCapacity, bool freeOldM
public int NumThreadCaches => Volatile.Read(ref _numThreadCaches);
- public int NumTinySubpages => _tinySubpagePools.Length;
+ public int NumTinySubpages => 0;
public int NumSmallSubpages => _smallSubpagePools.Length;
public int NumChunkLists => _chunkListMetrics.Count;
- public IReadOnlyList TinySubpages => SubPageMetricList(_tinySubpagePools);
+ public IReadOnlyList TinySubpages => EmptyArray.Instance;
public IReadOnlyList SmallSubpages => SubPageMetricList(_smallSubpagePools);
public IReadOnlyList ChunkLists => _chunkListMetrics;
- static List SubPageMetricList(PoolSubpage[] pages)
+ private static List SubPageMetricList(PoolSubpage[] pages)
{
var metrics = new List();
foreach (PoolSubpage head in pages)
@@ -507,11 +384,11 @@ public long NumAllocations
allocsNormal = _allocationsNormal;
}
- return NumTinyAllocations + NumSmallAllocations + allocsNormal + NumHugeAllocations;
+ return NumSmallAllocations + allocsNormal + NumHugeAllocations;
}
}
- public long NumTinyAllocations => Volatile.Read(ref _allocationsTiny);
+ public long NumTinyAllocations => 0;
public long NumSmallAllocations => Volatile.Read(ref _allocationsSmall);
@@ -533,23 +410,14 @@ public long NumDeallocations
long deallocs;
lock (this)
{
- deallocs = _deallocationsTiny + _deallocationsSmall + _deallocationsNormal;
+ deallocs = _deallocationsSmall + _deallocationsNormal;
}
return deallocs + NumHugeDeallocations;
}
}
- public long NumTinyDeallocations
- {
- get
- {
- lock (this)
- {
- return _deallocationsTiny;
- }
- }
- }
+ public long NumTinyDeallocations => 0L;
public long NumSmallDeallocations
{
@@ -581,17 +449,16 @@ public long NumActiveAllocations
{
get
{
- long val = NumTinyAllocations + NumSmallAllocations + NumHugeAllocations
- - NumHugeDeallocations;
+ long val = NumSmallAllocations + NumHugeAllocations - NumHugeDeallocations;
lock (this)
{
- val += _allocationsNormal - (_deallocationsTiny + _deallocationsSmall + _deallocationsNormal);
+ val += _allocationsNormal - (_deallocationsSmall + _deallocationsNormal);
}
return Math.Max(val, 0);
}
}
- public long NumActiveTinyAllocations => Math.Max(NumTinyAllocations - NumTinyDeallocations, 0);
+ public long NumActiveTinyAllocations => 0L;
public long NumActiveSmallAllocations => Math.Max(NumSmallAllocations - NumSmallDeallocations, 0);
@@ -630,7 +497,7 @@ public long NumActiveBytes
}
}
- protected abstract PoolChunk NewChunk(int pageSize, int maxOrder, int pageShifts, int chunkSize);
+ protected abstract PoolChunk NewChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize);
protected abstract PoolChunk NewUnpooledChunk(int capacity);
@@ -669,9 +536,6 @@ public override string ToString()
.Append(StringUtil.Newline)
.Append(_q100)
.Append(StringUtil.Newline)
- .Append("tiny subpages:");
- AppendPoolSubPages(buf, _tinySubpagePools);
- _ = buf.Append(StringUtil.Newline)
.Append("small subpages:");
AppendPoolSubPages(buf, _smallSubpagePools);
_ = buf.Append(StringUtil.Newline);
@@ -680,7 +544,7 @@ public override string ToString()
}
}
- static void AppendPoolSubPages(StringBuilder buf, PoolSubpage[] subpages)
+ private static void AppendPoolSubPages(StringBuilder buf, PoolSubpage[] subpages)
{
for (int i = 0; i < subpages.Length; i++)
{
@@ -709,11 +573,10 @@ static void AppendPoolSubPages(StringBuilder buf, PoolSubpage[] subpages)
~PoolArena()
{
DestroyPoolSubPages(_smallSubpagePools);
- DestroyPoolSubPages(_tinySubpagePools);
DestroyPoolChunkLists(_qInit, _q000, _q025, _q050, _q075, _q100);
}
- static void DestroyPoolSubPages(PoolSubpage[] pages)
+ private static void DestroyPoolSubPages(PoolSubpage[] pages)
{
for (int i = 0; i < pages.Length; i++)
{
@@ -721,7 +584,7 @@ static void DestroyPoolSubPages(PoolSubpage[] pages)
}
}
- void DestroyPoolChunkLists(params PoolChunkList[] chunkLists)
+ private void DestroyPoolChunkLists(params PoolChunkList[] chunkLists)
{
for (int i = 0; i < chunkLists.Length; i++)
{
@@ -730,19 +593,19 @@ void DestroyPoolChunkLists(params PoolChunkList[] chunkLists)
}
}
- sealed class HeapArena : PoolArena
+ internal sealed class HeapArena : PoolArena
{
- public HeapArena(PooledByteBufferAllocator parent, int pageSize, int maxOrder, int pageShifts, int chunkSize)
- : base(parent, pageSize, maxOrder, pageShifts, chunkSize)
+ public HeapArena(PooledByteBufferAllocator parent, int pageSize, int pageShifts, int chunkSize)
+ : base(parent, pageSize, pageShifts, chunkSize)
{
}
- static byte[] NewByteArray(int size) => new byte[size];
+ private static byte[] NewByteArray(int size) => new byte[size];
internal override bool IsDirect => false;
- protected override PoolChunk NewChunk(int pageSize, int maxOrder, int pageShifts, int chunkSize) =>
- new PoolChunk(this, NewByteArray(chunkSize), pageSize, maxOrder, pageShifts, chunkSize, 0, IntPtr.Zero);
+ protected override PoolChunk NewChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize) =>
+ new PoolChunk(this, NewByteArray(chunkSize), pageSize, pageShifts, chunkSize, maxPageIdx, 0, IntPtr.Zero);
protected override PoolChunk NewUnpooledChunk(int capacity) =>
new PoolChunk(this, NewByteArray(capacity), capacity, 0, IntPtr.Zero);
@@ -766,25 +629,25 @@ protected override void MemoryCopy(byte[] src, int srcOffset, byte[] dst, int ds
// 1、IByteBuffer直接操作数组性能更高,参考 System.IO.Pipelines 和 System.Buffers 的内部实现
// 2、IByetBuffer实现 IReferenceCounted 接口,IMemoryOwner的管理会更加混乱
// 3、现在 IByteBuffer 已经实现了 IBufferWriter 接口
- sealed class DirectArena : PoolArena
+ internal sealed class DirectArena : PoolArena
{
private readonly List _memoryChunks;
- public DirectArena(PooledByteBufferAllocator parent, int pageSize, int maxOrder, int pageShifts, int chunkSize)
- : base(parent, pageSize, maxOrder, pageShifts, chunkSize)
+ public DirectArena(PooledByteBufferAllocator parent, int pageSize, int pageShifts, int chunkSize)
+ : base(parent, pageSize, pageShifts, chunkSize)
{
_memoryChunks = new List();
}
- static MemoryChunk NewMemoryChunk(int size) => new MemoryChunk(size);
+ private static MemoryChunk NewMemoryChunk(int size) => new MemoryChunk(size);
internal override bool IsDirect => true;
- protected override PoolChunk NewChunk(int pageSize, int maxOrder, int pageShifts, int chunkSize)
+ protected override PoolChunk NewChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize)
{
MemoryChunk memoryChunk = NewMemoryChunk(chunkSize);
_memoryChunks.Add(memoryChunk);
- var chunk = new PoolChunk(this, memoryChunk.Bytes, pageSize, maxOrder, pageShifts, chunkSize, 0, memoryChunk.NativePointer);
+ var chunk = new PoolChunk(this, memoryChunk.Bytes, pageSize, pageShifts, chunkSize, maxPageIdx, 0, memoryChunk.NativePointer);
return chunk;
}
@@ -816,7 +679,7 @@ protected internal override void DestroyChunk(PoolChunk chunk)
}
}
- sealed class MemoryChunk : IDisposable
+ private sealed class MemoryChunk : IDisposable
{
internal byte[] Bytes;
private GCHandle _handle;
@@ -829,7 +692,7 @@ internal MemoryChunk(int size)
NativePointer = _handle.AddrOfPinnedObject();
}
- void Release()
+ private void Release()
{
if (_handle.IsAllocated)
{
@@ -858,7 +721,7 @@ public void Dispose()
}
}
- sealed class OwnedPinnedBlock : MemoryManager, IPoolMemoryOwner
+ private sealed class OwnedPinnedBlock : MemoryManager, IPoolMemoryOwner
{
private byte[] _array;
private IntPtr _origin;
diff --git a/src/DotNetty.Buffers/PoolChunk.cs b/src/DotNetty.Buffers/PoolChunk.cs
index f143d9ef..21e306fd 100644
--- a/src/DotNetty.Buffers/PoolChunk.cs
+++ b/src/DotNetty.Buffers/PoolChunk.cs
@@ -26,81 +26,130 @@
namespace DotNetty.Buffers
{
using System;
+ using System.Collections.Generic;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using DotNetty.Common.Internal;
- using DotNetty.Common.Utilities;
///
- /// Description of algorithm for PageRun/PoolSubpage allocation from PoolChunk
- /// Notation: The following terms are important to understand the code
- /// > page - a page is the smallest unit of memory chunk that can be allocated
- /// > chunk - a chunk is a collection of pages
- /// > in this code chunkSize = 2^{maxOrder} /// pageSize
- /// To begin we allocate a byte array of size = chunkSize
- /// Whenever a ByteBuf of given size needs to be created we search for the first position
- /// in the byte array that has enough empty space to accommodate the requested size and
- /// return a (long) handle that encodes this offset information, (this memory segment is then
- /// marked as reserved so it is always used by exactly one ByteBuf and no more)
- /// For simplicity all sizes are normalized according to PoolArena#normalizeCapacity method
- /// This ensures that when we request for memory segments of size >= pageSize the normalizedCapacity
- /// equals the next nearest power of 2
- /// To search for the first offset in chunk that has at least requested size available we construct a
- /// complete balanced binary tree and store it in an array (just like heaps) - memoryMap
- /// The tree looks like this (the size of each node being mentioned in the parenthesis)
- /// depth=0 1 node (chunkSize)
- /// depth=1 2 nodes (chunkSize/2)
- /// ..
- /// ..
- /// depth=d 2^d nodes (chunkSize/2^d)
- /// ..
- /// depth=maxOrder 2^maxOrder nodes (chunkSize/2^{maxOrder} = pageSize)
- /// depth=maxOrder is the last level and the leafs consist of pages
- /// With this tree available searching in chunkArray translates like this:
- /// To allocate a memory segment of size chunkSize/2^k we search for the first node (from left) at height k
- /// which is unused
- /// Algorithm:
- /// ----------
- /// Encode the tree in memoryMap with the notation
- /// memoryMap[id] = x => in the subtree rooted at id, the first node that is free to be allocated
- /// is at depth x (counted from depth=0) i.e., at depths [depth_of_id, x), there is no node that is free
- /// As we allocate and free nodes, we update values stored in memoryMap so that the property is maintained
- /// Initialization -
- /// In the beginning we construct the memoryMap array by storing the depth of a node at each node
- /// i.e., memoryMap[id] = depth_of_id
- /// Observations:
- /// -------------
- /// 1) memoryMap[id] = depth_of_id => it is free / unallocated
- /// 2) memoryMap[id] > depth_of_id => at least one of its child nodes is allocated, so we cannot allocate it, but
- /// some of its children can still be allocated based on their availability
- /// 3) memoryMap[id] = maxOrder + 1 => the node is fully allocated and thus none of its children can be allocated, it
- /// is thus marked as unusable
- /// Algorithm: [allocateNode(d) => we want to find the first node (from left) at height h that can be allocated]
- /// ----------
- /// 1) start at root (i.e., depth = 0 or id = 1)
- /// 2) if memoryMap[1] > d => cannot be allocated from this chunk
- /// 3) if left node value <= h; we can allocate from left subtree so move to left and repeat until found
- /// 4) else try in right subtree
- /// Algorithm: [allocateRun(size)]
- /// ----------
- /// 1) Compute d = log_2(chunkSize/size)
- /// 2) Return allocateNode(d)
- /// Algorithm: [allocateSubpage(size)]
- /// ----------
- /// 1) use allocateNode(maxOrder) to find an empty (i.e., unused) leaf (i.e., page)
- /// 2) use this handle to construct the PoolSubpage object or if it already exists just call init(normCapacity)
- /// note that this PoolSubpage object is added to subpagesPool in the PoolArena when we init() it
- /// Note:
- /// -----
- /// In the implementation for improving cache coherence,
- /// we store 2 pieces of information depth_of_id and x as two byte values in memoryMap and depthMap respectively
+ /// Description of algorithm for PageRun/PoolSubpage allocation from PoolChunk
///
- /// memoryMap[id] = depth_of_id is defined above
- /// depthMap[id] = x indicates that the first node which is free to be allocated is at depth x(from root)
+ /// Notation: The following terms are important to understand the code
+ /// > page - a page is the smallest unit of memory chunk that can be allocated
+ /// > run - a run is a collection of pages
+ /// > chunk - a chunk is a collection of runs
+ /// > in this code chunkSize = maxPages * pageSize
+ ///
+ /// To begin we allocate a byte array of size = chunkSize
+ /// Whenever a ByteBuf of given size needs to be created we search for the first position
+ /// in the byte array that has enough empty space to accommodate the requested size and
+ /// return a (long) handle that encodes this offset information, (this memory segment is then
+ /// marked as reserved so it is always used by exactly one ByteBuf and no more)
+ ///
+ /// For simplicity all sizes are normalized according to the PoolArena.Size2SizeIdx method.
+ /// This ensures that when we request for memory segments of size > pageSize the normalizedCapacity
+ /// equals the next nearest size in the SizeClasses table.
+ ///
+ ///
+ /// A chunk has the following layout:
+ ///
+ /// /-----------------\
+ /// | run |
+ /// | |
+ /// | |
+ /// |-----------------|
+ /// | run |
+ /// | |
+ /// |-----------------|
+ /// | unallocated |
+ /// | (freed) |
+ /// | |
+ /// |-----------------|
+ /// | subpage |
+ /// |-----------------|
+ /// | unallocated |
+ /// | (freed) |
+ /// | ... |
+ /// | ... |
+ /// | ... |
+ /// | |
+ /// | |
+ /// | |
+ /// \-----------------/
+ ///
+ ///
+ /// handle:
+ /// -------
+ /// a handle is a long number, the bit layout of a run looks like:
+ ///
+ /// oooooooo ooooooos ssssssss ssssssue bbbbbbbb bbbbbbbb bbbbbbbb bbbbbbbb
+ ///
+ /// o: runOffset (page offset in the chunk), 15bit
+ /// s: size (number of pages) of this run, 15bit
+ /// u: isUsed?, 1bit
+ /// e: isSubpage?, 1bit
+ /// b: bitmapIdx of subpage, zero if it's not subpage, 32bit
+ ///
+ /// runsAvailMap:
+ /// ------
+ /// a map which manages all runs (used and not in used).
+ /// For each run, the first runOffset and last runOffset are stored in runsAvailMap.
+ /// key: runOffset
+ /// value: handle
+ ///
+ /// runsAvail:
+ /// ----------
+ /// an array of LongPriorityQueue.
+ /// Each queue manages same size of runs.
+ /// Runs are sorted by offset, so that we always allocate runs with smaller offset.
+ ///
+ ///
+ /// Algorithm:
+ /// ----------
+ ///
+ /// As we allocate runs, we update values stored in runsAvailMap and runsAvail so that the property is maintained.
+ ///
+ /// Initialization -
+ /// In the beginning we store the initial run which is the whole chunk.
+ /// The initial run:
+ /// runOffset = 0
+ /// size = chunkSize
+ /// isUsed = no
+ /// isSubpage = no
+ /// bitmapIdx = 0
+ ///
+ ///
+ /// Algorithm: [allocateRun(size)]
+ /// ----------
+ /// 1) find the first available run in runsAvail according to size
+ /// 2) if pages of run is larger than request pages then split it, and save the tailing run
+ /// for later using
+ ///
+ /// Algorithm: [allocateSubpage(size)]
+ /// ----------
+ /// 1) find a not full subpage according to size.
+ /// if it already exists just return, otherwise allocate a new PoolSubpage and call init()
+ /// note that this subpage object is added to subpagesPool in the PoolArena when we init() it
+ /// 2) call subpage.allocate()
+ ///
+ /// Algorithm: [free(handle, length, nioBuffer)]
+ /// ----------
+ /// 1) if it is a subpage, return the slab back into this subpage
+ /// 2) if the subpage is not used or it is a run, then start free this run
+ /// 3) merge continuous avail runs
+ /// 4) save the merged run
///
- sealed class PoolChunk : IPoolChunkMetric
+ internal sealed class PoolChunk : IPoolChunkMetric
{
- const int IntegerSizeMinusOne = IntegerExtensions.SizeInBits - 1;
+ private const int SIZE_BIT_LENGTH = 15;
+ private const int INUSED_BIT_LENGTH = 1;
+ private const int SUBPAGE_BIT_LENGTH = 1;
+ private const int BITMAP_IDX_BIT_LENGTH = 32;
+
+ internal const int IS_SUBPAGE_SHIFT = BITMAP_IDX_BIT_LENGTH;
+ internal const int IS_USED_SHIFT = SUBPAGE_BIT_LENGTH + IS_SUBPAGE_SHIFT;
+ internal const int SIZE_SHIFT = INUSED_BIT_LENGTH + IS_USED_SHIFT;
+ internal const int RUN_OFFSET_SHIFT = SIZE_BIT_LENGTH + SIZE_SHIFT;
internal readonly PoolArena Arena;
internal readonly T Memory;
@@ -108,19 +157,18 @@ sealed class PoolChunk : IPoolChunkMetric
internal readonly int Offset;
internal readonly IntPtr NativePointer;
- private readonly sbyte[] _memoryMap;
- private readonly sbyte[] _depthMap;
+ /// store the first page and last page of each avail run
+ private LongLongHashMap _runsAvailMap;
+
+ /// manage all avail runs
+ private LongPriorityQueue[] _runsAvail;
+
+ /// manage all subpages in this chunk
private readonly PoolSubpage[] _subpages;
- /** Used to determine if the requested capacity is equal to or greater than pageSize. */
- private readonly int _subpageOverflowMask;
+
private readonly int _pageSize;
private readonly int _pageShifts;
- private readonly int _maxOrder;
private readonly int _chunkSize;
- private readonly int _log2ChunkSize;
- private readonly int _maxSubpageAllocs;
- /** Used to mark memory as unusable */
- private readonly sbyte _unusable;
internal int _freeBytes;
@@ -131,49 +179,29 @@ sealed class PoolChunk : IPoolChunkMetric
// TODO: Test if adding padding helps under contention
//private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
- internal PoolChunk(PoolArena arena, T memory, int pageSize, int maxOrder, int pageShifts, int chunkSize, int offset, IntPtr pointer)
+ internal PoolChunk(PoolArena arena, T memory, int pageSize, int pageShifts, int chunkSize, int maxPageIdx, int offset, IntPtr pointer)
{
- if (maxOrder >= 30) { ThrowHelper.ThrowArgumentException_CheckMaxOrder30(maxOrder); }
-
Unpooled = false;
Arena = arena;
Memory = memory;
_pageSize = pageSize;
_pageShifts = pageShifts;
- _maxOrder = maxOrder;
_chunkSize = chunkSize;
Offset = offset;
NativePointer = pointer;
- _unusable = (sbyte)(maxOrder + 1);
- _log2ChunkSize = Log2(chunkSize);
- _subpageOverflowMask = ~(pageSize - 1);
_freeBytes = chunkSize;
- Debug.Assert(maxOrder < 30, "maxOrder should be < 30, but is: " + maxOrder);
- _maxSubpageAllocs = 1 << maxOrder;
+ _runsAvail = NewRunsAvailqueueArray(maxPageIdx);
+ _runsAvailMap = new LongLongHashMap(LongPriorityQueue.NO_VALUE);
+ _subpages = new PoolSubpage[chunkSize >> pageShifts];
- // Generate the memory map.
- _memoryMap = new sbyte[_maxSubpageAllocs << 1];
- _depthMap = new sbyte[_memoryMap.Length];
- int memoryMapIndex = 1;
- for (int d = 0; d <= maxOrder; ++d)
- {
- // move down the tree one level at a time
- int depth = 1 << d;
- for (int p = 0; p < depth; ++p)
- {
- // in each level traverse left to right and set value to the depth of subtree
- _memoryMap[memoryMapIndex] = (sbyte)d;
- _depthMap[memoryMapIndex] = (sbyte)d;
- memoryMapIndex++;
- }
- }
-
- _subpages = NewSubpageArray(_maxSubpageAllocs);
+ //insert initial run, offset = 0, pages = chunkSize / pageSize
+ int pages = chunkSize >> pageShifts;
+ long initHandle = (long)pages << SIZE_SHIFT;
+ InsertAvailRun(0, pages, initHandle);
}
- /** Creates a special chunk that is not pooled. */
-
+ /// Creates a special chunk that is not pooled.
internal PoolChunk(PoolArena arena, T memory, int size, int offset, IntPtr pointer)
{
Unpooled = true;
@@ -181,20 +209,76 @@ internal PoolChunk(PoolArena arena, T memory, int size, int offset, IntPtr po
Memory = memory;
Offset = offset;
NativePointer = pointer;
- _memoryMap = null;
- _depthMap = null;
- _subpages = null;
- _subpageOverflowMask = 0;
_pageSize = 0;
_pageShifts = 0;
- _maxOrder = 0;
- _unusable = (sbyte)(_maxOrder + 1);
+ _runsAvailMap = null;
+ _runsAvail = null;
+ _subpages = null;
_chunkSize = size;
- _log2ChunkSize = Log2(_chunkSize);
- _maxSubpageAllocs = 0;
}
- PoolSubpage[] NewSubpageArray(int size) => new PoolSubpage[size];
+ private static LongPriorityQueue[] NewRunsAvailqueueArray(int size)
+ {
+ var queueArray = new LongPriorityQueue[size];
+ for (int i = 0; i < queueArray.Length; i++)
+ {
+ queueArray[i] = new LongPriorityQueue();
+ }
+ return queueArray;
+ }
+
+ private void InsertAvailRun(int runOffset, int pages, long handle)
+ {
+ int pageIdxFloor = Arena.Pages2PageIdxFloor(pages);
+ var queue = _runsAvail[pageIdxFloor];
+ queue.Offer(handle);
+
+ // insert first page of run
+ InsertAvailRun0(runOffset, handle);
+ if (pages > 1)
+ {
+ // insert last page of run
+ InsertAvailRun0(LastPage(runOffset, pages), handle);
+ }
+ }
+
+ private void InsertAvailRun0(int runOffset, long handle)
+ {
+ var pre = _runsAvailMap.Put(runOffset, handle);
+ Debug.Assert(pre == LongPriorityQueue.NO_VALUE);
+ }
+
+ private void RemoveAvailRun(long handle)
+ {
+ int pageIdxFloor = Arena.Pages2PageIdxFloor(RunPages(handle));
+ var queue = _runsAvail[pageIdxFloor];
+ RemoveAvailRun(queue, handle);
+ }
+
+ private void RemoveAvailRun(LongPriorityQueue queue, long handle)
+ {
+ queue.Remove(handle);
+
+ int runOffset = RunOffset(handle);
+ int pages = RunPages(handle);
+ // remove first page of run
+ _runsAvailMap.Remove(runOffset);
+ if (pages > 1)
+ {
+ // remove last page of run
+ _runsAvailMap.Remove(LastPage(runOffset, pages));
+ }
+ }
+
+ private static int LastPage(int runOffset, int pages)
+ {
+ return runOffset + pages - 1;
+ }
+
+ private long GetAvailRunByOffset(int runOffset)
+ {
+ return _runsAvailMap.Get(runOffset);
+ }
public int Usage
{
@@ -210,7 +294,7 @@ public int Usage
}
}
- int GetUsage(int freeBytes)
+ private int GetUsage(int freeBytes)
{
if (0u >= (uint)freeBytes)
{
@@ -226,273 +310,324 @@ int GetUsage(int freeBytes)
return 100 - freePercentage;
}
-
- internal bool Allocate(PooledByteBuffer buf, int reqCapacity, int normCapacity, PoolThreadCache threadCache)
+ internal bool Allocate(PooledByteBuffer buf, int reqCapacity, int sizeIdx, PoolThreadCache cache)
{
long handle;
- if ((normCapacity & _subpageOverflowMask) != 0)
+ if (sizeIdx <= Arena._smallMaxSizeIdx)
{
- // >= pageSize
- handle = AllocateRun(normCapacity);
+ // small
+ handle = AllocateSubpage(sizeIdx);
+ if (handle < 0L)
+ {
+ return false;
+ }
+ Debug.Assert(IsSubpage(handle));
}
else
{
- handle = AllocateSubpage(normCapacity);
+ // normal
+ // runSize must be multiple of pageSize
+ int runSize = Arena.SizeIdx2Size(sizeIdx);
+ handle = AllocateRun(runSize);
+ if (handle < 0L)
+ {
+ return false;
+ }
}
- if (handle < 0) { return false; }
-
- InitBuf(buf, handle, reqCapacity, threadCache);
+ InitBuf(buf, handle, reqCapacity, cache);
return true;
}
- /**
- * Update method used by allocate
- * This is triggered only when a successor is allocated and all its predecessors
- * need to update their state
- * The minimal depth at which subtree rooted at id has some free space
- *
- * @param id id
- */
-
- void UpdateParentsAlloc(int id)
+ private long AllocateRun(int runSize)
{
- while (id > 1)
- {
- int parentId = id.RightUShift(1);
- sbyte val1 = Value(id);
- sbyte val2 = Value(id ^ 1);
- sbyte val = val1 < val2 ? val1 : val2;
- SetValue(parentId, val);
- id = parentId;
- }
- }
-
- /**
- * Update method used by free
- * This needs to handle the special case when both children are completely free
- * in which case parent be directly allocated on request of size = child-size * 2
- *
- * @param id id
- */
+ int pages = runSize >> _pageShifts;
+ int pageIdx = Arena.Pages2PageIdx(pages);
- void UpdateParentsFree(int id)
- {
- int logChild = Depth(id) + 1;
- while (id > 1)
+ lock (_runsAvail)
{
- int parentId = id.RightUShift(1);
- sbyte val1 = Value(id);
- sbyte val2 = Value(id ^ 1);
- logChild -= 1; // in first iteration equals log, subsequently reduce 1 from logChild as we traverse up
-
- if (val1 == logChild && val2 == logChild)
+ // find first queue which has at least one big enough run
+ int queueIdx = RunFirstBestFit(pageIdx);
+ if (queueIdx == -1)
{
- SetValue(parentId, (sbyte)(logChild - 1));
+ return -1;
}
- else
+
+ // get run with min offset in this queue
+ var queue = _runsAvail[queueIdx];
+
+ long handle = queue.Poll();
+
+ Debug.Assert(handle != LongPriorityQueue.NO_VALUE && !IsUsed(handle), "invalid handle: " + handle);
+
+ RemoveAvailRun(queue, handle);
+
+ if (handle != -1)
{
- sbyte val = val1 < val2 ? val1 : val2;
- SetValue(parentId, val);
+ handle = SplitLargeRun(handle, pages);
}
- id = parentId;
+ _freeBytes -= RunSize(_pageShifts, handle);
+ return handle;
}
}
- /**
- * Algorithm to allocate an index in memoryMap when we query for a free node
- * at depth d
- *
- * @param d depth
- * @return index in memoryMap
- */
+ private int CalculateRunSize(int sizeIdx)
+ {
+ int maxElements = 1 << _pageShifts - SizeClasses.LOG2_QUANTUM;
+ int runSize = 0;
+ int nElements;
+
+ int elemSize = Arena.SizeIdx2Size(sizeIdx);
+
+ // find lowest common multiple of pageSize and elemSize
+ do
+ {
+ runSize += _pageSize;
+ nElements = runSize / elemSize;
+ } while (nElements < maxElements && runSize != nElements * elemSize);
+
+ while (nElements > maxElements)
+ {
+ runSize -= _pageSize;
+ nElements = runSize / elemSize;
+ }
- int AllocateNode(int d)
+ Debug.Assert(nElements > 0);
+ Debug.Assert(runSize <= _chunkSize);
+ Debug.Assert(runSize >= elemSize);
+
+ return runSize;
+ }
+
+ private int RunFirstBestFit(int pageIdx)
{
- int id = 1;
- int initial = -(1 << d); // has last d bits = 0 and rest all = 1
- sbyte val = Value(id);
- if (val > d)
+ if (_freeBytes == _chunkSize)
{
- // unusable
- return -1;
+ return Arena._nPSizes - 1;
}
- while (val < d || 0u >= (uint)(id & initial))
+ for (int i = pageIdx; i < Arena._nPSizes; i++)
{
- // id & initial == 1 << d for all ids at depth d, for < d it is 0
- id <<= 1;
- val = Value(id);
- if (val > d)
+ var queue = _runsAvail[i];
+ if (queue is object && !queue.IsEmpty())
{
- id ^= 1;
- val = Value(id);
+ return i;
}
}
- sbyte value = Value(id);
- Debug.Assert(value == d && (id & initial) == 1 << d, $"val = {value}, id & initial = {id & initial}, d = {d}");
- SetValue(id, _unusable); // mark as unusable
- UpdateParentsAlloc(id);
- return id;
+ return -1;
}
- /**
- * Allocate a run of pages (>=1)
- *
- * @param normCapacity normalized capacity
- * @return index in memoryMap
- */
-
- long AllocateRun(int normCapacity)
+ private long SplitLargeRun(long handle, int needPages)
{
- int d = _maxOrder - (Log2(normCapacity) - _pageShifts);
- int id = AllocateNode(d);
- if (id < 0)
+ Debug.Assert(needPages > 0);
+
+ int totalPages = RunPages(handle);
+ Debug.Assert(needPages <= totalPages);
+
+ int remPages = totalPages - needPages;
+
+ if (remPages > 0)
{
- return id;
+ int runOffset = RunOffset(handle);
+
+ // keep track of trailing unused pages for later use
+ int availOffset = runOffset + needPages;
+ long availRun = ToRunHandle(availOffset, remPages, 0);
+ InsertAvailRun(availOffset, remPages, availRun);
+
+ // not avail
+ return ToRunHandle(runOffset, needPages, 1);
}
- _freeBytes -= RunLength(id);
- return id;
- }
- /**
- * Create/ initialize a new PoolSubpage of normCapacity
- * Any PoolSubpage created/ initialized here is added to subpage pool in the PoolArena that owns this PoolChunk
- *
- * @param normCapacity normalized capacity
- * @return index in memoryMap
- */
+ //mark it as used
+ handle |= 1L << IS_USED_SHIFT;
+ return handle;
+ }
- long AllocateSubpage(int normCapacity)
+ /// <summary>
+ /// Create / initialize a new PoolSubpage of normCapacity. Any PoolSubpage created / initialized here is added to
+ /// subpage pool in the PoolArena that owns this PoolChunk
+ /// </summary>
+ /// <param name="sizeIdx">sizeIdx of normalized size</param>
+ /// <returns>index in memoryMap</returns>
+ private long AllocateSubpage(int sizeIdx)
{
// Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
// This is need as we may add it back and so alter the linked-list structure.
- PoolSubpage head = Arena.FindSubpagePoolHead(normCapacity);
+ PoolSubpage head = Arena.FindSubpagePoolHead(sizeIdx);
lock (head)
{
- int d = _maxOrder; // subpages are only be allocated from pages i.e., leaves
- int id = AllocateNode(d);
- if (id < 0)
+ // allocate a new run
+ int runSize = CalculateRunSize(sizeIdx);
+ // runSize must be multiples of pageSize
+ long runHandle = AllocateRun(runSize);
+ if (runHandle < 0L)
{
- return id;
+ return -1;
}
- PoolSubpage[] subpages = _subpages;
- int pageSize = _pageSize;
+ int runOffset = RunOffset(runHandle);
+ Debug.Assert(_subpages[runOffset] is null);
+ int elemSize = Arena.SizeIdx2Size(sizeIdx);
- _freeBytes -= pageSize;
-
- int subpageIdx = SubpageIdx(id);
- PoolSubpage subpage = subpages[subpageIdx];
- if (subpage is null)
- {
- subpage = new PoolSubpage(head, this, id, RunOffset(id), pageSize, normCapacity);
- subpages[subpageIdx] = subpage;
- }
- else
- {
- subpage.Init(head, normCapacity);
- }
+ PoolSubpage subpage = new(head, this, _pageShifts, runOffset,
+ RunSize(_pageShifts, runHandle), elemSize);
+ _subpages[runOffset] = subpage;
return subpage.Allocate();
}
}
- /**
- * Free a subpage or a run of pages
- * When a subpage is freed from PoolSubpage, it might be added back to subpage pool of the owning PoolArena
- * If the subpage pool in PoolArena has at least one other PoolSubpage of given elemSize, we can
- * completely free the owning Page so it is available for subsequent allocations
- *
- * @param handle handle to free
- */
-
- internal void Free(long handle)
+ /// <summary>
+ /// Free a subpage or a run of pages. When a subpage is freed from PoolSubpage, it might be added back to the subpage pool
+ /// of the owning PoolArena. If the subpage pool in PoolArena has at least one other PoolSubpage of given elemSize,
+ /// we can completely free the owning page so it is available for subsequent allocations
+ /// </summary>
+ /// <param name="handle">handle to free</param>
+ /// <param name="normCapacity"></param>
+ internal void Free(long handle, int normCapacity)
{
- int memoryMapIdx = MemoryMapIdx(handle);
- int bitmapIdx = BitmapIdx(handle);
-
- if (bitmapIdx != 0)
+ if (IsSubpage(handle))
{
- // free a subpage
- PoolSubpage subpage = _subpages[SubpageIdx(memoryMapIdx)];
+ int sizeIdx = Arena.Size2SizeIdx(normCapacity);
+ PoolSubpage head = Arena.FindSubpagePoolHead(sizeIdx);
+
+ int sIdx = RunOffset(handle);
+ PoolSubpage subpage = _subpages[sIdx];
Debug.Assert(subpage is object && subpage.DoNotDestroy);
// Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
// This is need as we may add it back and so alter the linked-list structure.
- PoolSubpage head = Arena.FindSubpagePoolHead(subpage.ElemSize);
lock (head)
{
- if (subpage.Free(head, bitmapIdx & 0x3FFFFFFF))
+ if (subpage.Free(head, BitmapIdx(handle)))
{
+ //the subpage is still used, do not free it
return;
}
+ Debug.Assert(!subpage.DoNotDestroy);
+ // Null out slot in the array as it was freed and we should not use it anymore.
+ _subpages[sIdx] = null;
}
}
- _freeBytes += RunLength(memoryMapIdx);
- SetValue(memoryMapIdx, Depth(memoryMapIdx));
- UpdateParentsFree(memoryMapIdx);
- }
- internal void InitBuf(PooledByteBuffer buf, long handle, int reqCapacity, PoolThreadCache threadCache)
- {
- int memoryMapIdx = MemoryMapIdx(handle);
- int bitmapIdx = BitmapIdx(handle);
- if (0u >= (uint)bitmapIdx)
- {
- sbyte val = Value(memoryMapIdx);
- Debug.Assert(val == _unusable, val.ToString());
- buf.Init(this, handle, RunOffset(memoryMapIdx) + Offset, reqCapacity, RunLength(memoryMapIdx), threadCache);
- }
- else
+ // start free run
+ int pages = RunPages(handle);
+
+ lock (_runsAvail)
{
- InitBufWithSubpage(buf, handle, bitmapIdx, reqCapacity, threadCache);
+ // collapse continuous runs, successfully collapsed runs
+ // will be removed from runsAvail and runsAvailMap
+ long finalRun = CollapseRuns(handle);
+
+ // set run as not used
+ finalRun &= ~(1L << IS_USED_SHIFT);
+ // if it is a subpage, set it to run
+ finalRun &= ~(1L << IS_SUBPAGE_SHIFT);
+
+ InsertAvailRun(RunOffset(finalRun), RunPages(finalRun), finalRun);
+ _freeBytes += pages << _pageShifts;
}
}
- internal void InitBufWithSubpage(PooledByteBuffer buf, long handle, int reqCapacity, PoolThreadCache threadCache) =>
- InitBufWithSubpage(buf, handle, BitmapIdx(handle), reqCapacity, threadCache);
+ private long CollapseRuns(long handle)
+ {
+ return CollapseNext(CollapsePast(handle));
+ }
- void InitBufWithSubpage(PooledByteBuffer buf, long handle, int bitmapIdx, int reqCapacity, PoolThreadCache threadCache)
+ private long CollapsePast(long handle)
{
- Debug.Assert(bitmapIdx != 0);
+ for (; ; )
+ {
+ int runOffset = RunOffset(handle);
+ int runPages = RunPages(handle);
- int memoryMapIdx = MemoryMapIdx(handle);
+ var pastRun = GetAvailRunByOffset(runOffset - 1);
+ if (0ul >= (ulong)(LongPriorityQueue.NO_VALUE - pastRun))
+ {
+ return handle;
+ }
- PoolSubpage subpage = _subpages[SubpageIdx(memoryMapIdx)];
- Debug.Assert(subpage.DoNotDestroy);
- Debug.Assert(reqCapacity <= subpage.ElemSize);
+ int pastOffset = RunOffset(pastRun);
+ int pastPages = RunPages(pastRun);
- buf.Init(
- this, handle,
- RunOffset(memoryMapIdx) + (bitmapIdx & 0x3FFFFFFF) * subpage.ElemSize + Offset,
- reqCapacity, subpage.ElemSize, threadCache);
+ // is continuous
+ if (pastRun != handle && pastOffset + pastPages == runOffset)
+ {
+ // remove past run
+ RemoveAvailRun(pastRun);
+ handle = ToRunHandle(pastOffset, pastPages + runPages, 0);
+ }
+ else
+ {
+ return handle;
+ }
+ }
}
- sbyte Value(int id) => _memoryMap[id];
+ private long CollapseNext(long handle)
+ {
+ for (; ; )
+ {
+ int runOffset = RunOffset(handle);
+ int runPages = RunPages(handle);
- void SetValue(int id, sbyte val) => _memoryMap[id] = val;
+ var nextRun = GetAvailRunByOffset(runOffset + runPages);
+ if (0ul >= (ulong)(LongPriorityQueue.NO_VALUE - nextRun))
+ {
+ return handle;
+ }
- sbyte Depth(int id) => _depthMap[id];
+ int nextOffset = RunOffset(nextRun);
+ int nextPages = RunPages(nextRun);
- // compute the (0-based, with lsb = 0) position of highest set bit i.e, log2
- static int Log2(int val) => IntegerSizeMinusOne - val.NumberOfLeadingZeros();
+ // is continuous
+ if (nextRun != handle && runOffset + runPages == nextOffset)
+ {
+ // remove next run
+ RemoveAvailRun(nextRun);
+ handle = ToRunHandle(runOffset, runPages + nextPages, 0);
+ }
+ else
+ {
+ return handle;
+ }
+ }
+ }
- /// represents the size in #bytes supported by node 'id' in the tree
- int RunLength(int id) => 1 << _log2ChunkSize - Depth(id);
+ private static long ToRunHandle(int runOffset, int runPages, int inUsed)
+ {
+ return (long)runOffset << RUN_OFFSET_SHIFT
+ | (long)runPages << SIZE_SHIFT
+ | (long)inUsed << IS_USED_SHIFT;
+ }
- int RunOffset(int id)
+ internal void InitBuf(PooledByteBuffer buf, long handle, int reqCapacity, PoolThreadCache threadCache)
{
- // represents the 0-based offset in #bytes from start of the byte-array chunk
- int shift = id ^ 1 << Depth(id);
- return shift * RunLength(id);
+ if (IsRun(handle))
+ {
+ buf.Init(this, handle, RunOffset(handle) << _pageShifts,
+ reqCapacity, RunSize(_pageShifts, handle), Arena.Parent.ThreadCache());
+ }
+ else
+ {
+ InitBufWithSubpage(buf, handle, reqCapacity, threadCache);
+ }
}
- int SubpageIdx(int memoryMapIdx) => memoryMapIdx ^ _maxSubpageAllocs; // remove highest set bit, to get offset
+ internal void InitBufWithSubpage(PooledByteBuffer buf, long handle, int reqCapacity, PoolThreadCache threadCache)
+ {
+ int runOffset = RunOffset(handle);
+ int bitmapIdx = BitmapIdx(handle);
- static int MemoryMapIdx(long handle) => (int)handle;
+ PoolSubpage s = _subpages[runOffset];
+ Debug.Assert(s.DoNotDestroy);
+ Debug.Assert(reqCapacity <= s.ElemSize);
- static int BitmapIdx(long handle) => (int)handle.RightUShift(IntegerExtensions.SizeInBits);
+ buf.Init(this, handle,
+ (runOffset << _pageShifts) + bitmapIdx * s.ElemSize + Offset,
+ reqCapacity, s.ElemSize, threadCache);
+ }
public int ChunkSize => _chunkSize;
@@ -524,5 +659,40 @@ public override string ToString()
}
internal void Destroy() => Arena.DestroyChunk(this);
+
+ internal static int RunOffset(long handle)
+ {
+ return (int)(handle >> RUN_OFFSET_SHIFT);
+ }
+
+ internal static int RunSize(int pageShifts, long handle)
+ {
+ return RunPages(handle) << pageShifts;
+ }
+
+ internal static int RunPages(long handle)
+ {
+ return (int)(handle >> SIZE_SHIFT & 0x7fff);
+ }
+
+ private static bool IsUsed(long handle)
+ {
+ return (handle >> IS_USED_SHIFT & 1L) == 1L;
+ }
+
+ private static bool IsRun(long handle)
+ {
+ return !IsSubpage(handle);
+ }
+
+ internal static bool IsSubpage(long handle)
+ {
+ return (handle >> IS_SUBPAGE_SHIFT & 1L) == 1L;
+ }
+
+ private static int BitmapIdx(long handle)
+ {
+ return (int)handle;
+ }
}
}
\ No newline at end of file
diff --git a/src/DotNetty.Buffers/PoolChunkList.cs b/src/DotNetty.Buffers/PoolChunkList.cs
index 1663cc8e..34fc040d 100644
--- a/src/DotNetty.Buffers/PoolChunkList.cs
+++ b/src/DotNetty.Buffers/PoolChunkList.cs
@@ -33,7 +33,7 @@ namespace DotNetty.Buffers
using DotNetty.Common.Internal;
using DotNetty.Common.Utilities;
- sealed class PoolChunkList : IPoolChunkListMetric
+ internal sealed class PoolChunkList : IPoolChunkListMetric
{
private readonly PoolArena _arena;
private readonly PoolChunkList _nextList;
@@ -44,7 +44,7 @@ sealed class PoolChunkList : IPoolChunkListMetric
private readonly int _freeMinThreshold;
private readonly int _freeMaxThreshold;
- // This is only update once when create the linked like list of PoolChunkList in PoolArena constructor.
+ /// This is only updated once, when creating the linked list of PoolChunkList in the PoolArena constructor.
private PoolChunkList _prevList;
// TODO: Test if adding padding helps under contention
@@ -106,9 +106,11 @@ private static int CalculateThresholdWithOverflow(int chunkSize, int usage)
return freeThreshold;
}
- /// Calculates the maximum capacity of a buffer that will ever be possible to allocate out of the {@link PoolChunk}s
- /// that belong to the {@link PoolChunkList} with the given {@code minUsage} and {@code maxUsage} settings.
- static int CalculateMaxCapacity(int minUsage, int chunkSize)
+ /// <summary>
+ /// Calculates the maximum capacity of a buffer that will ever be possible to allocate out of the <see cref="PoolChunk"/>s
+ /// that belong to the <see cref="PoolChunkList"/> with the given <c>minUsage</c> and <c>maxUsage</c> settings.
+ /// </summary>
+ private static int CalculateMaxCapacity(int minUsage, int chunkSize)
{
minUsage = MinUsage0(minUsage);
@@ -132,8 +134,9 @@ internal void PrevList(PoolChunkList list)
_prevList = list;
}
- internal bool Allocate(PooledByteBuffer buf, int reqCapacity, int normCapacity, PoolThreadCache threadCache)
+ internal bool Allocate(PooledByteBuffer buf, int reqCapacity, int sizeIdx, PoolThreadCache threadCache)
{
+ int normCapacity = _arena.SizeIdx2Size(sizeIdx);
if (_head is null || normCapacity > _maxCapacity)
{
// Either this PoolChunkList is empty or the requested capacity is larger then the capacity which can
@@ -143,7 +146,7 @@ internal bool Allocate(PooledByteBuffer buf, int reqCapacity, int normCapacit
for (PoolChunk cur = _head; cur is object; cur = cur.Next)
{
- if (cur.Allocate(buf, reqCapacity, normCapacity, threadCache))
+ if (cur.Allocate(buf, reqCapacity, sizeIdx, threadCache))
{
if (cur._freeBytes <= _freeMinThreshold)
{
@@ -156,9 +159,9 @@ internal bool Allocate(PooledByteBuffer buf, int reqCapacity, int normCapacit
return false;
}
- internal bool Free(PoolChunk chunk, long handle)
+ internal bool Free(PoolChunk chunk, long handle, int normCapacity)
{
- chunk.Free(handle);
+ chunk.Free(handle, normCapacity);
if (chunk._freeBytes > _freeMaxThreshold)
{
Remove(chunk);
@@ -168,7 +171,7 @@ internal bool Free(PoolChunk chunk, long handle)
return true;
}
- bool Move(PoolChunk chunk)
+ private bool Move(PoolChunk chunk)
{
Debug.Assert(chunk.Usage < _maxUsage);
@@ -183,9 +186,10 @@ bool Move(PoolChunk chunk)
return true;
}
- /// Moves the {@link PoolChunk} down the {@link PoolChunkList} linked-list so it will end up in the right
- /// {@link PoolChunkList} that has the correct minUsage / maxUsage in respect to {@link PoolChunk#usage()}.
- bool Move0(PoolChunk chunk)
+ /// Moves the <see cref="PoolChunk"/> down the <see cref="PoolChunkList"/> linked-list so it will end up in the right
+ /// <see cref="PoolChunkList"/> that has the correct minUsage / maxUsage in respect to <see cref="PoolChunk.Usage"/>.
+ ///
+ private bool Move0(PoolChunk chunk)
{
if (_prevList is null)
{
@@ -207,8 +211,9 @@ internal void Add(PoolChunk chunk)
Add0(chunk);
}
- /// Adds the {@link PoolChunk} to this {@link PoolChunkList}.
- void Add0(PoolChunk chunk)
+ /// Adds the <see cref="PoolChunk"/> to this <see cref="PoolChunkList"/>.
+ ///
+ private void Add0(PoolChunk chunk)
{
chunk.Parent = this;
if (_head is null)
@@ -226,7 +231,7 @@ void Add0(PoolChunk chunk)
}
}
- void Remove(PoolChunk cur)
+ private void Remove(PoolChunk cur)
{
if (cur == _head)
{
@@ -247,11 +252,13 @@ void Remove(PoolChunk cur)
}
}
+ /// <inheritdoc />
public int MinUsage => MinUsage0(_minUsage);
+ /// <inheritdoc />
public int MaxUsage => Math.Min(_maxUsage, 100);
- static int MinUsage0(int value) => Math.Max(1, value);
+ private static int MinUsage0(int value) => Math.Max(1, value);
public IEnumerator GetEnumerator()
{
diff --git a/src/DotNetty.Buffers/PoolSubpage.cs b/src/DotNetty.Buffers/PoolSubpage.cs
index c8513f7c..68a9f6fa 100644
--- a/src/DotNetty.Buffers/PoolSubpage.cs
+++ b/src/DotNetty.Buffers/PoolSubpage.cs
@@ -28,12 +28,12 @@ namespace DotNetty.Buffers
using System.Diagnostics;
using DotNetty.Common.Utilities;
- sealed class PoolSubpage : IPoolSubpageMetric
+ internal sealed class PoolSubpage : IPoolSubpageMetric
{
internal readonly PoolChunk Chunk;
- private readonly int _memoryMapIdx;
+ private readonly int _pageShifts;
private readonly int _runOffset;
- private readonly int _pageSize;
+ private readonly int _runSize;
private readonly long[] _bitmap;
internal PoolSubpage Prev;
@@ -49,35 +49,30 @@ sealed class PoolSubpage : IPoolSubpageMetric
// TODO: Test if adding padding helps under contention
//private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
- /** Special constructor that creates a linked list head */
-
- public PoolSubpage(int pageSize)
+ /// Special constructor that creates a linked list head.
+ public PoolSubpage()
{
Chunk = null;
- _memoryMapIdx = -1;
+ _pageShifts = -1;
_runOffset = -1;
ElemSize = -1;
- _pageSize = pageSize;
+ _runSize = -1;
_bitmap = null;
}
- public PoolSubpage(PoolSubpage head, PoolChunk chunk, int memoryMapIdx, int runOffset, int pageSize, int elemSize)
+ public PoolSubpage(PoolSubpage head, PoolChunk chunk, int pageShifts, int runOffset, int runSize, int elemSize)
{
Chunk = chunk;
- _memoryMapIdx = memoryMapIdx;
+ _pageShifts = pageShifts;
_runOffset = runOffset;
- _pageSize = pageSize;
- _bitmap = new long[pageSize.RightUShift(10)]; // pageSize / 16 / 64
- Init(head, elemSize);
- }
+ _runSize = runSize;
+ ElemSize = elemSize;
+ _bitmap = new long[runSize.RightUShift(6) + SizeClasses.LOG2_QUANTUM]; // runSize / 64 / QUANTUM
- public void Init(PoolSubpage head, int elemSize)
- {
DoNotDestroy = true;
- ElemSize = elemSize;
if (elemSize != 0)
{
- _maxNumElems = _numAvail = _pageSize / elemSize;
+ _maxNumElems = _numAvail = _runSize / elemSize;
_nextAvail = 0;
_bitmapLength = _maxNumElems.RightUShift(6);
if ((_maxNumElems & 63) != 0)
@@ -94,17 +89,9 @@ public void Init(PoolSubpage head, int elemSize)
AddToPool(head);
}
- /**
- * Returns the bitmap index of the subpage allocation.
- */
-
+ /// Returns the bitmap index of the subpage allocation.
internal long Allocate()
{
- if (0u >= (uint)ElemSize)
- {
- return ToHandle(0);
- }
-
if (0u >= (uint)_numAvail || !DoNotDestroy)
{
return -1;
@@ -124,11 +111,11 @@ internal long Allocate()
return ToHandle(bitmapIdx);
}
- /**
- * @return true if this subpage is in use.
- * false if this subpage is not used by its chunk and thus it's OK to be released.
- */
-
+ /// <summary>TBD</summary>
+ /// <returns>
+ /// <c>true</c> if this subpage is in use.
+ /// <c>false</c> if this subpage is not used by its chunk and thus it's OK to be released.
+ /// </returns>
internal bool Free(PoolSubpage head, int bitmapIdx)
{
if (0u >= (uint)ElemSize)
@@ -169,7 +156,7 @@ internal bool Free(PoolSubpage head, int bitmapIdx)
}
}
- void AddToPool(PoolSubpage head)
+ private void AddToPool(PoolSubpage head)
{
Debug.Assert(Prev is null && Next is null);
@@ -179,7 +166,7 @@ void AddToPool(PoolSubpage head)
head.Next = this;
}
- void RemoveFromPool()
+ private void RemoveFromPool()
{
Debug.Assert(Prev is object && Next is object);
@@ -189,9 +176,9 @@ void RemoveFromPool()
Prev = null;
}
- void SetNextAvail(int bitmapIdx) => _nextAvail = bitmapIdx;
+ private void SetNextAvail(int bitmapIdx) => _nextAvail = bitmapIdx;
- int GetNextAvail()
+ private int GetNextAvail()
{
int nextAvail = _nextAvail;
if (nextAvail >= 0)
@@ -202,7 +189,7 @@ int GetNextAvail()
return FindNextAvail();
}
- int FindNextAvail()
+ private int FindNextAvail()
{
long[] bitmap = _bitmap;
int bitmapLength = _bitmapLength;
@@ -217,7 +204,7 @@ int FindNextAvail()
return -1;
}
- int FindNextAvail0(int i, long bits)
+ private int FindNextAvail0(int i, long bits)
{
int maxNumElems = _maxNumElems;
int baseVal = i << 6;
@@ -241,7 +228,15 @@ int FindNextAvail0(int i, long bits)
return -1;
}
- long ToHandle(int bitmapIdx) => 0x4000000000000000L | (long)bitmapIdx << 32 | (uint)_memoryMapIdx;
+ private long ToHandle(int bitmapIdx)
+ {
+ int pages = _runSize >> _pageShifts;
+ return ((long)_runOffset << PoolChunk.RUN_OFFSET_SHIFT)
+ | ((long)pages << PoolChunk.SIZE_SHIFT)
+ | (1L << PoolChunk.IS_USED_SHIFT)
+ | (1L << PoolChunk.IS_SUBPAGE_SHIFT)
+ | (long)bitmapIdx;
+ }
public override string ToString()
{
@@ -281,11 +276,11 @@ public override string ToString()
if (!doNotDestroy)
{
- return "(" + _memoryMapIdx + ": not in use)";
+ return "(" + _runOffset + ": not in use)";
}
- return "(" + _memoryMapIdx + ": " + (maxNumElems - numAvail) + "/" + maxNumElems +
- ", offset: " + _runOffset + ", length: " + _pageSize + ", elemSize: " + elemSize + ")";
+ return "(" + _runOffset + ": " + (maxNumElems - numAvail) + "/" + maxNumElems +
+ ", offset: " + _runOffset + ", length: " + _runSize + ", elemSize: " + elemSize + ")";
}
public int MaxNumElements
@@ -342,7 +337,7 @@ public int ElementSize
}
}
- public int PageSize => _pageSize;
+ public int PageSize => 1 << _pageShifts;
internal void Destroy() => Chunk?.Destroy();
}
diff --git a/src/DotNetty.Buffers/PoolThreadCache.cs b/src/DotNetty.Buffers/PoolThreadCache.cs
index 4b1a3807..a08bdd5b 100644
--- a/src/DotNetty.Buffers/PoolThreadCache.cs
+++ b/src/DotNetty.Buffers/PoolThreadCache.cs
@@ -26,8 +26,10 @@
namespace DotNetty.Buffers
{
using System;
+ using System.Collections.Generic;
using System.Diagnostics;
using System.Runtime.CompilerServices;
+ using System.Threading;
using DotNetty.Common;
using DotNetty.Common.Internal;
using DotNetty.Common.Internal.Logging;
@@ -41,28 +43,23 @@ namespace DotNetty.Buffers
///
/// Scalable memory allocation using jemalloc.
///
- sealed class PoolThreadCache
+ internal sealed class PoolThreadCache
{
private static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance>();
- private static readonly int s_integerSizeMinusOne = IntegerExtensions.SizeInBits - 1;
+ private const int c_integerSizeMinusOne = IntegerExtensions.SizeInBits - 1;
internal readonly PoolArena HeapArena;
internal readonly PoolArena DirectArena;
// Hold the caches for the different size classes, which are tiny, small and normal.
- private readonly MemoryRegionCache[] tinySubPageHeapCaches;
- private readonly MemoryRegionCache[] smallSubPageHeapCaches;
- private readonly MemoryRegionCache[] tinySubPageDirectCaches;
- private readonly MemoryRegionCache[] smallSubPageDirectCaches;
- private readonly MemoryRegionCache[] normalHeapCaches;
- private readonly MemoryRegionCache[] normalDirectCaches;
-
- // Used for bitshifting when calculate the index of normal caches later
- private readonly int _numShiftsNormalDirect;
- private readonly int _numShiftsNormalHeap;
+ private readonly MemoryRegionCache[] _smallSubPageHeapCaches;
+ private readonly MemoryRegionCache[] _smallSubPageDirectCaches;
+ private readonly MemoryRegionCache[] _normalHeapCaches;
+ private readonly MemoryRegionCache[] _normalDirectCaches;
+
private readonly int _freeSweepAllocationThreshold;
+ private int _freed = SharedConstants.False;
- //int freed = SharedConstants.False; // TODO
private int _allocations;
private readonly Thread _deathWatchThread;
@@ -72,8 +69,7 @@ sealed class PoolThreadCache
//private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
internal PoolThreadCache(PoolArena heapArena, PoolArena directArena,
- int tinyCacheSize, int smallCacheSize, int normalCacheSize,
- int maxCachedBufferCapacity, int freeSweepAllocationThreshold)
+ int smallCacheSize, int normalCacheSize, int maxCachedBufferCapacity, int freeSweepAllocationThreshold)
{
if ((uint)maxCachedBufferCapacity > SharedConstants.TooBigOrNegative) { ThrowHelper.ThrowArgumentException_PositiveOrZero(maxCachedBufferCapacity, ExceptionArgument.maxCachedBufferCapacity); }
@@ -82,13 +78,10 @@ internal PoolThreadCache(PoolArena heapArena, PoolArena directArena,
DirectArena = directArena;
if (directArena is object)
{
- tinySubPageDirectCaches = CreateSubPageCaches(
- tinyCacheSize, PoolArena.NumTinySubpagePools, SizeClass.Tiny);
- smallSubPageDirectCaches = CreateSubPageCaches(
- smallCacheSize, directArena.NumSmallSubpagePools, SizeClass.Small);
+ _smallSubPageDirectCaches = CreateSubPageCaches(
+ smallCacheSize, directArena.NumSmallSubpagePools);
- _numShiftsNormalDirect = Log2(directArena.PageSize);
- normalDirectCaches = CreateNormalCaches(
+ _normalDirectCaches = CreateNormalCaches(
normalCacheSize, maxCachedBufferCapacity, directArena);
directArena.IncrementNumThreadCaches();
@@ -96,21 +89,16 @@ internal PoolThreadCache(PoolArena heapArena, PoolArena directArena,
else
{
// No directArea is configured so just null out all caches
- tinySubPageDirectCaches = null;
- smallSubPageDirectCaches = null;
- normalDirectCaches = null;
- _numShiftsNormalDirect = -1;
+ _smallSubPageDirectCaches = null;
+ _normalDirectCaches = null;
}
if (heapArena is object)
{
// Create the caches for the heap allocations
- tinySubPageHeapCaches = CreateSubPageCaches(
- tinyCacheSize, PoolArena.NumTinySubpagePools, SizeClass.Tiny);
- smallSubPageHeapCaches = CreateSubPageCaches(
- smallCacheSize, heapArena.NumSmallSubpagePools, SizeClass.Small);
+ _smallSubPageHeapCaches = CreateSubPageCaches(
+ smallCacheSize, heapArena.NumSmallSubpagePools);
- _numShiftsNormalHeap = Log2(heapArena.PageSize);
- normalHeapCaches = CreateNormalCaches(
+ _normalHeapCaches = CreateNormalCaches(
normalCacheSize, maxCachedBufferCapacity, heapArena);
heapArena.IncrementNumThreadCaches();
@@ -118,18 +106,16 @@ internal PoolThreadCache(PoolArena heapArena, PoolArena directArena,
else
{
// No heapArea is configured so just null out all caches
- tinySubPageHeapCaches = null;
- smallSubPageHeapCaches = null;
- normalHeapCaches = null;
- _numShiftsNormalHeap = -1;
+ _smallSubPageHeapCaches = null;
+ _normalHeapCaches = null;
}
// We only need to watch the thread when any cache is used.
- if (tinySubPageDirectCaches is object || smallSubPageDirectCaches is object || normalDirectCaches is object
- || tinySubPageHeapCaches is object || smallSubPageHeapCaches is object || normalHeapCaches is object)
+ if (_smallSubPageDirectCaches is object || _normalDirectCaches is object ||
+ _smallSubPageHeapCaches is object || _normalHeapCaches is object)
{
if (freeSweepAllocationThreshold < 1) { ThrowHelper.ThrowArgumentException_Positive(freeSweepAllocationThreshold, ExceptionArgument.freeSweepAllocationThreshold); }
- _freeTask = Free0;
+ _freeTask = () => FreeImpl(true);
_deathWatchThread = Thread.CurrentThread;
// The thread-local cache will keep a list of pooled buffers which must be returned to
@@ -143,8 +129,7 @@ internal PoolThreadCache(PoolArena heapArena, PoolArena directArena,
}
}
- static MemoryRegionCache[] CreateSubPageCaches(
- int cacheSize, int numCaches, SizeClass sizeClass)
+ private static MemoryRegionCache[] CreateSubPageCaches(int cacheSize, int numCaches)
{
if (cacheSize > 0 && numCaches > 0)
{
@@ -152,7 +137,7 @@ static MemoryRegionCache[] CreateSubPageCaches(
for (int i = 0; i < cache.Length; i++)
{
// TODO: maybe use cacheSize / cache.length
- cache[i] = new SubPageMemoryRegionCache(cacheSize, sizeClass);
+ cache[i] = new SubPageMemoryRegionCache(cacheSize);
}
return cache;
}
@@ -162,20 +147,20 @@ static MemoryRegionCache[] CreateSubPageCaches(
}
}
- static MemoryRegionCache[] CreateNormalCaches(
+ private static MemoryRegionCache[] CreateNormalCaches(
int cacheSize, int maxCachedBufferCapacity, PoolArena area)
{
if (cacheSize > 0 && maxCachedBufferCapacity > 0)
{
int max = Math.Min(area.ChunkSize, maxCachedBufferCapacity);
- int arraySize = Math.Max(1, Log2(max / area.PageSize) + 1);
-
- var cache = new MemoryRegionCache[arraySize];
- for (int i = 0; i < cache.Length; i++)
+ // Create as many normal caches as we support based on how many sizeIdx we have and what the upper
+ // bound is that we want to cache in general.
+ List cache = new List();
+ for (int idx = area.NumSmallSubpagePools; idx < area._nSizes && area.SizeIdx2Size(idx) <= max; idx++)
{
- cache[i] = new NormalMemoryRegionCache(cacheSize);
+ cache.Add(new NormalMemoryRegionCache(cacheSize));
}
- return cache;
+ return cache.ToArray();
}
else
{
@@ -185,33 +170,22 @@ static MemoryRegionCache[] CreateNormalCaches(
// val > 0
[MethodImpl(InlineMethod.AggressiveOptimization)]
- private static int Log2(int val)
+ internal static int Log2(int val)
{
- return s_integerSizeMinusOne - IntegerExtensions.NumberOfLeadingZeros(val);
+ return c_integerSizeMinusOne - val.NumberOfLeadingZeros();
}
- ///
- /// Try to allocate a tiny buffer out of the cache.
- ///
- /// true if successful false otherwise
- internal bool AllocateTiny(PoolArena area, PooledByteBuffer buf, int reqCapacity, int normCapacity) =>
- Allocate(CacheForTiny(area, normCapacity), buf, reqCapacity);
-
- ///
- /// Try to allocate a small buffer out of the cache.
- ///
+ /// Try to allocate a small buffer out of the cache.
/// true if successful false otherwise
- internal bool AllocateSmall(PoolArena area, PooledByteBuffer buf, int reqCapacity, int normCapacity) =>
- Allocate(CacheForSmall(area, normCapacity), buf, reqCapacity);
+ internal bool AllocateSmall(PoolArena area, PooledByteBuffer buf, int reqCapacity, int sizeIdx) =>
+ Allocate(CacheForSmall(area, sizeIdx), buf, reqCapacity);
- ///
- /// Try to allocate a small buffer out of the cache
- ///
+ /// Try to allocate a normal buffer out of the cache
/// true if successful false otherwise
- internal bool AllocateNormal(PoolArena area, PooledByteBuffer buf, int reqCapacity, int normCapacity) =>
- Allocate(CacheForNormal(area, normCapacity), buf, reqCapacity);
+ internal bool AllocateNormal(PoolArena area, PooledByteBuffer buf, int reqCapacity, int sizeIdx) =>
+ Allocate(CacheForNormal(area, sizeIdx), buf, reqCapacity);
- bool Allocate(MemoryRegionCache cache, PooledByteBuffer buf, int reqCapacity)
+ private bool Allocate(MemoryRegionCache cache, PooledByteBuffer buf, int reqCapacity)
{
if (cache is null)
{
@@ -227,38 +201,33 @@ bool Allocate(MemoryRegionCache cache, PooledByteBuffer buf, int reqCapacity)
return allocated;
}
- ///
- /// Add and to the cache if there is enough room.
- ///
+ /// Add the <see cref="PoolChunk"/> and handle to the cache if there is enough room.
/// true if it fit into the cache false otherwise.
internal bool Add(PoolArena area, PoolChunk chunk, long handle, int normCapacity, SizeClass sizeClass)
{
- MemoryRegionCache cache = Cache(area, normCapacity, sizeClass);
+ int sizeIdx = area.Size2SizeIdx(normCapacity);
+ MemoryRegionCache cache = Cache(area, sizeIdx, sizeClass);
if (cache is null)
{
return false;
}
- return cache.Add(chunk, handle);
+ return cache.Add(chunk, handle, normCapacity);
}
- MemoryRegionCache Cache(PoolArena area, int normCapacity, SizeClass sizeClass)
+ private MemoryRegionCache Cache(PoolArena area, int sizeIdx, SizeClass sizeClass)
{
switch (sizeClass)
{
case SizeClass.Normal:
- return CacheForNormal(area, normCapacity);
+ return CacheForNormal(area, sizeIdx);
case SizeClass.Small:
- return CacheForSmall(area, normCapacity);
- case SizeClass.Tiny:
- return CacheForTiny(area, normCapacity);
+ return CacheForSmall(area, sizeIdx);
default:
ThrowHelper.ThrowArgumentOutOfRangeException(); return default;
}
}
- ///
- /// Should be called if the Thread that uses this cache is about to exist to release resources out of the cache
- ///
+ /// Should be called if the Thread that uses this cache is about to exit, to release resources out of the cache
internal void Free()
{
if (_freeTask is object)
@@ -267,17 +236,18 @@ internal void Free()
ThreadDeathWatcher.Unwatch(_deathWatchThread, _freeTask);
}
- Free0();
+ FreeImpl(true);
}
- void Free0()
+ private void FreeImpl(bool finalizer)
{
- int numFreed = Free(tinySubPageDirectCaches) +
- Free(smallSubPageDirectCaches) +
- Free(normalDirectCaches) +
- Free(tinySubPageHeapCaches) +
- Free(smallSubPageHeapCaches) +
- Free(normalHeapCaches);
+ // need to ensure we only call this one time.
+ if (0u >= (uint)(SharedConstants.True - Interlocked.Exchange(ref _freed, SharedConstants.True))) { return; }
+
+ int numFreed = Free(_smallSubPageDirectCaches, finalizer) +
+ Free(_normalDirectCaches, finalizer) +
+ Free(_smallSubPageHeapCaches, finalizer) +
+ Free(_normalHeapCaches, finalizer);
if (numFreed > 0 && Logger.DebugEnabled)
{
@@ -288,7 +258,7 @@ void Free0()
HeapArena?.DecrementNumThreadCaches();
}
- static int Free(MemoryRegionCache[] caches)
+ private static int Free(MemoryRegionCache[] caches, bool finalizer)
{
if (caches is null)
{
@@ -298,83 +268,65 @@ static int Free(MemoryRegionCache[] caches)
int numFreed = 0;
foreach (MemoryRegionCache c in caches)
{
- numFreed += Free(c);
+ numFreed += Free(c, finalizer);
}
return numFreed;
}
- static int Free(MemoryRegionCache cache)
+ private static int Free(MemoryRegionCache cache, bool finalizer)
{
if (cache is null)
{
return 0;
}
- return cache.Free();
+ return cache.Free(finalizer);
}
internal void Trim()
{
- Trim(tinySubPageDirectCaches);
- Trim(smallSubPageDirectCaches);
- Trim(normalDirectCaches);
- Trim(tinySubPageHeapCaches);
- Trim(smallSubPageHeapCaches);
- Trim(normalHeapCaches);
+ Trim(_smallSubPageDirectCaches);
+ Trim(_normalDirectCaches);
+ Trim(_smallSubPageHeapCaches);
+ Trim(_normalHeapCaches);
}
- static void Trim(MemoryRegionCache[] caches)
+ private static void Trim(MemoryRegionCache[] caches)
{
- if (caches is null)
- {
- return;
- }
+ if (caches is null) { return; }
foreach (MemoryRegionCache c in caches)
{
Trim(c);
}
}
- static void Trim(MemoryRegionCache cache) => cache?.Trim();
+ private static void Trim(MemoryRegionCache cache) => cache?.Trim();
- MemoryRegionCache CacheForTiny(PoolArena area, int normCapacity)
+ private MemoryRegionCache CacheForSmall(PoolArena area, int sizeIdx)
{
- int idx = PoolArena.TinyIdx(normCapacity);
- return Cache(area.IsDirect ? tinySubPageDirectCaches : tinySubPageHeapCaches, idx);
+ return Cache(area.IsDirect ? _smallSubPageDirectCaches : _smallSubPageHeapCaches, sizeIdx);
}
- MemoryRegionCache CacheForSmall(PoolArena area, int normCapacity)
+ private MemoryRegionCache CacheForNormal(PoolArena area, int sizeIdx)
{
- int idx = PoolArena.SmallIdx(normCapacity);
- return Cache(area.IsDirect ? smallSubPageDirectCaches : smallSubPageHeapCaches, idx);
+ // We need to subtract area.numSmallSubpagePools as sizeIdx is the overall index for all sizes.
+ int idx = sizeIdx - area.NumSmallSubpagePools;
+ return Cache(area.IsDirect ? _normalDirectCaches : _normalHeapCaches, idx);
}
- MemoryRegionCache CacheForNormal(PoolArena area, int normCapacity)
+ private static MemoryRegionCache Cache(MemoryRegionCache[] cache, int sizeIdx)
{
- if (area.IsDirect)
- {
- int idx = Log2(normCapacity >> _numShiftsNormalDirect);
- return Cache(normalDirectCaches, idx);
- }
- int idx1 = Log2(normCapacity >> _numShiftsNormalHeap);
- return Cache(normalHeapCaches, idx1);
- }
-
- static MemoryRegionCache Cache(MemoryRegionCache[] cache, int idx)
- {
- if (cache is null || idx > cache.Length - 1)
+ if (cache is null || sizeIdx > cache.Length - 1)
{
return null;
}
- return cache[idx];
+ return cache[sizeIdx];
}
- ///
- /// Cache used for buffers which are backed by TINY or SMALL size.
- ///
- sealed class SubPageMemoryRegionCache : MemoryRegionCache
+ /// Cache used for buffers which are backed by SMALL size.
+ private sealed class SubPageMemoryRegionCache : MemoryRegionCache
{
- internal SubPageMemoryRegionCache(int size, SizeClass sizeClass)
- : base(size, sizeClass)
+ internal SubPageMemoryRegionCache(int size)
+ : base(size, SizeClass.Small)
{
}
@@ -386,7 +338,7 @@ protected override void InitBuf(
///
/// Cache used for buffers which are backed by NORMAL size.
///
- sealed class NormalMemoryRegionCache : MemoryRegionCache
+ private sealed class NormalMemoryRegionCache : MemoryRegionCache
{
internal NormalMemoryRegionCache(int size)
: base(size, SizeClass.Normal)
@@ -398,12 +350,12 @@ protected override void InitBuf(
chunk.InitBuf(buf, handle, reqCapacity, threadCache);
}
- abstract class MemoryRegionCache
+ private abstract class MemoryRegionCache
{
- readonly int _size;
- readonly IQueue _queue;
- readonly SizeClass _sizeClass;
- int _allocations;
+ private readonly int _size;
+ private readonly IQueue _queue;
+ private readonly SizeClass _sizeClass;
+ private int _allocations;
protected MemoryRegionCache(int size, SizeClass sizeClass)
{
@@ -421,9 +373,9 @@ protected abstract void InitBuf(PoolChunk chunk, long handle,
///
/// Add to cache if not already full.
///
- public bool Add(PoolChunk chunk, long handle)
+ public bool Add(PoolChunk chunk, long handle, int normCapacity)
{
- Entry entry = NewEntry(chunk, handle);
+ Entry entry = NewEntry(chunk, handle, normCapacity);
bool queued = _queue.TryEnqueue(entry);
if (!queued)
{
@@ -451,19 +403,17 @@ public bool Allocate(PooledByteBuffer buf, int reqCapacity, PoolThreadCache
- /// Clear out this cache and free up all previous cached s and {@code handle}s.
- ///
- public int Free() => Free(int.MaxValue);
+ /// Clear out this cache and free up all previously cached <see cref="PoolChunk"/>s and handles.
+ public int Free(bool finalizer) => Free(int.MaxValue, finalizer);
- int Free(int max)
+ private int Free(int max, bool finalizer)
{
int numFreed = 0;
for (; numFreed < max; numFreed++)
{
if (_queue.TryDequeue(out Entry entry))
{
- FreeEntry(entry);
+ FreeEntry(entry, finalizer);
}
else
{
@@ -474,9 +424,7 @@ int Free(int max)
return numFreed;
}
- ///
- /// Free up cached s if not allocated frequently enough.
- ///
+ /// Free up cached <see cref="PoolChunk"/>s if not allocated frequently enough.
public void Trim()
{
int toFree = _size - _allocations;
@@ -485,26 +433,30 @@ public void Trim()
// We not even allocated all the number that are
if (toFree > 0)
{
- _ = Free(toFree);
+ _ = Free(toFree, false);
}
}
- void FreeEntry(Entry entry)
+ private void FreeEntry(Entry entry, bool finalizer)
{
PoolChunk chunk = entry.Chunk;
long handle = entry.Handle;
- // recycle now so PoolChunk can be GC'ed.
- entry.Recycle();
+ if (!finalizer)
+ {
+ // recycle now so PoolChunk can be GC'ed. This will only be done if this is not freed because of a finalizer.
+ entry.Recycle();
+ }
- chunk.Arena.FreeChunk(chunk, handle, _sizeClass, false);
+ chunk.Arena.FreeChunk(chunk, handle, entry.NormCapacity, _sizeClass, finalizer);
}
- sealed class Entry
+ private sealed class Entry
{
- readonly ThreadLocalPool.Handle _recyclerHandle;
+ private readonly ThreadLocalPool.Handle _recyclerHandle;
public PoolChunk Chunk;
public long Handle = -1;
+ public int NormCapacity;
public Entry(ThreadLocalPool.Handle recyclerHandle)
{
@@ -519,15 +471,16 @@ internal void Recycle()
}
}
- static Entry NewEntry(PoolChunk chunk, long handle)
+ private static Entry NewEntry(PoolChunk chunk, long handle, int normCapacity)
{
Entry entry = Recycler.Take();
entry.Chunk = chunk;
entry.Handle = handle;
+ entry.NormCapacity = normCapacity;
return entry;
}
- static readonly ThreadLocalPool Recycler = new ThreadLocalPool(handle => new Entry(handle));
+ private static readonly ThreadLocalPool Recycler = new(handle => new Entry(handle));
}
}
}
\ No newline at end of file
diff --git a/src/DotNetty.Buffers/PooledByteBufferAllocator.cs b/src/DotNetty.Buffers/PooledByteBufferAllocator.cs
index 3f0d3cc3..8647ed63 100644
--- a/src/DotNetty.Buffers/PooledByteBufferAllocator.cs
+++ b/src/DotNetty.Buffers/PooledByteBufferAllocator.cs
@@ -34,14 +34,14 @@ namespace DotNetty.Buffers
public class PooledByteBufferAllocator : AbstractByteBufferAllocator, IByteBufferAllocatorMetricProvider
{
- static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance();
+ private static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance();
+ private const int c_integerSizeMinusOne = IntegerExtensions.SizeInBits - 1;
public static readonly int DefaultNumHeapArena;
public static readonly int DefaultNumDirectArena;
public static readonly int DefaultPageSize;
public static readonly int DefaultMaxOrder; // 8192 << 11 = 16 MiB per chunk
- public static readonly int DefaultTinyCacheSize;
public static readonly int DefaultSmallCacheSize;
public static readonly int DefaultNormalCacheSize;
@@ -94,7 +94,6 @@ static PooledByteBufferAllocator()
DefaultNumDirectArena = Math.Max(0, SystemPropertyUtil.GetInt("io.netty.allocator.numDirectArenas", defaultMinNumArena));
// cache sizes
- DefaultTinyCacheSize = SystemPropertyUtil.GetInt("io.netty.allocator.tinyCacheSize", 512);
DefaultSmallCacheSize = SystemPropertyUtil.GetInt("io.netty.allocator.smallCacheSize", 256);
DefaultNormalCacheSize = SystemPropertyUtil.GetInt("io.netty.allocator.normalCacheSize", 64);
@@ -127,7 +126,6 @@ static PooledByteBufferAllocator()
Logger.Debug("-Dio.netty.allocator.maxOrder: {}", DefaultMaxOrder, maxOrderFallbackCause);
}
Logger.Debug("-Dio.netty.allocator.chunkSize: {}", DefaultPageSize << DefaultMaxOrder);
- Logger.Debug("-Dio.netty.allocator.tinyCacheSize: {}", DefaultTinyCacheSize);
Logger.Debug("-Dio.netty.allocator.smallCacheSize: {}", DefaultSmallCacheSize);
Logger.Debug("-Dio.netty.allocator.normalCacheSize: {}", DefaultNormalCacheSize);
Logger.Debug("-Dio.netty.allocator.maxCachedBufferCapacity: {}", DefaultMaxCachedBufferCapacity);
@@ -141,7 +139,6 @@ static PooledByteBufferAllocator()
private readonly PoolArena[] _heapArenas;
private readonly PoolArena[] _directArenas;
- private readonly int _tinyCacheSize;
private readonly int _smallCacheSize;
private readonly int _normalCacheSize;
private readonly IReadOnlyList _heapArenaMetrics;
@@ -165,25 +162,38 @@ public PooledByteBufferAllocator(int nHeapArena, int nDirectArena, int pageSize,
}
public unsafe PooledByteBufferAllocator(bool preferDirect, int nHeapArena, int nDirectArena, int pageSize, int maxOrder)
- : this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder,
- DefaultTinyCacheSize, DefaultSmallCacheSize, DefaultNormalCacheSize)
+ : this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder, DefaultSmallCacheSize, DefaultNormalCacheSize)
{
}
+ [Obsolete("")]
public PooledByteBufferAllocator(int nHeapArena, int nDirectArena, int pageSize, int maxOrder,
int tinyCacheSize, int smallCacheSize, int normalCacheSize)
- : this(false, nHeapArena, nDirectArena, pageSize, maxOrder, tinyCacheSize, smallCacheSize, normalCacheSize)
- { }
+ : this(false, nHeapArena, nDirectArena, pageSize, maxOrder, smallCacheSize, normalCacheSize)
+ {
+ }
+
+ public PooledByteBufferAllocator(int nHeapArena, int nDirectArena, int pageSize, int maxOrder,
+ int smallCacheSize, int normalCacheSize)
+ : this(false, nHeapArena, nDirectArena, pageSize, maxOrder, smallCacheSize, normalCacheSize)
+ {
+ }
+ [Obsolete("")]
public unsafe PooledByteBufferAllocator(bool preferDirect, int nHeapArena, int nDirectArena, int pageSize, int maxOrder,
int tinyCacheSize, int smallCacheSize, int normalCacheSize)
+ : this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder, smallCacheSize, normalCacheSize)
+ {
+ }
+
+ public unsafe PooledByteBufferAllocator(bool preferDirect, int nHeapArena, int nDirectArena, int pageSize, int maxOrder,
+ int smallCacheSize, int normalCacheSize)
: base(preferDirect)
{
if ((uint)nHeapArena > SharedConstants.TooBigOrNegative) { ThrowHelper.ThrowArgumentException_PositiveOrZero(nHeapArena, ExceptionArgument.nHeapArena); }
if ((uint)nDirectArena > SharedConstants.TooBigOrNegative) { ThrowHelper.ThrowArgumentException_PositiveOrZero(nHeapArena, ExceptionArgument.nDirectArena); }
_threadCache = new PoolThreadLocalCache(this);
- _tinyCacheSize = tinyCacheSize;
_smallCacheSize = smallCacheSize;
_normalCacheSize = normalCacheSize;
_chunkSize = ValidateAndCalculateChunkSize(pageSize, maxOrder);
@@ -201,7 +211,7 @@ public unsafe PooledByteBufferAllocator(bool preferDirect, int nHeapArena, int n
var metrics = new List