|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* |
|
* This file is available under and governed by the GNU General Public |
|
* License version 2 only, as published by the Free Software Foundation. |
|
* However, the following notice accompanied the original version of this |
|
* file: |
|
* |
|
* Written by Doug Lea with assistance from members of JCP JSR-166 |
|
* Expert Group and released to the public domain, as explained at |
|
* http://creativecommons.org/publicdomain/zero/1.0/ |
|
*/ |
|
|
|
package java.util.concurrent; |
|
|
|
import java.util.AbstractQueue; |
|
import java.util.Collection; |
|
import java.util.Iterator; |
|
import java.util.NoSuchElementException; |
|
import java.util.Queue; |
|
import java.util.concurrent.TimeUnit; |
|
import java.util.concurrent.locks.LockSupport; |
|
import java.util.Spliterator; |
|
import java.util.Spliterators; |
|
import java.util.function.Consumer; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * An unbounded {@link TransferQueue} based on linked nodes.
 * This queue orders elements FIFO (first-in-first-out) with respect
 * to any given producer.  The <em>head</em> of the queue is that
 * element that has been on the queue the longest time for some
 * producer.  The <em>tail</em> of the queue is that element that has
 * been on the queue the shortest time for some producer.
 *
 * <p>Beware that, unlike in most collections, the {@code size} method
 * is <em>NOT</em> a constant-time operation. Because of the
 * asynchronous nature of these queues, determining the current number
 * of elements requires a traversal of the elements, and so may report
 * inaccurate results if this collection is modified during traversal.
 *
 * <p>Memory consistency effects: As with other concurrent
 * collections, actions in a thread prior to placing an object into a
 * {@code LinkedTransferQueue}
 * <a href="package-summary.html#MemoryVisibility"><i>happen-before</i></a>
 * actions subsequent to the access or removal of that element from
 * the {@code LinkedTransferQueue} in another thread.
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
 * Java Collections Framework</a>.
 *
 * @since 1.7
 * @author Doug Lea
 * @param <E> the type of elements held in this collection
 */
|
public class LinkedTransferQueue<E> extends AbstractQueue<E> |
|
implements TransferQueue<E>, java.io.Serializable { |
|
private static final long serialVersionUID = -3223113410248163686L; |
|
|
|
/* |
|
* *** Overview of Dual Queues with Slack *** |
|
* |
|
* Dual Queues, introduced by Scherer and Scott |
|
* (http://www.cs.rice.edu/~wns1/papers/2004-DISC-DDS.pdf) are |
|
* (linked) queues in which nodes may represent either data or |
|
* requests. When a thread tries to enqueue a data node, but |
|
* encounters a request node, it instead "matches" and removes it; |
|
* and vice versa for enqueuing requests. Blocking Dual Queues |
|
* arrange that threads enqueuing unmatched requests block until |
|
* other threads provide the match. Dual Synchronous Queues (see |
|
* Scherer, Lea, & Scott |
|
* http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf) |
|
* additionally arrange that threads enqueuing unmatched data also |
|
* block. Dual Transfer Queues support all of these modes, as |
|
* dictated by callers. |
|
* |
|
* A FIFO dual queue may be implemented using a variation of the |
|
* Michael & Scott (M&S) lock-free queue algorithm |
|
* (http://www.cs.rochester.edu/u/scott/papers/1996_PODC_queues.pdf). |
|
* It maintains two pointer fields, "head", pointing to a |
|
* (matched) node that in turn points to the first actual |
|
* (unmatched) queue node (or null if empty); and "tail" that |
|
* points to the last node on the queue (or again null if |
|
* empty). For example, here is a possible queue with four data |
|
* elements: |
|
* |
|
* head tail |
|
* | | |
|
* v v |
|
* M -> U -> U -> U -> U |
|
* |
|
* The M&S queue algorithm is known to be prone to scalability and |
|
* overhead limitations when maintaining (via CAS) these head and |
|
* tail pointers. This has led to the development of |
|
* contention-reducing variants such as elimination arrays (see |
|
* Moir et al http://portal.acm.org/citation.cfm?id=1074013) and |
|
* optimistic back pointers (see Ladan-Mozes & Shavit |
|
* http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf). |
|
* However, the nature of dual queues enables a simpler tactic for |
|
* improving M&S-style implementations when dual-ness is needed. |
|
* |
|
* In a dual queue, each node must atomically maintain its match |
|
* status. While there are other possible variants, we implement |
|
* this here as: for a data-mode node, matching entails CASing an |
|
* "item" field from a non-null data value to null upon match, and |
|
* vice-versa for request nodes, CASing from null to a data |
|
* value. (Note that the linearization properties of this style of |
|
* queue are easy to verify -- elements are made available by |
|
* linking, and unavailable by matching.) Compared to plain M&S |
|
* queues, this property of dual queues requires one additional |
|
* successful atomic operation per enq/deq pair. But it also |
|
* enables lower cost variants of queue maintenance mechanics. (A |
|
* variation of this idea applies even for non-dual queues that |
|
* support deletion of interior elements, such as |
|
* j.u.c.ConcurrentLinkedQueue.) |
|
* |
|
* Once a node is matched, its match status can never again |
|
* change. We may thus arrange that the linked list of them |
|
* contain a prefix of zero or more matched nodes, followed by a |
|
* suffix of zero or more unmatched nodes. (Note that we allow |
|
* both the prefix and suffix to be zero length, which in turn |
|
* means that we do not use a dummy header.) If we were not |
|
* concerned with either time or space efficiency, we could |
|
* correctly perform enqueue and dequeue operations by traversing |
|
* from a pointer to the initial node; CASing the item of the |
|
* first unmatched node on match and CASing the next field of the |
|
* trailing node on appends. (Plus some special-casing when |
|
* initially empty). While this would be a terrible idea in |
|
* itself, it does have the benefit of not requiring ANY atomic |
|
* updates on head/tail fields. |
|
* |
|
* We introduce here an approach that lies between the extremes of |
|
* never versus always updating queue (head and tail) pointers. |
|
* This offers a tradeoff between sometimes requiring extra |
|
* traversal steps to locate the first and/or last unmatched |
|
* nodes, versus the reduced overhead and contention of fewer |
|
* updates to queue pointers. For example, a possible snapshot of |
|
* a queue is: |
|
* |
|
* head tail |
|
* | | |
|
* v v |
|
* M -> M -> U -> U -> U -> U |
|
* |
|
* The best value for this "slack" (the targeted maximum distance |
|
* between the value of "head" and the first unmatched node, and |
|
* similarly for "tail") is an empirical matter. We have found |
|
* that using very small constants in the range of 1-3 work best |
|
* over a range of platforms. Larger values introduce increasing |
|
* costs of cache misses and risks of long traversal chains, while |
|
* smaller values increase CAS contention and overhead. |
|
* |
|
* Dual queues with slack differ from plain M&S dual queues by |
|
* virtue of only sometimes updating head or tail pointers when |
|
* matching, appending, or even traversing nodes; in order to |
|
* maintain a targeted slack. The idea of "sometimes" may be |
|
* operationalized in several ways. The simplest is to use a |
|
* per-operation counter incremented on each traversal step, and |
|
* to try (via CAS) to update the associated queue pointer |
|
* whenever the count exceeds a threshold. Another, that requires |
|
* more overhead, is to use random number generators to update |
|
* with a given probability per traversal step. |
|
* |
|
* In any strategy along these lines, because CASes updating |
|
* fields may fail, the actual slack may exceed targeted |
|
* slack. However, they may be retried at any time to maintain |
|
* targets. Even when using very small slack values, this |
|
* approach works well for dual queues because it allows all |
|
* operations up to the point of matching or appending an item |
|
* (hence potentially allowing progress by another thread) to be |
|
* read-only, thus not introducing any further contention. As |
|
* described below, we implement this by performing slack |
|
* maintenance retries only after these points. |
|
* |
|
* As an accompaniment to such techniques, traversal overhead can |
|
* be further reduced without increasing contention of head |
|
* pointer updates: Threads may sometimes shortcut the "next" link |
|
* path from the current "head" node to be closer to the currently |
|
* known first unmatched node, and similarly for tail. Again, this |
|
* may be triggered with using thresholds or randomization. |
|
* |
|
* These ideas must be further extended to avoid unbounded amounts |
|
* of costly-to-reclaim garbage caused by the sequential "next" |
|
* links of nodes starting at old forgotten head nodes: As first |
|
* described in detail by Boehm |
|
* (http://portal.acm.org/citation.cfm?doid=503272.503282) if a GC |
|
* delays noticing that any arbitrarily old node has become |
|
* garbage, all newer dead nodes will also be unreclaimed. |
|
* (Similar issues arise in non-GC environments.) To cope with |
|
* this in our implementation, upon CASing to advance the head |
|
* pointer, we set the "next" link of the previous head to point |
|
* only to itself; thus limiting the length of connected dead lists. |
|
* (We also take similar care to wipe out possibly garbage |
|
* retaining values held in other Node fields.) However, doing so |
|
* adds some further complexity to traversal: If any "next" |
|
* pointer links to itself, it indicates that the current thread |
|
* has lagged behind a head-update, and so the traversal must |
|
* continue from the "head". Traversals trying to find the |
|
* current tail starting from "tail" may also encounter |
|
* self-links, in which case they also continue at "head". |
|
* |
|
* It is tempting in slack-based scheme to not even use CAS for |
|
* updates (similarly to Ladan-Mozes & Shavit). However, this |
|
* cannot be done for head updates under the above link-forgetting |
|
* mechanics because an update may leave head at a detached node. |
|
* And while direct writes are possible for tail updates, they |
|
* increase the risk of long retraversals, and hence long garbage |
|
* chains, which can be much more costly than is worthwhile |
|
* considering that the cost difference of performing a CAS vs |
|
* write is smaller when they are not triggered on each operation |
|
* (especially considering that writes and CASes equally require |
|
* additional GC bookkeeping ("write barriers") that are sometimes |
|
* more costly than the writes themselves because of contention). |
|
* |
|
* *** Overview of implementation *** |
|
* |
|
* We use a threshold-based approach to updates, with a slack |
|
* threshold of two -- that is, we update head/tail when the |
|
* current pointer appears to be two or more steps away from the |
|
* first/last node. The slack value is hard-wired: a path greater |
|
* than one is naturally implemented by checking equality of |
|
* traversal pointers except when the list has only one element, |
|
* in which case we keep slack threshold at one. Avoiding tracking |
|
* explicit counts across method calls slightly simplifies an |
|
* already-messy implementation. Using randomization would |
|
* probably work better if there were a low-quality dirt-cheap |
|
* per-thread one available, but even ThreadLocalRandom is too |
|
* heavy for these purposes. |
|
* |
|
* With such a small slack threshold value, it is not worthwhile |
|
* to augment this with path short-circuiting (i.e., unsplicing |
|
* interior nodes) except in the case of cancellation/removal (see |
|
* below). |
|
* |
|
* We allow both the head and tail fields to be null before any |
|
* nodes are enqueued; initializing upon first append. This |
|
* simplifies some other logic, as well as providing more |
|
* efficient explicit control paths instead of letting JVMs insert |
|
* implicit NullPointerExceptions when they are null. While not |
|
* currently fully implemented, we also leave open the possibility |
|
* of re-nulling these fields when empty (which is complicated to |
|
* arrange, for little benefit.) |
|
* |
|
* All enqueue/dequeue operations are handled by the single method |
|
* "xfer" with parameters indicating whether to act as some form |
|
* of offer, put, poll, take, or transfer (each possibly with |
|
* timeout). The relative complexity of using one monolithic |
|
* method outweighs the code bulk and maintenance problems of |
|
* using separate methods for each case. |
|
* |
|
* Operation consists of up to three phases. The first is |
|
* implemented within method xfer, the second in tryAppend, and |
|
* the third in method awaitMatch. |
|
* |
|
* 1. Try to match an existing node |
|
* |
|
* Starting at head, skip already-matched nodes until finding |
|
* an unmatched node of opposite mode, if one exists, in which |
|
* case matching it and returning, also if necessary updating |
|
* head to one past the matched node (or the node itself if the |
|
* list has no other unmatched nodes). If the CAS misses, then |
|
* a loop retries advancing head by two steps until either |
|
* success or the slack is at most two. By requiring that each |
|
* attempt advances head by two (if applicable), we ensure that |
|
* the slack does not grow without bound. Traversals also check |
|
* if the initial head is now off-list, in which case they |
|
* start at the new head. |
|
* |
|
* If no candidates are found and the call was untimed |
|
* poll/offer, (argument "how" is NOW) return. |
|
* |
|
* 2. Try to append a new node (method tryAppend) |
|
* |
|
* Starting at current tail pointer, find the actual last node |
|
* and try to append a new node (or if head was null, establish |
|
* the first node). Nodes can be appended only if their |
|
* predecessors are either already matched or are of the same |
|
* mode. If we detect otherwise, then a new node with opposite |
|
* mode must have been appended during traversal, so we must |
|
* restart at phase 1. The traversal and update steps are |
|
* otherwise similar to phase 1: Retrying upon CAS misses and |
|
* checking for staleness. In particular, if a self-link is |
|
* encountered, then we can safely jump to a node on the list |
|
* by continuing the traversal at current head. |
|
* |
|
* On successful append, if the call was ASYNC, return. |
|
* |
|
* 3. Await match or cancellation (method awaitMatch) |
|
* |
|
* Wait for another thread to match node; instead cancelling if |
|
* the current thread was interrupted or the wait timed out. On |
|
* multiprocessors, we use front-of-queue spinning: If a node |
|
* appears to be the first unmatched node in the queue, it |
|
* spins a bit before blocking. In either case, before blocking |
|
* it tries to unsplice any nodes between the current "head" |
|
* and the first unmatched node. |
|
* |
|
* Front-of-queue spinning vastly improves performance of |
|
* heavily contended queues. And so long as it is relatively |
|
* brief and "quiet", spinning does not much impact performance |
|
* of less-contended queues. During spins threads check their |
|
* interrupt status and generate a thread-local random number |
|
* to decide to occasionally perform a Thread.yield. While |
|
* yield has underdefined specs, we assume that it might help, |
|
* and will not hurt, in limiting impact of spinning on busy |
|
* systems. We also use smaller (1/2) spins for nodes that are |
|
* not known to be front but whose predecessors have not |
|
* blocked -- these "chained" spins avoid artifacts of |
|
* front-of-queue rules which otherwise lead to alternating |
|
* nodes spinning vs blocking. Further, front threads that |
|
* represent phase changes (from data to request node or vice |
|
* versa) compared to their predecessors receive additional |
|
* chained spins, reflecting longer paths typically required to |
|
* unblock threads during phase changes. |
|
* |
|
* |
|
* ** Unlinking removed interior nodes ** |
|
* |
|
* In addition to minimizing garbage retention via self-linking |
|
* described above, we also unlink removed interior nodes. These |
|
* may arise due to timed out or interrupted waits, or calls to |
|
* remove(x) or Iterator.remove. Normally, given a node that was |
|
* at one time known to be the predecessor of some node s that is |
|
* to be removed, we can unsplice s by CASing the next field of |
|
* its predecessor if it still points to s (otherwise s must |
|
* already have been removed or is now offlist). But there are two |
|
* situations in which we cannot guarantee to make node s |
|
* unreachable in this way: (1) If s is the trailing node of list |
|
* (i.e., with null next), then it is pinned as the target node |
|
* for appends, so can only be removed later after other nodes are |
|
* appended. (2) We cannot necessarily unlink s given a |
|
* predecessor node that is matched (including the case of being |
|
* cancelled): the predecessor may already be unspliced, in which |
|
* case some previous reachable node may still point to s. |
|
* (For further explanation see Herlihy & Shavit "The Art of |
|
* Multiprocessor Programming" chapter 9). Although, in both |
|
* cases, we can rule out the need for further action if either s |
|
* or its predecessor are (or can be made to be) at, or fall off |
|
* from, the head of list. |
|
* |
|
* Without taking these into account, it would be possible for an |
|
* unbounded number of supposedly removed nodes to remain |
|
* reachable. Situations leading to such buildup are uncommon but |
|
* can occur in practice; for example when a series of short timed |
|
* calls to poll repeatedly time out but never otherwise fall off |
|
* the list because of an untimed call to take at the front of the |
|
* queue. |
|
* |
|
* When these cases arise, rather than always retraversing the |
|
* entire list to find an actual predecessor to unlink (which |
|
* won't help for case (1) anyway), we record a conservative |
|
* estimate of possible unsplice failures (in "sweepVotes"). |
|
* We trigger a full sweep when the estimate exceeds a threshold |
|
* ("SWEEP_THRESHOLD") indicating the maximum number of estimated |
|
* removal failures to tolerate before sweeping through, unlinking |
|
* cancelled nodes that were not unlinked upon initial removal. |
|
* We perform sweeps by the thread hitting threshold (rather than |
|
* background threads or by spreading work to other threads) |
|
* because in the main contexts in which removal occurs, the |
|
* caller is already timed-out, cancelled, or performing a |
|
* potentially O(n) operation (e.g. remove(x)), none of which are |
|
* time-critical enough to warrant the overhead that alternatives |
|
* would impose on other threads. |
|
* |
|
* Because the sweepVotes estimate is conservative, and because |
|
* nodes become unlinked "naturally" as they fall off the head of |
|
* the queue, and because we allow votes to accumulate even while |
|
* sweeps are in progress, there are typically significantly fewer |
|
* such nodes than estimated. Choice of a threshold value |
|
* balances the likelihood of wasted effort and contention, versus |
|
* providing a worst-case bound on retention of interior nodes in |
|
* quiescent queues. The value defined below was chosen |
|
* empirically to balance these under various timeout scenarios. |
|
* |
|
* Note that we cannot self-link unlinked interior nodes during |
|
* sweeps. However, the associated garbage chains terminate when |
|
* some successor ultimately falls off the head of the list and is |
|
* self-linked. |
|
*/ |
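    /*
     * Illustrative sketch (not part of the implementation): the
     * matching step described above amounts to a single CAS on the
     * item field. For a consumer that has located an unmatched data
     * node n, matching looks roughly like:
     *
     *   Object item = n.item;
     *   if (item != null && item != n && n.casItem(item, null)) {
     *       LockSupport.unpark(n.waiter); // wake the producer, if parked
     *       // this thread now owns item
     *   }
     *
     * and dually for request nodes (CAS the item from null to a value).
     * See method xfer below for the real version, which also maintains
     * the head/tail slack invariants.
     */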
|
|
|
|
|
    /** True if on multiprocessor */
    private static final boolean MP =
|
Runtime.getRuntime().availableProcessors() > 1; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * The number of times to spin (with randomly interspersed calls
     * to Thread.yield) on multiprocessor before blocking when a node
     * is apparently the first waiter in the queue.  See above for
     * explanation. Must be a power of two. The value is empirically
     * derived -- it works well across a variety of processors,
     * numbers of CPUs, and OSes.
     */
|
private static final int FRONT_SPINS = 1 << 7; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * The number of times to spin before blocking when a node is
     * preceded by another node that is apparently spinning.  Also
     * serves as an increment to FRONT_SPINS on phase changes, and as
     * base average frequency for yielding during spins. Must be a
     * power of two.
     */
|
private static final int CHAINED_SPINS = FRONT_SPINS >>> 1; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * The maximum number of estimated removal failures (sweepVotes)
     * to tolerate before sweeping through the queue unlinking
     * cancelled nodes that were not unlinked upon initial
     * removal. See above for explanation. The value must be at least
     * two to avoid useless sweeps when removing trailing nodes.
     */
|
static final int SWEEP_THRESHOLD = 32; |
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Queue nodes. Uses Object, not E, for items to allow forgetting
     * them after use.  Relies heavily on Unsafe mechanics to minimize
     * unnecessary ordering constraints: Writes that are intrinsically
     * ordered wrt other accesses or CASes use simple relaxed forms.
     */
|
static final class Node { |
|
final boolean isData; |
|
volatile Object item; |
|
volatile Node next; |
|
volatile Thread waiter; |
|
|
|
|
|
        // CAS methods for fields
        final boolean casNext(Node cmp, Node val) {
|
return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val); |
|
} |
|
|
|
final boolean casItem(Object cmp, Object val) { |
|
|
|
return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val); |
|
} |
|
|
|
|
|
|
|
|
|
        /**
         * Constructs a new node.  Uses relaxed write because item can
         * only be seen after publication via casNext.
         */
|
Node(Object item, boolean isData) { |
|
UNSAFE.putObject(this, itemOffset, item); |
|
this.isData = isData; |
|
} |
|
|
|
|
|
|
|
|
|
        /**
         * Links node to itself to avoid garbage retention.  Called
         * only after CASing head field, so uses relaxed write.
         */
|
final void forgetNext() { |
|
UNSAFE.putObject(this, nextOffset, this); |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
        /**
         * Sets item to self and waiter to null, to avoid garbage
         * retention after matching or cancelling. Uses relaxed writes
         * because order is already constrained in the only calling
         * contexts: item is forgotten only after volatile/atomic
         * mechanics that extract items.  Similarly, clearing waiter
         * follows either CAS or return from park (if ever parked;
         * else we don't care).
         */
|
final void forgetContents() { |
|
UNSAFE.putObject(this, itemOffset, this); |
|
UNSAFE.putObject(this, waiterOffset, null); |
|
} |
|
|
|
|
|
|
|
|
|
        /**
         * Returns true if this node has been matched, including the
         * case of artificial matches due to cancellation.
         */
|
final boolean isMatched() { |
|
Object x = item; |
|
return (x == this) || ((x == null) == isData); |
|
} |
|
|
|
|
|
|
|
        /**
         * Returns true if this is an unmatched request node.
         */
|
final boolean isUnmatchedRequest() { |
|
return !isData && item == null; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
        /**
         * Returns true if a node with the given mode cannot be
         * appended to this node because this node is unmatched and
         * has opposite data mode.
         */
|
final boolean cannotPrecede(boolean haveData) { |
|
boolean d = isData; |
|
Object x; |
|
return d != haveData && (x = item) != this && (x != null) == d; |
|
} |
|
|
|
|
|
|
|
        /**
         * Tries to artificially match a data node -- used by remove.
         */
|
final boolean tryMatchData() { |
|
|
|
Object x = item; |
|
if (x != null && x != this && casItem(x, null)) { |
|
LockSupport.unpark(waiter); |
|
return true; |
|
} |
|
return false; |
|
} |
|
|
|
private static final long serialVersionUID = -3375979862319811754L; |
|
|
|
|
|
private static final sun.misc.Unsafe UNSAFE; |
|
private static final long itemOffset; |
|
private static final long nextOffset; |
|
private static final long waiterOffset; |
|
static { |
|
try { |
|
UNSAFE = sun.misc.Unsafe.getUnsafe(); |
|
Class<?> k = Node.class; |
|
itemOffset = UNSAFE.objectFieldOffset |
|
(k.getDeclaredField("item")); |
|
nextOffset = UNSAFE.objectFieldOffset |
|
(k.getDeclaredField("next")); |
|
waiterOffset = UNSAFE.objectFieldOffset |
|
(k.getDeclaredField("waiter")); |
|
} catch (Exception e) { |
|
throw new Error(e); |
|
} |
|
} |
|
} |
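    /*
     * Informal summary (not from the original source) of Node
     * item-field transitions, as implemented above and in
     * awaitMatch/unsplice below:
     *
     *   data node:    e    --(match)-->  null  --(forgetContents)--> this
     *   request node: null --(match)-->  e     --(forgetContents)--> this
     *   cancelled:    item CASed to this (self) by awaitMatch
     *
     * A self-linked item thus always reads as "matched" per isMatched().
     */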
|
|
|
|
|
    /** head of the queue; null until first enqueue */
    transient volatile Node head;
|
|
|
|
|
    /** tail of the queue; null until first append */
    private transient volatile Node tail;
|
|
|
|
|
    /** The number of apparent failures to unsplice removed nodes */
    private transient volatile int sweepVotes;
|
|
|
|
|
    // CAS methods for fields
    private boolean casTail(Node cmp, Node val) {
|
return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val); |
|
} |
|
|
|
private boolean casHead(Node cmp, Node val) { |
|
return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val); |
|
} |
|
|
|
private boolean casSweepVotes(int cmp, int val) { |
|
return UNSAFE.compareAndSwapInt(this, sweepVotesOffset, cmp, val); |
|
} |
|
|
|
/* |
|
* Possible values for "how" argument in xfer method. |
|
*/ |
|
private static final int NOW = 0; |
|
private static final int ASYNC = 1; |
|
private static final int SYNC = 2; |
|
private static final int TIMED = 3; |
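    // For orientation, the public methods below map onto these modes as:
    //   NOW   - untimed poll, tryTransfer
    //   ASYNC - add, offer, put, timed offer (the queue is unbounded)
    //   SYNC  - take, transfer
    //   TIMED - timed poll, timed tryTransfer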
|
|
|
@SuppressWarnings("unchecked") |
|
static <E> E cast(Object item) { |
|
|
|
return (E) item; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Implements all queuing methods. See above for explanation.
     *
     * @param e the item or null for take
     * @param haveData true if this is a put, else a take
     * @param how NOW, ASYNC, SYNC, or TIMED
     * @param nanos timeout in nanosecs, used only if mode is TIMED
     * @return an item if matched, else e
     * @throws NullPointerException if haveData mode but e is null
     */
|
private E xfer(E e, boolean haveData, int how, long nanos) { |
|
if (haveData && (e == null)) |
|
throw new NullPointerException(); |
|
Node s = null; |
|
|
|
retry: |
|
for (;;) { // restart on append race |
|
|
|
            for (Node h = head, p = h; p != null;) { // find & match first node
                boolean isData = p.isData;
                Object item = p.item;
                if (item != p && (item != null) == isData) { // unmatched
                    if (isData == haveData)   // can't match
                        break;
                    if (p.casItem(item, e)) { // match
                        for (Node q = p; q != h;) {
                            Node n = q.next;  // update by 2 unless singleton
                            if (head == h && casHead(h, n == null ? q : n)) {
                                h.forgetNext();
                                break;
                            }                 // advance and retry
                            if ((h = head) == null ||
                                (q = h.next) == null || !q.isMatched())
                                break;        // unless slack < 2
                        }
                        LockSupport.unpark(p.waiter);
                        return LinkedTransferQueue.<E>cast(item);
                    }
                }
                Node n = p.next;
                p = (p != n) ? n : (h = head); // Use head if p offlist
            }
|
|
|
            if (how != NOW) {                 // No matches available
                if (s == null)
                    s = new Node(e, haveData);
                Node pred = tryAppend(s, haveData);
                if (pred == null)
                    continue retry;           // lost race vs opposite mode
                if (how != ASYNC)
                    return awaitMatch(s, pred, e, (how == TIMED), nanos);
            }
            return e; // not waiting
|
} |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Tries to append node s as tail.
     *
     * @param s the node to append
     * @param haveData true if appending in data mode
     * @return null on failure due to losing race with append in
     * different mode, else s's predecessor, or s itself if no
     * predecessor
     */
|
private Node tryAppend(Node s, boolean haveData) { |
|
for (Node t = tail, p = t;;) { // move p to last node and append |
|
            Node n, u;                        // temps for reads of next & tail
            if (p == null && (p = head) == null) {
                if (casHead(null, s))
                    return s;                 // initialize
            }
            else if (p.cannotPrecede(haveData))
                return null;                  // lost race vs opposite mode
            else if ((n = p.next) != null)    // not last; keep traversing
                p = p != t && t != (u = tail) ? (t = u) : // stale tail
                    (p != n) ? n : null;      // restart if off list
            else if (!p.casNext(null, s))
                p = p.next;                   // re-read on CAS failure
            else {
                if (p != t) {                 // update if slack now >= 2
                    while ((tail != t || !casTail(t, s)) &&
                           (t = tail) != null &&
                           (s = t.next) != null && // advance and retry
                           (s = s.next) != null && s != t);
                }
                return p;
            }
|
} |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Spins/yields/blocks until node s is matched or caller gives up.
     *
     * @param s the waiting node
     * @param pred the predecessor of s, or s itself if it has no
     * predecessor, or null if unknown (the null case does not occur
     * in any current calls but may in possible future extensions)
     * @param e the comparison value for checking match
     * @param timed if true, wait only until timeout elapses
     * @param nanos timeout in nanosecs, used only if timed is true
     * @return matched item, or e if unmatched on interrupt or timeout
     */
|
private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) { |
|
final long deadline = timed ? System.nanoTime() + nanos : 0L; |
|
Thread w = Thread.currentThread(); |
|
        int spins = -1; // initialized after first item and cancel checks
        ThreadLocalRandom randomYields = null; // bound if needed
|
|
|
        for (;;) {
            Object item = s.item;
            if (item != e) {                  // matched
                // assert item != s;
                s.forgetContents();           // avoid garbage
                return LinkedTransferQueue.<E>cast(item);
            }
            if ((w.isInterrupted() || (timed && nanos <= 0)) &&
                    s.casItem(e, s)) {        // cancel
                unsplice(pred, s);
                return e;
            }

            if (spins < 0) {                  // establish spins at/near front
                if ((spins = spinsFor(pred, s.isData)) > 0)
                    randomYields = ThreadLocalRandom.current();
            }
            else if (spins > 0) {             // spin
                --spins;
                if (randomYields.nextInt(CHAINED_SPINS) == 0)
                    Thread.yield();           // occasionally yield
            }
            else if (s.waiter == null) {
                s.waiter = w;                 // request unpark then recheck
            }
            else if (timed) {
                nanos = deadline - System.nanoTime();
                if (nanos > 0L)
                    LockSupport.parkNanos(this, nanos);
            }
            else {
                LockSupport.park(this);
            }
|
} |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * Returns spin/yield value for a node with given predecessor and
     * data mode. See above for explanation.
     */
|
private static int spinsFor(Node pred, boolean haveData) { |
|
if (MP && pred != null) { |
|
            if (pred.isData != haveData)      // phase change
                return FRONT_SPINS + CHAINED_SPINS;
            if (pred.isMatched())             // probably at front
                return FRONT_SPINS;
            if (pred.waiter == null)          // pred apparently spinning
                return CHAINED_SPINS;
|
} |
|
return 0; |
|
} |
|
|
|
/* -------------- Traversal methods -------------- */ |
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns the successor of p, or the head node if p.next has been
     * linked to self, which will only be true if traversing with a
     * stale pointer that is now off the list.
     */
|
final Node succ(Node p) { |
|
Node next = p.next; |
|
return (p == next) ? head : next; |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * Returns the first unmatched node of the given mode, or null if
     * none.  Used by methods isEmpty, hasWaitingConsumer.
     */
|
private Node firstOfMode(boolean isData) { |
|
for (Node p = head; p != null; p = succ(p)) { |
|
if (!p.isMatched()) |
|
return (p.isData == isData) ? p : null; |
|
} |
|
return null; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns the first unmatched data node, or null if none.  Used
     * by the spliterator; callers must recheck if the returned node's
     * item field is null or self-linked before using.
     */
|
final Node firstDataNode() { |
|
for (Node p = head; p != null;) { |
|
Object item = p.item; |
|
if (p.isData) { |
|
if (item != null && item != p) |
|
return p; |
|
} |
|
else if (item == null) |
|
break; |
|
if (p == (p = p.next)) |
|
p = head; |
|
} |
|
return null; |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * Returns the item in the first unmatched node with isData; or
     * null if none.  Used by peek.
     */
|
private E firstDataItem() { |
|
for (Node p = head; p != null; p = succ(p)) { |
|
Object item = p.item; |
|
if (p.isData) { |
|
if (item != null && item != p) |
|
return LinkedTransferQueue.<E>cast(item); |
|
} |
|
else if (item == null) |
|
return null; |
|
} |
|
return null; |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * Traverses and counts unmatched nodes of the given mode.
     * Used by methods size and getWaitingConsumerCount.
     */
|
private int countOfMode(boolean data) { |
|
int count = 0; |
|
for (Node p = head; p != null; ) { |
|
if (!p.isMatched()) { |
|
if (p.isData != data) |
|
return 0; |
|
if (++count == Integer.MAX_VALUE) |
|
break; |
|
} |
|
Node n = p.next; |
|
if (n != p) |
|
p = n; |
|
else { |
|
count = 0; |
|
p = head; |
|
} |
|
} |
|
return count; |
|
} |
|
|
|
final class Itr implements Iterator<E> { |
|
        private Node nextNode;   // next node to return item for
        private E nextItem;      // the corresponding item
        private Node lastRet;    // last returned node, to support remove
        private Node lastPred;   // predecessor to unlink lastRet
|
|
|
|
|
|
|
        /**
         * Moves to next node after prev, or first node if prev null.
         */
|
private void advance(Node prev) { |
|
/* |
|
* To track and avoid buildup of deleted nodes in the face |
|
* of calls to both Queue.remove and Itr.remove, we must |
|
* include variants of unsplice and sweep upon each |
|
* advance: Upon Itr.remove, we may need to catch up links |
|
* from lastPred, and upon other removes, we might need to |
|
* skip ahead from stale nodes and unsplice deleted ones |
|
* found while advancing. |
|
*/ |
|
|
|
            Node r, b; // reset lastPred upon possible deletion of lastRet
            if ((r = lastRet) != null && !r.isMatched())
                lastPred = r;    // next lastPred is old lastRet
            else if ((b = lastPred) == null || b.isMatched())
                lastPred = null; // at start of list
            else {
                Node s, n;       // help with removal of lastPred.next
                while ((s = b.next) != null &&
                       s != b && s.isMatched() &&
                       (n = s.next) != null && n != s)
                    b.casNext(s, n);
|
} |
|
|
|
this.lastRet = prev; |
|
|
|
for (Node p = prev, s, n;;) { |
|
s = (p == null) ? head : p.next; |
|
if (s == null) |
|
break; |
|
else if (s == p) { |
|
p = null; |
|
continue; |
|
} |
|
Object item = s.item; |
|
if (s.isData) { |
|
if (item != null && item != s) { |
|
nextItem = LinkedTransferQueue.<E>cast(item); |
|
nextNode = s; |
|
return; |
|
} |
|
} |
|
else if (item == null) |
|
break; |
|
|
|
if (p == null) |
|
p = s; |
|
else if ((n = s.next) == null) |
|
break; |
|
else if (s == n) |
|
p = null; |
|
else |
|
p.casNext(s, n); |
|
} |
|
nextNode = null; |
|
nextItem = null; |
|
} |
|
|
|
Itr() { |
|
advance(null); |
|
} |
|
|
|
public final boolean hasNext() { |
|
return nextNode != null; |
|
} |
|
|
|
public final E next() { |
|
Node p = nextNode; |
|
if (p == null) throw new NoSuchElementException(); |
|
E e = nextItem; |
|
advance(p); |
|
return e; |
|
} |
|
|
|
public final void remove() { |
|
final Node lastRet = this.lastRet; |
|
if (lastRet == null) |
|
throw new IllegalStateException(); |
|
this.lastRet = null; |
|
if (lastRet.tryMatchData()) |
|
unsplice(lastPred, lastRet); |
|
} |
|
} |
|
|
|
|
|
    /** A customized variant of Spliterators.IteratorSpliterator */
    static final class LTQSpliterator<E> implements Spliterator<E> {
|
static final int MAX_BATCH = 1 << 25; |
|
final LinkedTransferQueue<E> queue; |
|
Node current; |
|
int batch; |
|
boolean exhausted; |
|
LTQSpliterator(LinkedTransferQueue<E> queue) { |
|
this.queue = queue; |
|
} |
|
|
|
public Spliterator<E> trySplit() { |
|
Node p; |
|
final LinkedTransferQueue<E> q = this.queue; |
|
int b = batch; |
|
int n = (b <= 0) ? 1 : (b >= MAX_BATCH) ? MAX_BATCH : b + 1; |
|
if (!exhausted && |
|
((p = current) != null || (p = q.firstDataNode()) != null) && |
|
p.next != null) { |
|
Object[] a = new Object[n]; |
|
int i = 0; |
|
do { |
|
Object e = p.item; |
|
if (e != p && (a[i] = e) != null) |
|
++i; |
|
if (p == (p = p.next)) |
|
p = q.firstDataNode(); |
|
} while (p != null && i < n && p.isData); |
|
if ((current = p) == null) |
|
exhausted = true; |
|
if (i > 0) { |
|
batch = i; |
|
return Spliterators.spliterator |
|
(a, 0, i, Spliterator.ORDERED | Spliterator.NONNULL | |
|
Spliterator.CONCURRENT); |
|
} |
|
} |
|
return null; |
|
} |
|
|
|
@SuppressWarnings("unchecked") |
|
public void forEachRemaining(Consumer<? super E> action) { |
|
Node p; |
|
if (action == null) throw new NullPointerException(); |
|
final LinkedTransferQueue<E> q = this.queue; |
|
if (!exhausted && |
|
((p = current) != null || (p = q.firstDataNode()) != null)) { |
|
exhausted = true; |
|
do { |
|
Object e = p.item; |
|
if (e != null && e != p) |
|
action.accept((E)e); |
|
if (p == (p = p.next)) |
|
p = q.firstDataNode(); |
|
} while (p != null && p.isData); |
|
} |
|
} |
|
|
|
@SuppressWarnings("unchecked") |
|
public boolean tryAdvance(Consumer<? super E> action) { |
|
Node p; |
|
if (action == null) throw new NullPointerException(); |
|
final LinkedTransferQueue<E> q = this.queue; |
|
if (!exhausted && |
|
((p = current) != null || (p = q.firstDataNode()) != null)) { |
|
Object e; |
|
do { |
|
if ((e = p.item) == p) |
|
e = null; |
|
if (p == (p = p.next)) |
|
p = q.firstDataNode(); |
|
} while (e == null && p != null && p.isData); |
|
if ((current = p) == null) |
|
exhausted = true; |
|
if (e != null) { |
|
action.accept((E)e); |
|
return true; |
|
} |
|
} |
|
return false; |
|
} |
|
|
|
public long estimateSize() { return Long.MAX_VALUE; } |
|
|
|
public int characteristics() { |
|
return Spliterator.ORDERED | Spliterator.NONNULL | |
|
Spliterator.CONCURRENT; |
|
} |
|
} |
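    // Note: this spliterator is what backs the stream() and
    // parallelStream() views of the queue (via the spliterator()
    // method below); traversal is weakly consistent, like the
    // iterator's.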
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns a {@link Spliterator} over the elements in this queue.
     *
     * <p>The returned spliterator is
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The {@code Spliterator} reports {@link Spliterator#CONCURRENT},
     * {@link Spliterator#ORDERED}, and {@link Spliterator#NONNULL}.
     *
     * @implNote
     * The {@code Spliterator} implements {@code trySplit} to permit
     * limited parallelism.
     *
     * @return a {@code Spliterator} over the elements in this queue
     * @since 1.8
     */
|
public Spliterator<E> spliterator() { |
|
return new LTQSpliterator<E>(this); |
|
} |
|
|
|
/* -------------- Removal methods -------------- */ |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Unsplices (now or later) the given deleted/cancelled node with
     * the given predecessor.
     *
     * @param pred a node that was at one time known to be the
     * predecessor of s, or null or s itself if s is/was at head
     * @param s the node to be unspliced
     */
|
final void unsplice(Node pred, Node s) { |
|
        s.forgetContents(); // forget about s
|
|
|
|
|
|
|
|
|
|
|
|
|
        /*
         * See above for rationale. Briefly: if pred still points to
         * s, try to unlink s.  If s cannot be unlinked, because it is
         * trailing node or pred might be unlinked, and neither pred
         * nor s are head or offlist, add to sweepVotes, and if enough
         * votes have accumulated, sweep.
         */
|
if (pred != null && pred != s && pred.next == s) { |
|
Node n = s.next; |
|
if (n == null || |
|
(n != s && pred.casNext(s, n) && pred.isMatched())) { |
|
                for (;;) {               // check if at, or could be, head
                    Node h = head;
                    if (h == pred || h == s || h == null)
                        return;          // at head or list empty
                    if (!h.isMatched())
                        break;
                    Node hn = h.next;
                    if (hn == null)
                        return;          // now empty
                    if (hn != h && casHead(h, hn))
                        h.forgetNext();  // advance head
                }
                if (pred.next != pred && s.next != s) { // recheck if offlist
                    for (;;) {           // sweep now if enough votes
|
int v = sweepVotes; |
|
if (v < SWEEP_THRESHOLD) { |
|
if (casSweepVotes(v, v + 1)) |
|
break; |
|
} |
|
else if (casSweepVotes(v, 0)) { |
|
sweep(); |
|
break; |
|
} |
|
} |
|
} |
|
} |
|
} |
|
} |
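    // The sweepVotes CAS loop above implements a simple saturating
    // counter: each failed or deferred unsplice adds one vote, and the
    // thread whose vote reaches SWEEP_THRESHOLD resets the count and
    // performs the sweep itself, so no background thread is needed.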
|
|
|
|
|
|
|
|
|
    /**
     * Unlinks matched (typically cancelled) nodes encountered in a
     * traversal from head.
     */
|
private void sweep() { |
|
for (Node p = head, s, n; p != null && (s = p.next) != null; ) { |
|
            if (!s.isMatched())
                // Unmatched nodes are never self-linked
                p = s;
            else if ((n = s.next) == null) // trailing node is pinned
                break;
            else if (s == n)
                // stale
                // No need to also check for p == s, since that implies s == n
                p = head;
|
else |
|
p.casNext(s, n); |
|
} |
|
} |
|
|
|
|
|
|
|
    /**
     * Main implementation of remove(Object)
     */
|
private boolean findAndRemove(Object e) { |
|
if (e != null) { |
|
for (Node pred = null, p = head; p != null; ) { |
|
Object item = p.item; |
|
if (p.isData) { |
|
if (item != null && item != p && e.equals(item) && |
|
p.tryMatchData()) { |
|
unsplice(pred, p); |
|
return true; |
|
} |
|
} |
|
else if (item == null) |
|
break; |
|
pred = p; |
|
if ((p = p.next) == pred) { |
|
pred = null; |
|
p = head; |
|
} |
|
} |
|
} |
|
return false; |
|
} |
|
|
|
|
|
|
|
    /**
     * Creates an initially empty {@code LinkedTransferQueue}.
     */
|
public LinkedTransferQueue() { |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Creates a {@code LinkedTransferQueue}
     * initially containing the elements of the given collection,
     * added in traversal order of the collection's iterator.
     *
     * @param c the collection of elements to initially contain
     * @throws NullPointerException if the specified collection or any
     *         of its elements are null
     */
|
public LinkedTransferQueue(Collection<? extends E> c) { |
|
this(); |
|
addAll(c); |
|
} |
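    /*
     * Usage sketch (illustrative only; not part of the class):
     *
     *   LinkedTransferQueue<String> q = new LinkedTransferQueue<>();
     *   // producer thread: transfer blocks until a consumer receives "task"
     *   new Thread(() -> {
     *       try { q.transfer("task"); }
     *       catch (InterruptedException ie) { Thread.currentThread().interrupt(); }
     *   }).start();
     *   // consumer thread:
     *   String task = q.take();
     *
     * By contrast, put/offer/add below enqueue asynchronously and
     * never block, because the queue is unbounded.
     */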
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never block.
     *
     * @throws NullPointerException if the specified element is null
     */
|
public void put(E e) { |
|
xfer(e, true, ASYNC, 0); |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never block or
     * return {@code false}.
     *
     * @return {@code true} (as specified by
     *  {@link java.util.concurrent.BlockingQueue#offer(Object,long,TimeUnit)
     *  BlockingQueue.offer})
     * @throws NullPointerException if the specified element is null
     */
|
public boolean offer(E e, long timeout, TimeUnit unit) { |
|
xfer(e, true, ASYNC, 0); |
|
return true; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never return
     * {@code false}.
     *
     * @return {@code true} (as specified by {@link Queue#offer})
     * @throws NullPointerException if the specified element is null
     */
|
public boolean offer(E e) { |
|
xfer(e, true, ASYNC, 0); |
|
return true; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never throw
     * {@link IllegalStateException} or return {@code false}.
     *
     * @return {@code true} (as specified by {@link Collection#add})
     * @throws NullPointerException if the specified element is null
     */
|
public boolean add(E e) { |
|
xfer(e, true, ASYNC, 0); |
|
return true; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Transfers the element to a waiting consumer immediately, if possible.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * otherwise returning {@code false} without enqueuing the element.
     *
     * @throws NullPointerException if the specified element is null
     */
|
public boolean tryTransfer(E e) { |
|
return xfer(e, true, NOW, 0) == null; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Transfers the element to a consumer, waiting if necessary to do so.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * else inserts the specified element at the tail of this queue
     * and waits until the element is received by a consumer.
     *
     * @throws NullPointerException if the specified element is null
     */
|
public void transfer(E e) throws InterruptedException { |
|
if (xfer(e, true, SYNC, 0) != null) { |
|
Thread.interrupted(); |
|
throw new InterruptedException(); |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Transfers the element to a consumer if it is possible to do so
     * before the timeout elapses.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * else inserts the specified element at the tail of this queue
     * and waits until the element is received by a consumer,
     * returning {@code false} if the specified wait time elapses
     * before the element can be transferred.
     *
     * @throws NullPointerException if the specified element is null
     */
|
public boolean tryTransfer(E e, long timeout, TimeUnit unit) |
|
throws InterruptedException { |
|
if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null) |
|
return true; |
|
if (!Thread.interrupted()) |
|
return false; |
|
throw new InterruptedException(); |
|
} |
|
|
|
public E take() throws InterruptedException { |
|
E e = xfer(null, false, SYNC, 0); |
|
if (e != null) |
|
return e; |
|
Thread.interrupted(); |
|
throw new InterruptedException(); |
|
} |
|
|
|
public E poll(long timeout, TimeUnit unit) throws InterruptedException { |
|
E e = xfer(null, false, TIMED, unit.toNanos(timeout)); |
|
if (e != null || !Thread.interrupted()) |
|
return e; |
|
throw new InterruptedException(); |
|
} |
|
|
|
public E poll() { |
|
return xfer(null, false, NOW, 0); |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * @throws NullPointerException     {@inheritDoc}
     * @throws IllegalArgumentException {@inheritDoc}
     */
|
public int drainTo(Collection<? super E> c) { |
|
if (c == null) |
|
throw new NullPointerException(); |
|
if (c == this) |
|
throw new IllegalArgumentException(); |
|
int n = 0; |
|
for (E e; (e = poll()) != null;) { |
|
c.add(e); |
|
++n; |
|
} |
|
return n; |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * @throws NullPointerException     {@inheritDoc}
     * @throws IllegalArgumentException {@inheritDoc}
     */
|
public int drainTo(Collection<? super E> c, int maxElements) { |
|
if (c == null) |
|
throw new NullPointerException(); |
|
if (c == this) |
|
throw new IllegalArgumentException(); |
|
int n = 0; |
|
for (E e; n < maxElements && (e = poll()) != null;) { |
|
c.add(e); |
|
++n; |
|
} |
|
return n; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns an iterator over the elements in this queue in proper
     * sequence.  The elements will be returned in order from first
     * (head) to last (tail).
     *
     * <p>The returned iterator is
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * @return an iterator over the elements in this queue in proper sequence
     */
|
public Iterator<E> iterator() { |
|
return new Itr(); |
|
} |
|
|
|
public E peek() { |
|
return firstDataItem(); |
|
} |
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns {@code true} if this queue contains no elements.
     *
     * @return {@code true} if this queue contains no elements
     */
|
public boolean isEmpty() { |
|
for (Node p = head; p != null; p = succ(p)) { |
|
if (!p.isMatched()) |
|
return !p.isData; |
|
} |
|
return true; |
|
} |
|
|
|
public boolean hasWaitingConsumer() { |
|
return firstOfMode(false) != null; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns the number of elements in this queue.  If this queue
     * contains more than {@code Integer.MAX_VALUE} elements, returns
     * {@code Integer.MAX_VALUE}.
     *
     * <p>Beware that, unlike in most collections, this method is
     * <em>NOT</em> a constant-time operation. Because of the
     * asynchronous nature of these queues, determining the current
     * number of elements requires an O(n) traversal.
     *
     * @return the number of elements in this queue
     */
|
public int size() { |
|
return countOfMode(true); |
|
} |
|
|
|
public int getWaitingConsumerCount() { |
|
return countOfMode(false); |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Removes a single instance of the specified element from this queue,
     * if it is present.  More formally, removes an element {@code e} such
     * that {@code o.equals(e)}, if this queue contains one or more such
     * elements.
     * Returns {@code true} if this queue contained the specified element
     * (or equivalently, if this queue changed as a result of the call).
     *
     * @param o element to be removed from this queue, if present
     * @return {@code true} if this queue changed as a result of the call
     */
|
public boolean remove(Object o) { |
|
return findAndRemove(o); |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns {@code true} if this queue contains the specified element.
     * More formally, returns {@code true} if and only if this queue
     * contains at least one element {@code e} such that
     * {@code o.equals(e)}.
     *
     * @param o object to be checked for containment in this queue
     * @return {@code true} if this queue contains the specified element
     */
|
public boolean contains(Object o) { |
|
if (o == null) return false; |
|
for (Node p = head; p != null; p = succ(p)) { |
|
Object item = p.item; |
|
if (p.isData) { |
|
if (item != null && item != p && o.equals(item)) |
|
return true; |
|
} |
|
else if (item == null) |
|
break; |
|
} |
|
return false; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Always returns {@code Integer.MAX_VALUE} because a
     * {@code LinkedTransferQueue} is not capacity constrained.
     *
     * @return {@code Integer.MAX_VALUE} (as specified by
     *         {@link java.util.concurrent.BlockingQueue#remainingCapacity()
     *         BlockingQueue.remainingCapacity})
     */
|
public int remainingCapacity() { |
|
return Integer.MAX_VALUE; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Saves this queue to a stream (that is, serializes it).
     *
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     * @serialData All of the elements (each an {@code E}) in
     * the proper order, followed by a null
     */
|
private void writeObject(java.io.ObjectOutputStream s) |
|
throws java.io.IOException { |
|
s.defaultWriteObject(); |
|
for (E e : this) |
|
s.writeObject(e); |
|
|
|
        // Use trailing null as sentinel
        s.writeObject(null);
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Reconstitutes this queue from a stream (that is, deserializes it).
     *
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws java.io.IOException if an I/O error occurs
     */
|
private void readObject(java.io.ObjectInputStream s) |
|
throws java.io.IOException, ClassNotFoundException { |
|
s.defaultReadObject(); |
|
for (;;) { |
|
@SuppressWarnings("unchecked") |
|
E item = (E) s.readObject(); |
|
if (item == null) |
|
break; |
|
else |
|
offer(item); |
|
} |
|
} |
|
|
|
// Unsafe mechanics |
|
|
|
private static final sun.misc.Unsafe UNSAFE; |
|
private static final long headOffset; |
|
private static final long tailOffset; |
|
private static final long sweepVotesOffset; |
|
static { |
|
try { |
|
UNSAFE = sun.misc.Unsafe.getUnsafe(); |
|
Class<?> k = LinkedTransferQueue.class; |
|
headOffset = UNSAFE.objectFieldOffset |
|
(k.getDeclaredField("head")); |
|
tailOffset = UNSAFE.objectFieldOffset |
|
(k.getDeclaredField("tail")); |
|
sweepVotesOffset = UNSAFE.objectFieldOffset |
|
(k.getDeclaredField("sweepVotes")); |
|
} catch (Exception e) { |
|
throw new Error(e); |
|
} |
|
} |
|
} |