|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/* |
|
* This file is available under and governed by the GNU General Public |
|
* License version 2 only, as published by the Free Software Foundation. |
|
* However, the following notice accompanied the original version of this |
|
* file: |
|
* |
|
* Written by Doug Lea with assistance from members of JCP JSR-166 |
|
* Expert Group and released to the public domain, as explained at |
|
* http://creativecommons.org/publicdomain/zero/1.0/ |
|
*/ |
|
|
|
package java.util.concurrent; |
|
|
|
import java.lang.invoke.MethodHandles; |
|
import java.lang.invoke.VarHandle; |
|
import java.util.AbstractQueue; |
|
import java.util.Arrays; |
|
import java.util.Collection; |
|
import java.util.Iterator; |
|
import java.util.NoSuchElementException; |
|
import java.util.Objects; |
|
import java.util.Queue; |
|
import java.util.Spliterator; |
|
import java.util.Spliterators; |
|
import java.util.concurrent.locks.LockSupport; |
|
import java.util.function.Consumer; |
|
import java.util.function.Predicate; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * An unbounded {@link TransferQueue} based on linked nodes.
 * This queue orders elements FIFO (first-in-first-out) with respect
 * to any given producer.  Like most other concurrent collection
 * implementations, this class does not permit the use of
 * {@code null} elements.
 *
 * <p>Beware that, unlike in most collections, the {@code size} method
 * is <em>NOT</em> a constant-time operation, and bulk operations are
 * not guaranteed to be performed atomically.  Iterators and
 * spliterators are
 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
 *
 * <p>This class is a member of the Java Collections Framework.
 *
 * @since 1.7
 * @author Doug Lea
 * @param <E> the type of elements held in this queue
 */
|
public class LinkedTransferQueue<E> extends AbstractQueue<E> |
|
implements TransferQueue<E>, java.io.Serializable { |
|
private static final long serialVersionUID = -3223113410248163686L; |
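    /*
     * A minimal usage sketch (thread and element values are purely
     * illustrative): transfer() hands an element directly to a consumer
     * blocked in take().
     *
     *   LinkedTransferQueue<String> q = new LinkedTransferQueue<>();
     *   Thread consumer = new Thread(() -> {
     *       try { System.out.println(q.take()); }
     *       catch (InterruptedException ie) { Thread.currentThread().interrupt(); }
     *   });
     *   consumer.start();
     *   q.transfer("hello");   // returns only after the consumer receives it
     */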
|
|
|
/* |
|
* *** Overview of Dual Queues with Slack *** |
|
* |
|
* Dual Queues, introduced by Scherer and Scott |
|
* (http://www.cs.rochester.edu/~scott/papers/2004_DISC_dual_DS.pdf) |
|
* are (linked) queues in which nodes may represent either data or |
|
* requests. When a thread tries to enqueue a data node, but |
|
* encounters a request node, it instead "matches" and removes it; |
|
* and vice versa for enqueuing requests. Blocking Dual Queues |
|
* arrange that threads enqueuing unmatched requests block until |
|
* other threads provide the match. Dual Synchronous Queues (see |
|
* Scherer, Lea, & Scott |
|
* http://www.cs.rochester.edu/u/scott/papers/2009_Scherer_CACM_SSQ.pdf) |
|
* additionally arrange that threads enqueuing unmatched data also |
|
* block. Dual Transfer Queues support all of these modes, as |
|
* dictated by callers. |
|
* |
|
* A FIFO dual queue may be implemented using a variation of the |
|
* Michael & Scott (M&S) lock-free queue algorithm |
|
* (http://www.cs.rochester.edu/~scott/papers/1996_PODC_queues.pdf). |
|
* It maintains two pointer fields, "head", pointing to a |
|
* (matched) node that in turn points to the first actual |
|
* (unmatched) queue node (or null if empty); and "tail" that |
|
* points to the last node on the queue (or again null if |
|
* empty). For example, here is a possible queue with four data |
|
* elements: |
|
* |
|
* head tail |
|
* | | |
|
* v v |
|
* M -> U -> U -> U -> U |
|
* |
|
* The M&S queue algorithm is known to be prone to scalability and |
|
* overhead limitations when maintaining (via CAS) these head and |
|
* tail pointers. This has led to the development of |
|
* contention-reducing variants such as elimination arrays (see |
|
* Moir et al http://portal.acm.org/citation.cfm?id=1074013) and |
|
* optimistic back pointers (see Ladan-Mozes & Shavit |
|
* http://people.csail.mit.edu/edya/publications/OptimisticFIFOQueue-journal.pdf). |
|
* However, the nature of dual queues enables a simpler tactic for |
|
* improving M&S-style implementations when dual-ness is needed. |
|
* |
|
* In a dual queue, each node must atomically maintain its match |
|
* status. While there are other possible variants, we implement |
|
* this here as: for a data-mode node, matching entails CASing an |
|
* "item" field from a non-null data value to null upon match, and |
|
* vice-versa for request nodes, CASing from null to a data |
|
* value. (Note that the linearization properties of this style of |
|
* queue are easy to verify -- elements are made available by |
|
* linking, and unavailable by matching.) Compared to plain M&S |
|
* queues, this property of dual queues requires one additional |
|
* successful atomic operation per enq/deq pair. But it also |
|
* enables lower cost variants of queue maintenance mechanics. (A |
|
* variation of this idea applies even for non-dual queues that |
|
* support deletion of interior elements, such as |
|
* j.u.c.ConcurrentLinkedQueue.) |
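     *
     * As a compact illustration (restating the rule above, not adding a
     * new mechanism): with the Node representation used below, both
     * match directions reduce to a single CAS on the item field,
     *
     *   data node:    casItem(nonNullValue, null)  // taker removes the value
     *   request node: casItem(null, value)         // putter supplies the value
     *
     * performed via Node.tryMatch, whose successful CAS is the
     * linearization point of the transfer.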
|
* |
|
* Once a node is matched, its match status can never again |
|
* change. We may thus arrange that the linked list of them |
|
* contain a prefix of zero or more matched nodes, followed by a |
|
* suffix of zero or more unmatched nodes. (Note that we allow |
|
* both the prefix and suffix to be zero length, which in turn |
|
* means that we do not use a dummy header.) If we were not |
|
* concerned with either time or space efficiency, we could |
|
* correctly perform enqueue and dequeue operations by traversing |
|
* from a pointer to the initial node; CASing the item of the |
|
* first unmatched node on match and CASing the next field of the |
|
* trailing node on appends. While this would be a terrible idea |
|
* in itself, it does have the benefit of not requiring ANY atomic |
|
* updates on head/tail fields. |
|
* |
|
* We introduce here an approach that lies between the extremes of |
|
* never versus always updating queue (head and tail) pointers. |
|
* This offers a tradeoff between sometimes requiring extra |
|
* traversal steps to locate the first and/or last unmatched |
|
* nodes, versus the reduced overhead and contention of fewer |
|
* updates to queue pointers. For example, a possible snapshot of |
|
* a queue is: |
|
* |
|
* head tail |
|
* | | |
|
* v v |
|
* M -> M -> U -> U -> U -> U |
|
* |
|
* The best value for this "slack" (the targeted maximum distance |
|
* between the value of "head" and the first unmatched node, and |
|
* similarly for "tail") is an empirical matter. We have found |
|
* that using very small constants in the range of 1-3 work best |
|
* over a range of platforms. Larger values introduce increasing |
|
* costs of cache misses and risks of long traversal chains, while |
|
* smaller values increase CAS contention and overhead. |
|
* |
|
* Dual queues with slack differ from plain M&S dual queues by |
|
* virtue of only sometimes updating head or tail pointers when |
|
* matching, appending, or even traversing nodes; in order to |
|
* maintain a targeted slack. The idea of "sometimes" may be |
|
* operationalized in several ways. The simplest is to use a |
|
* per-operation counter incremented on each traversal step, and |
|
* to try (via CAS) to update the associated queue pointer |
|
* whenever the count exceeds a threshold. Another, that requires |
|
* more overhead, is to use random number generators to update |
|
* with a given probability per traversal step. |
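     *
     * For concreteness, a hedged sketch of the counter-based variant
     * (this class instead hard-wires a two-step equality check, as
     * described under "Overview of implementation" below; SLACK is a
     * hypothetical constant):
     *
     *   int steps = 0;
     *   Node h = head, p = h;
     *   while (p != null && p.isMatched()) { ++steps; p = p.next; }
     *   if (p != null && steps >= SLACK)
     *       casHead(h, p);    // amortized: at most one CAS per SLACK steps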
|
* |
|
* In any strategy along these lines, because CASes updating |
|
* fields may fail, the actual slack may exceed targeted slack. |
|
* However, they may be retried at any time to maintain targets. |
|
* Even when using very small slack values, this approach works |
|
* well for dual queues because it allows all operations up to the |
|
* point of matching or appending an item (hence potentially |
|
* allowing progress by another thread) to be read-only, thus not |
|
* introducing any further contention. As described below, we |
|
* implement this by performing slack maintenance retries only |
|
* after these points. |
|
* |
|
* As an accompaniment to such techniques, traversal overhead can |
|
* be further reduced without increasing contention of head |
|
* pointer updates: Threads may sometimes shortcut the "next" link |
|
* path from the current "head" node to be closer to the currently |
|
* known first unmatched node, and similarly for tail. Again, this |
|
* may be triggered with using thresholds or randomization. |
|
* |
|
* These ideas must be further extended to avoid unbounded amounts |
|
* of costly-to-reclaim garbage caused by the sequential "next" |
|
* links of nodes starting at old forgotten head nodes: As first |
|
* described in detail by Boehm |
|
* (http://portal.acm.org/citation.cfm?doid=503272.503282), if a GC |
|
* delays noticing that any arbitrarily old node has become |
|
* garbage, all newer dead nodes will also be unreclaimed. |
|
* (Similar issues arise in non-GC environments.) To cope with |
|
* this in our implementation, upon CASing to advance the head |
|
* pointer, we set the "next" link of the previous head to point |
|
* only to itself; thus limiting the length of chains of dead nodes. |
|
* (We also take similar care to wipe out possibly garbage |
|
* retaining values held in other Node fields.) However, doing so |
|
* adds some further complexity to traversal: If any "next" |
|
* pointer links to itself, it indicates that the current thread |
|
* has lagged behind a head-update, and so the traversal must |
|
* continue from the "head". Traversals trying to find the |
|
* current tail starting from "tail" may also encounter |
|
* self-links, in which case they also continue at "head". |
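     *
     * The traversal idiom used throughout this file to detect such
     * self-links is the slightly cryptic
     *
     *   if (p == (p = p.next)) ...   // restart from head
     *
     * which is equivalent to (with prev a local introduced only for
     * this explanation):
     *
     *   Node prev = p;
     *   p = p.next;
     *   if (prev == p) ...           // p was self-linked, i.e. off-list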
|
* |
|
* It is tempting in slack-based scheme to not even use CAS for |
|
* updates (similarly to Ladan-Mozes & Shavit). However, this |
|
* cannot be done for head updates under the above link-forgetting |
|
* mechanics because an update may leave head at a detached node. |
|
* And while direct writes are possible for tail updates, they |
|
* increase the risk of long retraversals, and hence long garbage |
|
* chains, which can be much more costly than is worthwhile |
|
* considering that the cost difference of performing a CAS vs |
|
* write is smaller when they are not triggered on each operation |
|
* (especially considering that writes and CASes equally require |
|
* additional GC bookkeeping ("write barriers") that are sometimes |
|
* more costly than the writes themselves because of contention). |
|
* |
|
* *** Overview of implementation *** |
|
* |
|
* We use a threshold-based approach to updates, with a slack |
|
* threshold of two -- that is, we update head/tail when the |
|
* current pointer appears to be two or more steps away from the |
|
* first/last node. The slack value is hard-wired: a path greater |
|
* than one is naturally implemented by checking equality of |
|
* traversal pointers except when the list has only one element, |
|
* in which case we keep slack threshold at one. Avoiding tracking |
|
* explicit counts across method calls slightly simplifies an |
|
* already-messy implementation. Using randomization would |
|
* probably work better if there were a low-quality dirt-cheap |
|
* per-thread one available, but even ThreadLocalRandom is too |
|
* heavy for these purposes. |
|
* |
|
* With such a small slack threshold value, it is not worthwhile |
|
* to augment this with path short-circuiting (i.e., unsplicing |
|
* interior nodes) except in the case of cancellation/removal (see |
|
* below). |
|
* |
|
* All enqueue/dequeue operations are handled by the single method |
|
* "xfer" with parameters indicating whether to act as some form |
|
* of offer, put, poll, take, or transfer (each possibly with |
|
* timeout). The relative complexity of using one monolithic |
|
* method outweighs the code bulk and maintenance problems of |
|
* using separate methods for each case. |
|
* |
|
* Operation consists of up to two phases. The first is implemented |
|
* in method xfer, the second in method awaitMatch. |
|
* |
|
* 1. Traverse until matching or appending (method xfer) |
|
* |
|
* Conceptually, we simply traverse all nodes starting from head. |
|
* If we encounter an unmatched node of opposite mode, we match |
|
* it and return, also updating head (by at least 2 hops) to |
|
* one past the matched node (or the node itself if it's the |
|
* pinned trailing node). Traversals also check for the |
|
* possibility of falling off-list, in which case they restart. |
|
* |
|
* If the trailing node of the list is reached, a match is not |
|
* possible. If this call was untimed poll or tryTransfer |
|
* (argument "how" is NOW), return empty-handed immediately. |
|
* Else a new node is CAS-appended. On successful append, if |
|
* this call was ASYNC (e.g. offer), an element was |
|
* successfully added to the end of the queue and we return. |
|
* |
|
* Of course, this naive traversal is O(n) when no match is |
|
* possible. We optimize the traversal by maintaining a tail |
|
* pointer, which is expected to be "near" the end of the list. |
|
* It is only safe to fast-forward to tail (in the presence of |
|
* arbitrary concurrent changes) if it is pointing to a node of |
|
* the same mode, even if it is dead (in this case no preceding |
|
* node could still be matchable by this traversal). If we |
|
* need to restart due to falling off-list, we can again |
|
* fast-forward to tail, but only if it has changed since the |
|
* last traversal (else we might loop forever). If tail cannot |
|
* be used, traversal starts at head (but in this case we |
|
* expect to be able to match near head). As with head, we |
|
* CAS-advance the tail pointer by at least two hops. |
|
* |
|
* 2. Await match or cancellation (method awaitMatch) |
|
* |
|
* Wait for another thread to match node; instead cancelling if |
|
* the current thread was interrupted or the wait timed out. On |
|
* multiprocessors, we use front-of-queue spinning: If a node |
|
* appears to be the first unmatched node in the queue, it |
|
* spins a bit before blocking. In either case, before blocking |
|
* it tries to unsplice any nodes between the current "head" |
|
* and the first unmatched node. |
|
* |
|
* Front-of-queue spinning vastly improves performance of |
|
* heavily contended queues. And so long as it is relatively |
|
* brief and "quiet", spinning does not much impact performance |
|
* of less-contended queues. During spins threads check their |
|
* interrupt status and generate a thread-local random number |
|
* to decide to occasionally perform a Thread.yield. While |
|
* yield has underdefined specs, we assume that it might help, |
|
* and will not hurt, in limiting impact of spinning on busy |
|
* systems. We also use smaller (1/2) spins for nodes that are |
|
* not known to be front but whose predecessors have not |
|
* blocked -- these "chained" spins avoid artifacts of |
|
* front-of-queue rules which otherwise lead to alternating |
|
* nodes spinning vs blocking. Further, front threads that |
|
* represent phase changes (from data to request node or vice |
|
* versa) compared to their predecessors receive additional |
|
* chained spins, reflecting longer paths typically required to |
|
* unblock threads during phase changes. |
|
* |
|
* |
|
* ** Unlinking removed interior nodes ** |
|
* |
|
* In addition to minimizing garbage retention via self-linking |
|
* described above, we also unlink removed interior nodes. These |
|
* may arise due to timed out or interrupted waits, or calls to |
|
* remove(x) or Iterator.remove. Normally, given a node that was |
|
* at one time known to be the predecessor of some node s that is |
|
* to be removed, we can unsplice s by CASing the next field of |
|
* its predecessor if it still points to s (otherwise s must |
|
* already have been removed or is now offlist). But there are two |
|
* situations in which we cannot guarantee to make node s |
|
* unreachable in this way: (1) If s is the trailing node of list |
|
* (i.e., with null next), then it is pinned as the target node |
|
* for appends, so can only be removed later after other nodes are |
|
* appended. (2) We cannot necessarily unlink s given a |
|
* predecessor node that is matched (including the case of being |
|
* cancelled): the predecessor may already be unspliced, in which |
|
* case some previous reachable node may still point to s. |
|
* (For further explanation see Herlihy & Shavit "The Art of |
|
* Multiprocessor Programming" chapter 9). Although, in both |
|
* cases, we can rule out the need for further action if either s |
|
* or its predecessor are (or can be made to be) at, or fall off |
|
* from, the head of list. |
|
* |
|
* Without taking these into account, it would be possible for an |
|
* unbounded number of supposedly removed nodes to remain reachable. |
|
* Situations leading to such buildup are uncommon but can occur |
|
* in practice; for example when a series of short timed calls to |
|
* poll repeatedly time out at the trailing node but otherwise |
|
* never fall off the list because of an untimed call to take() at |
|
* the front of the queue. |
|
* |
|
* When these cases arise, rather than always retraversing the |
|
* entire list to find an actual predecessor to unlink (which |
|
* won't help for case (1) anyway), we record a conservative |
|
* estimate of possible unsplice failures (in "sweepVotes"). |
|
* We trigger a full sweep when the estimate exceeds a threshold |
|
* ("SWEEP_THRESHOLD") indicating the maximum number of estimated |
|
* removal failures to tolerate before sweeping through, unlinking |
|
* cancelled nodes that were not unlinked upon initial removal. |
|
* We perform sweeps by the thread hitting threshold (rather than |
|
* background threads or by spreading work to other threads) |
|
* because in the main contexts in which removal occurs, the |
|
* caller is timed-out or cancelled, which are not time-critical |
|
* enough to warrant the overhead that alternatives would impose |
|
* on other threads. |
|
* |
|
* Because the sweepVotes estimate is conservative, and because |
|
* nodes become unlinked "naturally" as they fall off the head of |
|
* the queue, and because we allow votes to accumulate even while |
|
* sweeps are in progress, there are typically significantly fewer |
|
* such nodes than estimated. Choice of a threshold value |
|
* balances the likelihood of wasted effort and contention, versus |
|
* providing a worst-case bound on retention of interior nodes in |
|
* quiescent queues. The value defined below was chosen |
|
* empirically to balance these under various timeout scenarios. |
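     *
     * Concretely, the threshold test used in unsplice() below is
     *
     *   if ((incSweepVotes() & (SWEEP_THRESHOLD - 1)) == 0)
     *       sweep();
     *
     * which relies on SWEEP_THRESHOLD being a power of two, so that the
     * mask acts as a cheap "every Nth vote" check.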
|
* |
|
* Because traversal operations on the linked list of nodes are a |
|
* natural opportunity to sweep dead nodes, we generally do so, |
|
* including all the operations that might remove elements as they |
|
* traverse, such as removeIf and Iterator.remove. This largely |
|
* eliminates long chains of dead interior nodes, except from |
|
* cancelled or timed out blocking operations. |
|
* |
|
* Note that we cannot self-link unlinked interior nodes during |
|
* sweeps. However, the associated garbage chains terminate when |
|
* some successor ultimately falls off the head of the list and is |
|
* self-linked. |
|
*/ |
|
|
|
|
|
private static final boolean MP = |
|
Runtime.getRuntime().availableProcessors() > 1; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * The number of times to spin (with randomly interspersed calls
     * to Thread.yield) on multiprocessor before blocking when a node
     * is apparently the first waiter in the queue.  See above for
     * explanation.  Must be a power of two.  The value is empirically
     * derived.
     */
|
private static final int FRONT_SPINS = 1 << 7; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * The number of times to spin before blocking when a node is
     * preceded by another node that is apparently spinning.  Also
     * serves as an increment to FRONT_SPINS on phase changes, and as
     * base average frequency for yielding during spins.  Must be a
     * power of two.
     */
|
private static final int CHAINED_SPINS = FRONT_SPINS >>> 1; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * The maximum number of estimated removal failures (sweepVotes)
     * to tolerate before sweeping through the queue unlinking
     * cancelled nodes that were not unlinked upon initial removal.
     * See above for explanation.  The value must be at least two to
     * avoid useless sweeps when removing trailing nodes.
     */
|
static final int SWEEP_THRESHOLD = 32; |
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Queue nodes.  Uses Object, not E, for items to allow forgetting
     * them after use.  Writes that are intrinsically ordered wrt
     * other accesses or CASes use simple relaxed forms.
     */
|
static final class Node { |
|
final boolean isData; |
|
volatile Object item; |
|
volatile Node next; |
|
volatile Thread waiter; |
|
|
|
|
|
|
|
|
|
|
|
        /**
         * Constructs a node holding the given item: a data node if
         * item is non-null, else a request node.  Uses a relaxed
         * write because the item can only be seen after piggy-backing
         * publication via CAS.
         */
|
Node(Object item) { |
|
ITEM.set(this, item); |
|
isData = (item != null); |
|
} |
|
|
|
|
|
Node() { |
|
isData = true; |
|
} |
|
|
|
final boolean casNext(Node cmp, Node val) { |
|
|
|
return NEXT.compareAndSet(this, cmp, val); |
|
} |
|
|
|
final boolean casItem(Object cmp, Object val) { |
|
// assert isData == (cmp != null); |
|
// assert isData == (val == null); |
|
|
|
return ITEM.compareAndSet(this, cmp, val); |
|
} |
|
|
|
|
|
|
|
|
|
        /**
         * Links this node to itself to avoid garbage retention.
         * Called only after CASing the head field, so uses a relaxed
         * write.
         */
|
final void selfLink() { |
|
|
|
NEXT.setRelease(this, this); |
|
} |
|
|
|
final void appendRelaxed(Node next) { |
|
// assert next != null; |
|
|
|
NEXT.set(this, next); |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
        /**
         * Sets the item (of a request node) to self and the waiter to
         * null, to avoid garbage retention after matching or
         * cancelling.  Uses relaxed writes because ordering is
         * already constrained by the volatile/atomic mechanics of the
         * only calling contexts.
         */
|
final void forgetContents() { |
|
|
|
if (!isData) |
|
ITEM.set(this, this); |
|
WAITER.set(this, null); |
|
} |
|
|
|
|
|
|
|
|
|
        /**
         * Returns true if this node has been matched, including the
         * case of artificial matches due to cancellation.
         */
|
final boolean isMatched() { |
|
return isData == (item == null); |
|
} |
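        /*
         * Match-state sketch (a restatement of isMatched above, not an
         * additional invariant): a node is "matched" once its item no
         * longer agrees with its mode.
         *
         *   data node:    live while item != null; matched/cancelled once item == null
         *   request node: live while item == null; matched once item holds a value,
         *                 or cancelled once item == this (see awaitMatch/forgetContents)
         */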
|
|
|
|
|
final boolean tryMatch(Object cmp, Object val) { |
|
if (casItem(cmp, val)) { |
|
LockSupport.unpark(waiter); |
|
return true; |
|
} |
|
return false; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
        /**
         * Returns true if a node with the given mode cannot be
         * appended to this node because this node is unmatched and
         * has the opposite data mode.
         */
|
final boolean cannotPrecede(boolean haveData) { |
|
boolean d = isData; |
|
return d != haveData && d != (item == null); |
|
} |
|
|
|
private static final long serialVersionUID = -3375979862319811754L; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * A node from which the first live (non-matched) node (if any)
     * can be reached in O(1) time.
     * Invariants:
     * - all live nodes are reachable from head via .next
     * - head != null
     * Non-invariants:
     * - head may or may not be live
     * - it is permitted for tail to lag behind head, that is, for
     *   tail to not be reachable from head!
     */
|
transient volatile Node head; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * A node from which the last node on the list (that is, the
     * unique node with node.next == null, if any) can be reached in
     * O(1) time.
     * Invariants:
     * - the last node is always reachable from tail via .next
     * - tail != null
     * Non-invariants:
     * - tail may or may not be live
     * - tail.next may or may not be self-linked
     */
|
private transient volatile Node tail; |
|
|
|
|
|
private transient volatile int sweepVotes; |
|
|
|
private boolean casTail(Node cmp, Node val) { |
|
// assert cmp != null; |
|
|
|
return TAIL.compareAndSet(this, cmp, val); |
|
} |
|
|
|
private boolean casHead(Node cmp, Node val) { |
|
return HEAD.compareAndSet(this, cmp, val); |
|
} |
|
|
|
|
|
private int incSweepVotes() { |
|
return (int) SWEEPVOTES.getAndAdd(this, 1) + 1; |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * Tries to CAS pred.next (or head, if pred is null) from c to p.
     * Caller must ensure that we're not unlinking the trailing node.
     */
|
private boolean tryCasSuccessor(Node pred, Node c, Node p) { |
|
// assert p != null; |
|
// assert c.isData != (c.item != null); |
|
|
|
if (pred != null) |
|
return pred.casNext(c, p); |
|
if (casHead(c, p)) { |
|
c.selfLink(); |
|
return true; |
|
} |
|
return false; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Collapses dead (matched) nodes between pred and q.
     * @param pred the last known live node, or null if none
     * @param c the first dead node
     * @param p the last dead node
     * @param q p.next: the next live node, or null if at end
     * @return pred if pred still alive and CAS succeeded; else p
     */
|
private Node skipDeadNodes(Node pred, Node c, Node p, Node q) { |
|
// assert pred != c; |
|
// assert p != q; |
|
// assert c.isMatched(); |
|
|
|
if (q == null) { |
|
|
|
if (c == p) return pred; |
|
q = p; |
|
} |
|
return (tryCasSuccessor(pred, c, q) |
|
&& (pred == null || !pred.isMatched())) |
|
? pred : p; |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * Collapses dead (matched) nodes from h (which was once head)
     * to p.  Caller ensures all nodes from h up to and including p
     * are dead.
     */
|
private void skipDeadNodesNearHead(Node h, Node p) { |
|
// assert h != null; |
|
// assert h != p; |
|
|
|
for (;;) { |
|
final Node q; |
|
if ((q = p.next) == null) break; |
|
else if (!q.isMatched()) { p = q; break; } |
|
else if (p == (p = q)) return; |
|
} |
|
if (casHead(h, p)) |
|
h.selfLink(); |
|
} |
|
|
|
/* Possible values for "how" argument in xfer method. */ |
|
|
|
    private static final int NOW   = 0; // for untimed poll, tryTransfer
    private static final int ASYNC = 1; // for offer, put, add
    private static final int SYNC  = 2; // for transfer, take
    private static final int TIMED = 3; // for timed poll, tryTransfer
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Implements all queuing methods.  See above for explanation.
     *
     * @param e the item or null for take
     * @param haveData true if this is a put, else a take
     * @param how NOW, ASYNC, SYNC, or TIMED
     * @param nanos timeout in nanosecs, used only if mode is TIMED
     * @return an item if matched, else e
     * @throws NullPointerException if haveData mode but e is null
     */
|
@SuppressWarnings("unchecked") |
|
private E xfer(E e, boolean haveData, int how, long nanos) { |
|
if (haveData && (e == null)) |
|
throw new NullPointerException(); |
|
|
|
restart: for (Node s = null, t = null, h = null;;) { |
|
for (Node p = (t != (t = tail) && t.isData == haveData) ? t |
|
: (h = head);; ) { |
|
final Node q; final Object item; |
|
if (p.isData != haveData |
|
&& haveData == ((item = p.item) == null)) { |
|
if (h == null) h = head; |
|
if (p.tryMatch(item, e)) { |
|
if (h != p) skipDeadNodesNearHead(h, p); |
|
return (E) item; |
|
} |
|
} |
|
if ((q = p.next) == null) { |
|
if (how == NOW) return e; |
|
if (s == null) s = new Node(e); |
|
if (!p.casNext(null, s)) continue; |
|
if (p != t) casTail(t, s); |
|
if (how == ASYNC) return e; |
|
return awaitMatch(s, p, e, (how == TIMED), nanos); |
|
} |
|
if (p == (p = q)) continue restart; |
|
} |
|
} |
|
} |
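    /*
     * How the public queuing methods map onto xfer (a summary of the
     * call sites later in this file, not additional behavior):
     *
     *   put / offer / add     -> xfer(e,    true,  ASYNC, 0)     never blocks
     *   tryTransfer(e)        -> xfer(e,    true,  NOW,   0)     match now or fail
     *   transfer(e)           -> xfer(e,    true,  SYNC,  0)     waits for a consumer
     *   tryTransfer(e, t, u)  -> xfer(e,    true,  TIMED, nanos)
     *   poll()                -> xfer(null, false, NOW,   0)
     *   take()                -> xfer(null, false, SYNC,  0)
     *   poll(t, u)            -> xfer(null, false, TIMED, nanos)
     */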
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Spins/yields/blocks until node s is matched or caller gives up.
     *
     * @param s the waiting node
     * @param pred the predecessor of s, or null if unknown
     * @param e the comparison value for checking match
     * @param timed if true, wait only until timeout elapses
     * @param nanos timeout in nanosecs, used only if timed is true
     * @return matched item, or e if unmatched on interrupt or timeout
     */
|
private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) { |
|
final long deadline = timed ? System.nanoTime() + nanos : 0L; |
|
Thread w = Thread.currentThread(); |
|
int spins = -1; |
|
ThreadLocalRandom randomYields = null; |
|
|
|
for (;;) { |
|
final Object item; |
|
if ((item = s.item) != e) { // matched |
|
// assert item != s; |
|
s.forgetContents(); |
|
@SuppressWarnings("unchecked") E itemE = (E) item; |
|
return itemE; |
|
} |
|
else if (w.isInterrupted() || (timed && nanos <= 0L)) { |
|
|
|
if (s.casItem(e, s.isData ? null : s)) { |
|
unsplice(pred, s); |
|
return e; |
|
} |
|
// return normally if lost CAS |
|
} |
|
else if (spins < 0) { |
|
if ((spins = spinsFor(pred, s.isData)) > 0) |
|
randomYields = ThreadLocalRandom.current(); |
|
} |
|
else if (spins > 0) { |
|
--spins; |
|
if (randomYields.nextInt(CHAINED_SPINS) == 0) |
|
Thread.yield(); |
|
} |
|
else if (s.waiter == null) { |
|
s.waiter = w; |
|
} |
|
else if (timed) { |
|
nanos = deadline - System.nanoTime(); |
|
if (nanos > 0L) |
|
LockSupport.parkNanos(this, nanos); |
|
} |
|
else { |
|
LockSupport.park(this); |
|
} |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * Returns spin/yield value for a node with given predecessor and
     * data mode.  See above for explanation.
     */
|
private static int spinsFor(Node pred, boolean haveData) { |
|
if (MP && pred != null) { |
|
if (pred.isData != haveData) |
|
return FRONT_SPINS + CHAINED_SPINS; |
|
if (pred.isMatched()) |
|
return FRONT_SPINS; |
|
if (pred.waiter == null) |
|
return CHAINED_SPINS; |
|
} |
|
return 0; |
|
} |
|
|
|
/* -------------- Traversal methods -------------- */ |
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns the first unmatched data node, or null if none.
     * Callers must recheck if the returned node is unmatched
     * before using.
     */
|
final Node firstDataNode() { |
|
Node first = null; |
|
restartFromHead: for (;;) { |
|
Node h = head, p = h; |
|
while (p != null) { |
|
if (p.item != null) { |
|
if (p.isData) { |
|
first = p; |
|
break; |
|
} |
|
} |
|
else if (!p.isData) |
|
break; |
|
final Node q; |
|
if ((q = p.next) == null) |
|
break; |
|
if (p == (p = q)) |
|
continue restartFromHead; |
|
} |
|
if (p != h && casHead(h, p)) |
|
h.selfLink(); |
|
return first; |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * Traverses and counts unmatched nodes of the given mode.
     * Used by methods size and getWaitingConsumerCount.
     */
|
private int countOfMode(boolean data) { |
|
restartFromHead: for (;;) { |
|
int count = 0; |
|
for (Node p = head; p != null;) { |
|
if (!p.isMatched()) { |
|
if (p.isData != data) |
|
return 0; |
|
if (++count == Integer.MAX_VALUE) |
|
break; |
|
} |
|
if (p == (p = p.next)) |
|
continue restartFromHead; |
|
} |
|
return count; |
|
} |
|
} |
|
|
|
public String toString() { |
|
String[] a = null; |
|
restartFromHead: for (;;) { |
|
int charLength = 0; |
|
int size = 0; |
|
for (Node p = head; p != null;) { |
|
Object item = p.item; |
|
if (p.isData) { |
|
if (item != null) { |
|
if (a == null) |
|
a = new String[4]; |
|
else if (size == a.length) |
|
a = Arrays.copyOf(a, 2 * size); |
|
String s = item.toString(); |
|
a[size++] = s; |
|
charLength += s.length(); |
|
} |
|
} else if (item == null) |
|
break; |
|
if (p == (p = p.next)) |
|
continue restartFromHead; |
|
} |
|
|
|
if (size == 0) |
|
return "[]"; |
|
|
|
return Helpers.toString(a, size, charLength); |
|
} |
|
} |
|
|
|
private Object[] toArrayInternal(Object[] a) { |
|
Object[] x = a; |
|
restartFromHead: for (;;) { |
|
int size = 0; |
|
for (Node p = head; p != null;) { |
|
Object item = p.item; |
|
if (p.isData) { |
|
if (item != null) { |
|
if (x == null) |
|
x = new Object[4]; |
|
else if (size == x.length) |
|
x = Arrays.copyOf(x, 2 * (size + 4)); |
|
x[size++] = item; |
|
} |
|
} else if (item == null) |
|
break; |
|
if (p == (p = p.next)) |
|
continue restartFromHead; |
|
} |
|
if (x == null) |
|
return new Object[0]; |
|
else if (a != null && size <= a.length) { |
|
if (a != x) |
|
System.arraycopy(x, 0, a, 0, size); |
|
if (size < a.length) |
|
a[size] = null; |
|
return a; |
|
} |
|
return (size == x.length) ? x : Arrays.copyOf(x, size); |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns an array containing all of the elements in this queue,
     * in proper sequence.
     *
     * <p>The returned array will be "safe" in that no references to
     * it are maintained by this queue.  The caller is thus free to
     * modify the returned array.
     *
     * @return an array containing all of the elements in this queue
     */
|
public Object[] toArray() { |
|
return toArrayInternal(null); |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns an array containing all of the elements in this queue,
     * in proper sequence; the runtime type of the returned array is
     * that of the specified array.  If the queue fits in the
     * specified array, it is returned therein.  Otherwise, a new
     * array is allocated with the runtime type of the specified
     * array and the size of this queue.
     *
     * @param a the array into which the elements of the queue are to
     *          be stored, if it is big enough; otherwise, a new array
     *          of the same runtime type is allocated for this purpose
     * @return an array containing all of the elements in this queue
     * @throws ArrayStoreException if the runtime type of the specified
     *         array is not a supertype of the runtime type of every
     *         element in this queue
     * @throws NullPointerException if the specified array is null
     */
|
@SuppressWarnings("unchecked") |
|
public <T> T[] toArray(T[] a) { |
|
Objects.requireNonNull(a); |
|
return (T[]) toArrayInternal(a); |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Weakly-consistent iterator.  Traverses data nodes, treating an
     * unmatched request node as the end of the list.  The next item
     * is cached when found, so that next() can return it even if the
     * node is removed in the meantime.
     */
|
final class Itr implements Iterator<E> { |
|
private Node nextNode; |
|
private E nextItem; |
|
private Node lastRet; |
|
private Node ancestor; |
|
|
|
|
|
|
|
        /**
         * Moves to the next node after pred, or the first node if
         * pred is null.
         */
|
@SuppressWarnings("unchecked") |
|
private void advance(Node pred) { |
|
for (Node p = (pred == null) ? head : pred.next, c = p; |
|
p != null; ) { |
|
final Object item; |
|
if ((item = p.item) != null && p.isData) { |
|
nextNode = p; |
|
nextItem = (E) item; |
|
if (c != p) |
|
tryCasSuccessor(pred, c, p); |
|
return; |
|
} |
|
else if (!p.isData && item == null) |
|
break; |
|
if (c != p && !tryCasSuccessor(pred, c, c = p)) { |
|
pred = p; |
|
c = p = p.next; |
|
} |
|
else if (p == (p = p.next)) { |
|
pred = null; |
|
c = p = head; |
|
} |
|
} |
|
nextItem = null; |
|
nextNode = null; |
|
} |
|
|
|
Itr() { |
|
advance(null); |
|
} |
|
|
|
public final boolean hasNext() { |
|
return nextNode != null; |
|
} |
|
|
|
public final E next() { |
|
final Node p; |
|
if ((p = nextNode) == null) throw new NoSuchElementException(); |
|
E e = nextItem; |
|
advance(lastRet = p); |
|
return e; |
|
} |
|
|
|
public void forEachRemaining(Consumer<? super E> action) { |
|
Objects.requireNonNull(action); |
|
Node q = null; |
|
for (Node p; (p = nextNode) != null; advance(q = p)) |
|
action.accept(nextItem); |
|
if (q != null) |
|
lastRet = q; |
|
} |
|
|
|
public final void remove() { |
|
final Node lastRet = this.lastRet; |
|
if (lastRet == null) |
|
throw new IllegalStateException(); |
|
this.lastRet = null; |
|
if (lastRet.item == null) |
|
return; |
|
|
|
Node pred = ancestor; |
|
for (Node p = (pred == null) ? head : pred.next, c = p, q; |
|
p != null; ) { |
|
if (p == lastRet) { |
|
final Object item; |
|
if ((item = p.item) != null) |
|
p.tryMatch(item, null); |
|
if ((q = p.next) == null) q = p; |
|
if (c != q) tryCasSuccessor(pred, c, q); |
|
ancestor = pred; |
|
return; |
|
} |
|
final Object item; final boolean pAlive; |
|
if (pAlive = ((item = p.item) != null && p.isData)) { |
|
// exceptionally, nothing to do |
|
} |
|
else if (!p.isData && item == null) |
|
break; |
|
if ((c != p && !tryCasSuccessor(pred, c, c = p)) || pAlive) { |
|
pred = p; |
|
c = p = p.next; |
|
} |
|
else if (p == (p = p.next)) { |
|
pred = null; |
|
c = p = head; |
|
} |
|
} |
|
// traversal failed to find lastRet; must have been deleted; |
|
// leave ancestor at original location to avoid overshoot; |
|
// better luck next time! |
|
|
|
// assert lastRet.isMatched(); |
|
} |
|
} |
|
|
|
|
|
final class LTQSpliterator implements Spliterator<E> { |
|
static final int MAX_BATCH = 1 << 25; |
|
Node current; |
|
int batch; |
|
boolean exhausted; |
|
LTQSpliterator() {} |
|
|
|
public Spliterator<E> trySplit() { |
|
Node p, q; |
|
if ((p = current()) == null || (q = p.next) == null) |
|
return null; |
|
int i = 0, n = batch = Math.min(batch + 1, MAX_BATCH); |
|
Object[] a = null; |
|
do { |
|
final Object item = p.item; |
|
if (p.isData) { |
|
if (item != null) { |
|
if (a == null) |
|
a = new Object[n]; |
|
a[i++] = item; |
|
} |
|
} else if (item == null) { |
|
p = null; |
|
break; |
|
} |
|
if (p == (p = q)) |
|
p = firstDataNode(); |
|
} while (p != null && (q = p.next) != null && i < n); |
|
setCurrent(p); |
|
return (i == 0) ? null : |
|
Spliterators.spliterator(a, 0, i, (Spliterator.ORDERED | |
|
Spliterator.NONNULL | |
|
Spliterator.CONCURRENT)); |
|
} |
|
|
|
public void forEachRemaining(Consumer<? super E> action) { |
|
Objects.requireNonNull(action); |
|
final Node p; |
|
if ((p = current()) != null) { |
|
current = null; |
|
exhausted = true; |
|
forEachFrom(action, p); |
|
} |
|
} |
|
|
|
@SuppressWarnings("unchecked") |
|
public boolean tryAdvance(Consumer<? super E> action) { |
|
Objects.requireNonNull(action); |
|
Node p; |
|
if ((p = current()) != null) { |
|
E e = null; |
|
do { |
|
final Object item = p.item; |
|
final boolean isData = p.isData; |
|
if (p == (p = p.next)) |
|
p = head; |
|
if (isData) { |
|
if (item != null) { |
|
e = (E) item; |
|
break; |
|
} |
|
} |
|
else if (item == null) |
|
p = null; |
|
} while (p != null); |
|
setCurrent(p); |
|
if (e != null) { |
|
action.accept(e); |
|
return true; |
|
} |
|
} |
|
return false; |
|
} |
|
|
|
private void setCurrent(Node p) { |
|
if ((current = p) == null) |
|
exhausted = true; |
|
} |
|
|
|
private Node current() { |
|
Node p; |
|
if ((p = current) == null && !exhausted) |
|
setCurrent(p = firstDataNode()); |
|
return p; |
|
} |
|
|
|
public long estimateSize() { return Long.MAX_VALUE; } |
|
|
|
public int characteristics() { |
|
return (Spliterator.ORDERED | |
|
Spliterator.NONNULL | |
|
Spliterator.CONCURRENT); |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns a {@link Spliterator} over the elements in this queue.
     *
     * <p>The returned spliterator is
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * <p>The {@code Spliterator} reports {@link Spliterator#CONCURRENT},
     * {@link Spliterator#ORDERED}, and {@link Spliterator#NONNULL}.
     *
     * @return a {@code Spliterator} over the elements in this queue
     * @since 1.8
     */
|
public Spliterator<E> spliterator() { |
|
return new LTQSpliterator(); |
|
} |
|
|
|
/* -------------- Removal methods -------------- */ |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Unsplices (now or later) the given deleted/cancelled node with
     * the given predecessor.
     *
     * @param pred a node that was at one time known to be the
     * predecessor of s
     * @param s the node to be unspliced
     */
|
final void unsplice(Node pred, Node s) { |
|
// assert pred != null; |
|
// assert pred != s; |
|
// assert s != null; |
|
// assert s.isMatched(); |
|
// assert (SWEEP_THRESHOLD & (SWEEP_THRESHOLD - 1)) == 0; |
|
s.waiter = null; |
|
|
|
|
|
|
|
|
|
|
|
|
|
        /*
         * See above for rationale.  Briefly: if pred still points to
         * s, try to unlink s.  If s cannot be unlinked, because it is
         * trailing node or pred might be unlinked, and neither pred
         * nor s are head or offlist, add to sweepVotes, and if enough
         * votes have accumulated, sweep.
         */
|
if (pred != null && pred.next == s) { |
|
Node n = s.next; |
|
if (n == null || |
|
(n != s && pred.casNext(s, n) && pred.isMatched())) { |
|
for (;;) { |
|
Node h = head; |
|
if (h == pred || h == s) |
|
return; |
|
if (!h.isMatched()) |
|
break; |
|
Node hn = h.next; |
|
if (hn == null) |
|
return; |
|
if (hn != h && casHead(h, hn)) |
|
h.selfLink(); |
|
} |
|
|
|
if (pred.next != pred && s.next != s |
|
&& (incSweepVotes() & (SWEEP_THRESHOLD - 1)) == 0) |
|
sweep(); |
|
} |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * Unlinks matched (typically cancelled) nodes encountered in a
     * traversal from head.
     */
|
private void sweep() { |
|
for (Node p = head, s, n; p != null && (s = p.next) != null; ) { |
|
if (!s.isMatched()) |
|
|
|
p = s; |
|
else if ((n = s.next) == null) |
|
break; |
|
else if (s == n) |
|
|
|
p = head; |
|
else |
|
p.casNext(s, n); |
|
} |
|
} |
|
|
|
|
|
|
|
    /**
     * Creates an initially empty {@code LinkedTransferQueue}.
     */
|
public LinkedTransferQueue() { |
|
head = tail = new Node(); |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Creates a {@code LinkedTransferQueue}
     * initially containing the elements of the given collection,
     * added in traversal order of the collection's iterator.
     *
     * @param c the collection of elements to initially contain
     * @throws NullPointerException if the specified collection or any
     *         of its elements are null
     */
|
public LinkedTransferQueue(Collection<? extends E> c) { |
|
Node h = null, t = null; |
|
for (E e : c) { |
|
Node newNode = new Node(Objects.requireNonNull(e)); |
|
if (h == null) |
|
h = t = newNode; |
|
else |
|
t.appendRelaxed(t = newNode); |
|
} |
|
if (h == null) |
|
h = t = new Node(); |
|
head = h; |
|
tail = t; |
|
} |
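    /*
     * A minimal construction sketch (values are illustrative only):
     *
     *   LinkedTransferQueue<Integer> q =
     *       new LinkedTransferQueue<>(java.util.List.of(1, 2, 3));
     *   // FIFO: elements appear in the traversal order of the collection
     *   assert q.peek() == 1 && q.size() == 3;
     */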
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never block.
     *
     * @throws NullPointerException if the specified element is null
     */
|
public void put(E e) { |
|
xfer(e, true, ASYNC, 0); |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never block or
     * return {@code false}.
     *
     * @return {@code true} (as specified by
     *  {@link BlockingQueue#offer(Object,long,TimeUnit) BlockingQueue.offer})
     * @throws NullPointerException if the specified element is null
     */
|
public boolean offer(E e, long timeout, TimeUnit unit) { |
|
xfer(e, true, ASYNC, 0); |
|
return true; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never return
     * {@code false}.
     *
     * @return {@code true} (as specified by {@link Queue#offer})
     * @throws NullPointerException if the specified element is null
     */
|
public boolean offer(E e) { |
|
xfer(e, true, ASYNC, 0); |
|
return true; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Inserts the specified element at the tail of this queue.
     * As the queue is unbounded, this method will never throw
     * {@link IllegalStateException} or return {@code false}.
     *
     * @return {@code true} (as specified by {@link Collection#add})
     * @throws NullPointerException if the specified element is null
     */
|
public boolean add(E e) { |
|
xfer(e, true, ASYNC, 0); |
|
return true; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Transfers the element to a waiting consumer immediately, if possible.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * otherwise returning {@code false} without enqueuing the element.
     *
     * @throws NullPointerException if the specified element is null
     */
|
public boolean tryTransfer(E e) { |
|
return xfer(e, true, NOW, 0) == null; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Transfers the element to a consumer, waiting if necessary to do so.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * else inserts the specified element at the tail of this queue
     * and waits until the element is received by a consumer.
     *
     * @throws NullPointerException if the specified element is null
     */
|
public void transfer(E e) throws InterruptedException { |
|
if (xfer(e, true, SYNC, 0) != null) { |
|
Thread.interrupted(); |
|
throw new InterruptedException(); |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Transfers the element to a consumer if it is possible to do so
     * before the timeout elapses.
     *
     * <p>More precisely, transfers the specified element immediately
     * if there exists a consumer already waiting to receive it (in
     * {@link #take} or timed {@link #poll(long,TimeUnit) poll}),
     * else inserts the specified element at the tail of this queue
     * and waits until the element is received by a consumer,
     * returning {@code false} if the specified wait time elapses
     * before the element can be transferred.
     *
     * @throws NullPointerException if the specified element is null
     */
|
public boolean tryTransfer(E e, long timeout, TimeUnit unit) |
|
throws InterruptedException { |
|
if (xfer(e, true, TIMED, unit.toNanos(timeout)) == null) |
|
return true; |
|
if (!Thread.interrupted()) |
|
return false; |
|
throw new InterruptedException(); |
|
} |
|
|
|
public E take() throws InterruptedException { |
|
E e = xfer(null, false, SYNC, 0); |
|
if (e != null) |
|
return e; |
|
Thread.interrupted(); |
|
throw new InterruptedException(); |
|
} |
|
|
|
public E poll(long timeout, TimeUnit unit) throws InterruptedException { |
|
E e = xfer(null, false, TIMED, unit.toNanos(timeout)); |
|
if (e != null || !Thread.interrupted()) |
|
return e; |
|
throw new InterruptedException(); |
|
} |
|
|
|
public E poll() { |
|
return xfer(null, false, NOW, 0); |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * @throws NullPointerException     {@inheritDoc}
     * @throws IllegalArgumentException {@inheritDoc}
     */
|
public int drainTo(Collection<? super E> c) { |
|
Objects.requireNonNull(c); |
|
if (c == this) |
|
throw new IllegalArgumentException(); |
|
int n = 0; |
|
for (E e; (e = poll()) != null; n++) |
|
c.add(e); |
|
return n; |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * @throws NullPointerException     {@inheritDoc}
     * @throws IllegalArgumentException {@inheritDoc}
     */
|
public int drainTo(Collection<? super E> c, int maxElements) { |
|
Objects.requireNonNull(c); |
|
if (c == this) |
|
throw new IllegalArgumentException(); |
|
int n = 0; |
|
for (E e; n < maxElements && (e = poll()) != null; n++) |
|
c.add(e); |
|
return n; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns an iterator over the elements in this queue in proper sequence.
     * The elements will be returned in order from first (head) to last (tail).
     *
     * <p>The returned iterator is
     * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
     *
     * @return an iterator over the elements in this queue in proper sequence
     */
|
public Iterator<E> iterator() { |
|
return new Itr(); |
|
} |
|
|
|
public E peek() { |
|
restartFromHead: for (;;) { |
|
for (Node p = head; p != null;) { |
|
Object item = p.item; |
|
if (p.isData) { |
|
if (item != null) { |
|
@SuppressWarnings("unchecked") E e = (E) item; |
|
return e; |
|
} |
|
} |
|
else if (item == null) |
|
break; |
|
if (p == (p = p.next)) |
|
continue restartFromHead; |
|
} |
|
return null; |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns {@code true} if this queue contains no elements.
     *
     * @return {@code true} if this queue contains no elements
     */
|
public boolean isEmpty() { |
|
return firstDataNode() == null; |
|
} |
|
|
|
public boolean hasWaitingConsumer() { |
|
restartFromHead: for (;;) { |
|
for (Node p = head; p != null;) { |
|
Object item = p.item; |
|
if (p.isData) { |
|
if (item != null) |
|
break; |
|
} |
|
else if (item == null) |
|
return true; |
|
if (p == (p = p.next)) |
|
continue restartFromHead; |
|
} |
|
return false; |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns the number of elements in this queue.  If this queue
     * contains more than {@code Integer.MAX_VALUE} elements, returns
     * {@code Integer.MAX_VALUE}.
     *
     * <p>Beware that, unlike in most collections, this method is
     * <em>NOT</em> a constant-time operation.  Because of the
     * asynchronous nature of these queues, determining the current
     * number of elements requires an O(n) traversal.
     *
     * @return the number of elements in this queue
     */
|
public int size() { |
|
return countOfMode(true); |
|
} |
|
|
|
public int getWaitingConsumerCount() { |
|
return countOfMode(false); |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Removes a single instance of the specified element from this queue,
     * if it is present.  More formally, removes an element {@code e} such
     * that {@code o.equals(e)}, if this queue contains one or more such
     * elements.
     * Returns {@code true} if this queue contained the specified element
     * (or equivalently, if this queue changed as a result of the call).
     *
     * @param o element to be removed from this queue, if present
     * @return {@code true} if this queue changed as a result of the call
     */
|
public boolean remove(Object o) { |
|
if (o == null) return false; |
|
restartFromHead: for (;;) { |
|
for (Node p = head, pred = null; p != null; ) { |
|
Node q = p.next; |
|
final Object item; |
|
if ((item = p.item) != null) { |
|
if (p.isData) { |
|
if (o.equals(item) && p.tryMatch(item, null)) { |
|
skipDeadNodes(pred, p, p, q); |
|
return true; |
|
} |
|
pred = p; p = q; continue; |
|
} |
|
} |
|
else if (!p.isData) |
|
break; |
|
for (Node c = p;; q = p.next) { |
|
if (q == null || !q.isMatched()) { |
|
pred = skipDeadNodes(pred, c, p, q); p = q; break; |
|
} |
|
if (p == (p = q)) continue restartFromHead; |
|
} |
|
} |
|
return false; |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Returns {@code true} if this queue contains the specified element.
     * More formally, returns {@code true} if and only if this queue contains
     * at least one element {@code e} such that {@code o.equals(e)}.
     *
     * @param o object to be checked for containment in this queue
     * @return {@code true} if this queue contains the specified element
     */
|
public boolean contains(Object o) { |
|
if (o == null) return false; |
|
restartFromHead: for (;;) { |
|
for (Node p = head, pred = null; p != null; ) { |
|
Node q = p.next; |
|
final Object item; |
|
if ((item = p.item) != null) { |
|
if (p.isData) { |
|
if (o.equals(item)) |
|
return true; |
|
pred = p; p = q; continue; |
|
} |
|
} |
|
else if (!p.isData) |
|
break; |
|
for (Node c = p;; q = p.next) { |
|
if (q == null || !q.isMatched()) { |
|
pred = skipDeadNodes(pred, c, p, q); p = q; break; |
|
} |
|
if (p == (p = q)) continue restartFromHead; |
|
} |
|
} |
|
return false; |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Always returns {@code Integer.MAX_VALUE} because a
     * {@code LinkedTransferQueue} is not capacity constrained.
     *
     * @return {@code Integer.MAX_VALUE} (as specified by
     *         {@link BlockingQueue#remainingCapacity()})
     */
|
public int remainingCapacity() { |
|
return Integer.MAX_VALUE; |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Saves this queue to a stream (that is, serializes it).
     *
     * @param s the stream
     * @throws java.io.IOException if an I/O error occurs
     * @serialData All of the elements (each an {@code E}) in
     * the proper order, followed by a null
     */
|
private void writeObject(java.io.ObjectOutputStream s) |
|
throws java.io.IOException { |
|
s.defaultWriteObject(); |
|
for (E e : this) |
|
s.writeObject(e); |
|
|
|
s.writeObject(null); |
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /**
     * Reconstitutes this queue from a stream (that is, deserializes it).
     *
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws java.io.IOException if an I/O error occurs
     */
|
private void readObject(java.io.ObjectInputStream s) |
|
throws java.io.IOException, ClassNotFoundException { |
|
|
|
|
|
Node h = null, t = null; |
|
for (Object item; (item = s.readObject()) != null; ) { |
|
Node newNode = new Node(item); |
|
if (h == null) |
|
h = t = newNode; |
|
else |
|
t.appendRelaxed(t = newNode); |
|
} |
|
if (h == null) |
|
h = t = new Node(); |
|
head = h; |
|
tail = t; |
|
} |
|
|
|
|
|
|
|
    /**
     * @throws NullPointerException {@inheritDoc}
     */
|
public boolean removeIf(Predicate<? super E> filter) { |
|
Objects.requireNonNull(filter); |
|
return bulkRemove(filter); |
|
} |
|
|
|
|
|
|
|
    /**
     * @throws NullPointerException {@inheritDoc}
     */
|
public boolean removeAll(Collection<?> c) { |
|
Objects.requireNonNull(c); |
|
return bulkRemove(e -> c.contains(e)); |
|
} |
|
|
|
|
|
|
|
    /**
     * @throws NullPointerException {@inheritDoc}
     */
|
public boolean retainAll(Collection<?> c) { |
|
Objects.requireNonNull(c); |
|
return bulkRemove(e -> !c.contains(e)); |
|
} |
|
|
|
public void clear() { |
|
bulkRemove(e -> true); |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * Tolerate this many consecutive dead nodes before CAS-collapsing.
     * Amortized cost of clear() is (1 + 1/MAX_HOPS) CASes per element.
     */
|
private static final int MAX_HOPS = 8; |
|
|
|
|
|
@SuppressWarnings("unchecked") |
|
private boolean bulkRemove(Predicate<? super E> filter) { |
|
boolean removed = false; |
|
restartFromHead: for (;;) { |
|
int hops = MAX_HOPS; |
|
            // c will be CASed to collapse intervening dead nodes between
            // pred (or head if null) and p.
|
|
|
for (Node p = head, c = p, pred = null, q; p != null; p = q) { |
|
q = p.next; |
|
final Object item; boolean pAlive; |
|
if (pAlive = ((item = p.item) != null && p.isData)) { |
|
if (filter.test((E) item)) { |
|
if (p.tryMatch(item, null)) |
|
removed = true; |
|
pAlive = false; |
|
} |
|
} |
|
else if (!p.isData && item == null) |
|
break; |
|
if (pAlive || q == null || --hops == 0) { |
|
// p might already be self-linked here, but if so: |
|
                    // - CASing head will surely fail
                    // - CASing pred's next will be useless but harmless.
|
|
|
if ((c != p && !tryCasSuccessor(pred, c, c = p)) |
|
|| pAlive) { |
|
|
|
hops = MAX_HOPS; |
|
pred = p; |
|
c = q; |
|
} |
|
} else if (p == q) |
|
continue restartFromHead; |
|
} |
|
return removed; |
|
} |
|
} |
|
|
|
|
|
|
|
|
|
    /**
     * Runs action on each element found during a traversal starting at p.
     * If p is null, the action is not run.
     */
|
@SuppressWarnings("unchecked") |
|
void forEachFrom(Consumer<? super E> action, Node p) { |
|
for (Node pred = null; p != null; ) { |
|
Node q = p.next; |
|
final Object item; |
|
if ((item = p.item) != null) { |
|
if (p.isData) { |
|
action.accept((E) item); |
|
pred = p; p = q; continue; |
|
} |
|
} |
|
else if (!p.isData) |
|
break; |
|
for (Node c = p;; q = p.next) { |
|
if (q == null || !q.isMatched()) { |
|
pred = skipDeadNodes(pred, c, p, q); p = q; break; |
|
} |
|
if (p == (p = q)) { pred = null; p = head; break; } |
|
} |
|
} |
|
} |
|
|
|
|
|
|
|
    /**
     * @throws NullPointerException {@inheritDoc}
     */
|
public void forEach(Consumer<? super E> action) { |
|
Objects.requireNonNull(action); |
|
forEachFrom(action, head); |
|
} |
|
|
|
|
|
private static final VarHandle HEAD; |
|
private static final VarHandle TAIL; |
|
private static final VarHandle SWEEPVOTES; |
|
static final VarHandle ITEM; |
|
static final VarHandle NEXT; |
|
static final VarHandle WAITER; |
|
static { |
|
try { |
|
MethodHandles.Lookup l = MethodHandles.lookup(); |
|
HEAD = l.findVarHandle(LinkedTransferQueue.class, "head", |
|
Node.class); |
|
TAIL = l.findVarHandle(LinkedTransferQueue.class, "tail", |
|
Node.class); |
|
SWEEPVOTES = l.findVarHandle(LinkedTransferQueue.class, "sweepVotes", |
|
int.class); |
|
ITEM = l.findVarHandle(Node.class, "item", Object.class); |
|
NEXT = l.findVarHandle(Node.class, "next", Node.class); |
|
WAITER = l.findVarHandle(Node.class, "waiter", Thread.class); |
|
} catch (ReflectiveOperationException e) { |
|
throw new ExceptionInInitializerError(e); |
|
} |
|
|
|
        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
|
|
|
Class<?> ensureLoaded = LockSupport.class; |
|
} |
|
} |