IGNITE-21151 MVCC caching removal (#11140)
shishkovilja committed Jan 17, 2024
1 parent 48cf657 commit ab4d60d
Showing 13 changed files with 4 additions and 754 deletions.
@@ -90,7 +90,6 @@
 import static org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPreloader.DFLT_PRELOAD_RESEND_TIMEOUT;
 import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition.DFLT_ATOMIC_CACHE_DELETE_HISTORY_SIZE;
 import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition.DFLT_CACHE_REMOVE_ENTRIES_TTL;
-import static org.apache.ignite.internal.processors.cache.mvcc.MvccCachingManager.DFLT_MVCC_TX_SIZE_CACHING_THRESHOLD;
 import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DFLT_DEFRAGMENTATION_REGION_SIZE_PERCENTAGE;
 import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DFLT_PDS_WAL_REBALANCE_THRESHOLD;
 import static org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointHistory.DFLT_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE;
@@ -1577,14 +1576,6 @@ public final class IgniteSystemProperties {
 defaults = "10")
 public static final String IGNITE_ZOOKEEPER_DISCOVERY_MAX_RETRY_COUNT = "IGNITE_ZOOKEEPER_DISCOVERY_MAX_RETRY_COUNT";
 
-/**
-* Maximum number for cached MVCC transaction updates. This caching is used for continuous query with MVCC caches.
-*/
-@SystemProperty(value = "Maximum number for cached MVCC transaction updates. This caching is used " +
-"for continuous query with MVCC caches", type = Integer.class,
-defaults = "" + DFLT_MVCC_TX_SIZE_CACHING_THRESHOLD)
-public static final String IGNITE_MVCC_TX_SIZE_CACHING_THRESHOLD = "IGNITE_MVCC_TX_SIZE_CACHING_THRESHOLD";
-
 /**
 * Try reuse memory on deactivation. Useful in case of huge page memory region size.
 */
@@ -110,7 +110,6 @@
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearAtomicCache;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTransactionalCache;
 import org.apache.ignite.internal.processors.cache.dr.GridCacheDrManager;
-import org.apache.ignite.internal.processors.cache.mvcc.MvccCachingManager;
 import org.apache.ignite.internal.processors.cache.persistence.DataRegion;
 import org.apache.ignite.internal.processors.cache.persistence.DatabaseLifecycleListener;
 import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
@@ -3104,7 +3103,6 @@ private GridCacheSharedContext createSharedContext(
 .setTtlCleanupManager(new GridCacheSharedTtlCleanupManager())
 .setPartitionsEvictManager(new PartitionsEvictManager())
 .setJtaManager(JTA.createOptional())
-.setMvccCachingManager(new MvccCachingManager())
 .setDiagnosticManager(new CacheDiagnosticManager())
 .setCdcManager(cdcMgr)
 .build(kernalCtx, storeSesLsnrs);
@@ -49,7 +49,6 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.topology.PartitionsEvictManager;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
 import org.apache.ignite.internal.processors.cache.jta.CacheJtaManagerAdapter;
-import org.apache.ignite.internal.processors.cache.mvcc.MvccCachingManager;
 import org.apache.ignite.internal.processors.cache.persistence.DataRegion;
 import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager;
 import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteSnapshotManager;
@@ -139,9 +138,6 @@ public class GridCacheSharedContext<K, V> {
 /** Partitons evict manager. */
 private PartitionsEvictManager evictMgr;
 
-/** Mvcc caching manager. */
-private MvccCachingManager mvccCachingMgr;
-
 /** Cache contexts map. */
 private final ConcurrentHashMap<Integer, GridCacheContext<K, V>> ctxMap;
 
@@ -212,7 +208,6 @@ public static Builder builder() {
 * @param evictMgr Partitons evict manager.
 * @param jtaMgr JTA manager.
 * @param storeSesLsnrs Store session listeners.
-* @param mvccCachingMgr Mvcc caching manager.
 */
 private GridCacheSharedContext(
 GridKernalContext kernalCtx,
@@ -232,7 +227,6 @@ private GridCacheSharedContext(
 PartitionsEvictManager evictMgr,
 CacheJtaManagerAdapter jtaMgr,
 Collection<CacheStoreSessionListener> storeSesLsnrs,
-MvccCachingManager mvccCachingMgr,
 CacheDiagnosticManager diagnosticMgr,
 CdcManager cdcMgr
 ) {
@@ -256,7 +250,6 @@ private GridCacheSharedContext(
 ioMgr,
 ttlMgr,
 evictMgr,
-mvccCachingMgr,
 diagnosticMgr,
 cdcMgr
 );
@@ -435,7 +428,6 @@ void onReconnected(boolean active) throws IgniteCheckedException {
 ioMgr,
 ttlMgr,
 evictMgr,
-mvccCachingMgr,
 diagnosticMgr,
 cdcMgr
 );
@@ -485,7 +477,6 @@ private void setManagers(
 GridCacheIoManager ioMgr,
 GridCacheSharedTtlCleanupManager ttlMgr,
 PartitionsEvictManager evictMgr,
-MvccCachingManager mvccCachingMgr,
 CacheDiagnosticManager diagnosticMgr,
 CdcManager cdcMgr
 ) {
@@ -510,7 +501,6 @@ private void setManagers(
 this.ioMgr = add(mgrs, ioMgr);
 this.ttlMgr = add(mgrs, ttlMgr);
 this.evictMgr = add(mgrs, evictMgr);
-this.mvccCachingMgr = add(mgrs, mvccCachingMgr);
 }
 
 /**
@@ -866,13 +856,6 @@ public PartitionsEvictManager evict() {
 return evictMgr;
 }
 
-/**
-* @return Mvcc transaction enlist caching manager.
-*/
-public MvccCachingManager mvccCaching() {
-return mvccCachingMgr;
-}
-
 /**
 * @return Diagnostic manager.
 */
@@ -1268,9 +1251,6 @@ public static class Builder {
 /** */
 private PartitionsEvictManager evictMgr;
 
-/** */
-private MvccCachingManager mvccCachingMgr;
-
 /** */
 private CacheDiagnosticManager diagnosticMgr;
 
@@ -1305,7 +1285,6 @@ public <K, V> GridCacheSharedContext<K, V> build(
 evictMgr,
 jtaMgr,
 storeSesLsnrs,
-mvccCachingMgr,
 diagnosticMgr,
 cdcMgr
 );
@@ -1416,13 +1395,6 @@ public Builder setPartitionsEvictManager(PartitionsEvictManager evictMgr) {
 return this;
 }
 
-/** */
-public Builder setMvccCachingManager(MvccCachingManager mvccCachingMgr) {
-this.mvccCachingMgr = mvccCachingMgr;
-
-return this;
-}
-
 /** */
 public Builder setDiagnosticManager(CacheDiagnosticManager diagnosticMgr) {
 this.diagnosticMgr = diagnosticMgr;
@@ -778,8 +778,6 @@ else if (op == READ) {
 if (txCntrs != null)
 cctx.tm().txHandler().applyPartitionsUpdatesCounters(txCntrs.updateCounters());
 
-cctx.mvccCaching().onTxFinished(this, true);
-
 if (!near() && !F.isEmpty(dataEntries) && cctx.wal(true) != null)
 ptr = cctx.wal(true).log(new DataRecord(dataEntries));
 
@@ -925,8 +923,6 @@ public void forceCommit() throws IgniteCheckedException {
 cctx.tm().txHandler().applyPartitionsUpdatesCounters(counters.updateCounters(), true, false);
 
 state(ROLLED_BACK);
-
-cctx.mvccCaching().onTxFinished(this, false);
 }
 }
 catch (IgniteCheckedException | RuntimeException | Error e) {
@@ -417,8 +417,6 @@ private void continueLoop(boolean ignoreCntr) {
 
 assert entryProc != null || !op.isInvoke();
 
-boolean needOldVal = tx.txState().useMvccCaching(cctx.cacheId());
-
 GridCacheUpdateTxResult res;
 
 while (true) {
@@ -433,7 +431,7 @@
 topVer,
 mvccSnapshot,
 isMoving(key.partition(), backups),
-needOldVal,
+false,
 filter,
 needResult());
 
@@ -455,7 +453,7 @@
 op.cacheOperation(),
 isMoving(key.partition(), backups),
 op.noCreate(),
-needOldVal,
+false,
 filter,
 needResult(),
 keepBinary);
@@ -632,9 +630,6 @@ private void processEntry(GridDhtCacheEntry entry, EnlistOperation op,
 || op == EnlistOperation.LOCK)
 return;
 
-cctx.shared().mvccCaching().addEnlisted(entry.key(), updRes.newValue(), 0, 0, lockVer,
-updRes.oldValue(), tx.local(), tx.topologyVersion(), mvccSnapshot, cctx.cacheId(), tx, null, -1);
-
 addToBatch(entry.key(), val, updRes.mvccHistory(), entry.context().cacheId(), backups);
 }
 
(Diffs for the remaining changed files are not shown.)