diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/ActiveDeletedBlobCollectorMBean.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/ActiveDeletedBlobCollectorMBean.java
index 12853982fa..5285116ce1 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/ActiveDeletedBlobCollectorMBean.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/ActiveDeletedBlobCollectorMBean.java
@@ -21,6 +21,7 @@ package org.apache.jackrabbit.oak.plugins.index.lucene;
 
 import javax.annotation.Nonnull;
 import javax.management.openmbean.CompositeData;
+
 
 /**
  * MBean for starting and monitoring the progress of
@@ -56,4 +57,21 @@ public interface ActiveDeletedBlobCollectorMBean {
      */
     @Nonnull
     CompositeData getActiveCollectionStatus();
+
+    /**
+     * Flag current blobs (head state) referred by all indexes so that they won't
+     * be marked to be collected by active deletion later. It would also set an
+     * in-memory flag so that new blobs also are flagged to be not marked for deletion
+     * by active deletion. Use {@link #resumeMarkingDeletedBlobs()} to resume
+     * marking deleted blobs for collection.
+     */
+    void dontCollectCurrentAndPauseMarkingDeletedBlobs();
+
+    /**
+     * Resets the in-memory flag so that new blobs are not flagged anymore and hence
+     * would get marked for active deletion when active deletion is active.
+     *
+     * Counterpart of {@link #dontCollectCurrentAndPauseMarkingDeletedBlobs()}.
+     */
+    void resumeMarkingDeletedBlobs();
 }
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/ActiveDeletedBlobCollectorMBeanImpl.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/ActiveDeletedBlobCollectorMBeanImpl.java
index 8923361722..97062b496b 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/ActiveDeletedBlobCollectorMBeanImpl.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/ActiveDeletedBlobCollectorMBeanImpl.java
@@ -19,33 +19,57 @@
 
 package org.apache.jackrabbit.oak.plugins.index.lucene;
 
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.apache.jackrabbit.oak.api.CommitFailedException;
 import org.apache.jackrabbit.oak.api.jmx.CheckpointMBean;
+import org.apache.jackrabbit.oak.api.jmx.IndexStatsMBean;
+import org.apache.jackrabbit.oak.commons.PathUtils;
 import org.apache.jackrabbit.oak.commons.jmx.ManagementOperation;
+import org.apache.jackrabbit.oak.plugins.index.IndexPathService;
 import org.apache.jackrabbit.oak.plugins.index.lucene.directory.ActiveDeletedBlobCollectorFactory.ActiveDeletedBlobCollector;
 import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore;
+import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
+import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.spi.state.NodeStore;
 import org.apache.jackrabbit.oak.spi.whiteboard.Tracker;
 import org.apache.jackrabbit.oak.spi.whiteboard.Whiteboard;
 import org.apache.jackrabbit.oak.stats.Clock;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.component.annotations.ReferenceCardinality;
+import org.osgi.service.component.annotations.ReferencePolicy;
+import org.osgi.service.component.annotations.ReferencePolicyOption;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nonnull;
 import javax.management.openmbean.CompositeData;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executor;
 import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
 
 import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.collect.Iterables.transform;
+import static org.apache.jackrabbit.oak.api.Type.STRING;
+import static org.apache.jackrabbit.oak.api.jmx.IndexStatsMBean.STATUS_RUNNING;
 import static org.apache.jackrabbit.oak.commons.jmx.ManagementOperation.Status.failed;
 import static org.apache.jackrabbit.oak.commons.jmx.ManagementOperation.Status.initiated;
 import static org.apache.jackrabbit.oak.commons.jmx.ManagementOperation.done;
 import static org.apache.jackrabbit.oak.commons.jmx.ManagementOperation.newManagementOperation;
+import static org.apache.jackrabbit.oak.plugins.index.IndexConstants.TYPE_PROPERTY_NAME;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.INDEX_DATA_CHILD_NAME;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.TYPE_LUCENE;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.directory.OakDirectory.PROP_UNSAFE_FOR_ACTIVE_DELETION;
 
 public class ActiveDeletedBlobCollectorMBeanImpl implements ActiveDeletedBlobCollectorMBean {
     private static final Logger LOG = LoggerFactory.getLogger(ActiveDeletedBlobCollectorMBeanImpl.class);
 
-    public static final String OP_NAME = "Active lucene index blobs collection";
+    private static final String OP_NAME = "Active lucene index blobs collection";
 
     /**
      * Actively deleted blob must be deleted for at least this long (in seconds)
@@ -67,14 +91,26 @@ public class ActiveDeletedBlobCollectorMBeanImpl implements ActiveDeletedBlobCol
     @Nonnull
     private final Executor executor;
 
+    @Reference
+    private IndexPathService indexPathService;
+
+    @Reference
+    private NodeStore store;
+
+    private final Map<String, IndexStatsMBean> statsMBeans = new ConcurrentHashMap<>();
+
 
     private ManagementOperation<Void> gcOp = done(OP_NAME, null);
 
     /**
      * @param activeDeletedBlobCollector    deleted index blobs collector
+     * @param whiteboard                    An instance of {@link Whiteboard}. It will be
+     *                                      used to get checkpoint manager mbean.
+     * @param blobStore                     An instance of {@link GarbageCollectableBlobStore}. It will
+     *                                      be used to purge blobs which have been deleted from lucene indexes.
      * @param executor                      executor for running the collection task
      */
-    public ActiveDeletedBlobCollectorMBeanImpl(
+    ActiveDeletedBlobCollectorMBeanImpl(
             @Nonnull ActiveDeletedBlobCollector activeDeletedBlobCollector,
             @Nonnull Whiteboard whiteboard,
             @Nonnull GarbageCollectableBlobStore blobStore,
@@ -128,6 +164,120 @@ public class ActiveDeletedBlobCollectorMBeanImpl implements ActiveDeletedBlobCol
         return gcOp.getStatus().toCompositeData();
     }
 
+    @Override
+    public void dontCollectCurrentAndPauseMarkingDeletedBlobs() {
+        activeDeletedBlobCollector.pauseMarkingDeletedBlobs(true);
+
+        if (!waitForRunningIndexCycles()) {
+            LOG.warn("Some indexers were still found running. Resume and quit gracefully");
+            activeDeletedBlobCollector.pauseMarkingDeletedBlobs(false);
+            return;
+        }
+        try {
+            markCurrentIndexFilesUnsafeForSafeDeletion();
+        } catch (CommitFailedException e) {
+            LOG.warn("Could not set current index files unsafe for active deletion. Resume and quit gracefully", e);
+            activeDeletedBlobCollector.pauseMarkingDeletedBlobs(false);
+        }
+    }
+
+    @Override
+    public void resumeMarkingDeletedBlobs() {
+        activeDeletedBlobCollector.pauseMarkingDeletedBlobs(false);
+    }
+
+    /**
+     * Wait for running index cycles for 2 minutes.
+     *
+     * @return true if all running index cycles have been through; false otherwise
+     */
+    private boolean waitForRunningIndexCycles() {
+        Map<IndexStatsMBean, String> origIndexLaneToRefCPMap = Maps.asMap(
+                Sets.newHashSet(statsMBeans.values().stream().filter(bean -> {
+                    String beanStatus;
+                    try {
+                        if (bean != null) {
+                            beanStatus = bean.getStatus();
+                        } else {
+                            return false;
+                        }
+                    } catch (Exception e) {
+                        LOG.warn("Exception during getting status for {}. Ignoring this indexer lane", bean.getName(), e);
+                        return false;
+                    }
+                    return STATUS_RUNNING.equals(beanStatus);
+                }).collect(Collectors.toList())),
+                IndexStatsMBean::getReferenceCheckpoint);
+
+        if (!origIndexLaneToRefCPMap.isEmpty()) {
+            LOG.info("Found running index lanes ({}). Sleep a bit before continuing.",
+                    transform(origIndexLaneToRefCPMap.keySet(), IndexStatsMBean::getName));
+            try {
+                clock.waitUntil(clock.getTime() + TimeUnit.SECONDS.toMillis(1));
+            } catch (InterruptedException e) {
+                LOG.info("Thread interrupted during initial wait", e);
+                Thread.currentThread().interrupt();
+            }
+        }
+        long deadline = clock.getTime() + TimeUnit.MINUTES.toMillis(2);
+        while (!origIndexLaneToRefCPMap.isEmpty() && clock.getTime() < deadline) {
+            Map.Entry<IndexStatsMBean, String> indexLaneEntry = origIndexLaneToRefCPMap.entrySet().iterator().next();
+            IndexStatsMBean indexLaneBean = indexLaneEntry.getKey();
+
+            String oldRefCP = indexLaneEntry.getValue();
+            String currentRefCP = indexLaneBean.getReferenceCheckpoint();
+
+            if (!oldRefCP.equals(currentRefCP)) {
+                origIndexLaneToRefCPMap.remove(indexLaneBean);
+                LOG.info("Lane {} has moved - oldRefCP {}, newRefCP {}", indexLaneBean.getName(), oldRefCP, currentRefCP);
+            } else {
+                LOG.info("Lane {} still on refCP {}. Waiting....", indexLaneBean.getName(), currentRefCP);
+
+                try {
+                    clock.waitUntil(clock.getTime() + TimeUnit.SECONDS.toMillis(1));
+                } catch (InterruptedException e) {
+                    LOG.info("Thread interrupted", e);
+                    Thread.currentThread().interrupt();
+                    break;
+                }
+            }
+        }
+
+        return origIndexLaneToRefCPMap.isEmpty();
+    }
+
+    private void markCurrentIndexFilesUnsafeForSafeDeletion() throws CommitFailedException {
+        NodeBuilder rootBuilder = store.getRoot().builder();
+        for (String indexPath : indexPathService.getIndexPaths()) {
+            markCurrentIndexFilesUnsafeForSafeDeletionFor(rootBuilder, indexPath);
+        }
+
+        store.merge(rootBuilder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+    }
+
+    private void markCurrentIndexFilesUnsafeForSafeDeletionFor(NodeBuilder rootBuilder, String indexPath) {
+        NodeBuilder indexPathBuilder = getBuilderForPath(rootBuilder, indexPath);
+        if (!TYPE_LUCENE.equals(indexPathBuilder.getProperty(TYPE_PROPERTY_NAME).getValue(STRING))) {
+            LOG.debug("Ignoring index {} as it's not a lucene index", indexPath);
+            return;
+        }
+
+        NodeBuilder dataNodeBuilder = indexPathBuilder.getChildNode(INDEX_DATA_CHILD_NAME);
+        for (String indexFileName : dataNodeBuilder.getChildNodeNames()) {
+            NodeBuilder indexFileBuilder = dataNodeBuilder.getChildNode(indexFileName);
+
+            indexFileBuilder.setProperty(PROP_UNSAFE_FOR_ACTIVE_DELETION, true);
+        }
+    }
+
+    private static NodeBuilder getBuilderForPath(NodeBuilder rootBuilder, String path) {
+        NodeBuilder builder = rootBuilder;
+        for (String elem : PathUtils.elements(path)) {
+            builder = builder.getChildNode(elem);
+        }
+        return builder;
+    }
+
     private long getSafeTimestampForDeletedBlobs() {
         long timestamp = clock.getTime() - TimeUnit.SECONDS.toMillis(MIN_BLOB_AGE_TO_ACTIVELY_DELETE);
 
@@ -164,4 +314,18 @@ public class ActiveDeletedBlobCollectorMBeanImpl implements ActiveDeletedBlobCol
             tracker.stop();
         }
     }
+
+    @Reference(name = "statsMBeans",
+            policy = ReferencePolicy.DYNAMIC,
+            cardinality = ReferenceCardinality.MULTIPLE,
+            policyOption = ReferencePolicyOption.GREEDY,
+            service = IndexStatsMBean.class
+    )
+    protected void bindStatsMBeans(IndexStatsMBean mBean) {
+        statsMBeans.put(mBean.getName(), mBean);
+    }
+
+    protected void unbindStatsMBeans(IndexStatsMBean mBean) {
+        statsMBeans.remove(mBean.getName());
+    }
 }
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorFactory.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorFactory.java
index 691b3008f7..1ed918275a 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorFactory.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorFactory.java
@@ -70,9 +70,15 @@ public class ActiveDeletedBlobCollectorFactory {
         void purgeBlobsDeleted(long before, GarbageCollectableBlobStore blobStore);
 
         void cancelBlobCollection();
+
+        void pauseMarkingDeletedBlobs(boolean toPause);
+
+        boolean shouldMarkDeletedBlobs();
     }
 
     public static ActiveDeletedBlobCollector NOOP = new ActiveDeletedBlobCollector() {
+        private volatile boolean markingDeletedBlobsPaused = false;
+
         @Override
         public BlobDeletionCallback getBlobDeletionCallback() {
             return BlobDeletionCallback.NOOP;
@@ -87,6 +93,16 @@ public class ActiveDeletedBlobCollectorFactory {
         public void cancelBlobCollection() {
 
         }
+
+        @Override
+        public void pauseMarkingDeletedBlobs(boolean toPause) {
+            markingDeletedBlobsPaused = toPause;
+        }
+
+        @Override
+        public boolean shouldMarkDeletedBlobs() {
+            return markingDeletedBlobsPaused;
+        }
     };
 
     public interface BlobDeletionCallback extends IndexCommitCallback {
@@ -99,6 +115,9 @@ public class ActiveDeletedBlobCollectorFactory {
          *            blobs.
          */
         void deleted(String blobId, Iterable<String> ids);
+
+        boolean isMarkingForActiveDeletionUnsafe();
+
         BlobDeletionCallback NOOP = new BlobDeletionCallback() {
             @Override
             public void deleted(String blobId, Iterable<String> ids) {
@@ -107,6 +126,11 @@ public class ActiveDeletedBlobCollectorFactory {
             @Override
             public void commitProgress(IndexProgress indexProgress) {
             }
+
+            @Override
+            public boolean isMarkingForActiveDeletionUnsafe() {
+                return ActiveDeletedBlobCollectorFactory.NOOP.shouldMarkDeletedBlobs();
+            }
         };
     }
 
@@ -144,6 +168,8 @@ public class ActiveDeletedBlobCollectorFactory {
         private final ExecutorService executorService;
 
         private volatile boolean cancelled;
+        private volatile boolean markingDeletedBlobsPaused = false;
+
 
         private static final String BLOB_FILE_PATTERN_PREFIX = "blobs-";
         private static final String BLOB_FILE_PATTERN_SUFFIX = ".txt";
@@ -327,6 +353,16 @@ public class ActiveDeletedBlobCollectorFactory {
             cancelled = true;
         }
 
+        @Override
+        public void pauseMarkingDeletedBlobs(boolean toPause) {
+            markingDeletedBlobsPaused = toPause;
+        }
+
+        @Override
+        public boolean shouldMarkDeletedBlobs() {
+            return markingDeletedBlobsPaused;
+        }
+
         private long readLastCheckedBlobTimestamp() {
             File blobCollectorInfoFile = new File(rootDirectory, "collection-info.txt");
             if (!blobCollectorInfoFile.exists()) {
@@ -485,6 +521,11 @@ public class ActiveDeletedBlobCollectorFactory {
 
                 deletedBlobs.clear();
             }
+
+            @Override
+            public boolean isMarkingForActiveDeletionUnsafe() {
+                return markingDeletedBlobsPaused;
+            }
         }
 
         private class BlobIdInfoStruct {
diff --git a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/OakDirectory.java b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/OakDirectory.java
index 1fd2965c9d..10eb380af8 100644
--- a/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/OakDirectory.java
+++ b/oak-lucene/src/main/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/OakDirectory.java
@@ -35,6 +35,7 @@ import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockFactory;
 import org.apache.lucene.store.NoLockFactory;
+import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nonnull;
@@ -49,6 +50,7 @@ import static com.google.common.base.Preconditions.checkArgument;
 import static org.apache.jackrabbit.JcrConstants.JCR_DATA;
 import static org.apache.jackrabbit.oak.api.Type.BINARIES;
 import static org.apache.jackrabbit.oak.api.Type.BINARY;
+import static org.apache.jackrabbit.oak.api.Type.BOOLEAN;
 import static org.apache.jackrabbit.oak.api.Type.STRINGS;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.INDEX_DATA_CHILD_NAME;
 import static org.apache.jackrabbit.oak.plugins.memory.EmptyNodeState.EMPTY_NODE;
@@ -60,9 +62,11 @@ import static org.apache.jackrabbit.oak.plugins.memory.PropertyStates.createProp
  */
 public class OakDirectory extends Directory {
     static final PerfLogger PERF_LOGGER = new PerfLogger(LoggerFactory.getLogger(OakDirectory.class.getName() + ".perf"));
+    static final Logger LOG = LoggerFactory.getLogger(OakDirectory.class.getName());
     public static final String PROP_DIR_LISTING = "dirListing";
     static final String PROP_BLOB_SIZE = "blobSize";
     static final String PROP_UNIQUE_KEY = "uniqueKey";
+    public static final String PROP_UNSAFE_FOR_ACTIVE_DELETION = "unsafeForActiveDeletion";
     static final int UNIQUE_KEY_SIZE = 16;
 
     private final static SecureRandom secureRandom = new SecureRandom();
@@ -147,19 +151,25 @@ public class OakDirectory extends Directory {
         checkArgument(!readOnly, "Read only directory");
         fileNames.remove(name);
         NodeBuilder f = directoryBuilder.getChildNode(name);
-        PropertyState property = f.getProperty(JCR_DATA);
-        if (property != null) {
-            if (property.getType() == BINARIES || property.getType() == BINARY) {
-                for (Blob b : property.getValue(BINARIES)) {
-                    //Mark the blob as deleted. Also, post index path, type of directory
-                    //(:suggest, :data, etc) and filename being deleted
-                    String blobId = b.getContentIdentity();
-                    if (blobId != null) {
-                        blobDeletionCallback.deleted(blobId,
-                                Lists.newArrayList(definition.getIndexPath(), dataNodeName, name));
+
+        if (!f.hasProperty(PROP_UNSAFE_FOR_ACTIVE_DELETION)
+                || !f.getProperty(PROP_UNSAFE_FOR_ACTIVE_DELETION).getValue(BOOLEAN)) {
+            PropertyState property = f.getProperty(JCR_DATA);
+            if (property != null) {
+                if (property.getType() == BINARIES || property.getType() == BINARY) {
+                    for (Blob b : property.getValue(BINARIES)) {
+                        //Mark the blob as deleted. Also, post index path, type of directory
+                        //(:suggest, :data, etc) and filename being deleted
+                        String blobId = b.getContentIdentity();
+                        if (blobId != null) {
+                            blobDeletionCallback.deleted(blobId,
+                                    Lists.newArrayList(definition.getIndexPath(), dataNodeName, name));
+                        }
                     }
                 }
             }
+        } else {
+            LOG.debug("Not marking {} under {} for active deletion", name, indexName);
         }
         f.remove();
         markDirty();
@@ -198,6 +208,10 @@ public class OakDirectory extends Directory {
         String key = StringUtils.convertBytesToHex(uniqueKey);
         file.setProperty(PROP_UNIQUE_KEY, key);
         file.setProperty(PROP_BLOB_SIZE, definition.getBlobSize());
+        if (blobDeletionCallback.isMarkingForActiveDeletionUnsafe()) {
+            file.setProperty(PROP_UNSAFE_FOR_ACTIVE_DELETION, true);
+            LOG.debug("Setting {} under {} as unsafe for active deletion", name, indexName);
+        }
 
         fileNames.add(name);
         markDirty();
diff --git a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorTest.java b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorTest.java
index fb168cb83d..f7c6a012a9 100644
--- a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorTest.java
+++ b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/ActiveDeletedBlobCollectorTest.java
@@ -464,6 +464,37 @@ public class ActiveDeletedBlobCollectorTest {
         warnLogCustomizer.finished();
     }
 
+    // OAK-6950
+    @Test
+    public void pauseMarkingDeletedBlobs() {
+        BlobDeletionCallback bdc = adbc.getBlobDeletionCallback();
+        assertFalse("Active deletion should be safe by default", bdc.isMarkingForActiveDeletionUnsafe());
+
+        adbc.pauseMarkingDeletedBlobs(true);
+        bdc = adbc.getBlobDeletionCallback();
+        assertTrue("Active deletion should be unsafe", bdc.isMarkingForActiveDeletionUnsafe());
+
+        adbc.pauseMarkingDeletedBlobs(false);
+        bdc = adbc.getBlobDeletionCallback();
+        assertFalse("Active deletion should be safe after unpausing", bdc.isMarkingForActiveDeletionUnsafe());
+    }
+
+    // OAK-6950
+    @Test
+    public void pauseMarkingDeletedBlobsNOOP() {
+        adbc = ActiveDeletedBlobCollectorFactory.NOOP;
+        BlobDeletionCallback bdc = adbc.getBlobDeletionCallback();
+        assertFalse("Active deletion should be safe by default", bdc.isMarkingForActiveDeletionUnsafe());
+
+        adbc.pauseMarkingDeletedBlobs(true);
+        bdc = adbc.getBlobDeletionCallback();
+        assertTrue("Active deletion should be unsafe", bdc.isMarkingForActiveDeletionUnsafe());
+
+        adbc.pauseMarkingDeletedBlobs(false);
+        bdc = adbc.getBlobDeletionCallback();
+        assertFalse("Active deletion should be safe after unpausing", bdc.isMarkingForActiveDeletionUnsafe());
+    }
+
     private void verifyBlobsDeleted(String ... blobIds) throws IOException {
         List<String> chunkIds = new ArrayList<>();
         for (String blobId : blobIds) {
diff --git a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/OakDirectoryTestBase.java b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/OakDirectoryTestBase.java
index a60cdcce4b..703a970fd7 100644
--- a/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/OakDirectoryTestBase.java
+++ b/oak-lucene/src/test/java/org/apache/jackrabbit/oak/plugins/index/lucene/directory/OakDirectoryTestBase.java
@@ -27,10 +27,13 @@ import static org.apache.jackrabbit.oak.InitialContent.INITIAL_CONTENT;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexConstants.INDEX_DATA_CHILD_NAME;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.directory.OakDirectory.PROP_BLOB_SIZE;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.directory.OakDirectory.PROP_UNIQUE_KEY;
+import static org.apache.jackrabbit.oak.plugins.index.lucene.directory.OakDirectory.PROP_UNSAFE_FOR_ACTIVE_DELETION;
 import static org.apache.jackrabbit.oak.plugins.index.lucene.directory.OakDirectory.UNIQUE_KEY_SIZE;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -48,6 +51,7 @@ import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import com.google.common.collect.Sets;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.io.input.NullInputStream;
 import org.apache.jackrabbit.oak.api.Blob;
@@ -505,6 +509,11 @@ abstract public class OakDirectoryTestBase {
                         @Override
                         public void commitProgress(IndexProgress indexProgress) {
                         }
+
+                        @Override
+                        public boolean isMarkingForActiveDeletionUnsafe() {
+                            return false;
+                        }
                     })
                 .build();
 
@@ -519,6 +528,112 @@ abstract public class OakDirectoryTestBase {
         dir.close();
     }
 
+    // OAK-6950
+    @Test
+    public void blobsCreatedWhenActiveDeletionIsUnsafe() throws Exception {
+        final int fileSize = 1;
+
+        IndexDefinition def = new IndexDefinition(root, builder.getNodeState(), "/foo");
+        BlobFactory factory = in -> {
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            IOUtils.copy(in, out);
+            byte[] data = out.toByteArray();
+            return new ArrayBasedBlob(data);
+        };
+
+        final AtomicBoolean markingForceActiveDeletionUnsafe = new AtomicBoolean();
+        OakDirectory dir = getOakDirectoryBuilder(builder, def).setReadOnly(false)
+                .with(factory).
+                        with(
+                                new ActiveDeletedBlobCollectorFactory.BlobDeletionCallback() {
+                                    @Override
+                                    public void deleted(String blobId, Iterable<String> ids) {
+                                    }
+
+                                    @Override
+                                    public void commitProgress(IndexProgress indexProgress) {
+                                    }
+
+                                    @Override
+                                    public boolean isMarkingForActiveDeletionUnsafe() {
+                                        return markingForceActiveDeletionUnsafe.get();
+                                    }
+                                })
+                .build();
+
+        // file1 created before marking was flagged as unsafe
+        writeFile(dir, "file1", fileSize);
+
+        markingForceActiveDeletionUnsafe.set(true);
+
+        // file2 created after marking was flagged as unsafe
+        writeFile(dir, "file2", fileSize);
+        dir.close();
+
+        NodeBuilder dataBuilder = builder.getChildNode(INDEX_DATA_CHILD_NAME);
+
+        assertNull("file1 must not get flagged to be unsafe to be actively deleted",
+                dataBuilder.getChildNode("file1").getProperty(PROP_UNSAFE_FOR_ACTIVE_DELETION));
+        assertTrue("file2 must get flagged to be unsafe to be actively deleted",
+                dataBuilder.getChildNode("file2").getProperty(PROP_UNSAFE_FOR_ACTIVE_DELETION).getValue(Type.BOOLEAN));
+    }
+
+    // OAK-6950
+    @Test
+    public void dontReportFilesMarkedUnsafeForActiveDeletion() throws Exception {
+        IndexDefinition def = new IndexDefinition(root, builder.getNodeState(), "/foo");
+        BlobFactory factory = in -> {
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            IOUtils.copy(in, out);
+            byte[] data = out.toByteArray();
+            return new ArrayBasedBlob(data) {
+                @Override
+                public String getContentIdentity() {
+                    return Long.toString(length() - UNIQUE_KEY_SIZE);
+                }
+            };
+        };
+
+        final AtomicBoolean markingForceActiveDeletionUnsafe = new AtomicBoolean();
+        final Set<String> deletedBlobs = Sets.newHashSet();
+        OakDirectory dir = getOakDirectoryBuilder(builder, def).setReadOnly(false)
+            .with(factory).
+                    with(
+                        new ActiveDeletedBlobCollectorFactory.BlobDeletionCallback() {
+                            @Override
+                            public void deleted(String blobId, Iterable<String> ids) {
+                                deletedBlobs.add(blobId);
+                            }
+
+                            @Override
+                            public void commitProgress(IndexProgress indexProgress) {
+                            }
+
+                            @Override
+                            public boolean isMarkingForActiveDeletionUnsafe() {
+                                return markingForceActiveDeletionUnsafe.get();
+                            }
+                        })
+                .build();
+
+        // file1 created before marking was flagged as unsafe
+        writeFile(dir, "file1", 1);
+
+        markingForceActiveDeletionUnsafe.set(true);
+
+        // file2 created after marking was flagged as unsafe
+        writeFile(dir, "file2", 2);
+
+        dir.deleteFile("file1");
+        dir.deleteFile("file2");
+
+        dir.close();
+
+        assertEquals("Only one blob must be reported as deleted", 1, deletedBlobs.size());
+        assertTrue("file1 blob must be reported as deleted", deletedBlobs.contains("1"));
+        assertFalse("file2 blob must not be reported as deleted", deletedBlobs.contains("2"));
+    }
+
     @Test
     public void blobFactory() throws Exception {
         final AtomicInteger numBlobs = new AtomicInteger();
