Index: src/test/repository/workspaces/indexing-test/indexing-configuration.xml
===================================================================
--- src/test/repository/workspaces/indexing-test/indexing-configuration.xml	(revision 1128221)
+++ src/test/repository/workspaces/indexing-test/indexing-configuration.xml	(working copy)
@@ -46,6 +46,11 @@
         <!-- do not index any properties -->
     </index-rule>
 
+    <aggregate primaryType="nt:folder">
+        <include primaryType="nt:folder">*</include>
+        <include primaryType="nt:file">*</include>
+    </aggregate>
+
     <aggregate primaryType="nt:file">
         <include>jcr:content</include>
         <include>jcr:content/*</include>
Index: src/test/java/org/apache/jackrabbit/core/query/lucene/SQL2IndexingAggregateTest.java
===================================================================
--- src/test/java/org/apache/jackrabbit/core/query/lucene/SQL2IndexingAggregateTest.java	(revision 1128325)
+++ src/test/java/org/apache/jackrabbit/core/query/lucene/SQL2IndexingAggregateTest.java	(working copy)
@@ -46,6 +46,74 @@
         testRootNode.getSession().save();
     }
 
+    /**
+     * 
+     * this test is very similar to
+     * {@link SQL2IndexingAggregateTest#testNtFileAggregate()
+     * testNtFileAggregate} but checks embedded index aggregates.
+     * 
+     * The aggregation hierarchy is defined in
+     * src/test/repository/workspaces/indexing-test/indexing-configuration.xml
+     * 
+     * basically a folder aggregates other folders and files that aggregate a
+     * stream of content.
+     * 
+     * see <a href="https://issues.apache.org/jira/browse/JCR-2989">JCR-2989</a>
+     * 
+     */
+    public void testDeepHierarchy() throws Exception {
+        int levelsDeep = 5;
+
+        String sqlBase = "SELECT * FROM [nt:folder] as f"
+                + " WHERE ISCHILDNODE([" + testRoot + "])";
+        String sqlCat = sqlBase + " AND CONTAINS (f.*, 'cat')";
+        String sqlDog = sqlBase + " AND CONTAINS (f.*, 'dog')";
+
+        ByteArrayOutputStream out = new ByteArrayOutputStream();
+        Writer writer = new OutputStreamWriter(out, "UTF-8");
+        writer.write("the quick brown fox jumps over the lazy dog.");
+        writer.flush();
+
+        Node folderRoot = testRootNode.addNode("myFolder", "nt:folder");
+        Node folderChild = folderRoot;
+        for (int i = 0; i < levelsDeep; i++) {
+            folderChild = folderChild.addNode("0" + i, "nt:folder");
+        }
+
+        Node file = folderChild.addNode("myFile", "nt:file");
+        Node resource = file.addNode("jcr:content", "nt:resource");
+        resource.setProperty("jcr:lastModified", Calendar.getInstance());
+        resource.setProperty("jcr:encoding", "UTF-8");
+        resource.setProperty("jcr:mimeType", "text/plain");
+        resource.setProperty("jcr:data", session.getValueFactory()
+                .createBinary(new ByteArrayInputStream(out.toByteArray())));
+
+        testRootNode.getSession().save();
+        executeSQL2Query(sqlDog, new Node[] { folderRoot });
+
+        // update jcr:data
+        out.reset();
+        writer.write("the quick brown fox jumps over the lazy cat.");
+        writer.flush();
+        resource.setProperty("jcr:data", session.getValueFactory()
+                .createBinary(new ByteArrayInputStream(out.toByteArray())));
+        testRootNode.getSession().save();
+        executeSQL2Query(sqlCat, new Node[] { folderRoot });
+
+        // replace jcr:content with unstructured
+        resource.remove();
+        Node unstrContent = file.addNode("jcr:content", "nt:unstructured");
+        Node foo = unstrContent.addNode("foo");
+        foo.setProperty("text", "the quick brown fox jumps over the lazy dog.");
+        testRootNode.getSession().save();
+        executeSQL2Query(sqlDog, new Node[] { folderRoot });
+        
+        // remove foo
+        foo.remove();
+        testRootNode.getSession().save();
+        executeSQL2Query(sqlDog, new Node[] {});
+    }
+
     public void testNtFileAggregate() throws Exception {
 
         String sqlBase = "SELECT * FROM [nt:file] as f"
Index: src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java
===================================================================
--- src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java	(revision 1131652)
+++ src/main/java/org/apache/jackrabbit/core/query/lucene/SearchIndex.java	(working copy)
@@ -484,6 +484,12 @@
      * anymore.
      */
     private boolean closed = false;
+    
+    /**
+     * Recursion limit for same-type aggregation: embedded aggregation of
+     * nodes that have the same node type can only go this many levels up.
+     */
+    private static final int SAME_NODE_TYPE_AGGREGATION_LIMIT = 100;
 
     /**
      * Initializes this <code>QueryHandler</code>. This implementation requires
@@ -1583,29 +1589,72 @@
 
     /**
      * Retrieves the root of the indexing aggregate for <code>state</code> and
-     * puts it into <code>map</code>.
+     * puts it into the <code>aggregates</code> map.
      *
      * @param state the node state for which we want to retrieve the aggregate
      *              root.
-     * @param map   aggregate roots are collected in this map.
+     * @param aggregates aggregate roots are collected in this map.
      */
-    protected void retrieveAggregateRoot(
-            NodeState state, Map<NodeId, NodeState> map) {
-        if (indexingConfig != null) {
-            AggregateRule[] aggregateRules = indexingConfig.getAggregateRules();
-            if (aggregateRules == null) {
-                return;
-            }
+    protected void retrieveAggregateRoot(NodeState state,
+            Map<NodeId, NodeState> aggregates) {
+        retrieveAggregateRoot(state, aggregates, state.getNodeId().toString(), 0);
+    }
+    
+    /**
+     * Retrieves the root of the indexing aggregate for <code>state</code> and
+     * puts it into <code>aggregates</code> map.
+     * 
+     * @param state
+     *            the node state for which we want to retrieve the aggregate
+     *            root.
+     * @param aggregates
+     *            aggregate roots are collected in this map.
+     * @param originNodeId
+     *            the originating node, used for reporting only
+     * @param level
+     *            current aggregation level, used to limit recursive aggregation
+     *            of nodes that have the same type
+     */
+    private void retrieveAggregateRoot(NodeState state,
+            Map<NodeId, NodeState> aggregates, String originNodeId, int level) {
+        if (indexingConfig == null) {
+            return;
+        }
+        AggregateRule[] aggregateRules = indexingConfig.getAggregateRules();
+        if (aggregateRules == null) {
+            return;
+        }
+        for (AggregateRule aggregateRule : aggregateRules) {
+            NodeState root = null;
             try {
-                for (AggregateRule aggregateRule : aggregateRules) {
-                    NodeState root = aggregateRule.getAggregateRoot(state);
-                    if (root != null) {
-                        map.put(root.getNodeId(), root);
-                    }
-                }
+                root = aggregateRule.getAggregateRoot(state);
             } catch (Exception e) {
-                log.warn("Unable to get aggregate root for "
-                        + state.getNodeId(), e);
+                log.warn("Unable to get aggregate root for " + state.getNodeId(), e);
+            }
+            if (root == null) {
+                continue;
+            }
+
+            // JCR-2989 Support for embedded index aggregates
+            // if the aggregate root is already present in the map, then
+            // all of its ancestors are already there as well, so this
+            // subtree can be skipped
+            if (aggregates.put(root.getNodeId(), root) == null) {
+                if (root.getNodeTypeName().equals(state.getNodeTypeName())) {
+                    level++;
+                } else {
+                    level = 0;
+                }
+                // soft cap on the recursion depth to guard against
+                // runaway recursive aggregation
+                if (level < SAME_NODE_TYPE_AGGREGATION_LIMIT) {
+                    retrieveAggregateRoot(root, aggregates, originNodeId, level);
+                } else {
+                    log.warn(
+                            "Reached {} levels of recursive aggregation for nodeId {}, type {}, will stop at nodeId {}. Are you sure this is not a mistake? Please check the indexing-configuration.xml.",
+                            new Object[] { level, originNodeId,
+                                    root.getNodeTypeName(), root.getNodeId() });
+                }
             }
         }
     }
@@ -1614,11 +1663,11 @@
      * Retrieves the root of the indexing aggregate for <code>removedIds</code>
      * and puts it into <code>map</code>.
      *
-     * @param removedIds     the ids of removed nodes.
-     * @param map            aggregate roots are collected in this map
+     * @param removedIds the ids of removed nodes.
+     * @param aggregates aggregate roots are collected in this map
      */
     protected void retrieveAggregateRoot(
-            Set<NodeId> removedIds, Map<NodeId, NodeState> map) {
+            Set<NodeId> removedIds, Map<NodeId, NodeState> aggregates) {
         if (indexingConfig != null) {
             AggregateRule[] aggregateRules = indexingConfig.getAggregateRules();
             if (aggregateRules == null) {
@@ -1642,8 +1691,14 @@
                                 Document doc = reader.document(
                                         tDocs.doc(), FieldSelectors.UUID);
                                 NodeId nId = new NodeId(doc.get(FieldNames.UUID));
-                                map.put(nId, (NodeState) ism.getItemState(nId));
+                                NodeState nodeState = (NodeState) ism.getItemState(nId);
+                                aggregates.put(nId, nodeState);
                                 found++;
+                                
+                                // JCR-2989 Support for embedded index aggregates
+                                int sizeBefore = aggregates.size();
+                                retrieveAggregateRoot(nodeState, aggregates);
+                                found += aggregates.size() - sizeBefore;
                             }
                         }
                     } finally {
